docs/docsgen/source/api/helper.md (20 additions, 133 deletions)
@@ -6,136 +6,9 @@
.. currentmodule:: onnx.helper
```

```{eval-rst}
.. autosummary::

find_min_ir_version_for
get_all_tensor_dtypes
get_attribute_value
get_node_attr_value
set_metadata_props
set_model_props
float32_to_bfloat16
float32_to_float8e4m3
float32_to_float8e5m2
make_attribute
make_attribute_ref
make_empty_tensor_value_info
make_function
make_graph
make_map
make_map_type_proto
make_model
make_node
make_operatorsetid
make_opsetid
make_model_gen_version
make_optional
make_optional_type_proto
make_sequence
make_sequence_type_proto
make_sparse_tensor
make_sparse_tensor_type_proto
make_sparse_tensor_value_info
make_tensor
make_tensor_sequence_value_info
make_tensor_type_proto
make_training_info
make_tensor_value_info
make_value_info
np_dtype_to_tensor_dtype
printable_attribute
printable_dim
printable_graph
printable_node
printable_tensor_proto
printable_type
printable_value_info
split_complex_to_pairs
create_op_set_id_version_map
strip_doc_string
pack_float32_to_4bit
tensor_dtype_to_np_dtype
tensor_dtype_to_storage_tensor_dtype
tensor_dtype_to_string
tensor_dtype_to_field
```

## getter

```{eval-rst}
.. autofunction:: onnx.helper.get_attribute_value
```

```{eval-rst}
.. autofunction:: onnx.helper.get_node_attr_value
```

## setter

```{eval-rst}
.. autofunction:: onnx.helper.set_metadata_props
```

```{eval-rst}
.. autofunction:: onnx.helper.set_model_props
```

## print

```{eval-rst}
.. autofunction:: onnx.helper.printable_attribute
```

```{eval-rst}
.. autofunction:: onnx.helper.printable_dim
```

```{eval-rst}
.. autofunction:: onnx.helper.printable_graph
```

```{eval-rst}
.. autofunction:: onnx.helper.printable_node
```

```{eval-rst}
.. autofunction:: onnx.helper.printable_tensor_proto
```

```{eval-rst}
.. autofunction:: onnx.helper.printable_type
```

```{eval-rst}
.. autofunction:: onnx.helper.printable_value_info
```

## tools

```{eval-rst}
.. autofunction:: onnx.helper.find_min_ir_version_for
```

```{eval-rst}
.. autofunction:: onnx.helper.split_complex_to_pairs
```

```{eval-rst}
.. autofunction:: onnx.helper.create_op_set_id_version_map
```

```{eval-rst}
.. autofunction:: onnx.helper.strip_doc_string
```

```{eval-rst}
.. autofunction:: onnx.helper.pack_float32_to_4bit
```

(l-onnx-make-function)=

## make function
## Helper functions to make ONNX graph components

All functions used to create an ONNX graph.
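For orientation, here is a minimal sketch (not part of this diff) that chains several of the `make_*` helpers documented in this section to build and validate a one-node model; the graph name and the opset version 21 are arbitrary choices for the example:

```python
import onnx
from onnx import TensorProto, helper

# Declare graph inputs/outputs, a single Relu node, and wrap them in a model.
X = helper.make_tensor_value_info("X", TensorProto.FLOAT, [None, 2])
Y = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [None, 2])
relu = helper.make_node("Relu", inputs=["X"], outputs=["Y"])

graph = helper.make_graph([relu], "tiny_graph", inputs=[X], outputs=[Y])
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 21)])

onnx.checker.check_model(model)  # validates the generated proto
```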

@@ -239,7 +112,7 @@ All functions used to create an ONNX graph.
.. autofunction:: onnx.helper.make_value_info
```

## type mappings
## Type Mappings

```{eval-rst}
.. autofunction:: onnx.helper.get_all_tensor_dtypes
@@ -265,16 +138,30 @@
.. autofunction:: onnx.helper.tensor_dtype_to_string
```

## cast
## Tools

```{eval-rst}
.. autofunction:: onnx.helper.float32_to_bfloat16
.. autofunction:: onnx.helper.find_min_ir_version_for
```

```{eval-rst}
.. autofunction:: onnx.helper.float32_to_float8e4m3
.. autofunction:: onnx.helper.create_op_set_id_version_map
```

## Other functions

```{eval-rst}
.. autofunction:: onnx.helper.float32_to_float8e5m2
.. autosummary::

get_attribute_value
get_node_attr_value
set_metadata_props
set_model_props
printable_attribute
printable_dim
printable_graph
printable_node
printable_tensor_proto
printable_type
printable_value_info
```
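The new layout drops the `float32_to_*` cast helpers from the public docs; their deprecation message points to `ml_dtypes`. A minimal sketch of the suggested replacement path, assuming the `ml_dtypes` package is installed:

```python
import numpy as np
import ml_dtypes

x = np.array([0.1, 1.5, -3.0], dtype=np.float32)

bf16 = x.astype(ml_dtypes.bfloat16)          # replaces float32_to_bfloat16 (round-to-nearest-even)
f8_e4m3 = x.astype(ml_dtypes.float8_e4m3fn)  # replaces float32_to_float8e4m3
f8_e5m2 = x.astype(ml_dtypes.float8_e5m2)    # replaces float32_to_float8e5m2

# The old helpers returned raw bit patterns as ints; the same bits are a view away:
print(bf16.view(np.uint16))  # 1.5 -> 0x3FC0
```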
docs/docsgen/source/api/numpy_helper.md (0 additions, 35 deletions)
@@ -7,9 +7,6 @@
```{eval-rst}
.. autosummary::

bfloat16_to_float32
float8e4m3_to_float32
float8e5m2_to_float32
from_array
from_dict
from_list
@@ -65,35 +62,3 @@ these two functions use a custom dtype defined in :mod:`onnx._custom_element_types`
```{eval-rst}
.. autofunction:: onnx.numpy_helper.from_optional
```

## tools

```{eval-rst}
.. autofunction:: onnx.numpy_helper.convert_endian
```

```{eval-rst}
.. autofunction:: onnx.numpy_helper.combine_pairs_to_complex
```

```{eval-rst}
.. autofunction:: onnx.numpy_helper.create_random_int
```

```{eval-rst}
.. autofunction:: onnx.numpy_helper.unpack_int4
```

## cast

```{eval-rst}
.. autofunction:: onnx.numpy_helper.bfloat16_to_float32
```

```{eval-rst}
.. autofunction:: onnx.numpy_helper.float8e4m3_to_float32
```

```{eval-rst}
.. autofunction:: onnx.numpy_helper.float8e5m2_to_float32
```
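The removed `numpy_helper` casts covered the decode direction (`bfloat16_to_float32`, `float8e4m3_to_float32`, `float8e5m2_to_float32`). A sketch of the equivalent with `ml_dtypes`, assuming a raw little-endian payload such as `TensorProto.raw_data`:

```python
import numpy as np
import ml_dtypes

# Encode two exactly representable values so the round trip is lossless.
raw = np.array([1.5, -2.0], dtype=np.float32).astype(ml_dtypes.bfloat16).tobytes()

decoded = np.frombuffer(raw, dtype=ml_dtypes.bfloat16).astype(np.float32)
print(decoded)  # [ 1.5 -2. ]

# float8 payloads decode the same way, one byte per element:
one = np.frombuffer(b"\x38", dtype=ml_dtypes.float8_e4m3fn).astype(np.float32)
print(one)  # [1.] -- 0x38 is 1.0 in float8e4m3fn
```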
onnx/helper.py (55 additions, 14 deletions)
@@ -18,6 +18,7 @@

import google.protobuf.message
import numpy as np
import typing_extensions

import onnx._custom_element_types as custom_np_types
from onnx import (
@@ -356,20 +357,28 @@
set_metadata_props(model, dict_value)


def split_complex_to_pairs(ca: Sequence[np.complex64]) -> Sequence[int]:
def _split_complex_to_pairs(ca: Sequence[np.complex64]) -> Sequence[int]:
return [
(ca[i // 2].real if (i % 2 == 0) else ca[i // 2].imag) # type: ignore[misc]
for i in range(len(ca) * 2)
]


@typing_extensions.deprecated(
"Deprecated since 1.18. Scheduled to remove in 1.20. Consider using libraries like ml_dtypes for dtype conversion",
category=FutureWarning,
)
def float32_to_bfloat16(*args, **kwargs) -> int:
return _float32_to_bfloat16(*args, **kwargs)


# convert a float32 value to a bfloat16 (as int)
# By default, this conversion rounds-to-nearest-even and supports NaN
# Setting `truncate` to True enables a simpler conversion. In this mode the
# conversion is performed by simply dropping the 2 least significant bytes of
# the significand. In this mode an error of up to 1 bit may be introduced and
# preservation of NaN values is not guaranteed.
def float32_to_bfloat16(fval: float, truncate: bool = False) -> int:
def _float32_to_bfloat16(fval: float, truncate: bool = False) -> int:
ival = int.from_bytes(struct.pack("<f", fval), "little")
if truncate:
return ival >> 16
@@ -382,7 +391,15 @@
return (ival + rounded) >> 16


def float32_to_float8e4m3( # noqa: PLR0911
@typing_extensions.deprecated(
"Deprecated since 1.18. Scheduled to remove in 1.20. Consider using libraries like ml_dtypes for dtype conversion",
category=FutureWarning,
)
def float32_to_float8e4m3(*args, **kwargs) -> int:
return _float32_to_float8e4m3(*args, **kwargs)


def _float32_to_float8e4m3( # noqa: PLR0911
fval: float,
scale: float = 1.0,
fn: bool = True,
@@ -516,7 +533,15 @@
return int(ret)


def float32_to_float8e5m2( # noqa: PLR0911
@typing_extensions.deprecated(
"Deprecated since 1.18. Scheduled to remove in 1.20. Consider using libraries like ml_dtypes for dtype conversion",
category=FutureWarning,
)
def float32_to_float8e5m2(*args: Any, **kwargs: Any) -> int:
return _float32_to_float8e5m2(*args, **kwargs)


def _float32_to_float8e5m2( # noqa: PLR0911
fval: float,
scale: float = 1.0,
fn: bool = False,
@@ -642,7 +667,15 @@
raise NotImplementedError("fn and uz must be both False or True.")


@typing_extensions.deprecated(
"Deprecated since 1.18. Scheduled to remove in 1.20. Consider using libraries like ml_dtypes for dtype conversion",
category=FutureWarning,
)
def pack_float32_to_4bit(array: np.ndarray | Sequence, signed: bool) -> np.ndarray:
return _pack_float32_to_4bit(array, signed)



def _pack_float32_to_4bit(array: np.ndarray | Sequence, signed: bool) -> np.ndarray:
"""Convert an array of float32 value to a 4bit data-type and pack every two concecutive elements in a byte.
See :ref:`onnx-detail-int4` for technical details.

@@ -662,15 +695,23 @@
array_flat = np.append(array_flat, np.array([0]))

def single_func(x, y) -> np.ndarray:
return subbyte.float32x2_to_4bitx2(x, y, signed)
return subbyte._float32x2_to_4bitx2(x, y, signed)

func = np.frompyfunc(single_func, 2, 1)

arr: np.ndarray = func(array_flat[0::2], array_flat[1::2])
return arr.astype(np.uint8)


@typing_extensions.deprecated(
"Deprecated since 1.18. Scheduled to remove in 1.20. Consider using libraries like ml_dtypes for dtype conversion",
category=FutureWarning,
)
def pack_float32_to_float4e2m1(array: np.ndarray | Sequence) -> np.ndarray:
return _pack_float32_to_float4e2m1(array)



def _pack_float32_to_float4e2m1(array: np.ndarray | Sequence) -> np.ndarray:
"""Convert an array of float32 value to float4e2m1 and pack every two concecutive elements in a byte.
See :ref:`onnx-detail-float4` for technical details.

@@ -688,7 +729,7 @@
if is_odd_volume:
array_flat = np.append(array_flat, np.array([0]))

arr = subbyte.float32x2_to_float4e2m1x2(array_flat[0::2], array_flat[1::2])
arr = subbyte._float32x2_to_float4e2m1x2(array_flat[0::2], array_flat[1::2])
return arr.astype(np.uint8)
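For readers unfamiliar with the two-per-byte layout the packers above produce, here is a rough plain-numpy equivalent of the signed int4 path. This is a sketch, not the library's implementation; it assumes the first element lands in the low nibble, per the int4 packing convention the docstrings reference:

```python
import numpy as np

def pack_int4_pairs(values) -> np.ndarray:
    """Round float32 values to signed int4 and pack two nibbles per byte."""
    q = np.clip(np.rint(np.asarray(values, dtype=np.float32)), -8, 7).astype(np.int8)
    flat = q.ravel()
    if flat.size % 2:                         # odd-length input: pad with a zero element
        flat = np.append(flat, np.int8(0))
    nibbles = (flat & 0x0F).astype(np.uint8)  # two's-complement low nibbles
    return nibbles[0::2] | (nibbles[1::2] << 4)

print(pack_int4_pairs([1.0, -2.0, 7.4]))  # [225   7] -> 0xE1 (1 low, -2 high), 0x07 (7, pad 0)
```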


@@ -759,7 +800,7 @@
tensor.raw_data = vals
else:
if data_type in (TensorProto.COMPLEX64, TensorProto.COMPLEX128):
vals = split_complex_to_pairs(vals)
vals = _split_complex_to_pairs(vals)

elif data_type == TensorProto.FLOAT16:
vals = (
np.array(vals).astype(np_dtype).view(dtype=np.uint16).flatten().tolist()
@@ -772,13 +813,13 @@
TensorProto.FLOAT8E5M2FNUZ,
):
fcast = {
TensorProto.BFLOAT16: float32_to_bfloat16,
TensorProto.FLOAT8E4M3FN: float32_to_float8e4m3,
TensorProto.FLOAT8E4M3FNUZ: lambda *args: float32_to_float8e4m3( # type: ignore[misc]
TensorProto.BFLOAT16: _float32_to_bfloat16,
TensorProto.FLOAT8E4M3FN: _float32_to_float8e4m3,
TensorProto.FLOAT8E4M3FNUZ: lambda *args: _float32_to_float8e4m3( # type: ignore[misc]
*args, uz=True
),
TensorProto.FLOAT8E5M2: float32_to_float8e5m2,
TensorProto.FLOAT8E5M2FNUZ: lambda *args: float32_to_float8e5m2( # type: ignore[misc]
TensorProto.FLOAT8E5M2: _float32_to_float8e5m2,
TensorProto.FLOAT8E5M2FNUZ: lambda *args: _float32_to_float8e5m2( # type: ignore[misc]
*args, fn=True, uz=True
),
}[
@@ -801,9 +842,9 @@
# to uint8 regardless of the value of 'signed'. Using int8 would cause
# the size of int4 tensors to increase ~5x if the tensor contains negative values (due to
# the way negative values are serialized by protobuf).
vals = pack_float32_to_4bit(vals, signed=signed).flatten().tolist()
vals = _pack_float32_to_4bit(vals, signed=signed).flatten().tolist()
elif data_type == TensorProto.FLOAT4E2M1:
vals = pack_float32_to_float4e2m1(vals).flatten().tolist()
vals = _pack_float32_to_float4e2m1(vals).flatten().tolist()
elif data_type == TensorProto.BOOL:
vals = np.array(vals).astype(int)
elif data_type == TensorProto.STRING:
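Taken together, the wrappers keep the public names importable while routing real work to the private `_`-prefixed implementations, with `typing_extensions.deprecated` emitting a `FutureWarning` on each call. A sketch of what callers should expect, assuming an onnx build that includes this change:

```python
import warnings
from onnx import helper

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    bits = helper.float32_to_bfloat16(1.0)  # still works, but warns

print(hex(bits))           # 0x3f80 -- the bfloat16 bit pattern of 1.0
print(caught[0].category)  # <class 'FutureWarning'>, per the decorator's category
```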