torchao/dtypes/affine_quantized_tensor_ops.py (10 changes: 5 additions & 5 deletions)
@@ -130,8 +130,8 @@ def deregister_aqt_quantized_linear_dispatch(dispatch_condition):
     if dispatch_condition in _AQT_QLINEAR_DISPATCH_TABLE:
         del _AQT_QLINEAR_DISPATCH_TABLE[dispatch_condition]
     else:
-        logger.warn(
-            f"Attempting to remove non-existant dispatch condition {dispatch_condition}"
+        logger.warning(
+            f"Attempting to remove non-existent dispatch condition {dispatch_condition}"
         )
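
The underlying fix here: Logger.warn is an undocumented alias for Logger.warning, deprecated since Python 3.3, and recent CPython versions emit a DeprecationWarning when it is called. A minimal standalone sketch of the difference (standard library only, not torchao code):

    import logging
    import warnings

    logging.basicConfig(level=logging.WARNING)
    logger = logging.getLogger(__name__)

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        logger.warn("deprecated spelling")    # alias; emits a DeprecationWarning on recent CPython
        logger.warning("canonical spelling")  # the supported method

    # Expected to include: "The 'warn' method is deprecated, use 'warning' instead"
    print([str(w.message) for w in caught])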


@@ -274,7 +274,7 @@ def _(func, types, args, kwargs):
     try:
         return weight_tensor._quantized_linear_op(input_tensor, weight_tensor, bias)
     except QuantizedLinearNotImplementedError as e:
-        # fallback path is only called when user did not specify a specfic quantized linear implementation with `_layout.quantized_linear_impl`
+        # fallback path is only called when user did not specify a specific quantized linear implementation with `_layout.quantized_linear_impl`
         if (
             isinstance(weight_tensor, AffineQuantizedTensor)
             and hasattr(weight_tensor._layout, "quantized_linear_impl")
@@ -363,7 +363,7 @@ def _(func, types, args, kwargs):
             input_tensor, transposed_weight_tensor, bias
         )
     except QuantizedLinearNotImplementedError as e:
-        # fallback path is only called when user did not specify a specfic quantized linear implementation with `_layout.quantized_linear_impl`
+        # fallback path is only called when user did not specify a specific quantized linear implementation with `_layout.quantized_linear_impl`
         if (
             isinstance(weight_tensor, AffineQuantizedTensor)
             and hasattr(weight_tensor._layout, "quantized_linear_impl")
@@ -397,7 +397,7 @@ def _(func, types, args, kwargs):
             input_tensor, transposed_weight_tensor, bias
         )
     except QuantizedLinearNotImplementedError as e:
-        # fallback path is only called when user did not specify a specfic quantized linear implementation with `_layout.quantized_linear_impl`
+        # fallback path is only called when user did not specify a specific quantized linear implementation with `_layout.quantized_linear_impl`
         if (
             isinstance(weight_tensor, AffineQuantizedTensor)
             and hasattr(weight_tensor._layout, "quantized_linear_impl")
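
For context on the comment these hunks correct: _quantized_linear_op walks a table of (dispatch condition, implementation) pairs, and the except branch only falls back to a dequantize-then-float-linear path when the layout did not pin a specific implementation via _layout.quantized_linear_impl. A rough sketch of that control flow, using simplified stand-in types rather than the real torchao tensor subclasses:

    import torch
    import torch.nn.functional as F

    _AQT_QLINEAR_DISPATCH_TABLE = {}  # dispatch condition -> quantized linear impl

    class QuantizedLinearNotImplementedError(NotImplementedError):
        pass

    def _quantized_linear_op(input_tensor, weight_tensor, bias):
        # First registered condition that matches wins.
        for dispatch_condition, impl in _AQT_QLINEAR_DISPATCH_TABLE.items():
            if dispatch_condition(input_tensor, weight_tensor, bias):
                return impl(input_tensor, weight_tensor, bias)
        raise QuantizedLinearNotImplementedError("no dispatch condition matched")

    def linear_with_fallback(input_tensor, weight_tensor, bias=None):
        try:
            return _quantized_linear_op(input_tensor, weight_tensor, bias)
        except QuantizedLinearNotImplementedError:
            # A layout that pinned a specific impl should never reach the
            # fallback, so the real code re-raises in that case; otherwise
            # dequantize and run the ordinary float kernel.
            layout = getattr(weight_tensor, "_layout", None)
            if layout is not None and hasattr(layout, "quantized_linear_impl"):
                raise
            return F.linear(input_tensor, weight_tensor.dequantize(), bias)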
torchao/quantization/quant_api.py (2 changes: 1 addition & 1 deletion)

@@ -1830,7 +1830,7 @@ def _uintx_weight_only_transform(

     if use_hqq:
         if dtype == torch.uint4:
-            logger.warn(
+            logger.warning(
                 "Recommended to use `int4_weight_only(group_size, use_hqq=True)` for the best performance"
             )
         quant_min, quant_max = _DTYPE_TO_QVALUE_BOUNDS[dtype]
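
The warning touched in this hunk steers users toward the int4 path. A hedged usage sketch of that recommendation (quantize_ and int4_weight_only come from torchao.quantization; exact names and defaults have varied across torchao releases, so treat this as illustrative):

    import torch
    from torchao.quantization import quantize_, int4_weight_only

    # A toy model; int4 weight-only quantization targets the Linear layers.
    model = torch.nn.Sequential(torch.nn.Linear(1024, 1024)).to(torch.bfloat16).cuda()

    # The spelling the warning recommends, rather than
    # uintx_weight_only(torch.uint4, use_hqq=True).
    quantize_(model, int4_weight_only(group_size=128, use_hqq=True))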