

Commit ae423bd

jaingaurav authored and tensorflower-gardener committed
Rollback: Add uint32 & uint64 to TF_CALL_INTEGRAL_TYPES
PiperOrigin-RevId: 317488920
Change-Id: I65736e7f4a1004ff634194343dc4ec237a227a19
1 parent c654edb commit ae423bd

27 files changed: +63 -71 lines

tensorflow/core/framework/register_types.h

Lines changed: 14 additions & 7 deletions
@@ -153,9 +153,16 @@ limitations under the License.
 #endif  // defined(IS_MOBILE_PLATFORM)  - end of TF_CALL_type defines
 
 // Defines for sets of types.
-#define TF_CALL_INTEGRAL_TYPES(m) \
-  TF_CALL_uint64(m) TF_CALL_int64(m) TF_CALL_uint32(m) TF_CALL_int32(m) \
-  TF_CALL_uint16(m) TF_CALL_int16(m) TF_CALL_uint8(m) TF_CALL_int8(m)
+
+// TODO(b/111604096): Add uint32 and uint64 to TF_CALL_INTEGRAL_TYPES.
+//
+// The uint32 and uint64 types were introduced in 10/2017 to be used via XLA and
+// thus were not included in TF_CALL_INTEGRAL_TYPES. Including them in
+// TF_CALL_INTEGRAL_TYPES should only happen after evaluating the effect on the
+// TF binary size and performance.
+#define TF_CALL_INTEGRAL_TYPES(m) \
+  TF_CALL_int64(m) TF_CALL_int32(m) TF_CALL_uint16(m) TF_CALL_int16(m) \
+  TF_CALL_uint8(m) TF_CALL_int8(m)
 
 #define TF_CALL_FLOAT_TYPES(m) \
   TF_CALL_half(m) TF_CALL_bfloat16(m) TF_CALL_float(m) TF_CALL_double(m)
@@ -167,10 +174,10 @@ limitations under the License.
 #define TF_CALL_REAL_NUMBER_TYPES_NO_BFLOAT16(m) \
   TF_CALL_INTEGRAL_TYPES(m) TF_CALL_half(m) TF_CALL_float(m) TF_CALL_double(m)
 
-#define TF_CALL_REAL_NUMBER_TYPES_NO_INT32(m) \
-  TF_CALL_half(m) TF_CALL_bfloat16(m) TF_CALL_float(m) TF_CALL_double(m) \
-  TF_CALL_uint64(m) TF_CALL_int64(m) TF_CALL_uint32(m) TF_CALL_uint16(m) \
-  TF_CALL_int16(m) TF_CALL_uint8(m) TF_CALL_int8(m)
+#define TF_CALL_REAL_NUMBER_TYPES_NO_INT32(m) \
+  TF_CALL_half(m) TF_CALL_bfloat16(m) TF_CALL_float(m) TF_CALL_double(m) \
+  TF_CALL_int64(m) TF_CALL_uint16(m) TF_CALL_int16(m) TF_CALL_uint8(m) \
+  TF_CALL_int8(m)
 
 #define TF_CALL_COMPLEX_TYPES(m) TF_CALL_complex64(m) TF_CALL_complex128(m)
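Context for the hunk above: the TF_CALL_<type>(m) macros follow the X-macro pattern, where each one expands to m(<type>) on supported platforms, so a group macro such as TF_CALL_INTEGRAL_TYPES(m) stamps out one instantiation or registration per listed type. That is why adding uint32 and uint64 to the group carries a direct binary-size cost. A minimal, self-contained sketch of the pattern, with simplified names rather than the real register_types.h machinery:

    #include <cstdint>
    #include <cstdio>

    // Simplified stand-ins for the TF_CALL_<type> macros: each one forwards
    // the macro argument `m` to a concrete C++ type.
    #define CALL_int64(m)  m(std::int64_t)
    #define CALL_int32(m)  m(std::int32_t)
    #define CALL_uint16(m) m(std::uint16_t)
    #define CALL_int16(m)  m(std::int16_t)
    #define CALL_uint8(m)  m(std::uint8_t)
    #define CALL_int8(m)   m(std::int8_t)

    // Group macro, analogous to TF_CALL_INTEGRAL_TYPES after this rollback:
    // uint32 and uint64 are deliberately absent.
    #define CALL_INTEGRAL_TYPES(m)                             \
      CALL_int64(m) CALL_int32(m) CALL_uint16(m) CALL_int16(m) \
      CALL_uint8(m) CALL_int8(m)

    // A toy "kernel"; one explicit instantiation is emitted per type the
    // group macro lists, which is what grows the binary as the list grows.
    template <typename T>
    struct FillKernel {
      static void Run() { std::printf("element size: %zu bytes\n", sizeof(T)); }
    };

    #define REGISTER(T) template struct FillKernel<T>;
    CALL_INTEGRAL_TYPES(REGISTER)  // expands to six explicit instantiations
    #undef REGISTER

    int main() {
      FillKernel<std::int32_t>::Run();  // covered by the group macro
      // FillKernel<std::uint32_t> is not covered; it would need an explicit
      // per-type call such as CALL_uint32(REGISTER).
      return 0;
    }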

tensorflow/core/framework/types.cc

Lines changed: 5 additions & 0 deletions
@@ -238,6 +238,11 @@ int DataTypeSize(DataType dt) {
     TF_CALL_qint16(CASE);
     TF_CALL_quint16(CASE);
 
+    // uint32 and uint64 aren't included in TF_CALL_POD_TYPES because we
+    // don't want to define kernels for them at this stage to avoid binary
+    // bloat.
+    TF_CALL_uint32(CASE);
+    TF_CALL_uint64(CASE);
     default:
       return 0;
   }
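For reference, the CASE invocations above live inside a switch built from a local macro, and because uint32 and uint64 are not part of TF_CALL_POD_TYPES their cases must be listed explicitly for DataTypeSize to report their sizes. A rough, self-contained sketch of that shape, using hypothetical stand-ins for the DataType enum and the DataTypeToEnum trait rather than the real types.cc:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Hypothetical mirror of a tiny subset of TensorFlow's DataType enum and
    // the DataTypeToEnum<T> trait; names and values are illustrative only.
    enum DataType { DT_INT32 = 3, DT_UINT32 = 22, DT_UINT64 = 23 };

    template <typename T> struct DataTypeToEnum;
    template <> struct DataTypeToEnum<std::int32_t> {
      static constexpr DataType value = DT_INT32;
    };
    template <> struct DataTypeToEnum<std::uint32_t> {
      static constexpr DataType value = DT_UINT32;
    };
    template <> struct DataTypeToEnum<std::uint64_t> {
      static constexpr DataType value = DT_UINT64;
    };

    // Simplified per-type call macros in the spirit of register_types.h.
    #define CALL_int32(m)  m(std::int32_t)
    #define CALL_uint32(m) m(std::uint32_t)
    #define CALL_uint64(m) m(std::uint64_t)

    std::size_t DataTypeSize(DataType dt) {
    #define CASE(T) \
      case DataTypeToEnum<T>::value: return sizeof(T);
      switch (dt) {
        CALL_int32(CASE);
        // uint32 and uint64 are not covered by the group macros, so their
        // cases are listed explicitly, mirroring the hunk above.
        CALL_uint32(CASE);
        CALL_uint64(CASE);
        default:
          return 0;
      }
    #undef CASE
    }

    int main() { std::printf("uint64 size: %zu\n", DataTypeSize(DT_UINT64)); }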

tensorflow/core/kernels/BUILD

Lines changed: 0 additions & 2 deletions
@@ -4919,9 +4919,7 @@ tf_kernel_library(
         "topk_op_gpu_double.cu.cc",
         "topk_op_gpu_float.cu.cc",
         "topk_op_gpu_half.cu.cc",
-        "topk_op_gpu_uint64.cu.cc",
         "topk_op_gpu_int64.cu.cc",
-        "topk_op_gpu_uint32.cu.cc",
         "topk_op_gpu_int32.cu.cc",
         "topk_op_gpu_int16.cu.cc",
         "topk_op_gpu_uint16.cu.cc",

tensorflow/core/kernels/concat_lib_cpu.cc

Lines changed: 2 additions & 0 deletions
@@ -116,6 +116,8 @@ REGISTER(qint8)
 REGISTER(quint16)
 REGISTER(qint16)
 REGISTER(qint32)
+REGISTER(uint32)
+REGISTER(uint64)
 
 #if defined(IS_MOBILE_PLATFORM) && !defined(SUPPORT_SELECTIVE_REGISTRATION) && \
     !defined(__ANDROID_TYPES_FULL__)

tensorflow/core/kernels/concat_op.cc

Lines changed: 2 additions & 0 deletions
@@ -208,6 +208,8 @@ REGISTER_CONCAT(qint8);
 REGISTER_CONCAT(quint16);
 REGISTER_CONCAT(qint16);
 REGISTER_CONCAT(qint32);
+REGISTER_CONCAT(uint32);
+REGISTER_CONCAT(uint64);
 
 #undef REGISTER_CONCAT

tensorflow/core/kernels/constant_op.cc

Lines changed: 1 addition & 0 deletions
@@ -211,6 +211,7 @@ TF_CALL_ALL_TYPES(REGISTER_CPU_KERNEL);
 // the conversion from uint8 to quint8.
 REGISTER_KERNEL(CPU, quint8);
 REGISTER_KERNEL(CPU, quint16);
+REGISTER_KERNEL(CPU, uint32);
 #undef REGISTER_CPU_KERNEL
 
 #ifdef TENSORFLOW_USE_SYCL

tensorflow/core/kernels/control_flow_ops.cc

Lines changed: 5 additions & 0 deletions
@@ -101,12 +101,16 @@ TF_CALL_ALL_TYPES(REGISTER_CPU_SWITCH);
 TF_CALL_ALL_TYPES(REGISTER_CPU_REF_SWITCH);
 TF_CALL_QUANTIZED_TYPES(REGISTER_CPU_SWITCH);
 TF_CALL_QUANTIZED_TYPES(REGISTER_CPU_REF_SWITCH);
+REGISTER_CPU_SWITCH(uint64);
 
 TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_GPU_SWITCH);
 TF_CALL_QUANTIZED_TYPES(REGISTER_GPU_SWITCH);
 TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_GPU_REF_SWITCH);
 TF_CALL_QUANTIZED_TYPES(REGISTER_GPU_REF_SWITCH);
+REGISTER_GPU_SWITCH(uint64);
 TF_CALL_variant(REGISTER_GPU_SWITCH);
+TF_CALL_uint32(REGISTER_GPU_SWITCH);
+TF_CALL_uint32(REGISTER_GPU_REF_SWITCH);
 TF_CALL_bool(REGISTER_GPU_SWITCH);
 TF_CALL_bool(REGISTER_GPU_REF_SWITCH);
 
@@ -307,6 +311,7 @@ TF_CALL_QUANTIZED_TYPES(REGISTER_GPU_KERNEL);
 TF_CALL_QUANTIZED_TYPES(REGISTER_GPU_REF_KERNEL);
 REGISTER_GPU_KERNEL(bool);
 REGISTER_GPU_REF_KERNEL(bool);
+REGISTER_GPU_KERNEL(uint64);
 TF_CALL_variant(REGISTER_GPU_KERNEL);
 
 #undef REGISTER_GPU_KERNEL

tensorflow/core/kernels/data/dataset_test_base.cc

Lines changed: 2 additions & 0 deletions
@@ -220,6 +220,8 @@ Status DatasetOpsTestBase::ExpectEqual(const Tensor& a, const Tensor& b) {
       break;
     TF_CALL_NUMBER_TYPES(CASE);
     TF_CALL_tstring(CASE);
+    TF_CALL_uint32(CASE);
+    TF_CALL_uint64(CASE);
     // TODO(feihugis): figure out how to support variant tensors.
 #undef CASE
     default:

tensorflow/core/kernels/dense_update_ops.cc

Lines changed: 1 addition & 0 deletions
@@ -98,6 +98,7 @@ typedef Eigen::SyclDevice SYCLDevice;
 
 TF_CALL_ALL_TYPES(REGISTER_KERNELS);
 // uint32 not included in ALL_TYPES
+TF_CALL_uint32(REGISTER_KERNELS);
 TF_CALL_QUANTIZED_TYPES(REGISTER_KERNELS);
 // quint16 not included in QUANTIZIED_TYPES
 TF_CALL_quint16(REGISTER_KERNELS);
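This hunk shows the pattern most of the commit repeats: a per-kernel REGISTER_KERNELS macro is applied to a whole type group, and any type the group omits (here uint32) gets its own TF_CALL_<type> line. A compact, self-contained illustration of supplementing a group macro with an explicit per-type registration, using toy names rather than the real dense-update kernels:

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    using int32 = std::int32_t;
    using uint32 = std::uint32_t;

    // A toy registry recording which element types have an "assign" kernel.
    std::vector<std::string>& Registry() {
      static std::vector<std::string> names;
      return names;
    }

    template <typename T>
    struct RegisterAssign {
      explicit RegisterAssign(const char* name) { Registry().push_back(name); }
    };

    // Simplified per-type call macros and a group macro that, like
    // TF_CALL_ALL_TYPES, does not cover uint32.
    #define CALL_float(m)  m(float)
    #define CALL_int32(m)  m(int32)
    #define CALL_uint32(m) m(uint32)
    #define CALL_ALL_TYPES(m) CALL_float(m) CALL_int32(m)

    #define REGISTER_KERNELS(T) \
      static RegisterAssign<T> registered_##T{#T};

    CALL_ALL_TYPES(REGISTER_KERNELS)
    // uint32 not included in ALL_TYPES, so register it with its own call,
    // mirroring TF_CALL_uint32(REGISTER_KERNELS) in the hunk above.
    CALL_uint32(REGISTER_KERNELS)
    #undef REGISTER_KERNELS

    int main() {
      for (const auto& n : Registry()) std::printf("registered: %s\n", n.c_str());
      return 0;
    }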

tensorflow/core/kernels/dynamic_partition_op.cc

Lines changed: 2 additions & 0 deletions
@@ -164,6 +164,8 @@ class DynamicPartitionOp : public DynamicPartitionOp_Shared {
                           DynamicPartitionOp<T>)
 
 TF_CALL_ALL_TYPES(REGISTER_DYNAMIC_PARTITION);
+// For partitioning fingerprints.
+TF_CALL_uint64(REGISTER_DYNAMIC_PARTITION);
 #undef REGISTER_DYNAMIC_PARTITION
 
 }  // namespace tensorflow
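The added comment explains why only uint64 is registered here: DynamicPartition gets used to split 64-bit fingerprints into buckets, so a uint64 kernel is needed even though uint64 stays out of TF_CALL_ALL_TYPES. A small plain-C++ illustration of the partition-by-index idea on hypothetical uint64 fingerprints (not the TensorFlow kernel itself):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Splits `data` into `num_partitions` buckets; data[i] goes to bucket
    // partitions[i]. This mirrors what dynamic_partition does for 1-D input.
    std::vector<std::vector<std::uint64_t>> DynamicPartition(
        const std::vector<std::uint64_t>& data,
        const std::vector<int>& partitions, int num_partitions) {
      std::vector<std::vector<std::uint64_t>> out(num_partitions);
      for (std::size_t i = 0; i < data.size(); ++i) {
        out[partitions[i]].push_back(data[i]);
      }
      return out;
    }

    int main() {
      // Hypothetical 64-bit fingerprints routed to 2 shards by their low bit.
      std::vector<std::uint64_t> fingerprints = {0x9e3779b97f4a7c15ull,
                                                 0xbf58476d1ce4e5b9ull,
                                                 0x94d049bb133111ebull};
      std::vector<int> shard(fingerprints.size());
      for (std::size_t i = 0; i < fingerprints.size(); ++i) {
        shard[i] = static_cast<int>(fingerprints[i] & 1u);
      }
      auto buckets = DynamicPartition(fingerprints, shard, /*num_partitions=*/2);
      for (int p = 0; p < 2; ++p) {
        std::printf("partition %d holds %zu fingerprints\n", p, buckets[p].size());
      }
      return 0;
    }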
