diff --git a/RELEASE.md b/RELEASE.md index 0e27725431867e..7b4186050d415e 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,3 +1,60 @@ +# Release 2.5.3 + +This release introduces several vulnerability fixes: + +* Fixes a floating point division by 0 when executing convolution operators ([CVE-2022-21725](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21725)) +* Fixes a heap OOB read in shape inference for `ReverseSequence` ([CVE-2022-21728](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21728)) +* Fixes a heap OOB access in `Dequantize` ([CVE-2022-21726](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21726)) +* Fixes an integer overflow in shape inference for `Dequantize` ([CVE-2022-21727](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21727)) +* Fixes a heap OOB access in `FractionalAvgPoolGrad` ([CVE-2022-21730](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21730)) +* Fixes an overflow and divide by zero in `UnravelIndex` ([CVE-2022-21729](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21729)) +* Fixes a type confusion in shape inference for `ConcatV2` ([CVE-2022-21731](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21731)) +* Fixes an OOM in `ThreadPoolHandle` ([CVE-2022-21732](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21732)) +* Fixes an OOM due to integer overflow in `StringNGrams` ([CVE-2022-21733](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21733)) +* Fixes more issues caused by incomplete validation in boosted trees code ([CVE-2021-41208](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41208)) +* Fixes integer overflows in most sparse component-wise ops ([CVE-2022-23567](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23567)) +* Fixes an integer overflow in `AddManySparseToTensorsMap` ([CVE-2022-23568](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23568)) +* Fixes a number of `CHECK`-failures in `MapStage` 
([CVE-2022-21734](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21734)) +* Fixes a division by zero in `FractionalMaxPool` ([CVE-2022-21735](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21735)) +* Fixes a number of `CHECK`-fails when building invalid/overflowing tensor shapes ([CVE-2022-23569](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23569)) +* Fixes an undefined behavior in `SparseTensorSliceDataset` ([CVE-2022-21736](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21736)) +* Fixes an assertion failure based denial of service via faulty bin count operations ([CVE-2022-21737](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21737)) +* Fixes a reference binding to null pointer in `QuantizedMaxPool` ([CVE-2022-21739](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21739)) +* Fixes an integer overflow leading to crash in `SparseCountSparseOutput` ([CVE-2022-21738](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21738)) +* Fixes a heap overflow in `SparseCountSparseOutput` ([CVE-2022-21740](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21740)) +* Fixes an FPE in `BiasAndClamp` in TFLite ([CVE-2022-23557](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23557)) +* Fixes an FPE in depthwise convolutions in TFLite ([CVE-2022-21741](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21741)) +* Fixes an integer overflow in TFLite array creation ([CVE-2022-23558](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23558)) +* Fixes an integer overflow in TFLite ([CVE-2022-23559](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23559)) +* Fixes a dangerous OOB write in TFLite ([CVE-2022-23561](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23561)) +* Fixes a vulnerability leading to read and write outside of bounds in TFLite ([CVE-2022-23560](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23560)) +* Fixes a set of vulnerabilities caused by using insecure 
temporary files ([CVE-2022-23563](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23563)) +* Fixes an integer overflow in Range resulting in undefined behavior and OOM ([CVE-2022-23562](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23562)) +* Fixes a vulnerability where missing validation causes `tf.sparse.split` to crash when `axis` is a tuple ([CVE-2021-41206](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41206)) +* Fixes a `CHECK`-fail when decoding resource handles from proto ([CVE-2022-23564](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23564)) +* Fixes a `CHECK`-fail with repeated `AttrDef` ([CVE-2022-23565](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23565)) +* Fixes a heap OOB write in Grappler ([CVE-2022-23566](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23566)) +* Fixes a `CHECK`-fail when decoding invalid tensors from proto ([CVE-2022-23571](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23571)) +* Fixes an uninitialized variable access in `AssignOp` ([CVE-2022-23573](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23573)) +* Fixes an integer overflow in `OpLevelCostEstimator::CalculateTensorSize` ([CVE-2022-23575](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23575)) +* Fixes an integer overflow in `OpLevelCostEstimator::CalculateOutputSize` ([CVE-2022-23576](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23576)) +* Fixes a null dereference in `GetInitOp` ([CVE-2022-23577](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23577)) +* Fixes a memory leak when a graph node is invalid ([CVE-2022-23578](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23578)) +* Fixes an abort caused by allocating a vector that is too large ([CVE-2022-23580](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23580)) +* Fixes multiple `CHECK`-failures during Grappler's `IsSimplifiableReshape` 
([CVE-2022-23581](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23581)) +* Fixes multiple `CHECK`-failures during Grappler's `SafeToRemoveIdentity` ([CVE-2022-23579](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23579)) +* Fixes multiple `CHECK`-failures in `TensorByteSize` ([CVE-2022-23582](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23582)) +* Fixes multiple `CHECK`-failures in binary ops due to type confusion ([CVE-2022-23583](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23583)) +* Fixes a use after free in `DecodePng` kernel ([CVE-2022-23584](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23584)) +* Fixes a memory leak in decoding PNG images ([CVE-2022-23585](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23585)) +* Fixes multiple `CHECK`-fails in `function.cc` ([CVE-2022-23586](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23586)) +* Fixes multiple `CHECK`-fails due to attempting to build a reference tensor ([CVE-2022-23588](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23588)) +* Fixes an integer overflow in Grappler cost estimation of crop and resize operation ([CVE-2022-23587](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23587)) +* Fixes a null pointer dereference in Grappler's `IsConstant` ([CVE-2022-23589](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23589)) +* Fixes a `CHECK` failure in constant folding ([CVE-2021-41197](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41197)) +* Fixes a stack overflow due to self-recursive function in `GraphDef` ([CVE-2022-23591](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23591)) +* Updates `icu` to `69.1` to handle [CVE-2020-10531](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-10531) + # Release 2.5.2 This release introduces several vulnerability fixes: diff --git a/tensorflow/cc/saved_model/loader.cc b/tensorflow/cc/saved_model/loader.cc index dcd652d9fdf3d6..dcab34cc965e0f 100644 --- 
a/tensorflow/cc/saved_model/loader.cc +++ b/tensorflow/cc/saved_model/loader.cc @@ -23,6 +23,7 @@ limitations under the License. #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/function.pb.h" #include "tensorflow/core/framework/node_def.pb.h" +#include "tensorflow/core/framework/op_def.pb.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/monitoring/counter.h" @@ -95,6 +96,19 @@ static Status ValidateNode(const NodeDef& node) { return Status::OK(); } +static Status ValidateFunctionNotRecursive(const FunctionDef& function) { + const auto& function_name = function.signature().name(); + for (const auto& node : function.node_def()) { + if (node.op() == function_name) { + return errors::FailedPrecondition( + "Function ", function_name, + " is self recursive and TensorFlow does not support this scenario."); + } + } + + return Status::OK(); +} + static Status ValidateSavedTensors(const GraphDef& graph_def) { for (const auto& node : graph_def.node()) { TF_RETURN_IF_ERROR(ValidateNode(node)); @@ -106,6 +120,10 @@ static Status ValidateSavedTensors(const GraphDef& graph_def) { for (const auto& node : function.node_def()) { TF_RETURN_IF_ERROR(ValidateNode(node)); } + + // Also check that there is no recursivity in the library + // TODO(mihaimaruseac): Do more than self-recursivity + TF_RETURN_IF_ERROR(ValidateFunctionNotRecursive(function)); } } diff --git a/tensorflow/cc/saved_model/loader_util.cc b/tensorflow/cc/saved_model/loader_util.cc index 100cae2291333f..411dc41fd44837 100644 --- a/tensorflow/cc/saved_model/loader_util.cc +++ b/tensorflow/cc/saved_model/loader_util.cc @@ -34,9 +34,14 @@ Status GetInitOp(const string& export_dir, const MetaGraphDef& meta_graph_def, const auto& init_op_sig_it = meta_graph_def.signature_def().find(kSavedModelInitOpSignatureKey); if (init_op_sig_it != sig_def_map.end()) { - *init_op_name = init_op_sig_it->second.outputs() - 
.find(kSavedModelInitOpSignatureKey) - ->second.name(); + const auto& sig_def_outputs = init_op_sig_it->second.outputs(); + const auto& sig_def_outputs_it = + sig_def_outputs.find(kSavedModelInitOpSignatureKey); + if (sig_def_outputs_it == sig_def_outputs.end()) { + return errors::FailedPrecondition("Could not find output ", + kSavedModelInitOpSignatureKey); + } + *init_op_name = sig_def_outputs_it->second.name(); return Status::OK(); } diff --git a/tensorflow/compiler/xla/statusor.h b/tensorflow/compiler/xla/statusor.h index a32e2ad9851b0b..da6fa9a19031d6 100644 --- a/tensorflow/compiler/xla/statusor.h +++ b/tensorflow/compiler/xla/statusor.h @@ -21,8 +21,7 @@ limitations under the License. namespace xla { // Use steam_executor's StatusOr so we don't duplicate code. -template -using StatusOr = ::stream_executor::port::StatusOr; +using tensorflow::StatusOr; // TENSORFLOW_STATUS_OK } // namespace xla diff --git a/tensorflow/core/common_runtime/immutable_executor_state.cc b/tensorflow/core/common_runtime/immutable_executor_state.cc index 3cde56bc85ab81..6af8c5f74c1eca 100644 --- a/tensorflow/core/common_runtime/immutable_executor_state.cc +++ b/tensorflow/core/common_runtime/immutable_executor_state.cc @@ -131,6 +131,7 @@ Status ImmutableExecutorState::Initialize(const Graph& graph) { Status s = params_.create_kernel(n->properties(), &item->kernel); if (!s.ok()) { + params_.delete_kernel(item->kernel); item->kernel = nullptr; s = AttachDef(s, *n); return s; diff --git a/tensorflow/core/framework/BUILD b/tensorflow/core/framework/BUILD index 653e7da49fcd1b..ba2f759070c177 100644 --- a/tensorflow/core/framework/BUILD +++ b/tensorflow/core/framework/BUILD @@ -705,6 +705,8 @@ cc_library( ":tensor_shape", ":types_proto_cc", "//tensorflow/core/lib/strings:strcat", + "//tensorflow/core/platform:macros", + "//tensorflow/core/platform:statusor", "//tensorflow/core/platform:tensor_coding", "//tensorflow/core/platform:types", "//tensorflow/core/util:managed_stack_trace", diff 
--git a/tensorflow/core/framework/attr_value_util.cc b/tensorflow/core/framework/attr_value_util.cc index 76fe36e7f1e2a6..39e3ed888cec5f 100644 --- a/tensorflow/core/framework/attr_value_util.cc +++ b/tensorflow/core/framework/attr_value_util.cc @@ -45,7 +45,7 @@ constexpr int kMaxTensorNestDepth = 100; // not fully defined return -1. int64 TensorByteSize(const TensorProto& t) { // num_elements returns -1 if shape is not fully defined. - int64 num_elems = TensorShape(t.tensor_shape()).num_elements(); + int64 num_elems = PartialTensorShape(t.tensor_shape()).num_elements(); return num_elems < 0 ? -1 : num_elems * DataTypeSize(t.dtype()); } diff --git a/tensorflow/core/framework/common_shape_fns.cc b/tensorflow/core/framework/common_shape_fns.cc index e2777b67dd3bb6..a7f0740c8e8720 100644 --- a/tensorflow/core/framework/common_shape_fns.cc +++ b/tensorflow/core/framework/common_shape_fns.cc @@ -1937,7 +1937,7 @@ Status ConcatShapeHelper(InferenceContext* c, int start_value_index, } // Minimum required number of dimensions. - const int min_rank = concat_dim < 0 ? -concat_dim : concat_dim + 1; + const int64 min_rank = concat_dim < 0 ? 
-concat_dim : concat_dim + 1; ShapeHandle output_before; ShapeHandle output_after; diff --git a/tensorflow/core/framework/function.cc b/tensorflow/core/framework/function.cc index b84cfa31157233..ac066da8ba53ad 100644 --- a/tensorflow/core/framework/function.cc +++ b/tensorflow/core/framework/function.cc @@ -177,7 +177,9 @@ class FunctionInstantiationHelper { DataTypeVector dtypes; TF_RETURN_IF_ERROR( ArgNumType(attr_values, arg_def, &is_type_list, &dtypes)); - CHECK_GE(dtypes.size(), size_t{1}); + if (dtypes.size() < size_t{1}) { + return errors::Internal("Expected a list of at least one dtype"); + } int arg_index = result_.nodes.size(); TF_RETURN_IF_ERROR( AddItem(arg_def.name(), {true, arg_index, 0, is_type_list, dtypes})); @@ -185,7 +187,11 @@ class FunctionInstantiationHelper { for (size_t i = 0; i < dtypes.size(); ++i) { TF_RETURN_IF_ERROR(AddItem(strings::StrCat(arg_def.name(), ":", i), {true, arg_index, 0, false, {dtypes[i]}})); - DCHECK_EQ(arg_index, result_.nodes.size()); + if (arg_index != result_.nodes.size()) { + return errors::Internal( + "Expected arg_index to be equal to the number of nodes in result.", + " Got ", arg_index, " and ", result_.nodes.size()); + } string name = arg_def.name(); if (dtypes.size() > 1) { strings::StrAppend(&name, "_", i); diff --git a/tensorflow/core/framework/op_def_util.cc b/tensorflow/core/framework/op_def_util.cc index 486f92b3b20fdb..8500f247bf0712 100644 --- a/tensorflow/core/framework/op_def_util.cc +++ b/tensorflow/core/framework/op_def_util.cc @@ -818,9 +818,10 @@ bool RepeatedAttrDefEqual( const protobuf::RepeatedPtrField& a2) { std::unordered_map a1_set; for (const OpDef::AttrDef& def : a1) { - DCHECK(a1_set.find(def.name()) == a1_set.end()) - << "AttrDef names must be unique, but '" << def.name() - << "' appears more than once"; + if (a1_set.find(def.name()) != a1_set.end()) { + LOG(ERROR) << "AttrDef names must be unique, but '" << def.name() + << "' appears more than once"; + } a1_set[def.name()] = &def; } 
for (const OpDef::AttrDef& def : a2) { diff --git a/tensorflow/core/framework/resource_handle.cc b/tensorflow/core/framework/resource_handle.cc index e7f4c2afc90a4a..c8306ca5cf23f9 100644 --- a/tensorflow/core/framework/resource_handle.cc +++ b/tensorflow/core/framework/resource_handle.cc @@ -15,14 +15,25 @@ limitations under the License. #include "tensorflow/core/framework/resource_handle.h" #include "tensorflow/core/framework/resource_handle.pb.h" +#include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/lib/strings/strcat.h" +#include "tensorflow/core/platform/errors.h" +#include "tensorflow/core/platform/macros.h" namespace tensorflow { ResourceHandle::ResourceHandle() {} ResourceHandle::ResourceHandle(const ResourceHandleProto& proto) { - FromProto(proto); + TF_CHECK_OK(FromProto(proto)); +} + +Status ResourceHandle::BuildResourceHandle(const ResourceHandleProto& proto, + ResourceHandle* out) { + if (out == nullptr) + return errors::Internal( + "BuildResourceHandle() was called with nullptr for the output"); + return out->FromProto(proto); } ResourceHandle::~ResourceHandle() {} @@ -40,7 +51,7 @@ void ResourceHandle::AsProto(ResourceHandleProto* proto) const { } } -void ResourceHandle::FromProto(const ResourceHandleProto& proto) { +Status ResourceHandle::FromProto(const ResourceHandleProto& proto) { set_device(proto.device()); set_container(proto.container()); set_name(proto.name()); @@ -49,10 +60,16 @@ void ResourceHandle::FromProto(const ResourceHandleProto& proto) { std::vector dtypes_and_shapes; for (const auto& dtype_and_shape : proto.dtypes_and_shapes()) { DataType dtype = dtype_and_shape.dtype(); - PartialTensorShape shape(dtype_and_shape.shape()); + PartialTensorShape shape; + Status s = PartialTensorShape::BuildPartialTensorShape( + dtype_and_shape.shape(), &shape); + if (!s.ok()) { + return s; + } dtypes_and_shapes.push_back(DtypeAndPartialTensorShape{dtype, shape}); } dtypes_and_shapes_ = std::move(dtypes_and_shapes); + return 
Status::OK(); } string ResourceHandle::SerializeAsString() const { @@ -63,9 +80,7 @@ string ResourceHandle::SerializeAsString() const { bool ResourceHandle::ParseFromString(const string& s) { ResourceHandleProto proto; - const bool status = proto.ParseFromString(s); - if (status) FromProto(proto); - return status; + return proto.ParseFromString(s) && FromProto(proto).ok(); } string ResourceHandle::DebugString() const { @@ -98,7 +113,9 @@ bool DecodeResourceHandleList(std::unique_ptr d, if (!proto.ParseFromArray(d->Data(sizes[i]), sizes[i])) { return false; } - ps[i].FromProto(proto); + if (!ps[i].FromProto(proto).ok()) { + return false; + } } return true; } diff --git a/tensorflow/core/framework/resource_handle.h b/tensorflow/core/framework/resource_handle.h index 3921d80faf4fe4..cba3b25d4b29f2 100644 --- a/tensorflow/core/framework/resource_handle.h +++ b/tensorflow/core/framework/resource_handle.h @@ -38,6 +38,11 @@ class ResourceHandle { ResourceHandle(const ResourceHandleProto& proto); ~ResourceHandle(); + // Use this factory method if the `proto` comes from user controlled input, to + // prevent a denial of service. + static Status BuildResourceHandle(const ResourceHandleProto& proto, + ResourceHandle* out); + // Unique name for the device containing the resource. const std::string& device() const { return device_; } @@ -83,7 +88,7 @@ class ResourceHandle { // Conversion to and from ResourceHandleProto void AsProto(ResourceHandleProto* proto) const; - void FromProto(const ResourceHandleProto& proto); + Status FromProto(const ResourceHandleProto& proto); // Serialization via ResourceHandleProto std::string SerializeAsString() const; diff --git a/tensorflow/core/framework/shape_inference.cc b/tensorflow/core/framework/shape_inference.cc index 721c20b7491aa0..432caaea2792e2 100644 --- a/tensorflow/core/framework/shape_inference.cc +++ b/tensorflow/core/framework/shape_inference.cc @@ -14,6 +14,8 @@ limitations under the License. 
==============================================================================*/ #include "tensorflow/core/framework/shape_inference.h" +#include + #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/partial_tensor_shape.h" @@ -779,6 +781,19 @@ Status InferenceContext::InternalMakeShapeFromTensor( return ReturnUnknownShape(out); } const auto num_dims = Value(shape_dim); + // TODO(mihaimaruseac): Should be `TensorShape::MaxDimensions()` as we are + // not able to materialize shapes with more than this number of dimensions + // but then shape inference would fail for operations such as + // `tf.range`/`tf.ones`, etc. where the shape is not really materialized, + // only used during the inference. Hence, just prevent doing a `reserve` + // with a very large argument. + const int64_t max_dimensions = 1 << 25; + if (num_dims >= max_dimensions) { + return errors::Internal( + "Cannot create a tensor with ", num_dims, + " dimensions, as these would be more than maximum of ", + max_dimensions); + } std::vector dims; dims.reserve(num_dims); for (int i = 0; i < num_dims; i++) dims.push_back(UnknownDim()); diff --git a/tensorflow/core/framework/tensor.cc b/tensorflow/core/framework/tensor.cc index 167ba5e6587be2..9e3190cdab5ad2 100644 --- a/tensorflow/core/framework/tensor.cc +++ b/tensorflow/core/framework/tensor.cc @@ -531,6 +531,46 @@ TensorBuffer* FromProtoField(Allocator* a, const TensorProto& in, int64 n) { return buf; } +// Separate implementation for `ResourceHandle` to handle the case when the +// proto for the resource is invalid. See `resource_handle.h` constructor and +// static factory builder. 
+template <> +TensorBuffer* FromProtoField(Allocator* a, + const TensorProto& in, int64_t n) { + CHECK_GT(n, 0); + Buffer* buf = new Buffer(a, n); + ResourceHandle* data = buf->template base(); + if (data == nullptr) { + buf->Unref(); + return nullptr; + } + const int64_t in_n = ProtoHelper::NumElements(in); + if (in_n <= 0) { + std::fill_n(data, n, ResourceHandle()); + } else { + // If tensor shape says we have n < in_n elements in the output tensor + // then make sure to only decode the first n out of the in_n elements in the + // in tensors. In all other cases, we decode all in_n elements of in and set + // the remaining elements up to n to be the default ResourceHandle() value. + const int64_t real_n = n < in_n ? n : in_n; + for (int64_t i = 0; i < real_n; ++i) { + Status s = ResourceHandle::BuildResourceHandle(in.resource_handle_val(i), + &data[i]); + if (!s.ok()) { + LOG(ERROR) << "Could not decode resource handle from proto \"" + << in.resource_handle_val(i).ShortDebugString() + << "\", returned status: " << s.ToString(); + buf->Unref(); + return nullptr; + } + } + for (int64_t i = in_n; i < n; ++i) { + data[i] = ResourceHandle(); + } + } + return buf; +} + template <> TensorBuffer* FromProtoField(Allocator* a, const TensorProto& in, int64 n) { @@ -937,6 +977,15 @@ bool Tensor::FromProto(Allocator* a, const TensorProto& proto) { dtype_error = true, dtype_error = true); } if (dtype_error || p == nullptr) return false; + } else { + // Handle the case of empty tensors (N = 0) or tensors with incomplete shape + // (N = -1). All other values of `shape.num_elements()` should be invalid by + // construction. + // Here, we just need to validate that the `proto.dtype()` value is valid. 
+ bool dtype_error = false; + CASES_WITH_DEFAULT(proto.dtype(), break, dtype_error = true, + dtype_error = true); + if (dtype_error) return false; } shape_ = shape; set_dtype(proto.dtype()); diff --git a/tensorflow/core/framework/tensor_shape.cc b/tensorflow/core/framework/tensor_shape.cc index 5144577e7aa0f5..117a70b7fa39a4 100644 --- a/tensorflow/core/framework/tensor_shape.cc +++ b/tensorflow/core/framework/tensor_shape.cc @@ -229,7 +229,7 @@ Status TensorShapeBase::InitDims(gtl::ArraySlice dim_sizes) { if (!kIsPartial && !large_size) { for (auto s : dim_sizes) { if (TF_PREDICT_FALSE(s < 0)) { - return errors::Internal( + return errors::InvalidArgument( "Expected shape dimensions to be non-negative, got ", s); } } @@ -411,7 +411,8 @@ template Status TensorShapeBase::AddDimWithStatus(int64 size) { if (!kIsPartial) { if (TF_PREDICT_FALSE(size < 0)) { - return errors::Internal("Expected a non-negative size, got ", size); + return errors::InvalidArgument("Expected a non-negative size, got ", + size); } } @@ -420,7 +421,7 @@ Status TensorShapeBase::AddDimWithStatus(int64 size) { } if (TF_PREDICT_FALSE(ndims_byte() >= MaxDimensions())) { - return errors::Internal("Too many dimensions in tensor"); + return errors::InvalidArgument("Too many dimensions in tensor"); } int64 new_num_elements; @@ -429,9 +430,9 @@ Status TensorShapeBase::AddDimWithStatus(int64 size) { } else { new_num_elements = MultiplyWithoutOverflow(num_elements(), size); if (TF_PREDICT_FALSE(new_num_elements < 0)) { - return errors::Internal("Encountered overflow when multiplying ", - num_elements(), " with ", size, - ", result: ", new_num_elements); + return errors::InvalidArgument("Encountered overflow when multiplying ", + num_elements(), " with ", size, + ", result: ", new_num_elements); } } @@ -521,7 +522,8 @@ template Status TensorShapeBase::InsertDimWithStatus(int d, int64 size) { if (!kIsPartial) { if (TF_PREDICT_FALSE(size < 0)) { - return errors::Internal("Expected a non-negative size, got ", 
size); + return errors::InvalidArgument("Expected a non-negative size, got ", + size); } } @@ -591,13 +593,14 @@ void TensorShapeBase::set_dim(int d, int64 size) { template Status TensorShapeBase::SetDimWithStatus(int d, int64 size) { if (TF_PREDICT_FALSE(d < 0)) { - return errors::Internal("Index must be non-negative, got ", d); + return errors::InvalidArgument("Index must be non-negative, got ", d); } if (TF_PREDICT_FALSE(d >= dims())) { - return errors::Internal("Index must be less than ", dims(), ", got ", d); + return errors::InvalidArgument("Index must be less than ", dims(), ", got ", + d); } - if (TF_PREDICT_FALSE(size < 0)) { - return errors::Internal("Expected a non-negative size, got ", size); + if (TF_PREDICT_FALSE(!kIsPartial && size < 0)) { + return errors::InvalidArgument("Expected a non-negative size, got ", size); } if (tag() == REP16 && size < kMaxRep16) { diff --git a/tensorflow/core/framework/tensor_shape.h b/tensorflow/core/framework/tensor_shape.h index a690123f0ceaf9..d12994304faf13 100644 --- a/tensorflow/core/framework/tensor_shape.h +++ b/tensorflow/core/framework/tensor_shape.h @@ -359,6 +359,23 @@ class TensorShape : public TensorShapeBase { public: using TensorShapeBase::TensorShapeBase; + // These factory methods should be used instead of the constructors that take + // an array of sizes if calling code cannot validate that the sizes specify a + // valid `TensorShape`. + // The value in `*out` is valid iff the returned value is `Status::OK`. 
+ static Status BuildTensorShape(gtl::ArraySlice dim_sizes, + TensorShape* out) { + return BuildTensorShapeBase(dim_sizes, out); + } + static Status BuildTensorShape(std::initializer_list dim_sizes, + TensorShape* out) { + return BuildTensorShape(gtl::ArraySlice(dim_sizes), out); + } + static Status BuildTensorShape(const TensorShapeProto& proto, + TensorShape* out) { + return BuildTensorShapeBase(proto, out); + } + /// Allow a TensorShape to be used as a PartialTensorShape without copying operator const PartialTensorShape&() const; // NOLINT(runtime/explicit) @@ -508,6 +525,23 @@ class PartialTensorShape : public TensorShapeBase { PartialTensorShape() {} using TensorShapeBase::TensorShapeBase; + // These factory methods should be used instead of the constructors that take + // an array of sizes if calling code cannot validate that the sizes specify a + // valid `PartialTensorShape`. + // The value in `*out` is valid iff the returned value is `Status::OK`. + static Status BuildPartialTensorShape(gtl::ArraySlice dim_sizes, + PartialTensorShape* out) { + return BuildTensorShapeBase(dim_sizes, out); + } + static Status BuildPartialTensorShape( + std::initializer_list dim_sizes, PartialTensorShape* out) { + return BuildPartialTensorShape(gtl::ArraySlice(dim_sizes), out); + } + static Status BuildPartialTensorShape(const TensorShapeProto& proto, + PartialTensorShape* out) { + return BuildTensorShapeBase(proto, out); + } + /// Add a dimension to the end ("inner-most"), returns a new /// PartialTensorShape. 
/// REQUIRES: `size >= -1`, where -1 means unknown diff --git a/tensorflow/core/framework/tensor_shape_test.cc b/tensorflow/core/framework/tensor_shape_test.cc index f41d00f2a46472..cf087d0647f662 100644 --- a/tensorflow/core/framework/tensor_shape_test.cc +++ b/tensorflow/core/framework/tensor_shape_test.cc @@ -214,7 +214,7 @@ TEST(TensorShapeTest, AddDimWithStatus) { ASSERT_EQ(4, s.dims()); status = s.AddDimWithStatus(-1); - EXPECT_EQ(tensorflow::error::INTERNAL, status.code()); + EXPECT_EQ(tensorflow::error::INVALID_ARGUMENT, status.code()); } TEST(TensorShapeTest, Factory) { @@ -225,7 +225,7 @@ TEST(TensorShapeTest, Factory) { ASSERT_EQ(3, s.dims()); status = TensorShape::BuildTensorShapeBase({-10, 5, 20}, &s); - EXPECT_EQ(tensorflow::error::INTERNAL, status.code()); + EXPECT_EQ(tensorflow::error::INVALID_ARGUMENT, status.code()); } // ----------------------------------------------------------------------- diff --git a/tensorflow/core/grappler/costs/BUILD b/tensorflow/core/grappler/costs/BUILD index 9204607f3838e8..80a6648cd5b2cf 100644 --- a/tensorflow/core/grappler/costs/BUILD +++ b/tensorflow/core/grappler/costs/BUILD @@ -182,6 +182,7 @@ tf_cuda_library( "//tensorflow/core:lib_proto_parsing", "//tensorflow/core:protos_all_cc", "//tensorflow/core/grappler:utils", + "//tensorflow/core/util:overflow", "//tensorflow/core/grappler/clusters:utils", ] + tf_protos_grappler(), ) @@ -338,22 +339,11 @@ cc_library( "//tensorflow/core:lib", "//tensorflow/core:protos_all_cc", "//tensorflow/core/grappler/clusters:utils", + "//tensorflow/core/util:overflow", + "//tensorflow/core/platform:statusor", ] + tf_protos_grappler(), ) -tf_cc_test( - name = "op_level_cost_estimator_test", - srcs = ["op_level_cost_estimator_test.cc"], - tags = ["no_oss"], # b/163222310 - deps = [ - ":op_level_cost_estimator", - "//tensorflow/core:framework", - "//tensorflow/core:protos_all_cc", - "//tensorflow/core:test", - "//tensorflow/core:test_main", - ], -) - cc_library( name = 
"analytical_cost_estimator", srcs = ["analytical_cost_estimator.cc"], diff --git a/tensorflow/core/grappler/costs/graph_properties.cc b/tensorflow/core/grappler/costs/graph_properties.cc index 644efe3326ab9f..441a7524bb4eda 100644 --- a/tensorflow/core/grappler/costs/graph_properties.cc +++ b/tensorflow/core/grappler/costs/graph_properties.cc @@ -1128,7 +1128,12 @@ class SymbolicShapeRefiner { GetUnknownOutputShape(node, output_port); InferenceContext* ctx = GetContext(node); if (ctx == nullptr) { - return errors::InvalidArgument("Missing context"); + return errors::InvalidArgument("SetUnknownShape: Missing context"); + } + if (output_port < 0 || output_port >= ctx->num_outputs()) { + return errors::InvalidArgument( + "SetUnknownShape: output_port must be in [0, ", ctx->num_outputs(), + ") but was ", output_port); } ctx->set_output(output_port, shape); return Status::OK(); diff --git a/tensorflow/core/grappler/costs/op_level_cost_estimator.cc b/tensorflow/core/grappler/costs/op_level_cost_estimator.cc index 009f2471d39fd5..ae6bc399aec544 100644 --- a/tensorflow/core/grappler/costs/op_level_cost_estimator.cc +++ b/tensorflow/core/grappler/costs/op_level_cost_estimator.cc @@ -27,6 +27,7 @@ limitations under the License. 
#include "tensorflow/core/grappler/costs/op_context.h" #include "tensorflow/core/grappler/costs/utils.h" #include "tensorflow/core/platform/errors.h" +#include "tensorflow/core/util/overflow.h" namespace tensorflow { namespace grappler { @@ -1535,7 +1536,14 @@ int64 OpLevelCostEstimator::CalculateTensorElementCount( auto tensor_shape = MaybeGetMinimumShape(tensor.shape(), num_dims, found_unknown_shapes); for (const auto& dim : tensor_shape.dim()) { - tensor_size *= dim.size(); + int64_t new_tensor_size = MultiplyWithoutOverflow(tensor_size, dim.size()); + if (new_tensor_size < 0) { + VLOG(1) << "Overflow encountered when computing element count of a " + "tensor, multiplying " + << tensor_size << " with " << dim.size(); + return -1; + } + tensor_size = new_tensor_size; } return tensor_size; } @@ -1545,7 +1553,13 @@ int64 OpLevelCostEstimator::CalculateTensorSize( int64 count = CalculateTensorElementCount(tensor, found_unknown_shapes); int size = DataTypeSize(BaseType(tensor.dtype())); VLOG(2) << "Count: " << count << " DataTypeSize: " << size; - return count * size; + int64_t tensor_size = MultiplyWithoutOverflow(count, size); + if (tensor_size < 0) { + VLOG(1) << "Overflow encountered when computing tensor size, multiplying " + << count << " with " << size; + return -1; + } + return tensor_size; } int64 OpLevelCostEstimator::CalculateInputSize(const OpInfo& op_info, @@ -1598,7 +1612,14 @@ int64 OpLevelCostEstimator::CalculateOutputSize(const OpInfo& op_info, auto output_shape = MaybeGetMinimumShape(original_output_shape, num_dims, found_unknown_shapes); for (const auto& dim : output_shape.dim()) { - output_size *= dim.size(); + int64_t new_output_size = + MultiplyWithoutOverflow(output_size, dim.size()); + if (new_output_size < 0) { + VLOG(1) << "Overflow encountered when estimating cost, multiplying " + << output_size << " with " << dim.size(); + return -1; + } + output_size = new_output_size; } total_output_size += output_size; VLOG(1) << "Output Size: " << 
output_size @@ -2121,7 +2142,7 @@ OpInfo::TensorProperties OpLevelCostEstimator::DescribeTensor( } /* static */ -OpLevelCostEstimator::ConvolutionDimensions +StatusOr OpLevelCostEstimator::OpDimensionsFromInputs( const TensorShapeProto& original_image_shape, const OpInfo& op_info, bool* found_unknown_shapes) { @@ -2158,6 +2179,11 @@ OpLevelCostEstimator::OpDimensionsFromInputs( std::vector strides = GetStrides(op_info); int64 sx = strides[x_index]; int64 sy = strides[y_index]; + if (sx == 0 || sy == 0) { + return errors::InvalidArgument( + "Stride must be > 0 for Height and Width, but got (", sy, ", ", sx, + ")"); + } const auto padding = GetPadding(op_info); int64 ox = GetOutputSize(ix, kx, sx, padding); @@ -2174,8 +2200,9 @@ Status OpLevelCostEstimator::PredictMaxPool(const OpContext& op_context, bool found_unknown_shapes = false; const auto& op_info = op_context.op_info; // x: op_info.inputs(0) - ConvolutionDimensions dims = OpDimensionsFromInputs( - op_info.inputs(0).shape(), op_info, &found_unknown_shapes); + TF_ASSIGN_OR_RETURN(ConvolutionDimensions dims, + OpDimensionsFromInputs(op_info.inputs(0).shape(), op_info, + &found_unknown_shapes)); // kx * ky - 1 comparisons per output (kx * xy > 1) // or 1 copy per output (kx * k1 = 1). int per_output_ops = dims.kx * dims.ky == 1 ? 
1 : dims.kx * dims.ky - 1; @@ -2215,8 +2242,9 @@ Status OpLevelCostEstimator::PredictMaxPoolGrad(const OpContext& op_context, op_info.ShortDebugString()); } - ConvolutionDimensions dims = OpDimensionsFromInputs( - op_info.inputs(0).shape(), op_info, &found_unknown_shapes); + TF_ASSIGN_OR_RETURN(ConvolutionDimensions dims, + OpDimensionsFromInputs(op_info.inputs(0).shape(), op_info, + &found_unknown_shapes)); int64 ops = 0; if (dims.kx == 1 && dims.ky == 1) { @@ -2291,8 +2319,9 @@ Status OpLevelCostEstimator::PredictAvgPool(const OpContext& op_context, bool found_unknown_shapes = false; const auto& op_info = op_context.op_info; // x: op_info.inputs(0) - ConvolutionDimensions dims = OpDimensionsFromInputs( - op_info.inputs(0).shape(), op_info, &found_unknown_shapes); + TF_ASSIGN_OR_RETURN(ConvolutionDimensions dims, + OpDimensionsFromInputs(op_info.inputs(0).shape(), op_info, + &found_unknown_shapes)); // kx * ky - 1 additions and 1 multiplication per output. int64 ops = dims.batch * dims.ox * dims.oy * dims.oz * dims.kx * dims.ky; @@ -2348,8 +2377,9 @@ Status OpLevelCostEstimator::PredictAvgPoolGrad(const OpContext& op_context, found_unknown_shapes = true; } - ConvolutionDimensions dims = - OpDimensionsFromInputs(x_shape, op_info, &found_unknown_shapes); + TF_ASSIGN_OR_RETURN( + ConvolutionDimensions dims, + OpDimensionsFromInputs(x_shape, op_info, &found_unknown_shapes)); int64 ops = 0; if (dims.kx <= dims.sx && dims.ky <= dims.sy) { @@ -2375,8 +2405,9 @@ Status OpLevelCostEstimator::PredictFusedBatchNorm( // offset: op_info.inputs(2) // mean: op_info.inputs(3) --> only for inference // variance: op_info.inputs(4) --> only for inference - ConvolutionDimensions dims = OpDimensionsFromInputs( - op_info.inputs(0).shape(), op_info, &found_unknown_shapes); + TF_ASSIGN_OR_RETURN(ConvolutionDimensions dims, + OpDimensionsFromInputs(op_info.inputs(0).shape(), op_info, + &found_unknown_shapes)); const bool is_training = IsTraining(op_info); int64 ops = 0; @@ -2425,8 +2456,9 
@@ Status OpLevelCostEstimator::PredictFusedBatchNormGrad( // scale: op_info.inputs(2) // mean: op_info.inputs(3) // variance or inverse of variance: op_info.inputs(4) - ConvolutionDimensions dims = OpDimensionsFromInputs( - op_info.inputs(1).shape(), op_info, &found_unknown_shapes); + TF_ASSIGN_OR_RETURN(ConvolutionDimensions dims, + OpDimensionsFromInputs(op_info.inputs(1).shape(), op_info, + &found_unknown_shapes)); int64 ops = 0; const auto rsqrt_cost = Eigen::internal::functor_traits< @@ -2646,27 +2678,42 @@ Status OpLevelCostEstimator::PredictCropAndResize(const OpContext& op_context, // calculation differs from rough estimate in implementation, as it separates // out cost per box from cost per pixel and cost per element. + // Since crop arguments are user controlled, check for overflow. + int64_t crop_area = MultiplyWithoutOverflow(crop_height, crop_width); + if (crop_area < 0) + return errors::InvalidArgument("Cannot estimate cost, multiplying ", + crop_height, " with ", crop_width, + " would overflow"); + int64_t crop_volume = MultiplyWithoutOverflow(crop_area, num_boxes); + if (crop_volume < 0) + return errors::InvalidArgument("Cannot estimate cost, multiplying ", + crop_area, " with ", num_boxes, + " would overflow"); + int64_t crop_depth = MultiplyWithoutOverflow(crop_height, num_boxes); + if (crop_depth < 0) + return errors::InvalidArgument("Cannot estimate cost, multiplying ", + crop_height, " with ", num_boxes, + " would overflow"); + // Ops for variables height_scale and width_scale. int64 ops = (sub_cost * 6 + mul_cost * 2 + div_cost * 2) * num_boxes; // Ops for variable in_y. - ops += (mul_cost * 2 + sub_cost + add_cost) * crop_height * num_boxes; + ops += (mul_cost * 2 + sub_cost + add_cost) * crop_depth; // Ops for variable in_x (same computation across both branches). 
- ops += (mul_cost * 2 + sub_cost + add_cost) * crop_height * crop_width * - num_boxes; + ops += (mul_cost * 2 + sub_cost + add_cost) * crop_volume; // Specify op_cost based on the method. if (use_bilinear_interp) { // Ops for variables top_y_index, bottom_y_index, y_lerp. - ops += (floor_cost + ceil_cost + sub_cost) * crop_height * num_boxes; + ops += (floor_cost + ceil_cost + sub_cost) * crop_depth; // Ops for variables left_x, right_x, x_lerp; - ops += (floor_cost + ceil_cost + sub_cost) * crop_height * crop_width * - num_boxes; + ops += (floor_cost + ceil_cost + sub_cost) * crop_volume; // Ops for innermost loop across depth. ops += (cast_to_float_cost * 4 + add_cost * 3 + sub_cost * 3 + mul_cost * 3) * output_elements; } else /* method == "nearest" */ { // Ops for variables closest_x_index and closest_y_index. - ops += round_cost * 2 * crop_height * crop_width * num_boxes; + ops += round_cost * 2 * crop_volume; // Ops for innermost loop across depth. ops += cast_to_float_cost * output_elements; } diff --git a/tensorflow/core/grappler/costs/op_level_cost_estimator.h b/tensorflow/core/grappler/costs/op_level_cost_estimator.h index 54382927f7b904..3148de33fa9ba5 100644 --- a/tensorflow/core/grappler/costs/op_level_cost_estimator.h +++ b/tensorflow/core/grappler/costs/op_level_cost_estimator.h @@ -22,6 +22,7 @@ limitations under the License. #include "tensorflow/core/grappler/costs/op_context.h" #include "tensorflow/core/grappler/costs/op_performance_data.pb.h" #include "tensorflow/core/lib/core/status.h" +#include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/util/padding.h" namespace tensorflow { @@ -290,7 +291,7 @@ class OpLevelCostEstimator { bool* found_unknown_shapes); // For Pooling, FusedBatchNorm, and their grad ops. 
- static ConvolutionDimensions OpDimensionsFromInputs( + static StatusOr OpDimensionsFromInputs( const TensorShapeProto& original_image_shape, const OpInfo& op_info, bool* found_unknown_shapes); diff --git a/tensorflow/core/grappler/costs/op_level_cost_estimator_test.cc b/tensorflow/core/grappler/costs/op_level_cost_estimator_test.cc index 23373d3dc1b629..eda84ec3276001 100644 --- a/tensorflow/core/grappler/costs/op_level_cost_estimator_test.cc +++ b/tensorflow/core/grappler/costs/op_level_cost_estimator_test.cc @@ -24,6 +24,7 @@ limitations under the License. #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/framework/types.h" +#include "tensorflow/core/platform/status_matchers.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/protobuf/device_properties.pb.h" @@ -558,9 +559,10 @@ class OpLevelCostEstimatorTest : public ::testing::Test { } bool found_unknown_shapes; - auto dims = OpLevelCostEstimator::OpDimensionsFromInputs( - op_context.op_info.inputs(0).shape(), op_context.op_info, - &found_unknown_shapes); + TF_ASSERT_OK_AND_ASSIGN( + auto dims, OpLevelCostEstimator::OpDimensionsFromInputs( + op_context.op_info.inputs(0).shape(), op_context.op_info, + &found_unknown_shapes)); Padding padding_enum; if (padding == "VALID") { padding_enum = Padding::VALID; @@ -581,6 +583,38 @@ class OpLevelCostEstimatorTest : public ::testing::Test { EXPECT_EQ(padding_enum, dims.padding); } + StatusOr + CallOpDimensionsFromInputs(const int n, const int h, const int w, const int c, + const int kx, const int ky, const int sx, + const int sy, const string& data_format, + const string& padding) { + OpContext op_context; + + const std::vector x = {n, h, w, c}; + const std::vector ksize = {1, kx, ky, 1}; + std::vector strides; + if (data_format == "NHWC") { + strides = {1, sy, sx, 1}; + } else { + strides = {1, 1, sy, sx}; + } + + auto& op_info = op_context.op_info; + 
SetCpuDevice(&op_info); + op_info.set_op("MaxPool"); + + DescribeTensor4D(x[0], x[1], x[2], x[3], op_info.add_inputs()); + auto* attr = op_info.mutable_attr(); + SetAttrValue(data_format, &(*attr)["data_format"]); + SetAttrValue(padding, &(*attr)["padding"]); + SetAttrValue(strides, &(*attr)["strides"]); + SetAttrValue(ksize, &(*attr)["ksize"]); + bool found_unknown_shapes; + return OpLevelCostEstimator::OpDimensionsFromInputs( + op_context.op_info.inputs(0).shape(), op_context.op_info, + &found_unknown_shapes); + } + OpLevelCostEstimator estimator_; }; @@ -1383,6 +1417,26 @@ TEST_F(OpLevelCostEstimatorTest, OpDimensionsFromInputs) { } } +TEST_F(OpLevelCostEstimatorTest, OpDimensionsFromInputsError) { + std::vector paddings = {"VALID", "SAME"}; + std::vector formats = {"NHWC", "NCHW"}; + for (const auto& p : paddings) { + for (const auto& f : formats) { + // n, h, w, c, kx, ky, sx, sy, data_format, padding. + ASSERT_THAT( + CallOpDimensionsFromInputs(10, 14, 14, 3840, 3, 3, 0, 2, f, p), + testing::StatusIs( + error::INVALID_ARGUMENT, + "Stride must be > 0 for Height and Width, but got (2, 0)")); + ASSERT_THAT( + CallOpDimensionsFromInputs(10, 14, 14, 3840, 3, 3, 2, 0, f, p), + testing::StatusIs( + error::INVALID_ARGUMENT, + "Stride must be > 0 for Height and Width, but got (0, 2)")); + } + } +} + TEST_F(OpLevelCostEstimatorTest, PredictMaxPool) { auto predict_max_pool = [this](const int n, const int in, const int c, const int k, const int s, diff --git a/tensorflow/core/grappler/costs/utils.cc b/tensorflow/core/grappler/costs/utils.cc index c6bc7555d3d1a3..d48695c0793362 100644 --- a/tensorflow/core/grappler/costs/utils.cc +++ b/tensorflow/core/grappler/costs/utils.cc @@ -45,6 +45,7 @@ limitations under the License. 
#include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/protobuf/config.pb.h" #include "tensorflow/core/util/device_name_utils.h" +#include "tensorflow/core/util/overflow.h" namespace tensorflow { namespace grappler { @@ -217,7 +218,13 @@ int64 CalculateTensorSize(const OpInfo::TensorProperties& prop) { } int64 num_elems = TensorShape(shape).num_elements(); - return num_elems * size; + int64 tensor_size = MultiplyWithoutOverflow(num_elems, size); + if (tensor_size < 0) { + VLOG(1) << "Overflow encountered when computing tensor size, multiplying " + << num_elems << " with " << size; + return -1; + } + return tensor_size; } int64 CalculateOutputSize( diff --git a/tensorflow/core/grappler/costs/utils_test.cc b/tensorflow/core/grappler/costs/utils_test.cc index db5c11f0fe102d..6f6d3b2a14b0d9 100644 --- a/tensorflow/core/grappler/costs/utils_test.cc +++ b/tensorflow/core/grappler/costs/utils_test.cc @@ -202,6 +202,10 @@ TEST(UtilsTest, CalculateTensorSize) { EXPECT_EQ( DataTypeSize(DT_FLOAT) * 1 * 7 * 1 * 99, CalculateTensorSize(ShapeToTensorProperty({-1, 7, -1, 99}, DT_FLOAT))); + + // Test overflow + EXPECT_EQ(-1, CalculateTensorSize(ShapeToTensorProperty( + {4096, 4096, 4096, 33554432}, DT_FLOAT))); } TEST(UtilsTest, CalculateOutputSize) { diff --git a/tensorflow/core/grappler/mutable_graph_view.cc b/tensorflow/core/grappler/mutable_graph_view.cc index 5119acd6141270..4503c90cf466a9 100644 --- a/tensorflow/core/grappler/mutable_graph_view.cc +++ b/tensorflow/core/grappler/mutable_graph_view.cc @@ -68,6 +68,9 @@ bool IsIdentityConsumingSwitch(const MutableGraphView& graph, } NodeDef* input_node = graph.GetNode(tensor_id.node()); + if (input_node == nullptr) { + return false; + } return IsSwitch(*input_node); } return false; diff --git a/tensorflow/core/grappler/optimizers/constant_folding.cc b/tensorflow/core/grappler/optimizers/constant_folding.cc index df4cc54757134a..db88130b4afbc5 100644 --- 
a/tensorflow/core/grappler/optimizers/constant_folding.cc +++ b/tensorflow/core/grappler/optimizers/constant_folding.cc @@ -1013,7 +1013,12 @@ bool ConstantFolding::IsFoldableUncached( } } for (const auto& output_prop : output_props) { - const PartialTensorShape output_shape(output_prop.shape()); + PartialTensorShape output_shape; + if (!PartialTensorShape::BuildPartialTensorShape(output_prop.shape(), + &output_shape) + .ok()) { + return false; + } if (output_shape.IsFullyDefined()) { const int64 num_bytes = output_shape.num_elements() * DataTypeSize(output_prop.dtype()); @@ -1350,6 +1355,11 @@ Status ConstantFolding::EvaluateOneFoldable(const NodeDef& node, } TF_RETURN_IF_ERROR(CheckAttrExists(*input_node, "value")); const TensorProto& raw_val = input_node->attr().at("value").tensor(); + if (IsRefType(raw_val.dtype())) { + return errors::InvalidArgument( + "Not allowed to construct a tensor with reference dtype, got ", + DataTypeString(raw_val.dtype())); + } Tensor* value = new Tensor(raw_val.dtype(), raw_val.tensor_shape()); CHECK(value->FromProto(raw_val)) << "Unable to make Tensor from proto for " << node.name() @@ -1665,15 +1675,21 @@ Status ConstantFolding::FoldGraph( return Status::OK(); } -bool ConstantFolding::IsSimplifiableReshape( +Status ConstantFolding::IsSimplifiableReshape( const NodeDef& node, const GraphProperties& properties) const { if (!IsReshape(node)) { - return false; + return errors::Internal("Node ", node.name(), " is not a Reshape node"); + } + if (2 > node.input_size()) { + return errors::Internal("Node ", node.name(), + " must have at most 2 inputs but has ", + node.input_size()); } - CHECK_LE(2, node.input_size()); const NodeDef* new_shape = node_map_->GetNode(node.input(1)); if (!IsReallyConstant(*new_shape)) { - return false; + return errors::Internal("Node ", node.name(), " has shape ", + new_shape->DebugString(), + " which is not a constant"); } TensorVector outputs; auto outputs_cleanup = gtl::MakeCleanup([&outputs] { @@ -1684,22 
+1700,29 @@ bool ConstantFolding::IsSimplifiableReshape( Status s = EvaluateNode(*new_shape, TensorVector(), &outputs); if (!s.ok()) { - return false; + return errors::Internal("Could not evaluate node ", node.name()); + } + if (outputs.size() != 1) { + return errors::Internal("Node ", node.name(), + " must have exactly 1 output but has ", + outputs.size()); } - CHECK_EQ(1, outputs.size()); const std::vector& props = properties.GetInputProperties(node.name()); if (props.empty()) { - return false; + return errors::Internal("Node ", node.name(), " has no properties"); } const OpInfo::TensorProperties& prop = props[0]; if (prop.dtype() == DT_INVALID) { - return false; + return errors::Internal("Node ", node.name(), " has property ", + prop.DebugString(), " with invalid dtype"); } const PartialTensorShape shape(prop.shape()); if (!shape.IsFullyDefined()) { - return false; + return errors::Internal("Node ", node.name(), " has property ", + prop.DebugString(), " with shape ", + shape.DebugString(), " which is not fully defined"); } PartialTensorShape new_dims; @@ -1709,17 +1732,24 @@ bool ConstantFolding::IsSimplifiableReshape( int32 dim = outputs[0]->flat()(i); shp.push_back(dim); } - TF_CHECK_OK(TensorShapeUtils::MakeShape(shp, &new_dims)); + s = TensorShapeUtils::MakeShape(shp, &new_dims); + if (!s.ok()) return s; } else { std::vector shp; for (int i = 0; i < outputs[0]->NumElements(); ++i) { int64 dim = outputs[0]->flat()(i); shp.push_back(dim); } - TF_CHECK_OK(TensorShapeUtils::MakeShape(shp, &new_dims)); + s = TensorShapeUtils::MakeShape(shp, &new_dims); + if (!s.ok()) return s; } - return shape.IsCompatibleWith(new_dims); + if (!shape.IsCompatibleWith(new_dims)) { + return errors::Internal("Expected shape ", shape.DebugString(), + "to be compatible with ", new_dims.DebugString()); + } + + return Status::OK(); } #define IS_VALUE_CASE(DTYPE, VALUE) \ @@ -2905,7 +2935,7 @@ bool ConstantFolding::SimplifyReduction(GraphDef* optimized_graph, bool 
ConstantFolding::SimplifyReshape(const GraphProperties& properties, bool use_shape_info, NodeDef* node) { if (!use_shape_info || node->attr().count("T") == 0 || - !IsSimplifiableReshape(*node, properties)) { + !IsSimplifiableReshape(*node, properties).ok()) { return false; } DataType output_type = node->attr().at("T").type(); @@ -3454,6 +3484,9 @@ bool ConstantFolding::MulConvPushDown(GraphDef* optimized_graph, NodeDef* node, NodeDef* mul_left_child = node_map_->GetNode(node->input(0)); NodeDef* mul_right_child = node_map_->GetNode(node->input(1)); + if (mul_left_child == nullptr || mul_right_child == nullptr) { + return false; + } // One child must be constant, and the second must be Conv op. const bool left_child_is_constant = IsReallyConstant(*mul_left_child); const bool right_child_is_constant = IsReallyConstant(*mul_right_child); diff --git a/tensorflow/core/grappler/optimizers/constant_folding.h b/tensorflow/core/grappler/optimizers/constant_folding.h index 8462f002021998..0d16f1ade61c3b 100644 --- a/tensorflow/core/grappler/optimizers/constant_folding.h +++ b/tensorflow/core/grappler/optimizers/constant_folding.h @@ -132,8 +132,8 @@ class ConstantFolding : public GraphOptimizer { Status FoldGraph(const GraphProperties& properties, GraphDef* output, absl::flat_hash_set* nodes_to_not_simplify); - bool IsSimplifiableReshape(const NodeDef& node, - const GraphProperties& properties) const; + Status IsSimplifiableReshape(const NodeDef& node, + const GraphProperties& properties) const; Status SimplifyGraph(bool use_shape_info, GraphDef* optimized_graph, GraphProperties* properties, absl::flat_hash_set* nodes_to_not_simplify); diff --git a/tensorflow/core/grappler/optimizers/dependency_optimizer.cc b/tensorflow/core/grappler/optimizers/dependency_optimizer.cc index 1be7f2692e0f76..0f1bbb729edd23 100644 --- a/tensorflow/core/grappler/optimizers/dependency_optimizer.cc +++ b/tensorflow/core/grappler/optimizers/dependency_optimizer.cc @@ -75,8 +75,10 @@ bool 
DependencyOptimizer::SafeToRemoveIdentity(const NodeDef& node) const { } const NodeDef* input = node_map_->GetNode(NodeName(node.input(0))); - CHECK(input != nullptr) << "node = " << node.name() - << " input = " << node.input(0); + if (input == nullptr) { + VLOG(1) << "node = " << node.name() << " input = " << node.input(0); + return false; + } // Don't remove Identity nodes corresponding to Variable reads or following // Recv. if (IsVariable(*input) || IsRecv(*input)) { diff --git a/tensorflow/core/kernels/assign_op.h b/tensorflow/core/kernels/assign_op.h index 74f926bdc88bf7..8aa56e2e29ed0b 100644 --- a/tensorflow/core/kernels/assign_op.h +++ b/tensorflow/core/kernels/assign_op.h @@ -50,6 +50,12 @@ class AssignOp : public OpKernel { // We always return the input ref. context->forward_ref_input_to_ref_output(0, 0); + // Prevent copying uninitialized data, to solve harder to debug undefined + // behaviors that cannot be traced back to the original tensor. + OP_REQUIRES( + context, rhs.IsInitialized(), + errors::Internal("Right hand side of AssignOp is not initialized")); + // We can't always know how this value will be used downstream, so make // conservative assumptions in specifying constraints on the memory // allocation attributes, unless the Grappler graph analysis determined that diff --git a/tensorflow/core/kernels/bincount_op.cc b/tensorflow/core/kernels/bincount_op.cc index 6299ca3e3b1d90..5c2ee797e62cea 100644 --- a/tensorflow/core/kernels/bincount_op.cc +++ b/tensorflow/core/kernels/bincount_op.cc @@ -235,6 +235,9 @@ class DenseBincountOp : public OpKernel { const Tensor& size_t = ctx->input(1); const Tensor& weights = ctx->input(2); + OP_REQUIRES(ctx, size_t.dims() == 0, + errors::InvalidArgument("Shape must be rank 0 but is rank ", + size_t.dims())); Tidx size = size_t.scalar()(); OP_REQUIRES( ctx, size >= 0, @@ -331,6 +334,9 @@ class SparseBincountOp : public OpKernel { const auto weights = ctx->input(4).flat(); const int64 weights_size = 
weights.size(); + OP_REQUIRES(ctx, size_t.dims() == 0, + errors::InvalidArgument("Shape must be rank 0 but is rank ", + size_t.dims())); Tidx size = size_t.scalar()(); OP_REQUIRES( ctx, size >= 0, @@ -421,6 +427,9 @@ class RaggedBincountOp : public OpKernel { const auto weights = ctx->input(3).flat(); const int64 weights_size = weights.size(); + OP_REQUIRES(ctx, size_t.dims() == 0, + errors::InvalidArgument("Shape must be rank 0 but is rank ", + size_t.dims())); Tidx size = size_t.scalar()(); OP_REQUIRES( ctx, size >= 0, diff --git a/tensorflow/core/kernels/boosted_trees/prediction_ops.cc b/tensorflow/core/kernels/boosted_trees/prediction_ops.cc index 008962c33ecb10..3da2efd3530432 100644 --- a/tensorflow/core/kernels/boosted_trees/prediction_ops.cc +++ b/tensorflow/core/kernels/boosted_trees/prediction_ops.cc @@ -37,7 +37,7 @@ limitations under the License. namespace tensorflow { static void ConvertVectorsToMatrices( - const OpInputList bucketized_features_list, + OpKernelContext* const context, const OpInputList bucketized_features_list, std::vector::ConstMatrix>& bucketized_features) { for (const Tensor& tensor : bucketized_features_list) { if (tensor.dims() == 1) { @@ -45,6 +45,10 @@ static void ConvertVectorsToMatrices( bucketized_features.emplace_back( TTypes::ConstMatrix(v.data(), v.size(), 1)); } else { + OP_REQUIRES(context, TensorShapeUtils::IsMatrix(tensor.shape()), + errors::Internal("Cannot use tensor as matrix, expected " + "vector or matrix, received shape ", + tensor.shape().DebugString())); bucketized_features.emplace_back(tensor.matrix()); } } @@ -58,6 +62,9 @@ class BoostedTreesTrainingPredictOp : public OpKernel { public: explicit BoostedTreesTrainingPredictOp(OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. 
Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr("num_bucketized_features", &num_bucketized_features_)); OP_REQUIRES_OK(context, @@ -76,17 +83,26 @@ class BoostedTreesTrainingPredictOp : public OpKernel { &bucketized_features_list)); std::vector::ConstMatrix> bucketized_features; bucketized_features.reserve(bucketized_features_list.size()); - ConvertVectorsToMatrices(bucketized_features_list, bucketized_features); + ConvertVectorsToMatrices(context, bucketized_features_list, + bucketized_features); const int batch_size = bucketized_features[0].dimension(0); const Tensor* cached_tree_ids_t; OP_REQUIRES_OK(context, context->input("cached_tree_ids", &cached_tree_ids_t)); + OP_REQUIRES(context, TensorShapeUtils::IsVector(cached_tree_ids_t->shape()), + errors::InvalidArgument( + "cached_tree_ids must be a vector, received shape ", + cached_tree_ids_t->shape().DebugString())); const auto cached_tree_ids = cached_tree_ids_t->vec(); const Tensor* cached_node_ids_t; OP_REQUIRES_OK(context, context->input("cached_node_ids", &cached_node_ids_t)); + OP_REQUIRES(context, TensorShapeUtils::IsVector(cached_node_ids_t->shape()), + errors::InvalidArgument( + "cached_node_ids must be a vector, received shape ", + cached_node_ids_t->shape().DebugString())); const auto cached_node_ids = cached_node_ids_t->vec(); // Allocate outputs. 
@@ -118,9 +134,9 @@ class BoostedTreesTrainingPredictOp : public OpKernel { output_partial_logits.setZero(); } else { output_tree_ids.setConstant(latest_tree); - auto do_work = [&resource, &bucketized_features, &cached_tree_ids, - &cached_node_ids, &output_partial_logits, - &output_node_ids, latest_tree, + auto do_work = [&context, &resource, &bucketized_features, + &cached_tree_ids, &cached_node_ids, + &output_partial_logits, &output_node_ids, latest_tree, this](int64 start, int64 end) { for (int32 i = start; i < end; ++i) { int32 tree_id = cached_tree_ids(i); @@ -138,7 +154,11 @@ class BoostedTreesTrainingPredictOp : public OpKernel { // node's value. The following logic handles both of these cases. const auto& node_logits = resource->node_value(tree_id, node_id); if (!node_logits.empty()) { - DCHECK_EQ(node_logits.size(), logits_dimension_); + OP_REQUIRES( + context, node_logits.size() == logits_dimension_, + errors::Internal( + "Expected node_logits.size() == logits_dimension_, got ", + node_logits.size(), " vs ", logits_dimension_)); for (int32 j = 0; j < logits_dimension_; ++j) { partial_tree_logits[j] -= node_logits[j]; } @@ -151,7 +171,11 @@ class BoostedTreesTrainingPredictOp : public OpKernel { while (true) { if (resource->is_leaf(tree_id, node_id)) { const auto& leaf_logits = resource->node_value(tree_id, node_id); - DCHECK_EQ(leaf_logits.size(), logits_dimension_); + OP_REQUIRES( + context, leaf_logits.size() == logits_dimension_, + errors::Internal( + "Expected leaf_logits.size() == logits_dimension_, got ", + leaf_logits.size(), " vs ", logits_dimension_)); // Tree is done const float tree_weight = resource->GetTreeWeight(tree_id); for (int32 j = 0; j < logits_dimension_; ++j) { @@ -201,6 +225,9 @@ class BoostedTreesPredictOp : public OpKernel { public: explicit BoostedTreesPredictOp(OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. 
Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr("num_bucketized_features", &num_bucketized_features_)); OP_REQUIRES_OK(context, @@ -219,7 +246,8 @@ class BoostedTreesPredictOp : public OpKernel { &bucketized_features_list)); std::vector::ConstMatrix> bucketized_features; bucketized_features.reserve(bucketized_features_list.size()); - ConvertVectorsToMatrices(bucketized_features_list, bucketized_features); + ConvertVectorsToMatrices(context, bucketized_features_list, + bucketized_features); const int batch_size = bucketized_features[0].dimension(0); // Allocate outputs. @@ -236,8 +264,8 @@ class BoostedTreesPredictOp : public OpKernel { } const int32 last_tree = resource->num_trees() - 1; - auto do_work = [&resource, &bucketized_features, &output_logits, last_tree, - this](int64 start, int64 end) { + auto do_work = [&context, &resource, &bucketized_features, &output_logits, + last_tree, this](int64_t start, int64_t end) { for (int32 i = start; i < end; ++i) { std::vector tree_logits(logits_dimension_, 0.0); int32 tree_id = 0; @@ -246,7 +274,11 @@ class BoostedTreesPredictOp : public OpKernel { if (resource->is_leaf(tree_id, node_id)) { const float tree_weight = resource->GetTreeWeight(tree_id); const auto& leaf_logits = resource->node_value(tree_id, node_id); - DCHECK_EQ(leaf_logits.size(), logits_dimension_); + OP_REQUIRES( + context, leaf_logits.size() == logits_dimension_, + errors::Internal( + "Expected leaf_logits.size() == logits_dimension_, got ", + leaf_logits.size(), " vs ", logits_dimension_)); for (int32 j = 0; j < logits_dimension_; ++j) { tree_logits[j] += tree_weight * leaf_logits[j]; } @@ -298,6 +330,9 @@ class BoostedTreesExampleDebugOutputsOp : public OpKernel { explicit BoostedTreesExampleDebugOutputsOp( OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. 
Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr("num_bucketized_features", &num_bucketized_features_)); OP_REQUIRES_OK(context, @@ -319,7 +354,8 @@ class BoostedTreesExampleDebugOutputsOp : public OpKernel { &bucketized_features_list)); std::vector::ConstMatrix> bucketized_features; bucketized_features.reserve(bucketized_features_list.size()); - ConvertVectorsToMatrices(bucketized_features_list, bucketized_features); + ConvertVectorsToMatrices(context, bucketized_features_list, + bucketized_features); const int batch_size = bucketized_features[0].dimension(0); // We need to get the feature ids used for splitting and the logits after @@ -339,14 +375,16 @@ class BoostedTreesExampleDebugOutputsOp : public OpKernel { // features used to split and the associated logits at each point along the // path. Note: feature_ids has one less value than logits_path because the // first value of each logit path will be the bias. - auto do_work = [&resource, &bucketized_features, &output_debug_info, - last_tree](int64 start, int64 end) { + auto do_work = [&context, &resource, &bucketized_features, + &output_debug_info, last_tree](int64_t start, int64_t end) { for (int32 i = start; i < end; ++i) { // Proto to store debug outputs, per example. boosted_trees::DebugOutput example_debug_info; // Initial bias prediction. E.g., prediction based off training mean. 
const auto& tree_logits = resource->node_value(0, 0); - DCHECK_EQ(tree_logits.size(), 1); + OP_REQUIRES(context, tree_logits.size() == 1, + errors::Internal("Expected tree_logits.size() == 1, got ", + tree_logits.size())); float tree_logit = resource->GetTreeWeight(0) * tree_logits[0]; example_debug_info.add_logits_path(tree_logit); int32 node_id = 0; @@ -372,7 +410,10 @@ class BoostedTreesExampleDebugOutputsOp : public OpKernel { node_id = resource->next_node(tree_id, node_id, i, bucketized_features); const auto& tree_logits = resource->node_value(tree_id, node_id); - DCHECK_EQ(tree_logits.size(), 1); + OP_REQUIRES( + context, tree_logits.size() == 1, + errors::Internal("Expected tree_logits.size() == 1, got ", + tree_logits.size())); tree_logit = resource->GetTreeWeight(tree_id) * tree_logits[0]; // Output logit incorporates sum of leaf logits from prior trees. example_debug_info.add_logits_path(tree_logit + past_trees_logit); diff --git a/tensorflow/core/kernels/boosted_trees/quantile_ops.cc b/tensorflow/core/kernels/boosted_trees/quantile_ops.cc index 916db1f436148b..5d4fd8c6778ff8 100644 --- a/tensorflow/core/kernels/boosted_trees/quantile_ops.cc +++ b/tensorflow/core/kernels/boosted_trees/quantile_ops.cc @@ -98,6 +98,9 @@ class BoostedTreesCreateQuantileStreamResourceOp : public OpKernel { explicit BoostedTreesCreateQuantileStreamResourceOp( OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr(kMaxElementsName, &max_elements_)); } @@ -108,6 +111,10 @@ class BoostedTreesCreateQuantileStreamResourceOp : public OpKernel { // disallowed. 
const Tensor* epsilon_t; OP_REQUIRES_OK(context, context->input(kEpsilonName, &epsilon_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(epsilon_t->shape()), + errors::InvalidArgument( + "epsilon must be a scalar, got a tensor of shape ", + epsilon_t->shape().DebugString())); float epsilon = epsilon_t->scalar()(); OP_REQUIRES( context, epsilon > 0, @@ -115,6 +122,10 @@ class BoostedTreesCreateQuantileStreamResourceOp : public OpKernel { const Tensor* num_streams_t; OP_REQUIRES_OK(context, context->input(kNumStreamsName, &num_streams_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(num_streams_t->shape()), + errors::InvalidArgument( + "num_streams must be a scalar, got a tensor of shape ", + num_streams_t->shape().DebugString())); int64 num_streams = num_streams_t->scalar()(); OP_REQUIRES(context, num_streams >= 0, errors::InvalidArgument( @@ -143,6 +154,9 @@ class BoostedTreesMakeQuantileSummariesOp : public OpKernel { explicit BoostedTreesMakeQuantileSummariesOp( OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. 
Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr(kNumFeaturesName, &num_features_)); } @@ -156,7 +170,8 @@ class BoostedTreesMakeQuantileSummariesOp : public OpKernel { const Tensor* example_weights_t; OP_REQUIRES_OK(context, context->input(kExampleWeightsName, &example_weights_t)); - DCHECK(float_features_list.size() > 0) << "Got empty feature list"; + OP_REQUIRES(context, float_features_list.size() > 0, + errors::Internal("Got empty feature list")); auto example_weights = example_weights_t->flat(); const int64 weight_size = example_weights.size(); const int64 batch_size = float_features_list[0].flat().size(); @@ -166,6 +181,10 @@ class BoostedTreesMakeQuantileSummariesOp : public OpKernel { "Weights should be a single value or same size as features."))); const Tensor* epsilon_t; OP_REQUIRES_OK(context, context->input(kEpsilonName, &epsilon_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(epsilon_t->shape()), + errors::InvalidArgument( + "epsilon must be a scalar, got a tensor of shape ", + epsilon_t->shape().DebugString())); float epsilon = epsilon_t->scalar()(); OpOutputList summaries_output_list; @@ -190,7 +209,8 @@ class BoostedTreesMakeQuantileSummariesOp : public OpKernel { context, summaries_output_list.allocate( index, - TensorShape({static_cast(summary_entry_list.size()), 4}), + TensorShape( + {static_cast(summary_entry_list.size()), 4}), &output_t)); auto output = output_t->matrix(); for (auto row = 0; row < summary_entry_list.size(); row++) { @@ -223,6 +243,9 @@ class BoostedTreesFlushQuantileSummariesOp : public OpKernel { explicit BoostedTreesFlushQuantileSummariesOp( OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. 
Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr(kNumFeaturesName, &num_features_)); } @@ -282,7 +305,11 @@ class BoostedTreesQuantileStreamResourceAddSummariesOp : public OpKernel { public: explicit BoostedTreesQuantileStreamResourceAddSummariesOp( OpKernelConstruction* const context) - : OpKernel(context) {} + : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; + } void Compute(OpKernelContext* context) override { ResourceHandle handle; @@ -298,7 +325,10 @@ class BoostedTreesQuantileStreamResourceAddSummariesOp : public OpKernel { OP_REQUIRES_OK(context, context->input_list(kSummariesName, &summaries_list)); int32 num_streams = stream_resource->num_streams(); - CHECK_EQ(static_cast(num_streams), summaries_list.size()); + OP_REQUIRES( + context, num_streams == summaries_list.size(), + errors::Internal("Expected num_streams == summaries_list.size(), got ", + num_streams, " vs ", summaries_list.size())); auto do_quantile_add_summary = [&](const int64 begin, const int64 end) { // Iterating all features. 
@@ -313,7 +343,10 @@ class BoostedTreesQuantileStreamResourceAddSummariesOp : public OpKernel { const auto summary_values = summaries.matrix(); const auto& tensor_shape = summaries.shape(); const int64 entries_size = tensor_shape.dim_size(0); - CHECK_EQ(tensor_shape.dim_size(1), 4); + OP_REQUIRES( + context, tensor_shape.dim_size(1) == 4, + errors::Internal("Expected tensor_shape.dim_size(1) == 4, got ", + tensor_shape.dim_size(1))); std::vector summary_entries; summary_entries.reserve(entries_size); for (int64 i = 0; i < entries_size; i++) { @@ -346,6 +379,9 @@ class BoostedTreesQuantileStreamResourceDeserializeOp : public OpKernel { explicit BoostedTreesQuantileStreamResourceDeserializeOp( OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr(kNumStreamsName, &num_features_)); } @@ -365,6 +401,12 @@ class BoostedTreesQuantileStreamResourceDeserializeOp : public OpKernel { // Iterating over all streams. for (int64 stream_idx = begin; stream_idx < end; stream_idx++) { const Tensor& bucket_boundaries_t = bucket_boundaries_list[stream_idx]; + OP_REQUIRES( + context, TensorShapeUtils::IsVector(bucket_boundaries_t.shape()), + errors::InvalidArgument("bucket boundaries for each stream must be " + "a vector, received shape ", + bucket_boundaries_t.shape().DebugString(), + " for stream ", stream_idx)); const auto& bucket_boundaries = bucket_boundaries_t.vec(); std::vector result; result.reserve(bucket_boundaries.size()); @@ -396,6 +438,9 @@ class BoostedTreesQuantileStreamResourceFlushOp : public OpKernel { explicit BoostedTreesQuantileStreamResourceFlushOp( OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. 
Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr(kGenerateQuantiles, &generate_quantiles_)); } @@ -412,6 +457,10 @@ class BoostedTreesQuantileStreamResourceFlushOp : public OpKernel { const Tensor* num_buckets_t; OP_REQUIRES_OK(context, context->input(kNumBucketsName, &num_buckets_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(num_buckets_t->shape()), + errors::InvalidArgument( + "num_buckets must be a scalar, got a tensor of shape ", + num_buckets_t->shape().DebugString())); const int64 num_buckets = num_buckets_t->scalar()(); const int64 num_streams = stream_resource->num_streams(); @@ -452,6 +501,9 @@ class BoostedTreesQuantileStreamResourceGetBucketBoundariesOp explicit BoostedTreesQuantileStreamResourceGetBucketBoundariesOp( OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr(kNumFeaturesName, &num_features_)); } @@ -466,7 +518,9 @@ class BoostedTreesQuantileStreamResourceGetBucketBoundariesOp mutex_lock l(*stream_resource->mutex()); const int64 num_streams = stream_resource->num_streams(); - CHECK_EQ(num_features_, num_streams); + OP_REQUIRES(context, num_streams == num_features_, + errors::Internal("Expected num_streams == num_features_, got ", + num_streams, " vs ", num_features_)); OpOutputList bucket_boundaries_list; OP_REQUIRES_OK(context, context->output_list(kBucketBoundariesName, &bucket_boundaries_list)); @@ -476,10 +530,10 @@ class BoostedTreesQuantileStreamResourceGetBucketBoundariesOp for (int64 stream_idx = begin; stream_idx < end; stream_idx++) { const auto& boundaries = stream_resource->boundaries(stream_idx); Tensor* bucket_boundaries_t = nullptr; - OP_REQUIRES_OK(context, - bucket_boundaries_list.allocate( - 
stream_idx, {static_cast(boundaries.size())}, - &bucket_boundaries_t)); + OP_REQUIRES_OK( + context, bucket_boundaries_list.allocate( + stream_idx, {static_cast(boundaries.size())}, + &bucket_boundaries_t)); auto* quantiles_flat = bucket_boundaries_t->flat().data(); memcpy(quantiles_flat, boundaries.data(), sizeof(float) * boundaries.size()); @@ -510,6 +564,9 @@ class BoostedTreesBucketizeOp : public OpKernel { public: explicit BoostedTreesBucketizeOp(OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr(kNumFeaturesName, &num_features_)); } diff --git a/tensorflow/core/kernels/boosted_trees/resource_ops.cc b/tensorflow/core/kernels/boosted_trees/resource_ops.cc index 8036f2b20f36bb..2e55efb19dc597 100644 --- a/tensorflow/core/kernels/boosted_trees/resource_ops.cc +++ b/tensorflow/core/kernels/boosted_trees/resource_ops.cc @@ -36,18 +36,32 @@ REGISTER_KERNEL_BUILDER( class BoostedTreesCreateEnsembleOp : public OpKernel { public: explicit BoostedTreesCreateEnsembleOp(OpKernelConstruction* context) - : OpKernel(context) {} + : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; + } void Compute(OpKernelContext* context) override { // Get the stamp token. const Tensor* stamp_token_t; OP_REQUIRES_OK(context, context->input("stamp_token", &stamp_token_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(stamp_token_t->shape()), + errors::InvalidArgument( + "stamp_token must be a scalar, got a tensor of shape ", + stamp_token_t->shape().DebugString())); int64 stamp_token = stamp_token_t->scalar()(); // Get the tree ensemble proto. 
const Tensor* tree_ensemble_serialized_t; OP_REQUIRES_OK(context, context->input("tree_ensemble_serialized", &tree_ensemble_serialized_t)); + OP_REQUIRES( + context, + TensorShapeUtils::IsScalar(tree_ensemble_serialized_t->shape()), + errors::InvalidArgument( + "tree_ensemble_serialized must be a scalar, got a tensor of shape ", + tree_ensemble_serialized_t->shape().DebugString())); std::unique_ptr result( new BoostedTreesEnsembleResource()); if (!result->InitFromSerialized( @@ -76,7 +90,11 @@ REGISTER_KERNEL_BUILDER(Name("BoostedTreesCreateEnsemble").Device(DEVICE_CPU), class BoostedTreesGetEnsembleStatesOp : public OpKernel { public: explicit BoostedTreesGetEnsembleStatesOp(OpKernelConstruction* context) - : OpKernel(context) {} + : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; + } void Compute(OpKernelContext* context) override { // Looks up the resource. @@ -139,7 +157,11 @@ REGISTER_KERNEL_BUILDER( class BoostedTreesSerializeEnsembleOp : public OpKernel { public: explicit BoostedTreesSerializeEnsembleOp(OpKernelConstruction* context) - : OpKernel(context) {} + : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; + } void Compute(OpKernelContext* context) override { core::RefCountPtr tree_ensemble_resource; @@ -166,7 +188,11 @@ REGISTER_KERNEL_BUILDER( class BoostedTreesDeserializeEnsembleOp : public OpKernel { public: explicit BoostedTreesDeserializeEnsembleOp(OpKernelConstruction* context) - : OpKernel(context) {} + : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. 
Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; + } void Compute(OpKernelContext* context) override { core::RefCountPtr tree_ensemble_resource; @@ -177,12 +203,22 @@ class BoostedTreesDeserializeEnsembleOp : public OpKernel { // Get the stamp token. const Tensor* stamp_token_t; OP_REQUIRES_OK(context, context->input("stamp_token", &stamp_token_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(stamp_token_t->shape()), + errors::InvalidArgument( + "stamp_token must be a scalar, got a tensor of shape ", + stamp_token_t->shape().DebugString())); int64 stamp_token = stamp_token_t->scalar()(); // Get the tree ensemble proto. const Tensor* tree_ensemble_serialized_t; OP_REQUIRES_OK(context, context->input("tree_ensemble_serialized", &tree_ensemble_serialized_t)); + OP_REQUIRES( + context, + TensorShapeUtils::IsScalar(tree_ensemble_serialized_t->shape()), + errors::InvalidArgument( + "tree_ensemble_serialized must be a scalar, got a tensor of shape ", + tree_ensemble_serialized_t->shape().DebugString())); // Deallocate all the previous objects on the resource. tree_ensemble_resource->Reset(); OP_REQUIRES( diff --git a/tensorflow/core/kernels/boosted_trees/stats_ops.cc b/tensorflow/core/kernels/boosted_trees/stats_ops.cc index bb6709d32d5e2f..9090876afc8681 100644 --- a/tensorflow/core/kernels/boosted_trees/stats_ops.cc +++ b/tensorflow/core/kernels/boosted_trees/stats_ops.cc @@ -45,6 +45,9 @@ class BoostedTreesCalculateBestGainsPerFeatureOp : public OpKernel { explicit BoostedTreesCalculateBestGainsPerFeatureOp( OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. 
Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr("max_splits", &max_splits_)); OP_REQUIRES_OK(context, context->GetAttr("num_features", &num_features_)); } @@ -83,17 +86,33 @@ class BoostedTreesCalculateBestGainsPerFeatureOp : public OpKernel { } const Tensor* l1_t; OP_REQUIRES_OK(context, context->input("l1", &l1_t)); + OP_REQUIRES( + context, TensorShapeUtils::IsScalar(l1_t->shape()), + errors::InvalidArgument("l1 must be a scalar, got a tensor of shape ", + l1_t->shape().DebugString())); const auto l1 = l1_t->scalar()(); const Tensor* l2_t; OP_REQUIRES_OK(context, context->input("l2", &l2_t)); + OP_REQUIRES( + context, TensorShapeUtils::IsScalar(l2_t->shape()), + errors::InvalidArgument("l2 must be a scalar, got a tensor of shape ", + l2_t->shape().DebugString())); const auto l2 = l2_t->scalar()(); const Tensor* tree_complexity_t; OP_REQUIRES_OK(context, context->input("tree_complexity", &tree_complexity_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(tree_complexity_t->shape()), + errors::InvalidArgument( + "tree_complexity must be a scalar, got a tensor of shape ", + tree_complexity_t->shape().DebugString())); const auto tree_complexity = tree_complexity_t->scalar()(); const Tensor* min_node_weight_t; OP_REQUIRES_OK(context, context->input("min_node_weight", &min_node_weight_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_node_weight_t->shape()), + errors::InvalidArgument( + "min_node_weight must be a scalar, got a tensor of shape ", + min_node_weight_t->shape().DebugString())); const auto min_node_weight = min_node_weight_t->scalar()(); // Allocate output lists of tensors: @@ -251,6 +270,9 @@ class BoostedTreesCalculateBestFeatureSplitOp : public OpKernel { explicit BoostedTreesCalculateBestFeatureSplitOp( OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. 
Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr("logits_dimension", &logits_dim_)); OP_REQUIRES_OK(context, context->GetAttr("split_type", &split_type_)); } @@ -262,6 +284,10 @@ class BoostedTreesCalculateBestFeatureSplitOp : public OpKernel { OP_REQUIRES( context, node_id_range_t->NumElements() == 2, errors::InvalidArgument("node_id_range argument must have shape [2]")); + OP_REQUIRES(context, TensorShapeUtils::IsVector(node_id_range_t->shape()), + errors::InvalidArgument( + "node_id_range must be a vector, received shape ", + node_id_range_t->shape().DebugString())); const auto node_id_range = node_id_range_t->vec(); const int32 node_id_first = node_id_range(0); // inclusive const int32 node_id_last = node_id_range(1); // exclusive @@ -291,10 +317,14 @@ class BoostedTreesCalculateBestFeatureSplitOp : public OpKernel { OP_REQUIRES(context, l1_t->NumElements() == 1, errors::InvalidArgument("l1 argument must be a scalar")); const auto l1 = l1_t->scalar()(); - DCHECK_GE(l1, 0); + OP_REQUIRES(context, l1 >= 0, + errors::InvalidArgument("l1 = ", l1, " but it should be >= 0")); if (logits_dim_ > 1) { // Multi-class L1 regularization not supported yet. 
- DCHECK_EQ(l1, 0); + OP_REQUIRES( + context, l1 == 0, + errors::InvalidArgument( + "l1 != 0 is not yet supported for multi-class regularization")); } const Tensor* l2_t; @@ -302,7 +332,8 @@ class BoostedTreesCalculateBestFeatureSplitOp : public OpKernel { OP_REQUIRES(context, l2_t->NumElements() == 1, errors::InvalidArgument("l2 argument must be a scalar")); const auto l2 = l2_t->scalar()(); - DCHECK_GE(l2, 0); + OP_REQUIRES(context, l2 >= 0, + errors::InvalidArgument("l2 = ", l2, " but it should be >= 0")); const Tensor* tree_complexity_t; OP_REQUIRES_OK(context, @@ -595,6 +626,9 @@ class BoostedTreesCalculateBestFeatureSplitV2 : public OpKernel { explicit BoostedTreesCalculateBestFeatureSplitV2( OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr("logits_dimension", &logits_dim_)); OP_REQUIRES_OK(context, context->GetAttr("num_features", &num_features_)); } @@ -603,6 +637,10 @@ class BoostedTreesCalculateBestFeatureSplitV2 : public OpKernel { // node_id_range const Tensor* node_id_range_t; OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t)); + OP_REQUIRES(context, TensorShapeUtils::IsVector(node_id_range_t->shape()), + errors::InvalidArgument( + "node_id_range must be a vector, received shape ", + node_id_range_t->shape().DebugString())); OP_REQUIRES( context, node_id_range_t->dims() == 1, errors::InvalidArgument("node_id_range must be a rank 1 tensor, but " @@ -623,7 +661,9 @@ class BoostedTreesCalculateBestFeatureSplitV2 : public OpKernel { &stats_summaries_list)); // Infer dimensions of a stats_summary. 
- DCHECK_GT(stats_summaries_list.size(), 0); + OP_REQUIRES( + context, stats_summaries_list.size() > 0, + errors::InvalidArgument("Got an empty list for stats_summaries_list")); const int32 feature_dims = stats_summaries_list[0].dim_size(1); // The last bucket is for default/missing value. const int32 num_buckets = stats_summaries_list[0].dim_size(2) - 1; @@ -640,7 +680,11 @@ class BoostedTreesCalculateBestFeatureSplitV2 : public OpKernel { // Vector of stats_summaries; each element is stats for feature of shape // [max_splits, feature_dim, num_buckets, logits_dim + hessian_dim]. std::vector::ConstTensor> stats_summaries; - DCHECK_EQ(stats_summaries_list.size(), num_features_); + OP_REQUIRES(context, stats_summaries_list.size() == num_features_, + errors::InvalidArgument( + "Invalid stats_summaries_list size, got ", + stats_summaries_list.size(), + " but expected to match num_features ", num_features_)); stats_summaries.reserve(num_features_); for (const auto& tensor : stats_summaries_list) { stats_summaries.emplace_back(tensor.tensor()); @@ -649,8 +693,15 @@ class BoostedTreesCalculateBestFeatureSplitV2 : public OpKernel { // Split types. const Tensor* split_types_t; OP_REQUIRES_OK(context, context->input("split_types", &split_types_t)); + OP_REQUIRES( + context, TensorShapeUtils::IsVector(split_types_t->shape()), + errors::InvalidArgument("split_types must be a vector, received shape ", + split_types_t->shape().DebugString())); const auto split_types = split_types_t->vec(); - DCHECK_EQ(split_types.size(), num_features_); + OP_REQUIRES(context, split_types.size() == num_features_, + errors::InvalidArgument( + "Invalid split_types size, got ", split_types.size(), + " but expected to match num_features ", num_features_)); // Validate. 
for (int i = 0; i < num_features_; ++i) { if (!(split_types(i) == kInequalitySplit || @@ -665,29 +716,59 @@ class BoostedTreesCalculateBestFeatureSplitV2 : public OpKernel { const Tensor* candidate_feature_ids_t; OP_REQUIRES_OK(context, context->input("candidate_feature_ids", &candidate_feature_ids_t)); + OP_REQUIRES(context, + TensorShapeUtils::IsVector(candidate_feature_ids_t->shape()), + errors::InvalidArgument( + "candidate_feature_ids must be a vector, received shape ", + candidate_feature_ids_t->shape().DebugString())); const auto candidate_feature_ids = candidate_feature_ids_t->vec(); - DCHECK_EQ(candidate_feature_ids.size(), num_features_); + OP_REQUIRES(context, candidate_feature_ids.size() == num_features_, + errors::InvalidArgument( + "Invalid candidate_feature_ids size, got ", + candidate_feature_ids.size(), + " but expected to match num_features ", num_features_)); // L1, L2, tree_complexity, min_node_weight. const Tensor* l1_t; OP_REQUIRES_OK(context, context->input("l1", &l1_t)); + OP_REQUIRES( + context, TensorShapeUtils::IsScalar(l1_t->shape()), + errors::InvalidArgument("l1 must be a scalar, got a tensor of shape ", + l1_t->shape().DebugString())); const auto l1 = l1_t->scalar()(); - DCHECK_GE(l1, 0); + OP_REQUIRES(context, l1 >= 0, + errors::InvalidArgument("l1 = ", l1, " but it should be >= 0")); if (logits_dim_ > 1) { // Multi-class L1 regularization not supported yet. 
- DCHECK_EQ(l1, 0); + OP_REQUIRES( + context, l1 == 0, + errors::InvalidArgument( + "l1 != 0 is not yet supported for multi-class regularization")); } const Tensor* l2_t; OP_REQUIRES_OK(context, context->input("l2", &l2_t)); + OP_REQUIRES( + context, TensorShapeUtils::IsScalar(l2_t->shape()), + errors::InvalidArgument("l2 must be a scalar, got a tensor of shape ", + l2_t->shape().DebugString())); const auto l2 = l2_t->scalar()(); - DCHECK_GE(l2, 0); + OP_REQUIRES(context, l2 >= 0, + errors::InvalidArgument("l2 = ", l2, " but it should be >= 0")); const Tensor* tree_complexity_t; OP_REQUIRES_OK(context, context->input("tree_complexity", &tree_complexity_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(tree_complexity_t->shape()), + errors::InvalidArgument( + "tree_complexity must be a scalar, got a tensor of shape ", + tree_complexity_t->shape().DebugString())); const auto tree_complexity = tree_complexity_t->scalar()(); const Tensor* min_node_weight_t; OP_REQUIRES_OK(context, context->input("min_node_weight", &min_node_weight_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_node_weight_t->shape()), + errors::InvalidArgument( + "min_node_weight must be a scalar, got a tensor of shape ", + min_node_weight_t->shape().DebugString())); const auto min_node_weight = min_node_weight_t->scalar()(); std::vector output_node_ids; @@ -999,6 +1080,9 @@ class BoostedTreesSparseCalculateBestFeatureSplitOp : public OpKernel { explicit BoostedTreesSparseCalculateBestFeatureSplitOp( OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; // TODO(crawles): Using logits_dim_ for multi-class split. OP_REQUIRES_OK(context, context->GetAttr("logits_dimension", &logits_dim_)); // TODO(tanzheny): Using this for equality split. 
@@ -1009,6 +1093,10 @@ class BoostedTreesSparseCalculateBestFeatureSplitOp : public OpKernel { // node_id_range const Tensor* node_id_range_t; OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t)); + OP_REQUIRES(context, TensorShapeUtils::IsVector(node_id_range_t->shape()), + errors::InvalidArgument( + "node_id_range must be a vector, got a tensor of shape ", + node_id_range_t->shape().DebugString())); const auto node_id_range = node_id_range_t->vec(); OP_REQUIRES( context, node_id_range.size() == 2, @@ -1020,37 +1108,68 @@ class BoostedTreesSparseCalculateBestFeatureSplitOp : public OpKernel { const Tensor* stats_summary_indices_t; OP_REQUIRES_OK(context, context->input("stats_summary_indices", &stats_summary_indices_t)); + OP_REQUIRES( + context, TensorShapeUtils::IsMatrix(stats_summary_indices_t->shape()), + errors::InvalidArgument( + "stats_summary_indices must be a matrix, got a tensor of shape ", + stats_summary_indices_t->shape().DebugString())); const auto stats_summary_indices = stats_summary_indices_t->matrix(); const int32 num_sparse_entries = stats_summary_indices_t->dim_size(0); const Tensor* stats_summary_values_t; OP_REQUIRES_OK(context, context->input("stats_summary_values", &stats_summary_values_t)); + OP_REQUIRES( + context, TensorShapeUtils::IsVector(stats_summary_values_t->shape()), + errors::InvalidArgument( + "stats_summary_values must be a vector, got a tensor of shape ", + stats_summary_values_t->shape().DebugString())); const auto stats_summary_values = stats_summary_values_t->vec(); const Tensor* stats_summary_shape_t; OP_REQUIRES_OK( context, context->input("stats_summary_shape", &stats_summary_shape_t)); + OP_REQUIRES( + context, TensorShapeUtils::IsVector(stats_summary_shape_t->shape()), + errors::InvalidArgument( + "stats_summary_shape must be a vector, got a tensor of shape ", + stats_summary_shape_t->shape().DebugString())); const auto stats_summary_shape = stats_summary_shape_t->vec(); const int32 num_buckets = 
stats_summary_shape(2) - 1; const int32 stats_dims = stats_summary_shape(3); const Tensor* l1_t; OP_REQUIRES_OK(context, context->input("l1", &l1_t)); + OP_REQUIRES( + context, TensorShapeUtils::IsScalar(l1_t->shape()), + errors::InvalidArgument("l1 must be a scalar, got a tensor of shape ", + l1_t->shape().DebugString())); const auto l1 = l1_t->scalar()(); const Tensor* l2_t; OP_REQUIRES_OK(context, context->input("l2", &l2_t)); + OP_REQUIRES( + context, TensorShapeUtils::IsScalar(l2_t->shape()), + errors::InvalidArgument("l2 must be a scalar, got a tensor of shape ", + l2_t->shape().DebugString())); const auto l2 = l2_t->scalar()(); const Tensor* tree_complexity_t; OP_REQUIRES_OK(context, context->input("tree_complexity", &tree_complexity_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(tree_complexity_t->shape()), + errors::InvalidArgument( + "tree_complexity must be a scalar, got a tensor of shape ", + tree_complexity_t->shape().DebugString())); const auto tree_complexity = tree_complexity_t->scalar()(); const Tensor* min_node_weight_t; OP_REQUIRES_OK(context, context->input("min_node_weight", &min_node_weight_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_node_weight_t->shape()), + errors::InvalidArgument( + "min_node_weight must be a scalar, got a tensor of shape ", + min_node_weight_t->shape().DebugString())); const auto min_node_weight = min_node_weight_t->scalar()(); std::vector output_node_ids; @@ -1075,8 +1194,10 @@ class BoostedTreesSparseCalculateBestFeatureSplitOp : public OpKernel { f_map.clear(); } previous_node_id = node_id; - DCHECK_LE(node_id_first, node_id); - DCHECK_LT(node_id, node_id_last); + OP_REQUIRES( + context, node_id_first <= node_id && node_id < node_id_last, + errors::InvalidArgument("node_id = ", node_id, " which is not in [", + node_id_first, ", ", node_id_last, ")")); const int32 feature_dim = stats_summary_indices(idx, 1); const int32 bucket_id = stats_summary_indices(idx, 2); const int32 stat_dim = 
stats_summary_indices(idx, 3); @@ -1310,6 +1431,9 @@ class BoostedTreesMakeStatsSummaryOp : public OpKernel { public: explicit BoostedTreesMakeStatsSummaryOp(OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr("max_splits", &max_splits_)); OP_REQUIRES_OK(context, context->GetAttr("num_buckets", &num_buckets_)); OP_REQUIRES_OK(context, context->GetAttr("num_features", &num_features_)); @@ -1319,10 +1443,18 @@ class BoostedTreesMakeStatsSummaryOp : public OpKernel { // node_ids const Tensor* node_ids_t; OP_REQUIRES_OK(context, context->input("node_ids", &node_ids_t)); + OP_REQUIRES(context, TensorShapeUtils::IsVector(node_ids_t->shape()), + errors::InvalidArgument( + "node_ids must be a vector, got a tensor of shape ", + node_ids_t->shape().DebugString())); const auto node_ids = node_ids_t->vec(); // gradients const Tensor* gradients_t; OP_REQUIRES_OK(context, context->input("gradients", &gradients_t)); + OP_REQUIRES(context, TensorShapeUtils::IsMatrix(gradients_t->shape()), + errors::InvalidArgument( + "gradients must be a matrix, got a tensor of shape ", + gradients_t->shape().DebugString())); const auto gradients = gradients_t->matrix(); OP_REQUIRES( context, node_ids.size() == gradients.dimension(0), @@ -1333,7 +1465,17 @@ class BoostedTreesMakeStatsSummaryOp : public OpKernel { // hessians const Tensor* hessians_t; OP_REQUIRES_OK(context, context->input("hessians", &hessians_t)); + OP_REQUIRES(context, TensorShapeUtils::IsMatrix(hessians_t->shape()), + errors::InvalidArgument( + "hessians must be a matrix, got a tensor of shape ", + hessians_t->shape().DebugString())); const auto hessians = hessians_t->matrix(); + OP_REQUIRES( + context, node_ids.size() == hessians.dimension(0), + errors::InvalidArgument( + "node_ids size should match 0th dim 
of hessians. node ids " + "size: ", + node_ids.size(), ", hessians dim0: ", hessians.dimension(0))); // bucketized_features OpInputList bucketized_features_list; OP_REQUIRES_OK(context, context->input_list("bucketized_features_list", @@ -1353,6 +1495,11 @@ class BoostedTreesMakeStatsSummaryOp : public OpKernel { // Partition by node, and then bucketize. for (int feature_idx = 0; feature_idx < num_features_; ++feature_idx) { const auto& features = bucketized_features_list[feature_idx].vec(); + OP_REQUIRES( + context, features.size() == node_ids.size(), + errors::InvalidArgument("feature ", feature_idx, + " should have same size as node_ids, got ", + features.size(), " and ", node_ids.size())); for (int i = 0; i < batch_size; ++i) { const int32 node = node_ids(i); const int32 bucket = features(i); @@ -1384,6 +1531,9 @@ class BoostedTreesAggregateStatsOp : public OpKernel { public: explicit BoostedTreesAggregateStatsOp(OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr("max_splits", &max_splits_)); OP_REQUIRES_OK(context, context->GetAttr("num_buckets", &num_buckets_)); } @@ -1392,11 +1542,19 @@ class BoostedTreesAggregateStatsOp : public OpKernel { // node_ids. const Tensor* node_ids_t; OP_REQUIRES_OK(context, context->input("node_ids", &node_ids_t)); + OP_REQUIRES(context, TensorShapeUtils::IsVector(node_ids_t->shape()), + errors::InvalidArgument( + "node_ids must be a vector, got a tensor of shape ", + node_ids_t->shape().DebugString())); const auto node_ids = node_ids_t->vec(); // gradients. 
const Tensor* gradients_t; OP_REQUIRES_OK(context, context->input("gradients", &gradients_t)); + OP_REQUIRES(context, TensorShapeUtils::IsMatrix(gradients_t->shape()), + errors::InvalidArgument( + "gradients must be a matrix, got a tensor of shape ", + gradients_t->shape().DebugString())); const auto gradients = gradients_t->matrix(); OP_REQUIRES( @@ -1409,11 +1567,19 @@ class BoostedTreesAggregateStatsOp : public OpKernel { // hessians. const Tensor* hessians_t; OP_REQUIRES_OK(context, context->input("hessians", &hessians_t)); + OP_REQUIRES(context, TensorShapeUtils::IsMatrix(hessians_t->shape()), + errors::InvalidArgument( + "hessians must be a matrix, got a tensor of shape ", + hessians_t->shape().DebugString())); const auto hessians = hessians_t->matrix(); // feature. const Tensor* feature_t; OP_REQUIRES_OK(context, context->input("feature", &feature_t)); + OP_REQUIRES(context, TensorShapeUtils::IsMatrix(feature_t->shape()), + errors::InvalidArgument( + "feature must be a matrix, got a tensor of shape ", + feature_t->shape().DebugString())); const auto feature = feature_t->matrix(); // Infer batch size, feature dimension and stats dimension. @@ -1569,7 +1735,8 @@ static void AddInstanceStatsToMap(const int32 instance, const int32 feature_dim, // Add statistics to StatsPartitionMap for bucket_id ranging from // (start_instance, start_feature_dim) to (end_instance, end_feature_dim), // inclusive on start and end instances, exclusive on end feature dim. 
-static void AddRangeStats(const int start_instance, const int end_instance, +static void AddRangeStats(OpKernelContext* const context, + const int start_instance, const int end_instance, const int start_feature_dim, const int end_feature_dim, StatsPartitionMap* stats_map, @@ -1578,9 +1745,15 @@ static void AddRangeStats(const int start_instance, const int end_instance, const TTypes::ConstVec& node_ids, const int32 feature_dims, const int32 bucket_id, const int32 logits_dims, const int32 stats_dims) { - DCHECK_LE(start_instance, end_instance); + OP_REQUIRES(context, start_instance <= end_instance, + errors::InvalidArgument( + "start_instance = ", start_instance, + " which is not at most end_instance=", end_instance)); if (start_instance == end_instance) { - DCHECK_LT(start_feature_dim, end_feature_dim); + OP_REQUIRES(context, start_feature_dim < end_feature_dim, + errors::InvalidArgument( + "start_feature_dim = ", start_feature_dim, + " which is not at most end_feature_dim=", end_feature_dim)); } for (int32 instance = start_instance; instance <= end_instance; ++instance) { const int32 start_f_dim = @@ -1599,6 +1772,9 @@ class BoostedTreesSparseAggregateStatsOp : public OpKernel { explicit BoostedTreesSparseAggregateStatsOp( OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr("max_splits", &max_splits_)); OP_REQUIRES_OK(context, context->GetAttr("num_buckets", &num_buckets_)); } @@ -1607,29 +1783,71 @@ class BoostedTreesSparseAggregateStatsOp : public OpKernel { // node_ids. 
const Tensor* node_ids_t; OP_REQUIRES_OK(context, context->input("node_ids", &node_ids_t)); + OP_REQUIRES( + context, TensorShapeUtils::IsVector(node_ids_t->shape()), + errors::InvalidArgument("node_ids must be a vector, received shape ", + node_ids_t->shape().DebugString())); const auto node_ids = node_ids_t->vec(); + const auto num_nodes = node_ids_t->NumElements(); + for (int i = 0; i < num_nodes; ++i) { + OP_REQUIRES( + context, node_ids(i) <= max_splits_, + errors::InvalidArgument( + "Nodes in node_ids must be at most max_splits. Node ", i, " is ", + node_ids(i), " which is greater than ", max_splits_)); + } // gradients. const Tensor* gradients_t; OP_REQUIRES_OK(context, context->input("gradients", &gradients_t)); + OP_REQUIRES( + context, TensorShapeUtils::IsMatrix(gradients_t->shape()), + errors::InvalidArgument("gradients must be a matrix, received shape ", + gradients_t->shape().DebugString())); const auto gradients = gradients_t->matrix(); // hessians. const Tensor* hessians_t; OP_REQUIRES_OK(context, context->input("hessians", &hessians_t)); + OP_REQUIRES( + context, TensorShapeUtils::IsMatrix(hessians_t->shape()), + errors::InvalidArgument("hessians must be a matrix, received shape ", + hessians_t->shape().DebugString())); const auto hessians = hessians_t->matrix(); // feature indices. const Tensor* feature_indices_t; OP_REQUIRES_OK(context, context->input("feature_indices", &feature_indices_t)); + OP_REQUIRES(context, TensorShapeUtils::IsMatrix(feature_indices_t->shape()), + errors::InvalidArgument( + "feature_indices must be a matrix, received shape ", + feature_indices_t->shape().DebugString())); + OP_REQUIRES( + context, feature_indices_t->shape().dim_size(1) == 2, + errors::InvalidArgument( + "feature_indices must be a matrix of shape [?, 2], received shape ", + feature_indices_t->shape().DebugString())); const auto feature_indices = feature_indices_t->matrix(); // feature values. 
const Tensor* feature_values_t; OP_REQUIRES_OK(context, context->input("feature_values", &feature_values_t)); + OP_REQUIRES(context, TensorShapeUtils::IsVector(feature_values_t->shape()), + errors::InvalidArgument( + "feature_values must be a vector, received shape ", + feature_values_t->shape().DebugString())); const auto feature_values = feature_values_t->vec(); + const auto num_features = feature_values_t->NumElements(); + for (int i = 0; i < num_features; ++i) { + OP_REQUIRES( + context, feature_values(i) <= num_buckets_, + errors::InvalidArgument( + "Features in feature_values must be at most num_buckets. Node ", + i, " is ", feature_values(i), " which is greater than ", + num_buckets_)); + } // feature shape. const Tensor* feature_shape_t; @@ -1646,6 +1864,20 @@ class BoostedTreesSparseAggregateStatsOp : public OpKernel { const int64 stats_dims = logits_dims + hessians_dims; const int64 num_sparse_entries = feature_indices_t->dim_size(0); const int32 feature_dims = feature_shape(1); + OP_REQUIRES(context, num_features == num_sparse_entries, + errors::InvalidArgument( + "Number of elements in feature_values must match number of " + "sparse entries in feature_indices. 
Got ", + num_features, " and ", num_sparse_entries)); + for (int i = 0; i < num_sparse_entries; ++i) { + const int32_t f_dim = feature_indices(i, 1); + OP_REQUIRES( + context, f_dim <= feature_dims, + errors::InvalidArgument( + "Got invalid feature index feature_indices(", i, "1) = ", f_dim, + " which is above ", feature_dims, + " (from feature_shape: ", feature_shape_t->DebugString(), ")")); + } OP_REQUIRES(context, num_sparse_entries <= batch_size * feature_dims, errors::InvalidArgument( "feature_indices dim0 should be <= gradients dim0 * " @@ -1659,14 +1891,35 @@ class BoostedTreesSparseAggregateStatsOp : public OpKernel { int prev_instance = 0; int prev_f_dim = -1; + if (num_sparse_entries > 0) { + OP_REQUIRES( + context, feature_indices(0, 0) >= 0, + errors::InvalidArgument("feature_indices should be non-negative but " + "got feature_indices(0, 0)=", + feature_indices(0, 0))); + } + for (int i = 0; i < num_sparse_entries; ++i) { // the instance number within a batch const int32 instance = feature_indices(i, 0); - DCHECK_LE(instance, batch_size); - DCHECK_GE(instance, prev_instance); + OP_REQUIRES(context, instance <= batch_size, + errors::InvalidArgument("feature_indices(", i, + "0) should be at most batch size (", + batch_size, " but got ", instance)); + OP_REQUIRES( + context, instance >= prev_instance, + errors::InvalidArgument( + "feature_indices should be increasing but got feature_indices(", + i, ", 0) < ", prev_instance, " (feature_indices(", i - 1, "0))")); // the node id within a tree. + // We don't need the node id here, we just validate that the `instance` + // is a valid index as this is needed later in the code. const int32 node_id = node_ids(instance); - DCHECK_LE(node_id, max_splits_); + OP_REQUIRES(context, instance < num_nodes, + errors::InvalidArgument("feature_indices(", i, + "0) is not a valid index in the " + "node_ids vector (must be less than ", + num_nodes, ", got ", instance, ")")); // the feature dimension. 
const int32 f_dim = feature_indices(i, 1); DCHECK_LE(f_dim, feature_dims); @@ -1677,8 +1930,8 @@ class BoostedTreesSparseAggregateStatsOp : public OpKernel { // Add statistics for the missing entries into default bucket. // The last bucket is default bucket. const int missing_entry_bucket = num_buckets_; - AddRangeStats(prev_instance, instance, prev_f_dim, f_dim, &stats_map, - gradients, hessians, node_ids, feature_dims, + AddRangeStats(context, prev_instance, instance, prev_f_dim, f_dim, + &stats_map, gradients, hessians, node_ids, feature_dims, missing_entry_bucket, logits_dims, stats_dims); prev_instance = instance; prev_f_dim = f_dim; @@ -1687,9 +1940,9 @@ class BoostedTreesSparseAggregateStatsOp : public OpKernel { AddInstanceStatsToMap(instance, f_dim, bucket_id, logits_dims, stats_dims, &stats_map, gradients, hessians, node_ids); } - AddRangeStats(prev_instance, batch_size - 1, prev_f_dim, feature_dims, - &stats_map, gradients, hessians, node_ids, feature_dims, - num_buckets_, logits_dims, stats_dims); + AddRangeStats(context, prev_instance, batch_size - 1, prev_f_dim, + feature_dims, &stats_map, gradients, hessians, node_ids, + feature_dims, num_buckets_, logits_dims, stats_dims); // Serialize statistics info map to tensor output. const int64 num_slots = stats_map.size() * stats_dims; diff --git a/tensorflow/core/kernels/boosted_trees/training_ops.cc b/tensorflow/core/kernels/boosted_trees/training_ops.cc index e91677740e7694..b7ef1e7dbbc2a7 100644 --- a/tensorflow/core/kernels/boosted_trees/training_ops.cc +++ b/tensorflow/core/kernels/boosted_trees/training_ops.cc @@ -35,6 +35,9 @@ class BoostedTreesUpdateEnsembleOp : public OpKernel { public: explicit BoostedTreesUpdateEnsembleOp(OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. 
Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr("num_features", &num_features_)); int32 pruning_index; @@ -68,14 +71,26 @@ class BoostedTreesUpdateEnsembleOp : public OpKernel { const Tensor* feature_ids_t; OP_REQUIRES_OK(context, context->input("feature_ids", &feature_ids_t)); + OP_REQUIRES( + context, TensorShapeUtils::IsVector(feature_ids_t->shape()), + errors::InvalidArgument("feature_ids must be a vector, received shape ", + feature_ids_t->shape().DebugString())); const auto feature_ids = feature_ids_t->vec(); const Tensor* max_depth_t; OP_REQUIRES_OK(context, context->input("max_depth", &max_depth_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_depth_t->shape()), + errors::InvalidArgument( + "max_depth must be a scalar, got a tensor of shape ", + max_depth_t->shape().DebugString())); const auto max_depth = max_depth_t->scalar()(); const Tensor* learning_rate_t; OP_REQUIRES_OK(context, context->input("learning_rate", &learning_rate_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(learning_rate_t->shape()), + errors::InvalidArgument( + "learning_rate must be a scalar, got a tensor of shape ", + learning_rate_t->shape().DebugString())); const auto learning_rate = learning_rate_t->scalar()(); // Op does not support multi-class, the V2 op below does however. int32 logits_dimension = 1; @@ -176,11 +191,50 @@ class BoostedTreesUpdateEnsembleOp : public OpKernel { std::map* best_split_per_node) { // Find best split per node going through every feature candidate. 
for (int64 feature_idx = 0; feature_idx < num_features_; ++feature_idx) { + OP_REQUIRES( + context, + TensorShapeUtils::IsVector(node_ids_list[feature_idx].shape()), + errors::InvalidArgument( + "Each node_id in node_ids_list must be a vector, received shape ", + node_ids_list[feature_idx].shape().DebugString(), " at index ", + feature_idx)); const auto& node_ids = node_ids_list[feature_idx].vec(); + OP_REQUIRES( + context, TensorShapeUtils::IsVector(gains_list[feature_idx].shape()), + errors::InvalidArgument( + "Each gain in gains_list must be a vector, received shape ", + gains_list[feature_idx].shape().DebugString(), " at index ", + feature_idx)); const auto& gains = gains_list[feature_idx].vec(); + OP_REQUIRES( + context, + TensorShapeUtils::IsVector(thresholds_list[feature_idx].shape()), + errors::InvalidArgument( + "Each threshold in thresholds_list must be a vector, received " + "shape ", + thresholds_list[feature_idx].shape().DebugString(), " at index ", + feature_idx)); const auto& thresholds = thresholds_list[feature_idx].vec(); + OP_REQUIRES( + context, + TensorShapeUtils::IsMatrix( + left_node_contribs_list[feature_idx].shape()), + errors::InvalidArgument( + "Each left_node_contribs in left_node_contribs_list must be a " + "matrix, received shape ", + left_node_contribs_list[feature_idx].shape().DebugString(), + " at index ", feature_idx)); const auto& left_node_contribs = left_node_contribs_list[feature_idx].matrix(); + OP_REQUIRES( + context, + TensorShapeUtils::IsMatrix( + right_node_contribs_list[feature_idx].shape()), + errors::InvalidArgument( + "Each right_node_contribs in right_node_contribs_list must be a " + "matrix, received shape ", + right_node_contribs_list[feature_idx].shape().DebugString(), + " at index ", feature_idx)); const auto& right_node_contribs = right_node_contribs_list[feature_idx].matrix(); @@ -234,6 +288,9 @@ class BoostedTreesUpdateEnsembleV2Op : public OpKernel { public: explicit 
BoostedTreesUpdateEnsembleV2Op(OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr("logits_dimension", &logits_dim_)); OP_REQUIRES_OK(context, context->GetAttr("num_groups", &num_groups_)); } @@ -274,14 +331,26 @@ class BoostedTreesUpdateEnsembleV2Op : public OpKernel { const Tensor* max_depth_t; OP_REQUIRES_OK(context, context->input("max_depth", &max_depth_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_depth_t->shape()), + errors::InvalidArgument( + "max_depth must be a scalar, got a tensor of shape ", + max_depth_t->shape().DebugString())); const auto max_depth = max_depth_t->scalar()(); const Tensor* learning_rate_t; OP_REQUIRES_OK(context, context->input("learning_rate", &learning_rate_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(learning_rate_t->shape()), + errors::InvalidArgument( + "learning_rate must be a scalar, got a tensor of shape ", + learning_rate_t->shape().DebugString())); const auto learning_rate = learning_rate_t->scalar()(); const Tensor* pruning_mode_t; OP_REQUIRES_OK(context, context->input("pruning_mode", &pruning_mode_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(pruning_mode_t->shape()), + errors::InvalidArgument( + "pruning_mode must be a scalar, got a tensor of shape ", + pruning_mode_t->shape().DebugString())); const auto pruning_mode = static_cast(pruning_mode_t->scalar()()); // Find best splits for each active node. 
@@ -327,7 +396,7 @@ class BoostedTreesUpdateEnsembleV2Op : public OpKernel { boosted_trees::SplitTypeWithDefault split_type_with_default; bool parsed = boosted_trees::SplitTypeWithDefault_Parse( split_type, &split_type_with_default); - DCHECK(parsed); + OP_REQUIRES(context, parsed, errors::Internal("Parse failed")); if (split_type_with_default == boosted_trees::EQUALITY_DEFAULT_RIGHT) { // Add equality split to the node. ensemble_resource->AddCategoricalSplitNode(current_tree, split_entry, @@ -396,15 +465,75 @@ class BoostedTreesUpdateEnsembleV2Op : public OpKernel { std::map* best_split_per_node) { // Find best split per node going through every feature candidate. for (int64 group_idx = 0; group_idx < num_groups_; ++group_idx) { + OP_REQUIRES( + context, TensorShapeUtils::IsVector(node_ids_list[group_idx].shape()), + errors::InvalidArgument( + "Each node_id in node_ids_list must be a vector, received shape ", + node_ids_list[group_idx].shape().DebugString(), " at index ", + group_idx)); const auto& node_ids = node_ids_list[group_idx].vec(); + OP_REQUIRES( + context, TensorShapeUtils::IsVector(gains_list[group_idx].shape()), + errors::InvalidArgument( + "Each gain in gains_list must be a vector, received shape ", + gains_list[group_idx].shape().DebugString(), " at index ", + group_idx)); const auto& gains = gains_list[group_idx].vec(); + OP_REQUIRES( + context, + TensorShapeUtils::IsVector(feature_ids_list[group_idx].shape()), + errors::InvalidArgument( + "Each feature_id in feature_ids_lists must be a vector, received " + "shape ", + feature_ids_list[group_idx].shape().DebugString(), " at index ", + group_idx)); const auto& feature_ids = feature_ids_list[group_idx].vec(); + OP_REQUIRES( + context, + TensorShapeUtils::IsVector(thresholds_list[group_idx].shape()), + errors::InvalidArgument( + "Each threshold in thresholds_list must be a vector, received " + "shape ", + thresholds_list[group_idx].shape().DebugString(), " at index ", + group_idx)); const auto& 
thresholds = thresholds_list[group_idx].vec(); + OP_REQUIRES( + context, + TensorShapeUtils::IsVector(dimension_ids_list[group_idx].shape()), + errors::InvalidArgument( + "Each dimension_id in dimension_ids_list must be a vector, " + "received shape ", + dimension_ids_list[group_idx].shape().DebugString(), " at index ", + group_idx)); const auto& dimension_ids = dimension_ids_list[group_idx].vec(); + OP_REQUIRES(context, + TensorShapeUtils::IsMatrix( + left_node_contribs_list[group_idx].shape()), + errors::InvalidArgument( + "Each left_node_contribs in right_node_contribs_list " + "must be a matrix, received shape ", + left_node_contribs_list[group_idx].shape().DebugString(), + " at index ", group_idx)); const auto& left_node_contribs = left_node_contribs_list[group_idx].matrix(); + OP_REQUIRES(context, + TensorShapeUtils::IsMatrix( + right_node_contribs_list[group_idx].shape()), + errors::InvalidArgument( + "Each right_node_contribs in right_node_contribs_list " + "must be a matrix, received shape ", + right_node_contribs_list[group_idx].shape().DebugString(), + " at index ", group_idx)); const auto& right_node_contribs = right_node_contribs_list[group_idx].matrix(); + OP_REQUIRES( + context, + TensorShapeUtils::IsVector(split_types_list[group_idx].shape()), + errors::InvalidArgument( + "Each split_type in split_types_list must be a vector, received " + "shape ", + split_types_list[group_idx].shape().DebugString(), " at index ", + group_idx)); const auto& split_types = split_types_list[group_idx].vec(); for (size_t candidate_idx = 0; candidate_idx < node_ids.size(); @@ -457,7 +586,11 @@ REGISTER_KERNEL_BUILDER(Name("BoostedTreesUpdateEnsembleV2").Device(DEVICE_CPU), class BoostedTreesCenterBiasOp : public OpKernel { public: explicit BoostedTreesCenterBiasOp(OpKernelConstruction* const context) - : OpKernel(context) {} + : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. 
Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; + } void Compute(OpKernelContext* const context) override { // Get decision tree ensemble. @@ -479,9 +612,17 @@ class BoostedTreesCenterBiasOp : public OpKernel { // Get the regularization options. const Tensor* l1_t; OP_REQUIRES_OK(context, context->input("l1", &l1_t)); + OP_REQUIRES( + context, TensorShapeUtils::IsScalar(l1_t->shape()), + errors::InvalidArgument("l1 must be a scalar, got a tensor of shape ", + l1_t->shape().DebugString())); const auto l1 = l1_t->scalar()(); const Tensor* l2_t; OP_REQUIRES_OK(context, context->input("l2", &l2_t)); + OP_REQUIRES( + context, TensorShapeUtils::IsScalar(l2_t->shape()), + errors::InvalidArgument("l2 must be a scalar, got a tensor of shape ", + l2_t->shape().DebugString())); const auto l2 = l2_t->scalar()(); // For now, assume 1-dimensional weight on leaves. @@ -489,7 +630,8 @@ class BoostedTreesCenterBiasOp : public OpKernel { float unused_gain; // TODO(crawles): Support multiclass. 
- DCHECK_EQ(logits_dim, 1); + OP_REQUIRES(context, logits_dim == 1, + errors::Internal("Expected logits_dim == 1, got ", logits_dim)); Eigen::VectorXf gradients_mean(1); Eigen::VectorXf hessians_mean(1); gradients_mean[0] = mean_gradients_t->flat()(0); @@ -506,7 +648,9 @@ class BoostedTreesCenterBiasOp : public OpKernel { current_bias = logits; } else { const auto& current_biases = ensemble_resource->node_value(0, 0); - DCHECK_EQ(current_biases.size(), 1); + OP_REQUIRES(context, current_biases.size() == 1, + errors::Internal("Expected current_biases.size() == 1, got ", + current_biases.size())); current_bias = current_biases[0]; continue_centering = std::abs(logits / current_bias) > kMinDeltaForCenterBias; diff --git a/tensorflow/core/kernels/count_ops.cc b/tensorflow/core/kernels/count_ops.cc index 40aa1fe458c1ee..74929d8bc18ce4 100644 --- a/tensorflow/core/kernels/count_ops.cc +++ b/tensorflow/core/kernels/count_ops.cc @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ +#include + #include "absl/container/flat_hash_map.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" @@ -23,6 +25,9 @@ limitations under the License. namespace tensorflow { +// Don't allocate too large `BatchedMap` objects +static int kMaxBatches = std::numeric_limits::max(); + template using BatchedMap = std::vector>; @@ -185,6 +190,44 @@ class SparseCount : public OpKernel { errors::InvalidArgument( "Input indices must be a 2-dimensional tensor. Got: ", indices.shape().DebugString())); + OP_REQUIRES(context, TensorShapeUtils::IsVector(values.shape()), + errors::InvalidArgument("Input values must be a vector. Got: ", + values.shape().DebugString())); + OP_REQUIRES(context, TensorShapeUtils::IsVector(shape.shape()), + errors::InvalidArgument("Input shape must be a vector. 
Got: ", + shape.shape().DebugString())); + OP_REQUIRES(context, + values.shape().dim_size(0) == indices.shape().dim_size(0), + errors::InvalidArgument( + "Number of values must match first dimension of indices.", + "Got ", values.shape().dim_size(0), + " values, indices shape: ", indices.shape().DebugString())); + OP_REQUIRES( + context, shape.shape().dim_size(0) == indices.shape().dim_size(1), + errors::InvalidArgument( + "Number of dimensions must match second dimension of indices.", + "Got ", shape.shape().dim_size(0), + " dimensions, indices shape: ", indices.shape().DebugString())); + OP_REQUIRES(context, shape.NumElements() > 0, + errors::InvalidArgument( + "The shape argument requires at least one element.")); + // Validate indices: each index must be valid for the corresponding + // dimension. This could be possibly done better. + const auto indices_values = indices.matrix(); + const auto shape_vector = shape.vec(); + int num_values = values.NumElements(); // same as first dim of indices + int rank = indices.shape().dim_size(1); + for (int i = 0; i < num_values; ++i) { + for (int j = 0; j < rank; ++j) { + OP_REQUIRES( + context, + indices_values(i, j) >= 0 && indices_values(i, j) < shape_vector(j), + errors::InvalidArgument( + "Invalid index value at ", i, ": dimension ", j, " has value ", + indices_values(i, j), " which is not in [0, ", shape_vector(j), + ") (as given by dense shape ", shape.DebugString())); + } + } if (use_weights) { OP_REQUIRES( @@ -195,14 +238,12 @@ class SparseCount : public OpKernel { "; values shape: ", values.shape().DebugString())); } - OP_REQUIRES(context, shape.NumElements() != 0, - errors::InvalidArgument( - "The shape argument requires at least one element.")); - bool is_1d = shape.NumElements() == 1; - auto shape_vector = shape.flat(); int num_batches = is_1d ? 
1 : shape_vector(0); - int num_values = values.NumElements(); + OP_REQUIRES( + context, 0 < num_batches && num_batches < kMaxBatches, + errors::InvalidArgument("Cannot allocate ", num_batches, + " batches, is the dense shape too wide?")); for (int b = 0; b < shape_vector.size(); b++) { OP_REQUIRES(context, shape_vector(b) >= 0, @@ -217,7 +258,6 @@ class SparseCount : public OpKernel { "Got ", num_values, " values, indices shape: ", indices.shape().DebugString())); - const auto indices_values = indices.matrix(); const auto values_values = values.flat(); const auto weight_values = weights.flat(); diff --git a/tensorflow/core/kernels/cwise_ops_common.h b/tensorflow/core/kernels/cwise_ops_common.h index 4f2c83322ba00f..27fcbf2f33973c 100644 --- a/tensorflow/core/kernels/cwise_ops_common.h +++ b/tensorflow/core/kernels/cwise_ops_common.h @@ -87,7 +87,17 @@ class BinaryOp : public BinaryOpShared { void Compute(OpKernelContext* ctx) override { const Tensor& input_0 = ctx->input(0); + OP_REQUIRES(ctx, input_0.dtype() == DataTypeToEnum::v(), + errors::InvalidArgument( + "Expected tensor of type ", + DataTypeString(DataTypeToEnum::v()), " but got type ", + DataTypeString(input_0.dtype()))); const Tensor& input_1 = ctx->input(1); + OP_REQUIRES(ctx, input_1.dtype() == DataTypeToEnum::v(), + errors::InvalidArgument( + "Expected tensor of type ", + DataTypeString(DataTypeToEnum::v()), " but got type ", + DataTypeString(input_1.dtype()))); const Device& eigen_device = ctx->eigen_device(); bool error = false; bool* const error_ptr = Functor::has_errors ? 
&error : nullptr; diff --git a/tensorflow/core/kernels/data/experimental/threadpool_dataset_op.cc b/tensorflow/core/kernels/data/experimental/threadpool_dataset_op.cc index 464c049743a76b..1a7da98987fba9 100644 --- a/tensorflow/core/kernels/data/experimental/threadpool_dataset_op.cc +++ b/tensorflow/core/kernels/data/experimental/threadpool_dataset_op.cc @@ -39,6 +39,22 @@ namespace experimental { PrivateThreadPoolDatasetOp::kDatasetType; /* static */ constexpr const char* const PrivateThreadPoolDatasetOp::kDatasetOp; +namespace { +// To prevent integer overflow issues when allocating threadpool memory for an +// unreasonable number of threads. +constexpr int kThreadLimit = 65536; + +Status ValidateNumThreads(int32_t num_threads) { + if (num_threads < 0) { + return errors::InvalidArgument("`num_threads` must be >= 0"); + } + if (num_threads >= kThreadLimit) { + return errors::InvalidArgument("`num_threads` must be < ", kThreadLimit); + } + return Status::OK(); +} +} // namespace + class ThreadPoolResource : public ResourceBase { public: ThreadPoolResource(Env* env, const ThreadOptions& thread_options, @@ -83,9 +99,7 @@ class ThreadPoolHandleOp : public OpKernel { OP_REQUIRES_OK(ctx, ctx->GetAttr("num_threads", &num_threads_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("max_intra_op_parallelism", &max_intra_op_parallelism_)); - OP_REQUIRES( - ctx, num_threads_ > 0, - errors::InvalidArgument("`num_threads` must be greater than zero.")); + OP_REQUIRES_OK(ctx, ValidateNumThreads(num_threads_)); } // The resource is deleted from the resource manager only when it is private @@ -530,8 +544,7 @@ void PrivateThreadPoolDatasetOp::MakeDatasetFromOptions(OpKernelContext* ctx, DatasetBase* input, int32 num_threads, DatasetBase** output) { - OP_REQUIRES(ctx, num_threads >= 0, - errors::InvalidArgument("`num_threads` must be >= 0")); + OP_REQUIRES_OK(ctx, ValidateNumThreads(num_threads)); *output = new Dataset(ctx, DatasetContext(DatasetContext::Params( 
{PrivateThreadPoolDatasetOp::kDatasetType, @@ -545,8 +558,7 @@ void PrivateThreadPoolDatasetOp::MakeDataset(OpKernelContext* ctx, int64 num_threads = 0; OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, "num_threads", &num_threads)); - OP_REQUIRES(ctx, num_threads >= 0, - errors::InvalidArgument("`num_threads` must be >= 0")); + OP_REQUIRES_OK(ctx, ValidateNumThreads(num_threads)); *output = new Dataset(ctx, input, num_threads); } diff --git a/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc b/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc index 7cd31cc6188ea3..e0bf02ff3564e0 100644 --- a/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc +++ b/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc @@ -238,28 +238,29 @@ class SparseTensorSliceDatasetOp : public DatasetOpKernel { OP_REQUIRES_OK(ctx, ctx->input("dense_shape", &dense_shape)); OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(indices->shape()), - errors::InvalidArgument( - "Input indices should be a matrix but received shape ", - indices->shape().DebugString())); - - const auto num_indices = indices->NumElements(); - const auto num_values = values->NumElements(); - if (num_indices == 0 || num_values == 0) { - OP_REQUIRES(ctx, num_indices == num_values, - errors::InvalidArgument( - "If indices or values are empty, the other one must also " - "be. Got indices of shape ", - indices->shape().DebugString(), " and values of shape ", - values->shape().DebugString())); - } + errors::InvalidArgument("Input indices must be a matrix. Got: ", + indices->shape().DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(values->shape()), - errors::InvalidArgument( - "Input values should be a vector but received shape ", - indices->shape().DebugString())); + errors::InvalidArgument("Input values must be a vector. 
Got: ", + values->shape().DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(dense_shape->shape()), + errors::InvalidArgument("Input shape must be a vector. Got: ", + dense_shape->shape().DebugString())); + OP_REQUIRES( + ctx, values->shape().dim_size(0) == indices->shape().dim_size(0), + errors::InvalidArgument( + "Number of values must match first dimension of indices. ", "Got ", + values->shape().dim_size(0), + " values, indices shape: ", indices->shape().DebugString())); + OP_REQUIRES( + ctx, dense_shape->shape().dim_size(0) == indices->shape().dim_size(1), + errors::InvalidArgument( + "Number of dimensions must match second dimension of indices. ", + "Got ", dense_shape->shape().dim_size(0), + " dimensions, indices shape: ", indices->shape().DebugString())); + OP_REQUIRES(ctx, dense_shape->NumElements() > 0, errors::InvalidArgument( - "Input shape should be a vector but received shape ", - dense_shape->shape().DebugString())); + "The shape argument requires at least one element.")); // We currently ensure that `sparse_tensor` is ordered in the // batch dimension. 
@@ -278,11 +279,12 @@ class SparseTensorSliceDatasetOp : public DatasetOpKernel { previous_batch_index = next_batch_index; } gtl::InlinedVector std_order(dense_shape->NumElements(), 0); + TensorShape shape; + OP_REQUIRES_OK(ctx, TensorShape::BuildTensorShape( + dense_shape->vec(), &shape)); sparse::SparseTensor tensor; - OP_REQUIRES_OK( - ctx, sparse::SparseTensor::Create( - *indices, *values, TensorShape(dense_shape->vec()), - std_order, &tensor)); + OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create(*indices, *values, shape, + std_order, &tensor)); *output = new Dataset(ctx, std::move(tensor)); } diff --git a/tensorflow/core/kernels/dequantize_op.cc b/tensorflow/core/kernels/dequantize_op.cc index 7a90e0c340b093..ddb70167b58852 100644 --- a/tensorflow/core/kernels/dequantize_op.cc +++ b/tensorflow/core/kernels/dequantize_op.cc @@ -94,6 +94,11 @@ class DequantizeOp : public OpKernel { const Tensor& input_min_tensor = ctx->input(1); const Tensor& input_max_tensor = ctx->input(2); + OP_REQUIRES( + ctx, axis_ < input.dims(), + errors::InvalidArgument("Axis must be less than input dimension(", + input.dims(), "), got ", axis_)); + int num_slices = 1; if (axis_ > -1) { num_slices = input.dim_size(axis_); diff --git a/tensorflow/core/kernels/fractional_avg_pool_op.cc b/tensorflow/core/kernels/fractional_avg_pool_op.cc index 7c396126427473..818a5086ea7f46 100644 --- a/tensorflow/core/kernels/fractional_avg_pool_op.cc +++ b/tensorflow/core/kernels/fractional_avg_pool_op.cc @@ -314,12 +314,22 @@ class FractionalAvgPoolGradOp : public OpKernel { int64 in_row_end = overlapping_ ? 
row_seq_tensor_flat(r + 1) : row_seq_tensor_flat(r + 1) - 1; in_row_end = std::min(in_row_end, in_max_row_index); + OP_REQUIRES(context, in_row_start >= 0 && in_row_end >= 0, + errors::InvalidArgument( + "Row sequence tensor values must not be negative, got ", + row_seq_tensor_flat)); + for (int64 c = 0; c < out_cols; ++c) { const int64 in_col_start = col_seq_tensor_flat(c); int64 in_col_end = overlapping_ ? col_seq_tensor_flat(c + 1) : col_seq_tensor_flat(c + 1) - 1; in_col_end = std::min(in_col_end, in_max_col_index); + OP_REQUIRES( + context, in_col_start >= 0 && in_col_end >= 0, + errors::InvalidArgument( + "Column sequence tensor values must not be negative, got ", + col_seq_tensor_flat)); const int64 num_elements_in_pooling_cell = (in_row_end - in_row_start + 1) * (in_col_end - in_col_start + 1); const int64 out_index = (b * out_rows + r) * out_cols + c; diff --git a/tensorflow/core/kernels/fractional_max_pool_op.cc b/tensorflow/core/kernels/fractional_max_pool_op.cc index 1a2a783d135c54..b17463c5127fa0 100644 --- a/tensorflow/core/kernels/fractional_max_pool_op.cc +++ b/tensorflow/core/kernels/fractional_max_pool_op.cc @@ -83,6 +83,13 @@ class FractionalMaxPoolOp : public OpKernel { std::vector output_size(tensor_in_and_out_dims); for (int i = 0; i < tensor_in_and_out_dims; ++i) { input_size[i] = tensor_in.dim_size(i); + + OP_REQUIRES( + context, input_size[i] >= pooling_ratio_[i], + errors::InvalidArgument("Pooling ratio is higher than input " + "dimension size for dimension ", + i, ". Input dim size: ", input_size[i], + " pooling ratio: ", pooling_ratio_[i])); } // Output size. for (int i = 0; i < tensor_in_and_out_dims; ++i) { diff --git a/tensorflow/core/kernels/image/decode_image_op.cc b/tensorflow/core/kernels/image/decode_image_op.cc index 61b126fb81e3cb..242cc3ea0eb095 100644 --- a/tensorflow/core/kernels/image/decode_image_op.cc +++ b/tensorflow/core/kernels/image/decode_image_op.cc @@ -18,6 +18,8 @@ limitations under the License. 
#include #include +#include "tensorflow/core/lib/gtl/cleanup.h" + #define EIGEN_USE_THREADS #include "absl/strings/escaping.h" @@ -326,6 +328,16 @@ class DecodeImageV2Op : public OpKernel { context, png::CommonInitDecode(input, channels_, channel_bits, &decode), errors::InvalidArgument("Invalid PNG. Failed to initialize decoder.")); + // If we reach this point, then there is data in `decode` which must be + // freed by the time we end execution in this function. We cannot call + // `png::CommonFreeDecode()` before an `OP_REQUIRES` because if + // `OP_REQUIRES` constraint is satisfied then the data would be freed + // prematurely. Instead, let's use a `Cleanup` object. + auto cleanup = gtl::MakeCleanup([&decode]() { + std::cerr << "Cleanup called...\n"; + png::CommonFreeDecode(&decode); + }); + // Verify that width and height are not too large: // - verify width and height don't overflow int. // - width can later be multiplied by channels_ and sizeof(uint16), so @@ -339,22 +351,24 @@ class DecodeImageV2Op : public OpKernel { if (width != static_cast(decode.width) || width <= 0 || width >= (1LL << 27) || height != static_cast(decode.height) || height <= 0 || height >= (1LL << 27) || total_size >= (1LL << 29)) { - png::CommonFreeDecode(&decode); OP_REQUIRES(context, false, errors::InvalidArgument("PNG size too large for int: ", decode.width, " by ", decode.height)); } Tensor* output = nullptr; - Status status; // By the existing API, we support decoding PNG with `DecodeGif` op. // We need to make sure to return 4-D shapes when using `DecodeGif`. 
if (op_type_ == "DecodeGif") { - status = context->allocate_output( - 0, TensorShape({1, height, width, decode.channels}), &output); + OP_REQUIRES_OK( + context, + context->allocate_output( + 0, TensorShape({1, height, width, decode.channels}), &output)); } else { - status = context->allocate_output( - 0, TensorShape({height, width, decode.channels}), &output); + OP_REQUIRES_OK( + context, + context->allocate_output( + 0, TensorShape({height, width, decode.channels}), &output)); } if (op_type_ == "DecodeBmp") { @@ -374,9 +388,6 @@ class DecodeImageV2Op : public OpKernel { "detected PNG.")); } - if (!status.ok()) png::CommonFreeDecode(&decode); - OP_REQUIRES_OK(context, status); - if (data_type_ == DataType::DT_UINT8) { OP_REQUIRES( context, diff --git a/tensorflow/core/kernels/map_stage_op.cc b/tensorflow/core/kernels/map_stage_op.cc index fd03d2e187e1d8..6cc5af6fb35664 100644 --- a/tensorflow/core/kernels/map_stage_op.cc +++ b/tensorflow/core/kernels/map_stage_op.cc @@ -533,6 +533,11 @@ class MapStageOp : public OpKernel { OP_REQUIRES(ctx, key_tensor->NumElements() > 0, errors::InvalidArgument("key must not be empty")); + OP_REQUIRES(ctx, key_tensor->NumElements() == 1, + errors::InvalidArgument( + "key must be an int64 scalar, got tensor with shape: ", + key_tensor->shape())); + // Create copy for insertion into Staging Area Tensor key(*key_tensor); diff --git a/tensorflow/core/kernels/quantized_pooling_ops.cc b/tensorflow/core/kernels/quantized_pooling_ops.cc index 663ceb0641e202..8f042e88f7aad2 100644 --- a/tensorflow/core/kernels/quantized_pooling_ops.cc +++ b/tensorflow/core/kernels/quantized_pooling_ops.cc @@ -15,6 +15,8 @@ limitations under the License. // See docs in ../ops/nn_ops.cc. 
+#include "tensorflow/core/framework/op_requires.h" +#include "tensorflow/core/platform/errors.h" #define EIGEN_USE_THREADS #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" @@ -117,6 +119,18 @@ class QuantizedMaxPoolingOp : public MaxPoolingOp { : MaxPoolingOp(context) {} void Compute(OpKernelContext* context) override { + auto min_input_tensor = context->input(1); + auto max_input_tensor = context->input(2); + OP_REQUIRES( + context, min_input_tensor.NumElements() == 1, + errors::InvalidArgument( + "min_input must be a scalar float value, got tensor with shape ", + min_input_tensor.shape())); + OP_REQUIRES( + context, max_input_tensor.NumElements() == 1, + errors::InvalidArgument( + "max_input must be a scalar float value, got tensor with shape ", + max_input_tensor.shape())); const float min_input = context->input(1).flat()(0); const float max_input = context->input(2).flat()(0); MaxPoolingOp::Compute(context); diff --git a/tensorflow/core/kernels/reshape_util.cc b/tensorflow/core/kernels/reshape_util.cc index 32061c836b0f70..e2cc218d63e42d 100644 --- a/tensorflow/core/kernels/reshape_util.cc +++ b/tensorflow/core/kernels/reshape_util.cc @@ -23,8 +23,10 @@ limitations under the License. 
#include #include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" +#include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/gtl/inlined_vector.h" @@ -99,7 +101,9 @@ void ReshapeSparseTensor(OpKernelContext *context, target_shape_in.shape().DebugString())); const int64 output_rank = target_shape_in.NumElements(); - const TensorShape input_shape(input_shape_in.vec()); + TensorShape input_shape; + OP_REQUIRES_OK(context, TensorShape::BuildTensorShape( + input_shape_in.vec(), &input_shape)); const int64 dense_size = input_shape.num_elements(); const int64 nnz = input_indices_in.shape().dim_size(0); diff --git a/tensorflow/core/kernels/segment_reduction_ops_impl.h b/tensorflow/core/kernels/segment_reduction_ops_impl.h index 6a9ec8934deffc..fdb70ed3257721 100644 --- a/tensorflow/core/kernels/segment_reduction_ops_impl.h +++ b/tensorflow/core/kernels/segment_reduction_ops_impl.h @@ -18,6 +18,10 @@ limitations under the License. 
#ifndef TENSORFLOW_CORE_KERNELS_SEGMENT_REDUCTION_OPS_IMPL_H_ #define TENSORFLOW_CORE_KERNELS_SEGMENT_REDUCTION_OPS_IMPL_H_ +#include + +#include "tensorflow/core/framework/op_requires.h" +#include "tensorflow/core/platform/types.h" #define EIGEN_USE_THREADS #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define EIGEN_USE_GPU @@ -460,6 +464,7 @@ class SparseSegmentReductionOpBase : public OpKernel { bool is_mean, bool is_sqrtn, bool has_num_segments, T default_value) : OpKernel(context), + dtidx_(DataTypeToEnum::v()), is_mean_(is_mean), is_sqrtn_(is_sqrtn), has_num_segments_(has_num_segments), @@ -499,10 +504,20 @@ class SparseSegmentReductionOpBase : public OpKernel { const auto segment_vec = segment_ids.vec(); // Note that the current implementation assumes that segment_vec values are // sorted. + const SegmentId last_segment_id = + num_indices > 0 ? segment_vec(num_indices - 1) : 0; + int64_t limit = dtidx_ == DataType::DT_INT32 ? kint32max : kint64max; + + OP_REQUIRES( + context, last_segment_id < limit, + errors::InvalidArgument("Last segment id must be < kintmax, got ", + last_segment_id, " limit ", limit)); + const SegmentId last_segment_id_plus_one = num_indices > 0 ? internal::SubtleMustCopy(segment_vec(num_indices - 1)) + 1 : 0; + if (has_num_segments_) { OP_REQUIRES( context, output_rows >= last_segment_id_plus_one, @@ -514,7 +529,7 @@ class SparseSegmentReductionOpBase : public OpKernel { errors::InvalidArgument("segment ids must be >= 0")); TensorShape output_shape = input.shape(); - output_shape.set_dim(0, output_rows); + OP_REQUIRES_OK(context, output_shape.SetDimWithStatus(0, output_rows)); // Note that we do not initialize the output buffer with a default value, so // we need to explicitly set missing indices to the default value. 
@@ -601,6 +616,7 @@ class SparseSegmentReductionOpBase : public OpKernel { } private: + const DataType dtidx_; template using EnableIfBfloat16 = typename std::enable_if::value, int>::type; diff --git a/tensorflow/core/kernels/sequence_ops.cc b/tensorflow/core/kernels/sequence_ops.cc index ebf31d61e5f85b..7ae61e7b6a47c4 100644 --- a/tensorflow/core/kernels/sequence_ops.cc +++ b/tensorflow/core/kernels/sequence_ops.cc @@ -71,13 +71,19 @@ class RangeOp : public OpKernel { errors::InvalidArgument( "Requires start >= limit when delta < 0: ", start, "/", limit)); } - int64 size = 0; - if (std::is_integral::value) { - size = static_cast( - (std::abs(limit - start) + std::abs(delta) - 1) / std::abs(delta)); - } else { - size = static_cast(std::ceil(std::abs((limit - start) / delta))); - } + auto size_auto = (std::is_integral::value + ? (Eigen::numext::abs(limit - start) + + Eigen::numext::abs(delta) - T(1)) / + Eigen::numext::abs(delta) + : Eigen::numext::ceil( + Eigen::numext::abs((limit - start) / delta))); + OP_REQUIRES( + context, size_auto <= std::numeric_limits::max(), + errors::InvalidArgument("Requires ((limit - start) / delta) <= ", + std::numeric_limits::max())); + + int64 size = static_cast(size_auto); + TensorShape shape; OP_REQUIRES_OK(context, shape.AddDimWithStatus(size)); Tensor* out = nullptr; diff --git a/tensorflow/core/kernels/serialize_sparse_op.cc b/tensorflow/core/kernels/serialize_sparse_op.cc index 07cc6c86a1735e..de0ef96aa87c6a 100644 --- a/tensorflow/core/kernels/serialize_sparse_op.cc +++ b/tensorflow/core/kernels/serialize_sparse_op.cc @@ -23,9 +23,11 @@ limitations under the License. 
#include "tensorflow/core/common_runtime/dma_helper.h" #include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor.pb.h" +#include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/variant.h" @@ -364,7 +366,10 @@ class SerializeManySparseOp : public OpKernel { errors::InvalidArgument( "Rank of input SparseTensor should be > 1, but saw rank: ", rank)); - TensorShape tensor_input_shape(input_shape->vec()); + TensorShape tensor_input_shape; + OP_REQUIRES_OK(context, + TensorShape::BuildTensorShape(input_shape->vec(), + &tensor_input_shape)); gtl::InlinedVector std_order(rank); std::iota(std_order.begin(), std_order.end(), 0); SparseTensor input_st; diff --git a/tensorflow/core/kernels/set_kernels.cc b/tensorflow/core/kernels/set_kernels.cc index fd6c7040637f51..ea91f82eec1e6c 100644 --- a/tensorflow/core/kernels/set_kernels.cc +++ b/tensorflow/core/kernels/set_kernels.cc @@ -35,6 +35,7 @@ limitations under the License. #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/platform/env.h" +#include "tensorflow/core/platform/errors.h" #include "tensorflow/core/util/sparse/sparse_tensor.h" namespace tensorflow { @@ -67,8 +68,9 @@ Status SparseTensorFromContext(OpKernelContext* ctx, const int32 base_index, bool validate_indices, sparse::SparseTensor* tensor) { // Assume row-major order. 
- const TensorShape shape = - TensorShape(ctx->input(base_index + 2).vec()); + TensorShape shape; + TF_RETURN_IF_ERROR(TensorShape::BuildTensorShape( + ctx->input(base_index + 2).vec(), &shape)); CheckRankAtLeast2(ctx, shape); std::vector order(shape.dims()); std::iota(order.begin(), order.end(), 0); diff --git a/tensorflow/core/kernels/sparse/sparse_tensor_to_csr_sparse_matrix_op.cc b/tensorflow/core/kernels/sparse/sparse_tensor_to_csr_sparse_matrix_op.cc index 2548ceaa57cc63..dcdad591438f25 100644 --- a/tensorflow/core/kernels/sparse/sparse_tensor_to_csr_sparse_matrix_op.cc +++ b/tensorflow/core/kernels/sparse/sparse_tensor_to_csr_sparse_matrix_op.cc @@ -76,10 +76,18 @@ class SparseTensorToCSRSparseMatrixCPUOp : public OpKernel { const int64 total_nnz = values.NumElements(); // Allocate output Tensors. - Tensor batch_ptr(cpu_allocator(), DT_INT32, TensorShape({batch_size + 1})); - Tensor csr_col_ind(cpu_allocator(), DT_INT32, TensorShape({total_nnz})); - Tensor csr_row_ptr(cpu_allocator(), DT_INT32, - TensorShape({(num_rows + 1) * batch_size})); + TensorShape batch_ptr_shape; + OP_REQUIRES_OK( + ctx, TensorShape::BuildTensorShape({batch_size + 1}, &batch_ptr_shape)); + Tensor batch_ptr(cpu_allocator(), DT_INT32, batch_ptr_shape); + TensorShape csr_col_ind_shape; + OP_REQUIRES_OK( + ctx, TensorShape::BuildTensorShape({total_nnz}, &csr_col_ind_shape)); + Tensor csr_col_ind(cpu_allocator(), DT_INT32, csr_col_ind_shape); + TensorShape csr_row_ind_shape; + OP_REQUIRES_OK(ctx, TensorShape::BuildTensorShape( + {(num_rows + 1) * batch_size}, &csr_row_ind_shape)); + Tensor csr_row_ptr(cpu_allocator(), DT_INT32, csr_row_ind_shape); // Fill the row pointers with zeros. 
functor::SetZeroFunctor set_zero; diff --git a/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc b/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc index dda05dbc3b8cb2..6e3f4969bcf14e 100644 --- a/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc +++ b/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc @@ -78,15 +78,30 @@ class SparseDenseBinaryOpShared : public OpKernel { "but received shapes: ", values_t->shape().DebugString(), " and ", shape_t->shape().DebugString())); + OP_REQUIRES( + ctx, TensorShapeUtils::IsVector(shape_t->shape()), + errors::InvalidArgument("Input sp_shape must be a vector. Got: ", + shape_t->shape().DebugString())); OP_REQUIRES( ctx, values_t->dim_size(0) == indices_t->dim_size(0), errors::InvalidArgument( "The first dimension of values and indices should match. (", values_t->dim_size(0), " vs. ", indices_t->dim_size(0), ")")); + OP_REQUIRES( + ctx, shape_t->shape().dim_size(0) == indices_t->shape().dim_size(1), + errors::InvalidArgument( + "Number of dimensions must match second dimension of indices. ", + "Got ", shape_t->shape().dim_size(0), + " dimensions, indices shape: ", indices_t->shape().DebugString())); + OP_REQUIRES(ctx, shape_t->NumElements() > 0, + errors::InvalidArgument( + "The shape argument requires at least one element.")); const auto indices_mat = indices_t->matrix(); const auto shape_vec = shape_t->vec(); - const auto lhs_dims = BCast::FromShape(TensorShape(shape_vec)); + TensorShape lhs_shape; + OP_REQUIRES_OK(ctx, TensorShape::BuildTensorShape(shape_vec, &lhs_shape)); + const auto lhs_dims = BCast::FromShape(lhs_shape); const auto rhs_dims = BCast::FromShape(dense_t->shape()); BCast b(lhs_dims, rhs_dims, false); // false for keeping the same num dims. 
diff --git a/tensorflow/core/kernels/sparse_reduce_op.cc b/tensorflow/core/kernels/sparse_reduce_op.cc index eb56b7390b0f9a..ef0d89069ce6fa 100644 --- a/tensorflow/core/kernels/sparse_reduce_op.cc +++ b/tensorflow/core/kernels/sparse_reduce_op.cc @@ -18,8 +18,10 @@ limitations under the License. #define EIGEN_USE_THREADS #include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" +#include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/util/sparse/sparse_tensor.h" @@ -172,10 +174,13 @@ class SparseReduceOp : public OpKernel { // making deep copies here. Remove this if/when we change Reorder()'s // semantics. const auto shape_vec = shape_t->vec(); + TensorShape shape; + OP_REQUIRES_OK(ctx, TensorShape::BuildTensorShape(shape_vec, &shape)); + SparseTensor sp; OP_REQUIRES_OK(ctx, SparseTensor::Create( tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t), - TensorShape(shape_vec), &sp)); + shape, &sp)); ReduceDetails reduction = SparseTensorReduceHelper( sp, reduction_axes_t->flat(), keep_dims_); @@ -275,10 +280,13 @@ class SparseReduceSparseOp : public OpKernel { OP_REQUIRES_OK(ctx, ValidateInputs(shape_t, reduction_axes_t)); + TensorShape shape; + OP_REQUIRES_OK(ctx, TensorShape::BuildTensorShape(shape_t->vec(), + &shape)); SparseTensor sp; OP_REQUIRES_OK(ctx, SparseTensor::Create(tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t), - TensorShape(shape_t->vec()), &sp)); + shape, &sp)); ReduceDetails reduction = SparseTensorReduceHelper( sp, reduction_axes_t->flat(), keep_dims_); diff --git a/tensorflow/core/kernels/sparse_slice_op.cc b/tensorflow/core/kernels/sparse_slice_op.cc index 6aaf4fd88fbe89..373fb87d028ef7 100644 --- a/tensorflow/core/kernels/sparse_slice_op.cc +++ 
b/tensorflow/core/kernels/sparse_slice_op.cc @@ -18,6 +18,7 @@ limitations under the License. #include #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" +#include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/util/sparse/sparse_tensor.h" namespace tensorflow { @@ -67,27 +68,38 @@ class SparseSliceOp : public OpKernel { " but got length ", input_size.NumElements())); sparse::SparseTensor sparse_tensor; + TensorShape sparse_tensor_shape; OP_REQUIRES_OK(context, - sparse::SparseTensor::Create( - input_indices, input_values, - TensorShape(input_shape.vec()), &sparse_tensor)); + TensorShapeBase::BuildTensorShapeBase( + input_shape.vec(), &sparse_tensor_shape)); + OP_REQUIRES_OK(context, sparse::SparseTensor::Create( + input_indices, input_values, + sparse_tensor_shape, &sparse_tensor)); const gtl::ArraySlice start(input_start.flat().data(), input_dims); const gtl::ArraySlice size(input_size.flat().data(), input_dims); - const sparse::SparseTensor output = + const StatusOr output_or = sparse::SparseTensor::Slice(sparse_tensor, start, size); + OP_REQUIRES_OK(context, output_or.status()); + auto output = output_or.ValueOrDie(); context->set_output(0, output.indices()); context->set_output(1, output.values()); - const TensorShape output_shape(output.shape()); + TensorShape output_shape; + OP_REQUIRES_OK(context, TensorShapeBase::BuildTensorShapeBase( + output.shape(), &output_shape)); + + TensorShape allocated_shape; + OP_REQUIRES_OK(context, TensorShapeBase::BuildTensorShapeBase( + {output_shape.dims()}, &allocated_shape)); Tensor* shape = nullptr; OP_REQUIRES_OK(context, - context->allocate_output(2, {output_shape.dims()}, &shape)); + context->allocate_output(2, allocated_shape, &shape)); for (int dim = 0; dim < output_shape.dims(); ++dim) { shape->vec()(dim) = output_shape.dim_size(dim); } diff --git a/tensorflow/core/kernels/sparse_softmax_op.cc b/tensorflow/core/kernels/sparse_softmax_op.cc 
index 548080b8b13738..0fdb8acf963eaf 100644 --- a/tensorflow/core/kernels/sparse_softmax_op.cc +++ b/tensorflow/core/kernels/sparse_softmax_op.cc @@ -21,6 +21,7 @@ limitations under the License. #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_util.h" @@ -62,14 +63,16 @@ class SparseSoftmaxOp : public OpKernel { errors::InvalidArgument( "Input should have rank >= 2, but received shape: ", shape_t->SummarizeValue(3))); + TensorShape shape; + OP_REQUIRES_OK(context, TensorShape::BuildTensorShape( + shape_t->flat(), &shape)); const int64 nnz = indices_t->dim_size(0); const int rank = static_cast(indices_t->dim_size(1)); SparseTensor st; OP_REQUIRES_OK( - context, SparseTensor::Create( - tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t), - TensorShape(shape_t->flat()), &st)); + context, SparseTensor::Create(tensor::DeepCopy(*indices_t), + tensor::DeepCopy(*values_t), shape, &st)); Tensor *output_values = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape({nnz}), diff --git a/tensorflow/core/kernels/sparse_split_op.cc b/tensorflow/core/kernels/sparse_split_op.cc index 3b88a8ca2bf6ee..dfc572fe5a0f37 100644 --- a/tensorflow/core/kernels/sparse_split_op.cc +++ b/tensorflow/core/kernels/sparse_split_op.cc @@ -30,11 +30,15 @@ class SparseSplitOp : public OpKernel { } void Compute(OpKernelContext* context) override { - const int64 axis_input = context->input(0).scalar()(); + const Tensor& input_axis = context->input(0); const Tensor& input_indices = context->input(1); const Tensor& input_values = context->input(2); const Tensor& input_shape = context->input(3); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(input_axis.shape()), + errors::InvalidArgument( + "Input axis should be a scalar but 
received shape ", + input_axis.shape().DebugString())); OP_REQUIRES(context, TensorShapeUtils::IsMatrix(input_indices.shape()), errors::InvalidArgument( "Input indices should be a matrix but received shape ", @@ -48,7 +52,8 @@ class SparseSplitOp : public OpKernel { "Input shape should be a vector but received shape ", input_shape.shape().DebugString())); - const int64 input_rank = input_shape.vec().size(); + const int64 axis_input = input_axis.scalar()(); + const int64 input_rank = input_shape.vec().size(); const int64 axis = (axis_input < 0) ? input_rank + axis_input : axis_input; OP_REQUIRES( diff --git a/tensorflow/core/kernels/sparse_tensors_map_ops.cc b/tensorflow/core/kernels/sparse_tensors_map_ops.cc index 5ea5fca544d3e9..8b13cf5d5ce48f 100644 --- a/tensorflow/core/kernels/sparse_tensors_map_ops.cc +++ b/tensorflow/core/kernels/sparse_tensors_map_ops.cc @@ -234,16 +234,29 @@ class AddManySparseToTensorsMapOp : public SparseTensorAccessingOp { errors::InvalidArgument( "Input indices should be a matrix but received shape ", input_indices->shape().DebugString())); - OP_REQUIRES(context, TensorShapeUtils::IsVector(input_values->shape()), errors::InvalidArgument( "Input values should be a vector but received shape ", input_values->shape().DebugString())); - OP_REQUIRES(context, TensorShapeUtils::IsVector(input_shape->shape()), errors::InvalidArgument( "Input shape should be a vector but received shape ", input_shape->shape().DebugString())); + OP_REQUIRES( + context, + input_values->shape().dim_size(0) == input_indices->shape().dim_size(0), + errors::InvalidArgument( + "Number of values must match first dimension of indices. ", "Got ", + input_values->shape().dim_size(0), + " values, indices shape: ", input_indices->shape().DebugString())); + OP_REQUIRES( + context, + input_shape->shape().dim_size(0) == input_indices->shape().dim_size(1), + errors::InvalidArgument( + "Number of dimensions must match second dimension of indices. 
", + "Got ", input_shape->shape().dim_size(0), + " dimensions, indices shape: ", + input_indices->shape().DebugString())); int rank = input_shape->NumElements(); @@ -253,21 +266,10 @@ class AddManySparseToTensorsMapOp : public SparseTensorAccessingOp { "Rank of input SparseTensor should be > 1, but saw rank: ", rank)); auto input_shape_vec = input_shape->vec(); - int new_num_elements = 1; - bool overflow_ocurred = false; - for (int i = 0; i < input_shape_vec.size(); i++) { - new_num_elements = - MultiplyWithoutOverflow(new_num_elements, input_shape_vec(i)); - if (new_num_elements < 0) { - overflow_ocurred = true; - } - } - - OP_REQUIRES( - context, !overflow_ocurred, - errors::Internal("Encountered overflow from large input shape.")); - TensorShape tensor_input_shape(input_shape_vec); + TensorShape tensor_input_shape; + OP_REQUIRES_OK(context, TensorShape::BuildTensorShape(input_shape_vec, + &tensor_input_shape)); gtl::InlinedVector std_order(rank); std::iota(std_order.begin(), std_order.end(), 0); SparseTensor input_st; diff --git a/tensorflow/core/kernels/string_ngrams_op.cc b/tensorflow/core/kernels/string_ngrams_op.cc index 97af9abc4454ac..7b004983ba6823 100644 --- a/tensorflow/core/kernels/string_ngrams_op.cc +++ b/tensorflow/core/kernels/string_ngrams_op.cc @@ -13,13 +13,17 @@ See the License for the specific language governing permissions and limitations under the License. 
==============================================================================*/ +#include #include #include #include "absl/strings/ascii.h" #include "absl/strings/str_cat.h" #include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/platform/errors.h" +#include "tensorflow/core/platform/types.h" +#include "tensorflow/core/platform/statusor.h" namespace tensorflow { namespace text { @@ -47,8 +51,14 @@ class StringNGramsOp : public tensorflow::OpKernel { ngram_width - 1); } - int get_num_ngrams(const int length, const int ngram_width) const { + StatusOr get_num_ngrams(const int length, const int ngram_width) const { + int64 limit = kint32max; int pad_width = get_pad_width(ngram_width); + if (pad_width > limit / 2 - length) { + return errors::InvalidArgument( + "Pad width could lead to integer overflow, got pad_width = ", + pad_width); + } return std::max(0, ((length + 2 * pad_width) - ngram_width) + 1); } @@ -112,8 +122,11 @@ class StringNGramsOp : public tensorflow::OpKernel { for (int i = 1; i <= num_batch_items; ++i) { int length = splits_vec(i) - splits_vec(i - 1); int num_ngrams = 0; - for (int ngram_width : ngram_widths_) - num_ngrams += get_num_ngrams(length, ngram_width); + for (int ngram_width : ngram_widths_) { + auto ngrams_or = get_num_ngrams(length, ngram_width); + OP_REQUIRES_OK(context, ngrams_or.status()); + num_ngrams += ngrams_or.ValueOrDie(); + } if (preserve_short_ && length > 0 && num_ngrams == 0) { num_ngrams = 1; } @@ -133,7 +146,9 @@ class StringNGramsOp : public tensorflow::OpKernel { for (int ngram_width : ngram_widths_) { auto output_start = &ngrams_data[output_start_idx]; int length = splits_vec(i + 1) - splits_vec(i); - int num_ngrams = get_num_ngrams(length, ngram_width); + auto ngrams_or = get_num_ngrams(length, ngram_width); + OP_REQUIRES_OK(context, ngrams_or.status()); + int num_ngrams = ngrams_or.ValueOrDie(); CreateNgrams(data_start, output_start, num_ngrams, 
ngram_width); output_start_idx += num_ngrams; } @@ -152,6 +167,16 @@ class StringNGramsOp : public tensorflow::OpKernel { // We don't have to worry about dynamic padding sizes here: if padding // was dynamic, every sequence would have had sufficient padding to // generate at least one ngram. + + // If reached here, pad_width should be > 0, pad_width_ = -1, + // which indicates max(ngram_widths) - 1 cannot be used here since + // ngram_width is not known. + OP_REQUIRES( + context, pad_width_ >= 0, + errors::InvalidArgument("Pad width should be >= 0 when " + "preserve_short_sequences is True and " + "ngram_widths are not provided, got ", + pad_width_)); int ngram_width = data_length + 2 * pad_width_; auto output_start = &ngrams_data[output_start_idx]; int num_ngrams = 1; diff --git a/tensorflow/core/kernels/unravel_index_op.cc b/tensorflow/core/kernels/unravel_index_op.cc index d5adef3bac170d..c9e2b33f3f0b6d 100644 --- a/tensorflow/core/kernels/unravel_index_op.cc +++ b/tensorflow/core/kernels/unravel_index_op.cc @@ -13,6 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. 
==============================================================================*/ +#include + +#include "tensorflow/core/framework/types.pb.h" +#include "tensorflow/core/platform/types.h" #define EIGEN_USE_THREADS #include "tensorflow/core/framework/op_kernel.h" @@ -35,7 +39,8 @@ typedef Eigen::ThreadPoolDevice CPUDevice; template class UnravelIndexOp : public OpKernel { public: - explicit UnravelIndexOp(OpKernelConstruction* ctx) : OpKernel(ctx) {} + explicit UnravelIndexOp(OpKernelConstruction* ctx) + : OpKernel(ctx), dtidx_(DataTypeToEnum::v()) {} void Compute(OpKernelContext* ctx) override { const Tensor& indices_tensor = ctx->input(0); @@ -54,12 +59,31 @@ class UnravelIndexOp : public OpKernel { auto dims = dims_tensor.vec(); // Make sure dims does not contain a zero + double prod = 1; + uint64_t limit; + if (dtidx_ == DataType::DT_INT64) { + limit = kint64max; + } else { + limit = kint32max; + } + for (int i = 0; i < dims.size(); i++) { OP_REQUIRES( ctx, dims(i) != 0, errors::InvalidArgument("Input dims cannot contain a dim of zero, " "but dims contains zero at index ", i)); + OP_REQUIRES(ctx, dims(i) > 0, + errors::InvalidArgument( + "Input dims cannot be negative. Got dim = ", dims(i), + " at index ", i)); + // Check interger overflow + OP_REQUIRES( + ctx, prod <= limit / dims(i), + errors::InvalidArgument("Input dims product is causing integer " + "overflow: (", + dims, ")")); + prod = (prod * dims(i)); } // Chek to make sure indices is not out of boundary @@ -132,6 +156,7 @@ class UnravelIndexOp : public OpKernel { strides_shifted.reshape(reshape).broadcast(bcast); } } + const DataType dtidx_; }; #define REGISTER_KERNEL(type) \ diff --git a/tensorflow/core/ops/array_ops.cc b/tensorflow/core/ops/array_ops.cc index 90bc2cecd0b869..04d727441179ae 100644 --- a/tensorflow/core/ops/array_ops.cc +++ b/tensorflow/core/ops/array_ops.cc @@ -24,6 +24,7 @@ limitations under the License. 
#include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/lib/core/errors.h" +#include "tensorflow/core/platform/types.h" #include "tensorflow/core/util/mirror_pad_mode.h" #include "tensorflow/core/util/padding.h" #include "tensorflow/core/util/strided_slice_op.h" @@ -1637,11 +1638,21 @@ REGISTER_OP("ReverseSequence") return errors::InvalidArgument( "batch_dim must be < input rank: ", batch_dim, " vs. ", input_rank); } + if (seq_dim >= input_rank) { return errors::InvalidArgument( "seq_dim must be < input rank: ", seq_dim, " vs. ", input_rank); } + // To prevent out of bound access when calling c->Dim(input, batch_dim), + // batch_dim range [-1 * input rank, input rank) is allowed. However, + // the op implementation has a stricter bound for batch_dim requiring >= 0 + // value. Thus, perform strict check here. + if (batch_dim < 0) { + return errors::InvalidArgument("batch_dim must be >=0, got ", + batch_dim); + } + DimensionHandle batch_dim_dim = c->Dim(input, batch_dim); TF_RETURN_IF_ERROR( c->Merge(batch_dim_dim, c->Dim(seq_lens_shape, 0), &batch_dim_dim)); @@ -3023,6 +3034,12 @@ REGISTER_OP("Dequantize") return errors::InvalidArgument("axis should be at least -1, got ", axis); } + auto input_dims = c->Rank(c->input(0)); + if (axis > input_dims) { + return errors::InvalidArgument( + "Axis must be less than input dimension(", input_dims, "), got ", + axis); + } const int minmax_rank = (axis == -1) ? 0 : 1; TF_RETURN_IF_ERROR(shape_inference::UnchangedShape(c)); ShapeHandle minmax; @@ -3030,6 +3047,13 @@ REGISTER_OP("Dequantize") TF_RETURN_IF_ERROR(c->WithRank(c->input(2), minmax_rank, &minmax)); if (axis != -1) { ShapeHandle input; + if (axis >= kint32max) { + // Check int32 max bound for a corner case to prevent integer overflow + // when input actually has kint32max rank and above bound check is not + // triggered. 
+ return errors::InvalidArgument( + "Axis cannot be >= kint32max value, got ", axis); + } TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), axis + 1, &input)); DimensionHandle depth; TF_RETURN_IF_ERROR( diff --git a/tensorflow/core/ops/math_ops.cc b/tensorflow/core/ops/math_ops.cc index f0d85244b7b2d1..8861d22966fa1f 100644 --- a/tensorflow/core/ops/math_ops.cc +++ b/tensorflow/core/ops/math_ops.cc @@ -1449,6 +1449,13 @@ Status RangeSize(const Tensor* start_t, const Tensor* limit_t, Eigen::numext::abs(delta)) : (Eigen::numext::ceil( Eigen::numext::abs((limit - start) / delta)))); + + // Undefined behaviour if size will not fit into int64_t + if (size > std::numeric_limits::max()) { + return errors::InvalidArgument("Requires ((limit - start) / delta) <= ", + std::numeric_limits::max()); + } + c->set_output(0, c->Vector(static_cast(size))); return Status::OK(); } @@ -1656,6 +1663,11 @@ REGISTER_OP("Bincount") return Status::OK(); } + if (size_tensor->dims() != 0) { + return errors::InvalidArgument("Shape must be rank 0 but is rank ", + size_tensor->dims()); + } + // Return `[size]` shape if size is known. 
int32 size_val = size_tensor->scalar()(); if (size_val < 0) { @@ -1687,6 +1699,10 @@ REGISTER_OP("DenseBincount") c->set_output(0, c->UnknownShape()); return Status::OK(); } + if (size_tensor->dims() != 0) { + return errors::InvalidArgument("Shape must be rank 0 but is rank ", + size_tensor->dims()); + } int64 size_val; DataType dtype; @@ -1728,6 +1744,10 @@ REGISTER_OP("SparseBincount") c->set_output(0, c->UnknownShape()); return Status::OK(); } + if (size_tensor->dims() != 0) { + return errors::InvalidArgument("Shape must be rank 0 but is rank ", + size_tensor->dims()); + } int64 size_val; DataType dtype; diff --git a/tensorflow/core/platform/BUILD b/tensorflow/core/platform/BUILD index 1e0fa66e353675..e5707750eba93a 100644 --- a/tensorflow/core/platform/BUILD +++ b/tensorflow/core/platform/BUILD @@ -667,6 +667,24 @@ cc_library( ], ) +cc_library( + name = "statusor", + srcs = [ + "statusor.cc", + "statusor_internals.h", + ], + hdrs = ["statusor.h"], + deps = [ + ":logging", + ":macros", + ":status", + "//tensorflow/core/lib/core:errors", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/strings:str_format", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "str_util", srcs = ["str_util.cc"], @@ -887,6 +905,18 @@ tf_cc_test( ], ) +tf_cc_test( + name = "statusor_test", + size = "small", + srcs = ["statusor_test.cc"], + deps = [ + ":statusor", + "//tensorflow/core:lib", + "//tensorflow/core:test", + "//tensorflow/core:test_main", + ], +) + # This is a hacky, do-nothing, binary that makes it easy to verify ability to # build, link, and in some cases run all of the libraries under platform. 
# Realistically, most of this would be covered by tests but at this point @@ -1404,6 +1434,7 @@ filegroup( "stacktrace.h", "stacktrace_handler.h", "status.h", + "statusor.h", "str_util.h", "strcat.h", "stringpiece.h", @@ -1651,6 +1682,9 @@ filegroup( "status.cc", "stack_frame.h", "status.h", + "statusor.h", + "statusor_internals.h", + "statusor.cc", "str_util.cc", "str_util.h", "strcat.cc", diff --git a/tensorflow/stream_executor/lib/statusor.cc b/tensorflow/core/platform/statusor.cc similarity index 89% rename from tensorflow/stream_executor/lib/statusor.cc rename to tensorflow/core/platform/statusor.cc index e0e851f96ef6fe..55d7df37c2b6b5 100644 --- a/tensorflow/stream_executor/lib/statusor.cc +++ b/tensorflow/core/platform/statusor.cc @@ -13,13 +13,12 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#include "tensorflow/stream_executor/lib/statusor.h" +#include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/platform/logging.h" -namespace stream_executor { -namespace port { +namespace tensorflow { namespace internal_statusor { void Helper::HandleInvalidStatusCtorArg(Status* status) { @@ -36,5 +35,4 @@ void Helper::Crash(const Status& status) { } } // namespace internal_statusor -} // namespace port -} // namespace stream_executor +} // namespace tensorflow diff --git a/tensorflow/core/platform/statusor.h b/tensorflow/core/platform/statusor.h new file mode 100644 index 00000000000000..94cd3e484b9fc0 --- /dev/null +++ b/tensorflow/core/platform/statusor.h @@ -0,0 +1,393 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// StatusOr is the union of a Status object and a T object. StatusOr models +// the concept of an object that is either a value, or an error Status +// explaining why such a value is not present. To this end, StatusOr does not +// allow its Status value to be Status::OK. +// +// The primary use-case for StatusOr is as the return value of a +// function which may fail. +// +// Example client usage for a StatusOr, where T is not a pointer: +// +// StatusOr result = DoBigCalculationThatCouldFail(); +// if (result.ok()) { +// float answer = result.ValueOrDie(); +// printf("Big calculation yielded: %f", answer); +// } else { +// LOG(ERROR) << result.status(); +// } +// +// Example client usage for a StatusOr: +// +// StatusOr result = FooFactory::MakeNewFoo(arg); +// if (result.ok()) { +// std::unique_ptr foo(result.ValueOrDie()); +// foo->DoSomethingCool(); +// } else { +// LOG(ERROR) << result.status(); +// } +// +// Example client usage for a StatusOr>: +// +// StatusOr> result = FooFactory::MakeNewFoo(arg); +// if (result.ok()) { +// std::unique_ptr foo = std::move(result.ValueOrDie()); +// foo->DoSomethingCool(); +// } else { +// LOG(ERROR) << result.status(); +// } +// +// Example factory implementation returning StatusOr: +// +// StatusOr FooFactory::MakeNewFoo(int arg) { +// if (arg <= 0) { +// return tensorflow::InvalidArgument("Arg must be positive"); +// } else { +// return new Foo(arg); +// } +// } +// +// Note that the assignment operators require that destroying the currently +// 
stored value cannot invalidate the argument; in other words, the argument +// cannot be an alias for the current value, or anything owned by the current +// value. +#ifndef TENSORFLOW_CORE_PLATFORM_STATUSOR_H_ +#define TENSORFLOW_CORE_PLATFORM_STATUSOR_H_ + +#include "tensorflow/core/platform/macros.h" +#include "tensorflow/core/platform/status.h" +#include "tensorflow/core/platform/statusor_internals.h" + +namespace tensorflow { + +#if defined(__clang__) +// Only clang supports warn_unused_result as a type annotation. +template +class TF_MUST_USE_RESULT StatusOr; +#endif + +template +class StatusOr : private internal_statusor::StatusOrData, + private internal_statusor::TraitsBase< + std::is_copy_constructible::value, + std::is_move_constructible::value> { + template + friend class StatusOr; + + typedef internal_statusor::StatusOrData Base; + + public: + typedef T element_type; // DEPRECATED: use `value_type`. + typedef T value_type; + + // Constructs a new StatusOr with Status::UNKNOWN status. This is marked + // 'explicit' to try to catch cases like 'return {};', where people think + // StatusOr> will be initialized with an empty vector, + // instead of a Status::UNKNOWN status. + explicit StatusOr(); + + // StatusOr will be copy constructible/assignable if T is copy + // constructible. + StatusOr(const StatusOr&) = default; + StatusOr& operator=(const StatusOr&) = default; + + // StatusOr will be move constructible/assignable if T is move + // constructible. + StatusOr(StatusOr&&) = default; + StatusOr& operator=(StatusOr&&) = default; + + // Conversion copy/move constructor, T must be convertible from U. + template ::value>::type* = nullptr> + StatusOr(const StatusOr& other); + template ::value>::type* = nullptr> + StatusOr(StatusOr&& other); + + // Conversion copy/move assignment operator, T must be convertible from U. 
+ template ::value>::type* = nullptr> + StatusOr& operator=(const StatusOr& other); + template ::value>::type* = nullptr> + StatusOr& operator=(StatusOr&& other); + + // Constructs a new StatusOr with the given value. After calling this + // constructor, calls to ValueOrDie() will succeed, and calls to status() will + // return OK. + // + // NOTE: Not explicit - we want to use StatusOr as a return type + // so it is convenient and sensible to be able to do 'return T()' + // when the return type is StatusOr. + // + // REQUIRES: T is copy constructible. + StatusOr(const T& value); + + // Constructs a new StatusOr with the given non-ok status. After calling + // this constructor, calls to ValueOrDie() will CHECK-fail. + // + // NOTE: Not explicit - we want to use StatusOr as a return + // value, so it is convenient and sensible to be able to do 'return + // Status()' when the return type is StatusOr. + // + // REQUIRES: !status.ok(). This requirement is DCHECKed. + // In optimized builds, passing Status::OK() here will have the effect + // of passing tensorflow::error::INTERNAL as a fallback. + StatusOr(const Status& status); + StatusOr& operator=(const Status& status); + + // TODO(b/62186997): Add operator=(T) overloads. + + // Similar to the `const T&` overload. + // + // REQUIRES: T is move constructible. + StatusOr(T&& value); + + // RValue versions of the operations declared above. + StatusOr(Status&& status); + StatusOr& operator=(Status&& status); + + // Returns this->status().ok() + bool ok() const { return this->status_.ok(); } + + // Returns a reference to our status. If this contains a T, then + // returns Status::OK(). + const Status& status() const &; + Status status() &&; + + // Returns a reference to our current value, or CHECK-fails if !this->ok(). 
+ // + // Note: for value types that are cheap to copy, prefer simple code: + // + // T value = statusor.ValueOrDie(); + // + // Otherwise, if the value type is expensive to copy, but can be left + // in the StatusOr, simply assign to a reference: + // + // T& value = statusor.ValueOrDie(); // or `const T&` + // + // Otherwise, if the value type supports an efficient move, it can be + // used as follows: + // + // T value = std::move(statusor).ValueOrDie(); + // + // The std::move on statusor instead of on the whole expression enables + // warnings about possible uses of the statusor object after the move. + // C++ style guide waiver for ref-qualified overloads granted in cl/143176389 + // See go/ref-qualifiers for more details on such overloads. + const T& ValueOrDie() const &; + T& ValueOrDie() &; + const T&& ValueOrDie() const &&; + T&& ValueOrDie() &&; + + // Returns a reference to the current value. + // + // REQUIRES: this->ok() == true, otherwise the behavior is undefined. + // + // Use this->ok() or `operator bool()` to verify that there is a current + // value. Alternatively, see ValueOrDie() for a similar API that guarantees + // CHECK-failing if there is no current value. + const T& operator*() const&; + T& operator*() &; + const T&& operator*() const&&; + T&& operator*() &&; + + // Returns a pointer to the current value. + // + // REQUIRES: this->ok() == true, otherwise the behavior is undefined. + // + // Use this->ok() or `operator bool()` to verify that there is a current + // value. + const T* operator->() const; + T* operator->(); + + T ConsumeValueOrDie() { return std::move(ValueOrDie()); } + + // Ignores any errors. This method does nothing except potentially suppress + // complaints from any tools that are checking that errors are not dropped on + // the floor. 
+ void IgnoreError() const; +}; + +//////////////////////////////////////////////////////////////////////////////// +// Implementation details for StatusOr + +template +StatusOr::StatusOr() : Base(Status(tensorflow::error::UNKNOWN, "")) {} + +template +StatusOr::StatusOr(const T& value) : Base(value) {} + +template +StatusOr::StatusOr(const Status& status) : Base(status) {} + +template +StatusOr& StatusOr::operator=(const Status& status) { + this->Assign(status); + return *this; +} + +template +StatusOr::StatusOr(T&& value) : Base(std::move(value)) {} + +template +StatusOr::StatusOr(Status&& status) : Base(std::move(status)) {} + +template +StatusOr& StatusOr::operator=(Status&& status) { + this->Assign(std::move(status)); + return *this; +} + +template +template ::value>::type*> +inline StatusOr::StatusOr(const StatusOr& other) + : Base(static_cast::Base&>(other)) {} + +template +template ::value>::type*> +inline StatusOr& StatusOr::operator=(const StatusOr& other) { + if (other.ok()) + this->Assign(other.ValueOrDie()); + else + this->Assign(other.status()); + return *this; +} + +template +template ::value>::type*> +inline StatusOr::StatusOr(StatusOr&& other) + : Base(static_cast::Base&&>(other)) {} + +template +template ::value>::type*> +inline StatusOr& StatusOr::operator=(StatusOr&& other) { + if (other.ok()) { + this->Assign(std::move(other).ValueOrDie()); + } else { + this->Assign(std::move(other).status()); + } + return *this; +} + +template +const Status& StatusOr::status() const & { + return this->status_; +} +template +Status StatusOr::status() && { + // Note that we copy instead of moving the status here so that + // ~StatusOrData() can call ok() without invoking UB. + return ok() ? 
Status::OK() : this->status_; +} + +template +const T& StatusOr::ValueOrDie() const & { + this->EnsureOk(); + return this->data_; +} + +template +T& StatusOr::ValueOrDie() & { + this->EnsureOk(); + return this->data_; +} + +template +const T&& StatusOr::ValueOrDie() const && { + this->EnsureOk(); + return std::move(this->data_); +} + +template +T&& StatusOr::ValueOrDie() && { + this->EnsureOk(); + return std::move(this->data_); +} + +template +const T* StatusOr::operator->() const { + this->EnsureOk(); + return &this->data_; +} + +template +T* StatusOr::operator->() { + this->EnsureOk(); + return &this->data_; +} + +template +const T& StatusOr::operator*() const& { + this->EnsureOk(); + return this->data_; +} + +template +T& StatusOr::operator*() & { + this->EnsureOk(); + return this->data_; +} + +template +const T&& StatusOr::operator*() const&& { + this->EnsureOk(); + return std::move(this->data_); +} + +template +T&& StatusOr::operator*() && { + this->EnsureOk(); + return std::move(this->data_); +} + +template +void StatusOr::IgnoreError() const { + // no-op +} + +#define TF_ASSERT_OK_AND_ASSIGN(lhs, rexpr) \ + TF_ASSERT_OK_AND_ASSIGN_IMPL( \ + TF_STATUS_MACROS_CONCAT_NAME(_status_or_value, __COUNTER__), lhs, \ + rexpr); + +#define TF_ASSERT_OK_AND_ASSIGN_IMPL(statusor, lhs, rexpr) \ + auto statusor = (rexpr); \ + ASSERT_TRUE(statusor.status().ok()) << statusor.status(); \ + lhs = std::move(statusor.ValueOrDie()) + +#define TF_STATUS_MACROS_CONCAT_NAME(x, y) TF_STATUS_MACROS_CONCAT_IMPL(x, y) +#define TF_STATUS_MACROS_CONCAT_IMPL(x, y) x##y + +#define TF_ASSIGN_OR_RETURN(lhs, rexpr) \ + TF_ASSIGN_OR_RETURN_IMPL( \ + TF_STATUS_MACROS_CONCAT_NAME(_status_or_value, __COUNTER__), lhs, rexpr) + +#define TF_ASSIGN_OR_RETURN_IMPL(statusor, lhs, rexpr) \ + auto statusor = (rexpr); \ + if (TF_PREDICT_FALSE(!statusor.ok())) { \ + return statusor.status(); \ + } \ + lhs = std::move(statusor.ValueOrDie()) + +} // namespace tensorflow + +#endif // 
TENSORFLOW_CORE_PLATFORM_STATUSOR_H_ diff --git a/tensorflow/stream_executor/lib/statusor_internals.h b/tensorflow/core/platform/statusor_internals.h similarity index 95% rename from tensorflow/stream_executor/lib/statusor_internals.h rename to tensorflow/core/platform/statusor_internals.h index d3a6026f4725c0..ebd48e4c29c988 100644 --- a/tensorflow/stream_executor/lib/statusor_internals.h +++ b/tensorflow/core/platform/statusor_internals.h @@ -13,14 +13,13 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#ifndef TENSORFLOW_STREAM_EXECUTOR_LIB_STATUSOR_INTERNALS_H_ -#define TENSORFLOW_STREAM_EXECUTOR_LIB_STATUSOR_INTERNALS_H_ +#ifndef TENSORFLOW_CORE_PLATFORM_STATUSOR_INTERNALS_H_ +#define TENSORFLOW_CORE_PLATFORM_STATUSOR_INTERNALS_H_ #include "tensorflow/core/platform/macros.h" -#include "tensorflow/stream_executor/lib/status.h" +#include "tensorflow/core/platform/status.h" -namespace stream_executor { -namespace port { +namespace tensorflow { namespace internal_statusor { class Helper { @@ -243,7 +242,6 @@ struct TraitsBase { }; } // namespace internal_statusor -} // namespace port -} // namespace stream_executor +} // namespace tensorflow -#endif // TENSORFLOW_STREAM_EXECUTOR_LIB_STATUSOR_INTERNALS_H_ +#endif // TENSORFLOW_CORE_PLATFORM_STATUSOR_INTERNALS_H_ diff --git a/tensorflow/stream_executor/lib/statusor_test.cc b/tensorflow/core/platform/statusor_test.cc similarity index 99% rename from tensorflow/stream_executor/lib/statusor_test.cc rename to tensorflow/core/platform/statusor_test.cc index 6b59eaa402923f..ba6b2a8c73fbd1 100644 --- a/tensorflow/stream_executor/lib/statusor_test.cc +++ b/tensorflow/core/platform/statusor_test.cc @@ -15,18 +15,17 @@ limitations under the License. 
// Unit tests for StatusOr -#include "tensorflow/stream_executor/lib/statusor.h" +#include "tensorflow/core/platform/statusor.h" #include #include -#include "tensorflow/core/platform/test.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/platform/macros.h" +#include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" -namespace stream_executor { -namespace port { +namespace tensorflow { namespace { class Base1 { @@ -672,5 +671,4 @@ void BM_StatusOrFactoryFailLongMsg(::testing::benchmark::State& state) { BENCHMARK(BM_StatusOrFactoryFailLongMsg); } // namespace -} // namespace port -} // namespace stream_executor +} // namespace tensorflow diff --git a/tensorflow/core/public/version.h b/tensorflow/core/public/version.h index 799e359f46a82d..03b3d08ffd0f34 100644 --- a/tensorflow/core/public/version.h +++ b/tensorflow/core/public/version.h @@ -22,7 +22,7 @@ limitations under the License. // tensorflow/tools/pip_package/setup.py #define TF_MAJOR_VERSION 2 #define TF_MINOR_VERSION 5 -#define TF_PATCH_VERSION 2 +#define TF_PATCH_VERSION 3 // TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1", // "-beta", "-rc", "-rc.1") diff --git a/tensorflow/core/util/sparse/sparse_tensor.h b/tensorflow/core/util/sparse/sparse_tensor.h index 341290dbbc6982..be03390fef8ea1 100644 --- a/tensorflow/core/util/sparse/sparse_tensor.h +++ b/tensorflow/core/util/sparse/sparse_tensor.h @@ -30,10 +30,12 @@ limitations under the License. 
#include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/strings/str_util.h" +#include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/util/sparse/dim_comparator.h" #include "tensorflow/core/util/sparse/group_iterator.h" +#include "tensorflow/core/platform/statusor.h" namespace tensorflow { namespace sparse { @@ -177,9 +179,9 @@ class SparseTensor { // element of the array representing one dimension. The start is the start // index at each dimension and the size is the size at each dimension. template - static SparseTensor Slice(const SparseTensor& tensor, - const gtl::ArraySlice& start, - const gtl::ArraySlice& size); + static StatusOr Slice(const SparseTensor& tensor, + const gtl::ArraySlice start, + const gtl::ArraySlice size); // Picks out the dimensions according to `dim_indices`. std::vector PickDims(gtl::ArraySlice dim_indices) const { @@ -577,9 +579,9 @@ inline Status SparseTensor::Split(const SparseTensor& input_tensor, } template -inline SparseTensor SparseTensor::Slice(const SparseTensor& input_tensor, - const gtl::ArraySlice& start, - const gtl::ArraySlice& size) { +inline StatusOr SparseTensor::Slice( + const SparseTensor& input_tensor, const gtl::ArraySlice start, + const gtl::ArraySlice size) { TensorShape output_shape(input_tensor.shape()); const int dims = input_tensor.dims(); @@ -590,15 +592,16 @@ inline SparseTensor SparseTensor::Slice(const SparseTensor& input_tensor, const int64 input_size = output_shape.dim_size(dim); const int64 start_index = start[dim]; const int64 slice_size = size[dim]; - if (start_index + slice_size < input_size) { + if (start_index < input_size - slice_size) { // The entire selection is within input boundaries. 
- output_shape.set_dim(dim, slice_size); + TF_RETURN_IF_ERROR(output_shape.SetDimWithStatus(dim, slice_size)); } else if (start_index < input_size) { // The selection starts within input boundaries, but goes beyond them. - output_shape.set_dim(dim, input_size - start_index); + TF_RETURN_IF_ERROR( + output_shape.SetDimWithStatus(dim, input_size - start_index)); } else { // The selection is entirely out of input boundaries. - output_shape.set_dim(dim, 0); + TF_RETURN_IF_ERROR(output_shape.SetDimWithStatus(dim, 0)); } } diff --git a/tensorflow/core/util/sparse/sparse_tensor_test.cc b/tensorflow/core/util/sparse/sparse_tensor_test.cc index f898ba586126cd..df1fa6f082ff78 100644 --- a/tensorflow/core/util/sparse/sparse_tensor_test.cc +++ b/tensorflow/core/util/sparse/sparse_tensor_test.cc @@ -24,6 +24,7 @@ limitations under the License. #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/random/simple_philox.h" #include "tensorflow/core/lib/strings/str_util.h" +#include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" @@ -690,7 +691,8 @@ TEST(SparseTensorTest, Slice) { size[0] = 2; size[1] = 3; - SparseTensor slice = SparseTensor::Slice(st, start, size); + TF_ASSERT_OK_AND_ASSIGN(SparseTensor slice, + SparseTensor::Slice(st, start, size)); EXPECT_EQ(TensorShape(slice.shape()), TensorShape({2, 3})); EXPECT_EQ(slice.values().NumElements(), 3); @@ -724,8 +726,9 @@ TEST(SparseTensorTest, SliceReducesOutputDimension) { TF_ASSERT_OK(SparseTensor::Create(ids, vals, TensorShape({num_rows, num_columns}), &st)); - SparseTensor slice = - SparseTensor::Slice(st, {num_rows + 1, 1}, {1, num_columns}); + TF_ASSERT_OK_AND_ASSIGN( + SparseTensor slice, + SparseTensor::Slice(st, {num_rows + 1, 1}, {1, num_columns})); EXPECT_EQ(TensorShape(slice.shape()), TensorShape({0, 1})); } diff --git a/tensorflow/lite/BUILD b/tensorflow/lite/BUILD index 8d16389badccbd..160a3d4e45a8ae 
100644 --- a/tensorflow/lite/BUILD +++ b/tensorflow/lite/BUILD @@ -762,6 +762,7 @@ cc_library( copts = tflite_copts_warnings() + tflite_copts(), deps = [ ":kernel_api", + ":macros", "//tensorflow/lite/c:common", "//tensorflow/lite/schema:schema_fbs", ], @@ -787,6 +788,7 @@ cc_test( features = ["-dynamic_link_test_srcs"], # see go/dynamic_link_test_srcs deps = [ ":util", + "//tensorflow/lite/c:c_api_types", "//tensorflow/lite/c:common", "//tensorflow/lite/schema:schema_fbs", "@com_google_googletest//:gtest", diff --git a/tensorflow/lite/c/common.c b/tensorflow/lite/c/common.c index 00dd0260cbcc90..4e35a52ee6b5a3 100644 --- a/tensorflow/lite/c/common.c +++ b/tensorflow/lite/c/common.c @@ -21,7 +21,7 @@ limitations under the License. #include #endif // TF_LITE_STATIC_MEMORY -int TfLiteIntArrayGetSizeInBytes(int size) { +size_t TfLiteIntArrayGetSizeInBytes(int size) { static TfLiteIntArray dummy; return sizeof(dummy) + sizeof(dummy.data[0]) * size; } @@ -45,7 +45,7 @@ int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size, #ifndef TF_LITE_STATIC_MEMORY TfLiteIntArray* TfLiteIntArrayCreate(int size) { - int alloc_size = TfLiteIntArrayGetSizeInBytes(size); + size_t alloc_size = TfLiteIntArrayGetSizeInBytes(size); if (alloc_size <= 0) return NULL; TfLiteIntArray* ret = (TfLiteIntArray*)malloc(alloc_size); if (!ret) return ret; diff --git a/tensorflow/lite/c/common.h b/tensorflow/lite/c/common.h index 56e0f8d54f1ddf..9697310e2cd225 100644 --- a/tensorflow/lite/c/common.h +++ b/tensorflow/lite/c/common.h @@ -94,7 +94,7 @@ typedef struct TfLiteIntArray { // Given the size (number of elements) in a TfLiteIntArray, calculate its size // in bytes. -int TfLiteIntArrayGetSizeInBytes(int size); +size_t TfLiteIntArrayGetSizeInBytes(int size); #ifndef TF_LITE_STATIC_MEMORY // Create a array of a given `size` (uninitialized entries). 
diff --git a/tensorflow/lite/core/subgraph.cc b/tensorflow/lite/core/subgraph.cc index 7dc45d3faf6285..fedcbf26114047 100644 --- a/tensorflow/lite/core/subgraph.cc +++ b/tensorflow/lite/core/subgraph.cc @@ -649,27 +649,6 @@ TfLiteStatus Subgraph::CheckInputAndOutputForOverlap(const int* input_indices, return kTfLiteOk; } -namespace { -// Multiply two sizes and return true if overflow occurred; -// This is based off tensorflow/overflow.h but is simpler as we already -// have unsigned numbers. It is also generalized to work where sizeof(size_t) -// is not 8. -TfLiteStatus MultiplyAndCheckOverflow(size_t a, size_t b, size_t* product) { - // Multiplying a * b where a and b are size_t cannot result in overflow in a - // size_t accumulator if both numbers have no non-zero bits in their upper - // half. - constexpr size_t size_t_bits = 8 * sizeof(size_t); - constexpr size_t overflow_upper_half_bit_position = size_t_bits / 2; - *product = a * b; - // If neither integers have non-zero bits past 32 bits can't overflow. - // Otherwise check using slow devision. 
- if (TFLITE_EXPECT_FALSE((a | b) >> overflow_upper_half_bit_position != 0)) { - if (a != 0 && *product / a != b) return kTfLiteError; - } - return kTfLiteOk; -} -} // namespace - TfLiteStatus Subgraph::BytesRequired(TfLiteType type, const int* dims, size_t dims_size, size_t* bytes) { TF_LITE_ENSURE(&context_, bytes != nullptr); diff --git a/tensorflow/lite/kernels/depthwise_conv.cc b/tensorflow/lite/kernels/depthwise_conv.cc index e2e344169a6be2..f277477bc2e3d0 100644 --- a/tensorflow/lite/kernels/depthwise_conv.cc +++ b/tensorflow/lite/kernels/depthwise_conv.cc @@ -115,6 +115,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); TF_LITE_ENSURE_EQ(context, NumDimensions(filter), 4); + TF_LITE_ENSURE(context, params->dilation_height_factor > 0); + TF_LITE_ENSURE(context, params->dilation_width_factor > 0); const TfLiteType data_type = input->type; diff --git a/tensorflow/lite/kernels/embedding_lookup_sparse.cc b/tensorflow/lite/kernels/embedding_lookup_sparse.cc index 4ad1054340c9c3..270ccc929d9bd8 100644 --- a/tensorflow/lite/kernels/embedding_lookup_sparse.cc +++ b/tensorflow/lite/kernels/embedding_lookup_sparse.cc @@ -72,6 +72,7 @@ limitations under the License. 
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/tensor_utils.h" #include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/util.h" namespace tflite { namespace ops { @@ -158,6 +159,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 3, &weights)); const TfLiteTensor* value; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 4, &value)); + const size_t values_size = NumElements(value); const int lookup_rank = SizeOfDimension(indices, 1); const int embedding_rank = NumDimensions(value); @@ -175,25 +177,33 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_rank); TF_LITE_ENSURE(context, output_shape != nullptr); int k = 0; - int embedding_size = 1; - int lookup_size = 1; + size_t embedding_size = 1; + size_t lookup_size = 1; for (int i = 0; i < lookup_rank - 1; i++, k++) { - const int dim = dense_shape->data.i32[i]; - lookup_size *= dim; + const size_t dim = dense_shape->data.i32[i]; + TF_LITE_ENSURE_MSG( + context, + MultiplyAndCheckOverflow(lookup_size, dim, &lookup_size) == kTfLiteOk, + "Lookup size overflowed."); output_shape->data[k] = dim; } for (int i = 1; i < embedding_rank; i++, k++) { - const int dim = SizeOfDimension(value, i); - embedding_size *= dim; + const size_t dim = SizeOfDimension(value, i); + TF_LITE_ENSURE_MSG(context, + MultiplyAndCheckOverflow(embedding_size, dim, + &embedding_size) == kTfLiteOk, + "Embedding size overflowed."); output_shape->data[k] = dim; } TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_shape)); - const int output_size = lookup_size * embedding_size; + const size_t output_size = lookup_size * embedding_size; TfLiteTensorRealloc(output_size * sizeof(float), output); float* output_ptr = GetTensorData(output); const float* weights_ptr = GetTensorData(weights); const float* value_ptr = 
GetTensorData(value); + // Makes sure reallocation was successful. + TF_LITE_ENSURE(context, output_ptr != nullptr); std::fill_n(output_ptr, output_size, 0.0f); @@ -244,6 +254,11 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { current_squares_weight += w * w; current_total_weight += w; for (int k = 0; k < embedding_size; k++) { + // only index if indices are valid + if (current_output_offset + k < 0) continue; + if (current_output_offset + k >= output_size) continue; + if (example_embedding_offset + k < 0) continue; + if (example_embedding_offset + k >= values_size) continue; output_ptr[current_output_offset + k] += value_ptr[example_embedding_offset + k] * w; } diff --git a/tensorflow/lite/kernels/fully_connected.cc b/tensorflow/lite/kernels/fully_connected.cc index e9d5d3dc6a5507..a97415e3f908dc 100644 --- a/tensorflow/lite/kernels/fully_connected.cc +++ b/tensorflow/lite/kernels/fully_connected.cc @@ -879,6 +879,36 @@ TfLiteStatus EvalShuffledQuantized(TfLiteContext* context, TfLiteNode* node, return kTfLiteOk; } +// Verifies that sparsity values are valid given input/weight/output. 
+bool VerifySparsity(const RuntimeShape& weights_shape, + const RuntimeShape& input_shape, + const RuntimeShape& output_shape, + const TfLiteSparsity* sparsity) { + const int weights_dims_count = weights_shape.DimensionsCount(); + const int output_dims_count = output_shape.DimensionsCount(); + const int w0_size = sparsity->dim_metadata[0].dense_size; + const int accum_depth = weights_shape.Dims(weights_dims_count - 1); + const int output_elements = output_shape.FlatSize(); + const int input_elements = input_shape.FlatSize(); + const int batches = FlatSizeSkipDim(output_shape, output_dims_count - 1); + const int output_depth = MatchingDim(weights_shape, weights_dims_count - 2, + output_shape, output_dims_count - 1); + const int max_batch_index = batches - 1; + const int max_output = max_batch_index * output_depth + w0_size; + const int max_batch_depth = accum_depth * max_batch_index; + + // Verify output size is enough. + if (output_elements < max_output) return false; + + // Verify index from sparse in input is valid. + for (int i = 0; i < sparsity->dim_metadata[1].array_indices->size; ++i) { + if (input_elements <= + max_batch_depth + sparsity->dim_metadata[1].array_indices->data[i]) + return false; + } + return true; +} + template TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node, TfLiteFullyConnectedParams* params, OpData* data, @@ -919,24 +949,32 @@ TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node, "Unsupported sparse fully-connected weight format."); return kTfLiteError; } + const auto& input_shape = GetTensorShape(input); + const auto& filter_shape = GetTensorShape(filter); + const auto& output_shape = GetTensorShape(output); + const auto& bias_shape = GetTensorShape(bias); + if (!VerifySparsity(filter_shape, input_shape, output_shape, &sparsity)) { + TF_LITE_KERNEL_LOG(context, "Invalid sparse fully-connected format."); + return kTfLiteError; + } if (sparsity.dim_metadata_size == kDimMetadataSizeRandomSparse) { // Random sparse. 
optimized_ops::FullyConnectedSparseWeight( - sparsity, op_params, GetTensorShape(input), - GetTensorData(input), GetTensorShape(filter), - GetTensorData(filter), GetTensorShape(bias), - GetTensorData(bias), GetTensorShape(output), - GetTensorData(output)); + sparsity, op_params, // Disable formatting + input_shape, GetTensorData(input), // Disable formatting + filter_shape, GetTensorData(filter), // Disable formatting + bias_shape, GetTensorData(bias), // Disable formatting + output_shape, GetTensorData(output)); } else if (sparsity.dim_metadata_size == kDimMetadataSizeBlockSparse && sparsity.dim_metadata[2].dense_size == 4) { // Block sparse with block size of 1x4. optimized_ops::FullyConnectedSparseWeight1x4( - sparsity, op_params, GetTensorShape(input), - GetTensorData(input), GetTensorShape(filter), - GetTensorData(filter), GetTensorShape(bias), - GetTensorData(bias), GetTensorShape(output), - GetTensorData(output), + sparsity, op_params, // Disable formatting + input_shape, GetTensorData(input), // Disable formatting + filter_shape, GetTensorData(filter), // Disable formatting + bias_shape, GetTensorData(bias), // Disable formatting + output_shape, GetTensorData(output), CpuBackendContext::GetFromContext(context)); } else { TF_LITE_KERNEL_LOG(context, diff --git a/tensorflow/lite/kernels/internal/common.h b/tensorflow/lite/kernels/internal/common.h index c433fc8817fe53..bd80d68c617db7 100644 --- a/tensorflow/lite/kernels/internal/common.h +++ b/tensorflow/lite/kernels/internal/common.h @@ -75,6 +75,7 @@ float ActivationFunction(float x) { inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size, const float* bias_data, int array_size, float* array_data) { + if (bias_size == 0) return; // Note: see b/132215220: in May 2019 we thought it would be OK to replace // this with the Eigen one-liner: // return (array.colwise() + bias).cwiseMin(clamp_max).cwiseMin(clamp_max). 
diff --git a/tensorflow/lite/schema/upgrade_schema_test.py b/tensorflow/lite/schema/upgrade_schema_test.py index e55925053e0ae9..99154ccb205b7f 100644 --- a/tensorflow/lite/schema/upgrade_schema_test.py +++ b/tensorflow/lite/schema/upgrade_schema_test.py @@ -254,13 +254,13 @@ class TestSchemaUpgrade(test_util.TensorFlowTestCase): def testNonExistentFile(self): converter = upgrade_schema_lib.Converter() - non_existent = tempfile.mktemp(suffix=".json") + _, non_existent = tempfile.mkstemp(suffix=".json") # safe to ignore fd with self.assertRaisesRegex(IOError, "No such file or directory"): converter.Convert(non_existent, non_existent) def testInvalidExtension(self): converter = upgrade_schema_lib.Converter() - invalid_extension = tempfile.mktemp(suffix=".foo") + _, invalid_extension = tempfile.mkstemp(suffix=".foo") # safe to ignore fd with self.assertRaisesRegex(ValueError, "Invalid extension on input"): converter.Convert(invalid_extension, invalid_extension) with tempfile.NamedTemporaryFile(suffix=".json", mode="w+") as in_json: diff --git a/tensorflow/lite/tools/optimize/sparsity/format_converter.cc b/tensorflow/lite/tools/optimize/sparsity/format_converter.cc index c5a7778371eb7e..3fabaffd735c47 100644 --- a/tensorflow/lite/tools/optimize/sparsity/format_converter.cc +++ b/tensorflow/lite/tools/optimize/sparsity/format_converter.cc @@ -245,10 +245,12 @@ FormatConverter::FormatConverter(const std::vector& shape, block_size_.resize(block_map_.size()); for (int i = 0; i < original_rank; i++) { if (block_dim < block_map_.size() && block_map_[block_dim] == i) { - int orig_dim = traversal_order_[original_rank + block_dim]; - block_size_[block_dim] = sparsity.dim_metadata[orig_dim].dense_size; - blocked_shape_[i] = shape[i] / sparsity.dim_metadata[orig_dim].dense_size; - block_dim++; + if (original_rank + block_dim < traversal_order_.size()) { + int orig_dim = traversal_order_[original_rank + block_dim]; + block_size_[block_dim] = 
sparsity.dim_metadata[orig_dim].dense_size; + blocked_shape_[i] = shape[i] / sparsity.dim_metadata[orig_dim].dense_size; + block_dim++; + } } else { blocked_shape_[i] = shape[i]; } @@ -291,13 +293,15 @@ void FormatConverter::Populate(const T* src_data, std::vector indices, Populate(src_data, indices, level + 1, prev_idx * shape_of_level + i, src_data_ptr, dest_data); } - } else { + } else if (prev_idx + 1 < dim_metadata_[metadata_idx].size()) { const auto& array_segments = dim_metadata_[metadata_idx]; const auto& array_indices = dim_metadata_[metadata_idx + 1]; for (int i = array_segments[prev_idx]; i < array_segments[prev_idx + 1]; i++) { - indices[level] = array_indices[i]; - Populate(src_data, indices, level + 1, i, src_data_ptr, dest_data); + if (i < array_indices.size() && level < indices.size()) { + indices[level] = array_indices[i]; + Populate(src_data, indices, level + 1, i, src_data_ptr, dest_data); + } } } } diff --git a/tensorflow/lite/util.cc b/tensorflow/lite/util.cc index 84dbc16b6079c0..cb2d1ef73a950a 100644 --- a/tensorflow/lite/util.cc +++ b/tensorflow/lite/util.cc @@ -27,6 +27,7 @@ limitations under the License. #include "tensorflow/lite/builtin_ops.h" #include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/core/macros.h" #include "tensorflow/lite/schema/schema_generated.h" namespace tflite { @@ -176,4 +177,19 @@ bool IsValidationSubgraph(const char* name) { // NOLINTNEXTLINE: can't use absl::StartsWith as absl is not allowed. return name && std::string(name).find(kValidationSubgraphNamePrefix) == 0; } + +TfLiteStatus MultiplyAndCheckOverflow(size_t a, size_t b, size_t* product) { + // Multiplying a * b where a and b are size_t cannot result in overflow in a + // size_t accumulator if both numbers have no non-zero bits in their upper + // half. 
+ constexpr size_t size_t_bits = 8 * sizeof(size_t); + constexpr size_t overflow_upper_half_bit_position = size_t_bits / 2; + *product = a * b; + // If neither integer has non-zero bits past 32 bits, this can't overflow. + // Otherwise check using slow division. + if (TFLITE_EXPECT_FALSE((a | b) >> overflow_upper_half_bit_position != 0)) { + if (a != 0 && *product / a != b) return kTfLiteError; + } + return kTfLiteOk; +} } // namespace tflite diff --git a/tensorflow/lite/util.h b/tensorflow/lite/util.h index d9d7f7a0a8e673..e6a1aefcd9e5b8 100644 --- a/tensorflow/lite/util.h +++ b/tensorflow/lite/util.h @@ -99,6 +99,12 @@ constexpr char kValidationSubgraphNamePrefix[] = "VALIDATION:"; // Checks whether the prefix of the subgraph name indicates the subgraph is a // validation subgraph. bool IsValidationSubgraph(const char* name); + +// Multiply two sizes and return kTfLiteError if overflow occurred. +// This is based off tensorflow/overflow.h but is simpler as we already +// have unsigned numbers. It is also generalized to work where sizeof(size_t) +// is not 8. +TfLiteStatus MultiplyAndCheckOverflow(size_t a, size_t b, size_t* product); } // namespace tflite #endif // TENSORFLOW_LITE_UTIL_H_ diff --git a/tensorflow/lite/util_test.cc b/tensorflow/lite/util_test.cc index 46601b908dc690..9b630d515575d4 100644 --- a/tensorflow/lite/util_test.cc +++ b/tensorflow/lite/util_test.cc @@ -22,6 +22,7 @@ limitations under the License.
#include #include +#include "tensorflow/lite/c/c_api_types.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/schema/schema_generated.h" @@ -130,6 +131,13 @@ TEST(ValidationSubgraph, NameIsDetected) { EXPECT_TRUE(IsValidationSubgraph("VALIDATION:main")); } +TEST(MultiplyAndCheckOverflow, Validate) { + size_t res = 0; + EXPECT_TRUE(MultiplyAndCheckOverflow(1, 2, &res) == kTfLiteOk); + EXPECT_FALSE(MultiplyAndCheckOverflow(static_cast(123456789023), + 1223423425, &res) == kTfLiteOk); +} + } // namespace } // namespace tflite diff --git a/tensorflow/python/data/kernel_tests/from_sparse_tensor_slices_test.py b/tensorflow/python/data/kernel_tests/from_sparse_tensor_slices_test.py index ea81b7df1bca8b..f6ba69cdf5b909 100644 --- a/tensorflow/python/data/kernel_tests/from_sparse_tensor_slices_test.py +++ b/tensorflow/python/data/kernel_tests/from_sparse_tensor_slices_test.py @@ -138,6 +138,25 @@ def testEmptySparseTensorSlicesInvalid(self): with self.assertRaises(errors.InvalidArgumentError): sess.run(init_op, feed_dict={st: sparse_feed}) + @combinations.generate(combinations.combine(tf_api_version=1, mode=["graph"])) + def testEmptySparseTensorSlicesInvalid2(self): + """Test a dataset based on invalid `tf.sparse.SparseTensor`.""" + st = array_ops.sparse_placeholder(dtypes.float64) + iterator = dataset_ops.make_initializable_iterator( + dataset_ops.Dataset.from_sparse_tensor_slices(st)) + init_op = iterator.initializer + + with self.cached_session() as sess: + # Test with an empty sparse tensor but with non empty values. + empty_indices = [[]] + empty_values = [] + dense_shape = [1, 1] + sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values, + dense_shape) + # Here, we expect the test to fail when running the feed. 
+ with self.assertRaises(errors.InvalidArgumentError): + sess.run(init_op, feed_dict={st: sparse_feed}) + @combinations.generate(combinations.combine(tf_api_version=2, mode=["eager"])) def testFromSparseTensorSlicesError(self): with self.assertRaises(AttributeError): diff --git a/tensorflow/python/debug/cli/curses_ui_test.py b/tensorflow/python/debug/cli/curses_ui_test.py index 3ffa031923dc53..db844f24a9a82f 100644 --- a/tensorflow/python/debug/cli/curses_ui_test.py +++ b/tensorflow/python/debug/cli/curses_ui_test.py @@ -90,8 +90,9 @@ def __init__(self, # Override the default path to the command history file to avoid test # concurrency issues. + _, history_file_path = tempfile.mkstemp() # safe to ignore fd self._command_history_store = debugger_cli_common.CommandHistory( - history_file_path=tempfile.mktemp()) + history_file_path=history_file_path) # Below, override the _screen_ prefixed member methods that interact with the # actual terminal, so that the mock can run in a terminal-less environment. 
diff --git a/tensorflow/python/debug/cli/debugger_cli_common_test.py b/tensorflow/python/debug/cli/debugger_cli_common_test.py index 93df845c4c585d..6d0bd2bbd906b4 100644 --- a/tensorflow/python/debug/cli/debugger_cli_common_test.py +++ b/tensorflow/python/debug/cli/debugger_cli_common_test.py @@ -253,7 +253,9 @@ def testWriteToFileSucceeds(self): font_attr_segs={0: [(0, 5, "red")], 1: [(0, 7, "blue")]}) - file_path = tempfile.mktemp() + fd, file_path = tempfile.mkstemp() + os.close(fd) # file opened exclusively, so we need to close this + # a better fix would be to make the API take a fd screen_output.write_to_file(file_path) with gfile.Open(file_path, "r") as f: @@ -930,12 +932,13 @@ def testDeregisterNonexistentContext(self): class CommandHistoryTest(test_util.TensorFlowTestCase): def setUp(self): - self._history_file_path = tempfile.mktemp() + self._fd, self._history_file_path = tempfile.mkstemp() self._cmd_hist = debugger_cli_common.CommandHistory( limit=3, history_file_path=self._history_file_path) def tearDown(self): if os.path.isfile(self._history_file_path): + os.close(self._fd) os.remove(self._history_file_path) def _restoreFileReadWritePermissions(self, file_path): @@ -1002,13 +1005,6 @@ def testRepeatingCommandsDoNotGetLoggedRepeatedly(self): self.assertEqual(["help"], self._cmd_hist.most_recent_n(2)) - def testCommandHistoryFileIsCreated(self): - self.assertFalse(os.path.isfile(self._history_file_path)) - self._cmd_hist.add_command("help") - self.assertTrue(os.path.isfile(self._history_file_path)) - with open(self._history_file_path, "rt") as f: - self.assertEqual(["help\n"], f.readlines()) - def testLoadingCommandHistoryFileObeysLimit(self): self._cmd_hist.add_command("help 1") self._cmd_hist.add_command("help 2") diff --git a/tensorflow/python/debug/cli/readline_ui_test.py b/tensorflow/python/debug/cli/readline_ui_test.py index 011ba23fc4d63b..64351ceb6b820a 100644 --- a/tensorflow/python/debug/cli/readline_ui_test.py +++ 
b/tensorflow/python/debug/cli/readline_ui_test.py @@ -35,9 +35,11 @@ class MockReadlineUI(readline_ui.ReadlineUI): """Test subclass of ReadlineUI that bypasses terminal manipulations.""" def __init__(self, on_ui_exit=None, command_sequence=None): + _, config_file_path = tempfile.mkstemp() # safe to ignore fd readline_ui.ReadlineUI.__init__( - self, on_ui_exit=on_ui_exit, - config=cli_config.CLIConfig(config_file_path=tempfile.mktemp())) + self, + on_ui_exit=on_ui_exit, + config=cli_config.CLIConfig(config_file_path=config_file_path)) self._command_sequence = command_sequence self._command_counter = 0 @@ -168,7 +170,7 @@ def callback_for_test(): self.assertTrue(observer["callback_invoked"]) def testIncompleteRedirectWorks(self): - output_path = tempfile.mktemp() + _, output_path = tempfile.mkstemp() # safe to ignore fd ui = MockReadlineUI( command_sequence=["babble -n 2 > %s" % output_path, "exit"]) diff --git a/tensorflow/python/debug/examples/v1/debug_errors.py b/tensorflow/python/debug/examples/v1/debug_errors.py index 5480a9b6f544e0..83c497999e40eb 100644 --- a/tensorflow/python/debug/examples/v1/debug_errors.py +++ b/tensorflow/python/debug/examples/v1/debug_errors.py @@ -44,9 +44,11 @@ def main(_): z = tf.matmul(m, v, name="z") if FLAGS.debug: - config_file_path = ( - tempfile.mktemp(".tfdbg_config") - if FLAGS.use_random_config_path else None) + if FLAGS.use_random_config_path: + # TODO(mihaimaruseac): Safe to ignore fd here? 
+ _, config_file_path = tempfile.mkstemp(".tfdbg_config") + else: + config_file_path = None sess = tf_debug.LocalCLIDebugWrapperSession( sess, ui_type=FLAGS.ui_type, config_file_path=config_file_path) diff --git a/tensorflow/python/debug/examples/v1/debug_keras.py b/tensorflow/python/debug/examples/v1/debug_keras.py index ffc575776c26d2..4f7405a4deea71 100644 --- a/tensorflow/python/debug/examples/v1/debug_keras.py +++ b/tensorflow/python/debug/examples/v1/debug_keras.py @@ -44,9 +44,11 @@ def main(_): sess = tf.Session() if FLAGS.debug: # Use the command-line interface (CLI) of tfdbg. - config_file_path = ( - tempfile.mktemp(".tfdbg_config") - if FLAGS.use_random_config_path else None) + if FLAGS.use_random_config_path: + # TODO(mihaimaruseac): Safe to ignore fd here? + _, config_file_path = tempfile.mkstemp(".tfdbg_config") + else: + config_file_path = None sess = tf_debug.LocalCLIDebugWrapperSession( sess, ui_type=FLAGS.ui_type, config_file_path=config_file_path) elif FLAGS.tensorboard_debug_address: diff --git a/tensorflow/python/debug/examples/v1/debug_mnist_v1.py b/tensorflow/python/debug/examples/v1/debug_mnist_v1.py index cde1fb97ff280d..d2e67e85b41dd0 100644 --- a/tensorflow/python/debug/examples/v1/debug_mnist_v1.py +++ b/tensorflow/python/debug/examples/v1/debug_mnist_v1.py @@ -214,9 +214,11 @@ def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu): "The --debug and --tensorboard_debug_address flags are mutually " "exclusive.") if FLAGS.debug: - config_file_path = ( - tempfile.mktemp(".tfdbg_config") - if FLAGS.use_random_config_path else None) + if FLAGS.use_random_config_path: + # TODO(mihaimaruseac): Safe to ignore fd here? 
+ _, config_file_path = tempfile.mkstemp(".tfdbg_config") + else: + config_file_path = None sess = tf_debug.LocalCLIDebugWrapperSession( sess, ui_type=FLAGS.ui_type, config_file_path=config_file_path) elif FLAGS.tensorboard_debug_address: diff --git a/tensorflow/python/debug/examples/v1/debug_tflearn_iris.py b/tensorflow/python/debug/examples/v1/debug_tflearn_iris.py index 81f41247fd35dc..0de57ef2f81a31 100644 --- a/tensorflow/python/debug/examples/v1/debug_tflearn_iris.py +++ b/tensorflow/python/debug/examples/v1/debug_tflearn_iris.py @@ -62,9 +62,11 @@ def test_input_fn(): "exclusive.") hooks = [] if FLAGS.debug: - config_file_path = ( - tempfile.mktemp(".tfdbg_config") - if FLAGS.use_random_config_path else None) + if FLAGS.use_random_config_path: + # TODO(mihaimaruseac): Safe to ignore fd here? + _, config_file_path = tempfile.mkstemp(".tfdbg_config") + else: + config_file_path = None hooks.append( tf_debug.LocalCLIDebugHook( ui_type=FLAGS.ui_type, diff --git a/tensorflow/python/debug/lib/debug_data_test.py b/tensorflow/python/debug/lib/debug_data_test.py index d7ba5cde1f7dbb..e50c7498c096b7 100644 --- a/tensorflow/python/debug/lib/debug_data_test.py +++ b/tensorflow/python/debug/lib/debug_data_test.py @@ -151,8 +151,7 @@ def testDumpSizeBytesIsNoneForNonexistentFilePath(self): class DebugDumpDirTest(test_util.TensorFlowTestCase): def setUp(self): - self._dump_root = tempfile.mktemp() - os.mkdir(self._dump_root) + self._dump_root = tempfile.mkdtemp() def tearDown(self): # Tear down temporary dump directory. @@ -183,7 +182,7 @@ def _makeDataDirWithMultipleDevicesAndDuplicateNodeNames(self): def testDebugDumpDir_nonexistentDumpRoot(self): with self.assertRaisesRegex(IOError, "does not exist"): - debug_data.DebugDumpDir(tempfile.mktemp() + "_foo") + debug_data.DebugDumpDir(tempfile.mkdtemp() + "_foo") def testDebugDumpDir_invalidFileNamingPattern(self): # File name with too few underscores should lead to an exception. 
diff --git a/tensorflow/python/debug/lib/source_utils_test.py b/tensorflow/python/debug/lib/source_utils_test.py index 366b25e89ac367..ab0dbe616e3fcd 100644 --- a/tensorflow/python/debug/lib/source_utils_test.py +++ b/tensorflow/python/debug/lib/source_utils_test.py @@ -265,8 +265,8 @@ def testCallingAnnotateSourceWithoutPythonGraphRaisesException(self): def testCallingAnnotateSourceOnUnrelatedSourceFileDoesNotError(self): # Create an unrelated source file. - unrelated_source_path = tempfile.mktemp() - with open(unrelated_source_path, "wt") as source_file: + fd, unrelated_source_path = tempfile.mkstemp() + with open(fd, "wt") as source_file: source_file.write("print('hello, world')\n") self.assertEqual({}, @@ -277,8 +277,8 @@ def testCallingAnnotateSourceOnUnrelatedSourceFileDoesNotError(self): os.remove(unrelated_source_path) def testLoadingPythonSourceFileWithNonAsciiChars(self): - source_path = tempfile.mktemp() - with open(source_path, "wb") as source_file: + fd, source_path = tempfile.mkstemp() + with open(fd, "wb") as source_file: source_file.write(u"print('\U0001f642')\n".encode("utf-8")) source_lines, _ = source_utils.load_source(source_path) self.assertEqual(source_lines, [u"print('\U0001f642')", u""]) diff --git a/tensorflow/python/debug/wrappers/local_cli_wrapper.py b/tensorflow/python/debug/wrappers/local_cli_wrapper.py index 4069bdf1f3ffae..1fb2c1c104fe60 100644 --- a/tensorflow/python/debug/wrappers/local_cli_wrapper.py +++ b/tensorflow/python/debug/wrappers/local_cli_wrapper.py @@ -84,7 +84,7 @@ def __init__(self, self, sess, thread_name_filter=thread_name_filter) if not dump_root: - self._dump_root = tempfile.mktemp(prefix=_DUMP_ROOT_PREFIX) + self._dump_root = tempfile.mkdtemp(prefix=_DUMP_ROOT_PREFIX) else: dump_root = os.path.expanduser(dump_root) if os.path.isfile(dump_root): diff --git a/tensorflow/python/debug/wrappers/local_cli_wrapper_test.py b/tensorflow/python/debug/wrappers/local_cli_wrapper_test.py index 0d930b6e7e08a5..a8a6c1c5174d41 
100644 --- a/tensorflow/python/debug/wrappers/local_cli_wrapper_test.py +++ b/tensorflow/python/debug/wrappers/local_cli_wrapper_test.py @@ -32,7 +32,6 @@ from tensorflow.python.debug.wrappers import local_cli_wrapper from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes -from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.lib.io import file_io @@ -116,7 +115,7 @@ def _launch_cli(self): config_file_path=os.path.join(tempfile.mkdtemp(), ".tfdbg_config"))) self._register_this_run_info(readline_cli) - while True: + while self._command_pointer < len(self._command_sequence): command = self._command_sequence[self._command_pointer] self._command_pointer += 1 @@ -136,7 +135,7 @@ def _launch_cli(self): class LocalCLIDebugWrapperSessionTest(test_util.TensorFlowTestCase): def setUp(self): - self._tmp_dir = tempfile.mktemp() + self._tmp_dir = tempfile.mkdtemp() self.v = variables.VariableV1(10.0, name="v") self.w = variables.VariableV1(21.0, name="w") @@ -178,15 +177,7 @@ def testConstructWrapper(self): local_cli_wrapper.LocalCLIDebugWrapperSession( session.Session(), log_usage=False) - def testConstructWrapperWithExistingEmptyDumpRoot(self): - os.mkdir(self._tmp_dir) - self.assertTrue(os.path.isdir(self._tmp_dir)) - - local_cli_wrapper.LocalCLIDebugWrapperSession( - session.Session(), dump_root=self._tmp_dir, log_usage=False) - def testConstructWrapperWithExistingNonEmptyDumpRoot(self): - os.mkdir(self._tmp_dir) dir_path = os.path.join(self._tmp_dir, "foo") os.mkdir(dir_path) self.assertTrue(os.path.isdir(dir_path)) @@ -197,7 +188,6 @@ def testConstructWrapperWithExistingNonEmptyDumpRoot(self): session.Session(), dump_root=self._tmp_dir, log_usage=False) def testConstructWrapperWithExistingFileDumpRoot(self): - os.mkdir(self._tmp_dir) file_path = os.path.join(self._tmp_dir, "foo") open(file_path, "a").close() # Create the 
file self.assertTrue(os.path.isfile(file_path)) @@ -533,16 +523,6 @@ def testRuntimeErrorShouldBeCaught(self): tf_error = wrapped_sess.observers["tf_errors"][0] self.assertEqual("y", tf_error.op.name) - def testRuntimeErrorBeforeGraphExecutionIsRaised(self): - # Use an impossible device name to cause an error before graph execution. - with ops.device("/device:GPU:1337"): - w = variables.VariableV1([1.0] * 10, name="w") - - wrapped_sess = LocalCLIDebuggerWrapperSessionForTest( - [["run"]], self.sess, dump_root=self._tmp_dir) - with self.assertRaisesRegex(errors.OpError, r".*[Dd]evice.*1337.*"): - wrapped_sess.run(w) - def testRunTillFilterPassesShouldLaunchCLIAtCorrectRun(self): wrapped_sess = LocalCLIDebuggerWrapperSessionForTest( [["run", "-f", "v_greater_than_twelve"], diff --git a/tensorflow/python/framework/test_util.py b/tensorflow/python/framework/test_util.py index eea6c986238944..4ecb3ba85c1df9 100644 --- a/tensorflow/python/framework/test_util.py +++ b/tensorflow/python/framework/test_util.py @@ -2167,14 +2167,13 @@ def testMyOperator(self): """ stream.flush() fd = stream.fileno() - tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir()) - tmp_file = open(tmp_file_path, "w") + tmp_file, tmp_file_path = tempfile.mkstemp(dir=self.get_temp_dir()) orig_fd = os.dup(fd) - os.dup2(tmp_file.fileno(), fd) + os.dup2(tmp_file, fd) try: yield CapturedWrites(tmp_file_path) finally: - tmp_file.close() + os.close(tmp_file) os.dup2(orig_fd, fd) def _AssertProtoEquals(self, a, b, msg=None): diff --git a/tensorflow/python/kernel_tests/BUILD b/tensorflow/python/kernel_tests/BUILD index 4e88cee70c1731..1698b991f3102e 100644 --- a/tensorflow/python/kernel_tests/BUILD +++ b/tensorflow/python/kernel_tests/BUILD @@ -582,6 +582,9 @@ tf_py_test( size = "small", srcs = ["fractional_avg_pool_op_test.py"], shard_count = 5, + tags = [ + "no_oss", + ], deps = [ "//tensorflow/python:array_ops", "//tensorflow/python:client_testlib", @@ -598,6 +601,9 @@ tf_py_test( size = "small", srcs = 
["fractional_max_pool_op_test.py"], shard_count = 5, + tags = [ + "no_oss", + ], deps = [ "//tensorflow/python:array_ops", "//tensorflow/python:client_testlib", diff --git a/tensorflow/python/kernel_tests/array_ops_test.py b/tensorflow/python/kernel_tests/array_ops_test.py index e8dd2500078a1d..97ff2f539b1c92 100644 --- a/tensorflow/python/kernel_tests/array_ops_test.py +++ b/tensorflow/python/kernel_tests/array_ops_test.py @@ -1575,6 +1575,20 @@ def testUnravelIndexZeroDim(self): dims = constant_op.constant([3, 0], dtype=dtype) self.evaluate(array_ops.unravel_index(indices=indices, dims=dims)) + def testUnravelIndexIntegerOverflow(self): + with self.cached_session(): + for dtype in [dtypes.int32, dtypes.int64]: + with self.assertRaisesRegex( + errors.InvalidArgumentError, + r"Input dims product is causing integer overflow"): + indices = constant_op.constant(-0x100000, dtype=dtype) + if dtype == dtypes.int32: + value = 0x10000000 + else: + value = 0x7FFFFFFFFFFFFFFF + dims = constant_op.constant([value, value], dtype=dtype) + self.evaluate(array_ops.unravel_index(indices=indices, dims=dims)) + class GuaranteeConstOpTest(test_util.TensorFlowTestCase): @@ -1703,6 +1717,21 @@ def f(a): output_grad = gradient_checker_v2.compute_gradient(f, [input_tensor]) self.assertAllClose(output_grad[0], np.zeros([1, 4, 4])) + def testOutOfBoundAxis(self): + input_tensor = constant_op.constant([1., 1.]) + input_min = [0] + input_max = [1] + q_input, _, _ = array_ops.quantize(input_tensor, 0, 1, dtypes.qint32) + error = (errors.InvalidArgumentError, ValueError) + with self.assertRaisesRegex(error, + r".*Axis must be less than input dimension.*"): + self.evaluate( + gen_array_ops.dequantize( + input=q_input, + min_range=input_min, + max_range=input_max, + axis=2**31 - 1)) + @test_util.run_all_in_graph_and_eager_modes class SortedSearchTest(test_util.TensorFlowTestCase): diff --git a/tensorflow/python/kernel_tests/attention_ops_test.py 
b/tensorflow/python/kernel_tests/attention_ops_test.py index fd83f4f6510c63..29a69778e55edc 100644 --- a/tensorflow/python/kernel_tests/attention_ops_test.py +++ b/tensorflow/python/kernel_tests/attention_ops_test.py @@ -305,7 +305,7 @@ def testGlimpseNonNormalizedNonCentered(self): def testGlimpseNegativeInput(self): img = np.arange(9).reshape([1, 3, 3, 1]) with self.test_session(): - with self.assertRaises((errors.InternalError, ValueError)): + with self.assertRaises((errors.InvalidArgumentError, ValueError)): result = image_ops.extract_glimpse_v2( img, size=[1023, -63], diff --git a/tensorflow/python/kernel_tests/bincount_op_test.py b/tensorflow/python/kernel_tests/bincount_op_test.py index 4ca81333ab35fb..f08944c865dad6 100644 --- a/tensorflow/python/kernel_tests/bincount_op_test.py +++ b/tensorflow/python/kernel_tests/bincount_op_test.py @@ -332,6 +332,14 @@ def test_invalid_rank(self): gen_math_ops.dense_bincount( input=[[[1, 2, 3], [0, 3, 2]]], weights=[], size=10)) + @test_util.run_in_graph_and_eager_modes + def test_size_is_not_scalar(self): # b/206619828 + with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError), + "Shape must be rank 0 but is rank 1"): + self.evaluate( + gen_math_ops.dense_bincount( + input=[0], size=[1, 1], weights=[3], binary_output=False)) + class SparseBincountOpTest(test_util.TensorFlowTestCase, parameterized.TestCase): @@ -499,6 +507,19 @@ def test_sparse_bincount_col_reduce_binary(self, dtype): weights=[], binary_output=True))) + @test_util.run_in_graph_and_eager_modes + def test_size_is_not_scalar(self): # b/206619828 + with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError), + "Shape must be rank 0 but is rank 1"): + self.evaluate( + gen_math_ops.sparse_bincount( + indices=[[0], [1]], + values=[0, 0], + dense_shape=[1, 1], + size=[1, 1], + weights=[0, 0], + binary_output=False)) + class RaggedBincountOpTest(test_util.TensorFlowTestCase, parameterized.TestCase): @@ -638,6 +659,19 @@ def 
test_ragged_bincount_binary_np_with_weights(self, dtype): size=size, binary_output=True))) + @test_util.run_in_graph_and_eager_modes + def test_size_is_not_scalar(self): # b/206619828 + with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError), + "Shape must be rank 0 but is rank 1"): + self.evaluate( + gen_math_ops.ragged_bincount( + splits=[0, 0, 1], + values=[1], + size=[1, 1], + weights=[0, 0, 0], + binary_output=False, + name=None)) + if __name__ == "__main__": googletest.main() diff --git a/tensorflow/python/kernel_tests/concat_op_test.py b/tensorflow/python/kernel_tests/concat_op_test.py index da4f4f86b0220d..3f1401baa730ec 100644 --- a/tensorflow/python/kernel_tests/concat_op_test.py +++ b/tensorflow/python/kernel_tests/concat_op_test.py @@ -20,6 +20,7 @@ import numpy as np +from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors_impl @@ -574,6 +575,17 @@ def testConcatInvalidAxis(self): t2 = [2] gen_array_ops.concat_v2([t1, t2], 1).eval() + def testConcatInvalidAxisInTfFunction(self): + + @def_function.function + def concat_wrapper(): + y = gen_array_ops.concat_v2( + values=[[1, 2, 3], [4, 5, 6]], axis=0xb500005b) + return y + + with self.assertRaises(ValueError): + concat_wrapper() + def testConcatNegativeAxis(self): with test_util.use_gpu(): t1 = [[1, 2, 3], [4, 5, 6]] diff --git a/tensorflow/python/kernel_tests/fractional_avg_pool_op_test.py b/tensorflow/python/kernel_tests/fractional_avg_pool_op_test.py index 0d5928aefacf5a..54be2f4844f12c 100644 --- a/tensorflow/python/kernel_tests/fractional_avg_pool_op_test.py +++ b/tensorflow/python/kernel_tests/fractional_avg_pool_op_test.py @@ -24,6 +24,7 @@ from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from 
tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_nn_ops @@ -310,6 +311,32 @@ def testDifferentInputTensorShape(self): input_b, row_seq, col_seq, overlapping) self.assertSequenceEqual(expected.shape, actual.shape) + def testNegativeSeqValuesForGradOp(self): + with self.assertRaisesRegex( + errors.InvalidArgumentError, + r"Row sequence tensor values must not be negative.*"): + y = nn_ops.gen_nn_ops.fractional_avg_pool_grad( + orig_input_tensor_shape=[2, 2, 2, 2], + out_backprop=[[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11, + 12]]]], + row_pooling_sequence=[-10, 1, 2, 3], + col_pooling_sequence=[1, 2, 3, 4], + overlapping=True) + + self.evaluate(y) + with self.assertRaisesRegex( + errors.InvalidArgumentError, + r"Column sequence tensor values must not be negative.*"): + z = nn_ops.gen_nn_ops.fractional_avg_pool_grad( + orig_input_tensor_shape=[2, 2, 2, 2], + out_backprop=[[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11, + 12]]]], + row_pooling_sequence=[10, 1, 2, 3], + col_pooling_sequence=[1, 2, -3, 4], + overlapping=True) + + self.evaluate(z) + class FractionalAvgPoolGradTest(test.TestCase): """Tests for FractionalAvgPoolGrad. 
diff --git a/tensorflow/python/kernel_tests/fractional_max_pool_op_test.py b/tensorflow/python/kernel_tests/fractional_max_pool_op_test.py index 2b1e30a8bbd606..3e19a9a4a27277 100644 --- a/tensorflow/python/kernel_tests/fractional_max_pool_op_test.py +++ b/tensorflow/python/kernel_tests/fractional_max_pool_op_test.py @@ -307,6 +307,22 @@ def testDifferentInputTensorShape(self): input_b, row_seq, col_seq, overlapping) self.assertSequenceEqual(expected.shape, actual.shape) + def testDeterminismExceptionThrowing(self): + tensor_shape = (5, 20, 20, 3) + rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500 + with test_util.deterministic_ops(): + with self.assertRaisesRegex( + ValueError, "requires a non-zero seed to be passed in when " + "determinism is enabled"): + nn_ops.fractional_max_pool_v2(rand_mat, [1, 1.5, 1.5, 1]) + nn_ops.fractional_max_pool_v2(rand_mat, [1, 1.5, 1.5, 1], seed=1) + + with self.assertRaisesRegex(ValueError, + 'requires "seed" and "seed2" to be non-zero'): + nn_ops.fractional_max_pool(rand_mat, [1, 1.5, 1.5, 1]) + nn_ops.fractional_max_pool( + rand_mat, [1, 1.5, 1.5, 1], seed=1, seed2=1, deterministic=True) + class FractionalMaxPoolGradTest(test.TestCase): """Tests for FractionalMaxPoolGrad. diff --git a/tensorflow/python/kernel_tests/init_ops_test.py b/tensorflow/python/kernel_tests/init_ops_test.py index 7d7a1907e42ac5..d13721146b5ca9 100644 --- a/tensorflow/python/kernel_tests/init_ops_test.py +++ b/tensorflow/python/kernel_tests/init_ops_test.py @@ -546,7 +546,7 @@ def testMixedDType(self): def testLargeStarts(self): # Test case for GitHub issue 46899. 
with self.session(): - with self.assertRaises(errors_impl.InternalError): + with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)): v = math_ops.range(start=-1e+38, limit=1) self.evaluate(v) diff --git a/tensorflow/python/kernel_tests/logging_ops_test.py b/tensorflow/python/kernel_tests/logging_ops_test.py index b3c30f07d149b7..cb1645d407d8fc 100644 --- a/tensorflow/python/kernel_tests/logging_ops_test.py +++ b/tensorflow/python/kernel_tests/logging_ops_test.py @@ -274,7 +274,7 @@ def testPrintOneTensorStdout(self): self.assertIn((expected + "\n"), printed.contents()) def testPrintTensorsToFile(self): - tmpfile_name = tempfile.mktemp(".printv2_test") + fd, tmpfile_name = tempfile.mkstemp(".printv2_test") tensor_0 = math_ops.range(0, 10) print_op_0 = logging_ops.print_v2(tensor_0, output_stream="file://"+tmpfile_name) @@ -284,14 +284,14 @@ def testPrintTensorsToFile(self): output_stream="file://"+tmpfile_name) self.evaluate(print_op_1) try: - f = open(tmpfile_name, "r") + f = os.fdopen(fd, "r") line_0 = f.readline() expected_0 = "[0 1 2 ... 7 8 9]" self.assertTrue(expected_0 in line_0) line_1 = f.readline() expected_1 = "[11 12 13 ... 17 18 19]" self.assertTrue(expected_1 in line_1) - f.close() + os.close(fd) os.remove(tmpfile_name) except IOError as e: self.fail(e) diff --git a/tensorflow/python/kernel_tests/map_stage_op_test.py b/tensorflow/python/kernel_tests/map_stage_op_test.py index 516fc37517ca57..8600ad1f8d726b 100644 --- a/tensorflow/python/kernel_tests/map_stage_op_test.py +++ b/tensorflow/python/kernel_tests/map_stage_op_test.py @@ -12,12 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +import numpy as np -from tensorflow.python.framework import errors +from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops @@ -32,7 +31,7 @@ class MapStageTest(test.TestCase): @test_util.run_deprecated_v1 def testSimple(self): - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.float32) pi = array_ops.placeholder(dtypes.int64) @@ -44,9 +43,9 @@ def testSimple(self): k, y = stager.get(gi) y = math_ops.reduce_max(math_ops.matmul(y, y)) - G.finalize() + g.finalize() - with self.session(graph=G) as sess: + with self.session(graph=g) as sess: sess.run(stage, feed_dict={x: -1, pi: 0}) for i in range(10): _, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i}) @@ -54,7 +53,7 @@ def testSimple(self): @test_util.run_deprecated_v1 def testMultiple(self): - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.float32) pi = array_ops.placeholder(dtypes.int64) @@ -66,9 +65,9 @@ def testMultiple(self): k, (z, y) = stager.get(gi) y = math_ops.reduce_max(z * math_ops.matmul(y, y)) - G.finalize() + g.finalize() - with self.session(graph=G) as sess: + with self.session(graph=g) as sess: sess.run(stage, feed_dict={x: -1, pi: 0}) for i in range(10): _, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i}) @@ -77,26 +76,25 @@ def testMultiple(self): @test_util.run_deprecated_v1 def testDictionary(self): - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with 
ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.float32) pi = array_ops.placeholder(dtypes.int64) gi = array_ops.placeholder(dtypes.int64) v = 2. * (array_ops.zeros([128, 128]) + x) with ops.device(test.gpu_device_name()): - stager = data_flow_ops.MapStagingArea( - [dtypes.float32, dtypes.float32], - shapes=[[], [128, 128]], - names=['x', 'v']) + stager = data_flow_ops.MapStagingArea([dtypes.float32, dtypes.float32], + shapes=[[], [128, 128]], + names=['x', 'v']) stage = stager.put(pi, {'x': x, 'v': v}) key, ret = stager.get(gi) z = ret['x'] y = ret['v'] y = math_ops.reduce_max(z * math_ops.matmul(y, y)) - G.finalize() + g.finalize() - with self.session(graph=G) as sess: + with self.session(graph=g) as sess: sess.run(stage, feed_dict={x: -1, pi: 0}) for i in range(10): _, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i}) @@ -106,7 +104,7 @@ def testDictionary(self): def testColocation(self): gpu_dev = test.gpu_device_name() - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.float32) v = 2. 
* (array_ops.zeros([128, 128]) + x) @@ -123,58 +121,56 @@ def testColocation(self): self.assertEqual(y.device, '/device:CPU:0') self.assertEqual(z[0].device, '/device:CPU:0') - G.finalize() + g.finalize() @test_util.run_deprecated_v1 def testPeek(self): - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.int32, name='x') pi = array_ops.placeholder(dtypes.int64) gi = array_ops.placeholder(dtypes.int64) p = array_ops.placeholder(dtypes.int32, name='p') with ops.device(test.gpu_device_name()): - stager = data_flow_ops.MapStagingArea( - [ - dtypes.int32, - ], shapes=[[]]) + stager = data_flow_ops.MapStagingArea([ + dtypes.int32, + ], shapes=[[]]) stage = stager.put(pi, [x], [0]) peek = stager.peek(gi) size = stager.size() - G.finalize() + g.finalize() n = 10 - with self.session(graph=G) as sess: + with self.session(graph=g) as sess: for i in range(n): sess.run(stage, feed_dict={x: i, pi: i}) for i in range(n): - self.assertTrue(sess.run(peek, feed_dict={gi: i})[0] == i) + self.assertEqual(sess.run(peek, feed_dict={gi: i})[0], i) - self.assertTrue(sess.run(size) == 10) + self.assertEqual(sess.run(size), 10) @test_util.run_deprecated_v1 def testSizeAndClear(self): - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.float32, name='x') pi = array_ops.placeholder(dtypes.int64) gi = array_ops.placeholder(dtypes.int64) v = 2. 
* (array_ops.zeros([128, 128]) + x) with ops.device(test.gpu_device_name()): - stager = data_flow_ops.MapStagingArea( - [dtypes.float32, dtypes.float32], - shapes=[[], [128, 128]], - names=['x', 'v']) + stager = data_flow_ops.MapStagingArea([dtypes.float32, dtypes.float32], + shapes=[[], [128, 128]], + names=['x', 'v']) stage = stager.put(pi, {'x': x, 'v': v}) size = stager.size() clear = stager.clear() - G.finalize() + g.finalize() - with self.session(graph=G) as sess: + with self.session(graph=g) as sess: sess.run(stage, feed_dict={x: -1, pi: 3}) self.assertEqual(sess.run(size), 1) sess.run(stage, feed_dict={x: -1, pi: 1}) @@ -186,22 +182,23 @@ def testSizeAndClear(self): def testCapacity(self): capacity = 3 - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.int32, name='x') pi = array_ops.placeholder(dtypes.int64, name='pi') gi = array_ops.placeholder(dtypes.int64, name='gi') with ops.device(test.gpu_device_name()): - stager = data_flow_ops.MapStagingArea( - [ - dtypes.int32, - ], capacity=capacity, shapes=[[]]) + stager = data_flow_ops.MapStagingArea([ + dtypes.int32, + ], + capacity=capacity, + shapes=[[]]) stage = stager.put(pi, [x], [0]) get = stager.get() size = stager.size() - G.finalize() + g.finalize() from six.moves import queue as Queue import threading @@ -209,7 +206,7 @@ def testCapacity(self): queue = Queue.Queue() n = 8 - with self.session(graph=G) as sess: + with self.session(graph=g) as sess: # Stage data in a separate thread which will block # when it hits the staging area's capacity and thus # not fill the queue with n tokens @@ -238,13 +235,13 @@ def thread_run(): capacity)) # Should have capacity elements in the staging area - self.assertTrue(sess.run(size) == capacity) + self.assertEqual(sess.run(size), capacity) # Clear the staging area completely for i in range(n): sess.run(get) - self.assertTrue(sess.run(size) == 0) + self.assertEqual(sess.run(size), 0) 
@test_util.run_deprecated_v1 def testMemoryLimit(self): @@ -252,28 +249,28 @@ def testMemoryLimit(self): chunk = 200 * 1024 # 256K capacity = memory_limit // chunk - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.uint8, name='x') pi = array_ops.placeholder(dtypes.int64, name='pi') gi = array_ops.placeholder(dtypes.int64, name='gi') with ops.device(test.gpu_device_name()): - stager = data_flow_ops.MapStagingArea( - [dtypes.uint8], memory_limit=memory_limit, shapes=[[]]) + stager = data_flow_ops.MapStagingArea([dtypes.uint8], + memory_limit=memory_limit, + shapes=[[]]) stage = stager.put(pi, [x], [0]) get = stager.get() size = stager.size() - G.finalize() + g.finalize() from six.moves import queue as Queue import threading - import numpy as np queue = Queue.Queue() n = 8 - with self.session(graph=G) as sess: + with self.session(graph=g) as sess: # Stage data in a separate thread which will block # when it hits the staging area's capacity and thus # not fill the queue with n tokens @@ -303,56 +300,57 @@ def thread_run(): capacity)) # Should have capacity elements in the staging area - self.assertTrue(sess.run(size) == capacity) + self.assertEqual(sess.run(size), capacity) # Clear the staging area completely for i in range(n): sess.run(get) - self.assertTrue(sess.run(size) == 0) + self.assertEqual(sess.run(size), 0) @test_util.run_deprecated_v1 def testOrdering(self): import six import random - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.int32, name='x') pi = array_ops.placeholder(dtypes.int64, name='pi') gi = array_ops.placeholder(dtypes.int64, name='gi') with ops.device(test.gpu_device_name()): - stager = data_flow_ops.MapStagingArea( - [ - dtypes.int32, - ], shapes=[[]], ordered=True) + stager = data_flow_ops.MapStagingArea([ + dtypes.int32, + ], + shapes=[[]], + ordered=True) stage = 
stager.put(pi, [x], [0]) get = stager.get() size = stager.size() - G.finalize() + g.finalize() n = 10 - with self.session(graph=G) as sess: + with self.session(graph=g) as sess: # Keys n-1..0 keys = list(reversed(six.moves.range(n))) for i in keys: sess.run(stage, feed_dict={pi: i, x: i}) - self.assertTrue(sess.run(size) == n) + self.assertEqual(sess.run(size), n) # Check that key, values come out in ascending order for i, k in enumerate(reversed(keys)): get_key, values = sess.run(get) self.assertTrue(i == k == get_key == values) - self.assertTrue(sess.run(size) == 0) + self.assertEqual(sess.run(size), 0) @test_util.run_deprecated_v1 def testPartialDictInsert(self): - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.float32) f = array_ops.placeholder(dtypes.float32) @@ -370,41 +368,39 @@ def testPartialDictInsert(self): size = stager.size() isize = stager.incomplete_size() - G.finalize() + g.finalize() - with self.session(graph=G) as sess: + with self.session(graph=g) as sess: # 0 complete and incomplete entries - self.assertTrue(sess.run([size, isize]) == [0, 0]) + self.assertEqual(sess.run([size, isize]), [0, 0]) # Stage key 0, x and f tuple entries sess.run(stage_xf, feed_dict={pi: 0, x: 1, f: 2}) - self.assertTrue(sess.run([size, isize]) == [0, 1]) + self.assertEqual(sess.run([size, isize]), [0, 1]) # Stage key 1, x and f tuple entries sess.run(stage_xf, feed_dict={pi: 1, x: 1, f: 2}) - self.assertTrue(sess.run([size, isize]) == [0, 2]) + self.assertEqual(sess.run([size, isize]), [0, 2]) # Now complete key 0 with tuple entry v sess.run(stage_v, feed_dict={pi: 0, v: 1}) # 1 complete and 1 incomplete entry - self.assertTrue(sess.run([size, isize]) == [1, 1]) + self.assertEqual(sess.run([size, isize]), [1, 1]) # We can now obtain tuple associated with key 0 - self.assertTrue( - sess.run([key, ret], feed_dict={ - gi: 0 - }) == [0, { + self.assertEqual( + sess.run([key, ret], 
feed_dict={gi: 0}), + [0, { 'x': 1, 'f': 2, 'v': 1 }]) # 0 complete and 1 incomplete entry - self.assertTrue(sess.run([size, isize]) == [0, 1]) + self.assertEqual(sess.run([size, isize]), [0, 1]) # Now complete key 1 with tuple entry v sess.run(stage_v, feed_dict={pi: 1, v: 3}) # We can now obtain tuple associated with key 1 - self.assertTrue( - sess.run([key, ret], feed_dict={ - gi: 1 - }) == [1, { + self.assertEqual( + sess.run([key, ret], feed_dict={gi: 1}), + [1, { 'x': 1, 'f': 2, 'v': 3 @@ -412,7 +408,7 @@ def testPartialDictInsert(self): @test_util.run_deprecated_v1 def testPartialIndexInsert(self): - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.float32) f = array_ops.placeholder(dtypes.float32) @@ -428,35 +424,35 @@ def testPartialIndexInsert(self): size = stager.size() isize = stager.incomplete_size() - G.finalize() + g.finalize() - with self.session(graph=G) as sess: + with self.session(graph=g) as sess: # 0 complete and incomplete entries - self.assertTrue(sess.run([size, isize]) == [0, 0]) + self.assertEqual(sess.run([size, isize]), [0, 0]) # Stage key 0, x and f tuple entries sess.run(stage_xf, feed_dict={pi: 0, x: 1, f: 2}) - self.assertTrue(sess.run([size, isize]) == [0, 1]) + self.assertEqual(sess.run([size, isize]), [0, 1]) # Stage key 1, x and f tuple entries sess.run(stage_xf, feed_dict={pi: 1, x: 1, f: 2}) - self.assertTrue(sess.run([size, isize]) == [0, 2]) + self.assertEqual(sess.run([size, isize]), [0, 2]) # Now complete key 0 with tuple entry v sess.run(stage_v, feed_dict={pi: 0, v: 1}) # 1 complete and 1 incomplete entry - self.assertTrue(sess.run([size, isize]) == [1, 1]) + self.assertEqual(sess.run([size, isize]), [1, 1]) # We can now obtain tuple associated with key 0 - self.assertTrue(sess.run([key, ret], feed_dict={gi: 0}) == [0, [1, 1, 2]]) + self.assertEqual(sess.run([key, ret], feed_dict={gi: 0}), [0, [1, 1, 2]]) # 0 complete and 1 incomplete 
entry - self.assertTrue(sess.run([size, isize]) == [0, 1]) + self.assertEqual(sess.run([size, isize]), [0, 1]) # Now complete key 1 with tuple entry v sess.run(stage_v, feed_dict={pi: 1, v: 3}) # We can now obtain tuple associated with key 1 - self.assertTrue(sess.run([key, ret], feed_dict={gi: 1}) == [1, [1, 3, 2]]) + self.assertEqual(sess.run([key, ret], feed_dict={gi: 1}), [1, [1, 3, 2]]) @test_util.run_deprecated_v1 def testPartialDictGetsAndPeeks(self): - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.float32) f = array_ops.placeholder(dtypes.float32) @@ -480,40 +476,38 @@ def testPartialDictGetsAndPeeks(self): size = stager.size() isize = stager.incomplete_size() - G.finalize() + g.finalize() - with self.session(graph=G) as sess: + with self.session(graph=g) as sess: # 0 complete and incomplete entries - self.assertTrue(sess.run([size, isize]) == [0, 0]) + self.assertEqual(sess.run([size, isize]), [0, 0]) # Stage key 0, x and f tuple entries sess.run(stage_xf, feed_dict={pi: 0, x: 1, f: 2}) - self.assertTrue(sess.run([size, isize]) == [0, 1]) + self.assertEqual(sess.run([size, isize]), [0, 1]) # Stage key 1, x and f tuple entries sess.run(stage_xf, feed_dict={pi: 1, x: 1, f: 2}) - self.assertTrue(sess.run([size, isize]) == [0, 2]) + self.assertEqual(sess.run([size, isize]), [0, 2]) # Now complete key 0 with tuple entry v sess.run(stage_v, feed_dict={pi: 0, v: 1}) # 1 complete and 1 incomplete entry - self.assertTrue(sess.run([size, isize]) == [1, 1]) + self.assertEqual(sess.run([size, isize]), [1, 1]) # We can now peek at 'x' and 'f' values associated with key 0 - self.assertTrue(sess.run(peek_xf, feed_dict={pei: 0}) == {'x': 1, 'f': 2}) + self.assertEqual(sess.run(peek_xf, feed_dict={pei: 0}), {'x': 1, 'f': 2}) # Peek at 'v' value associated with key 0 - self.assertTrue(sess.run(peek_v, feed_dict={pei: 0}) == {'v': 1}) + self.assertEqual(sess.run(peek_v, feed_dict={pei: 
0}), {'v': 1}) # 1 complete and 1 incomplete entry - self.assertTrue(sess.run([size, isize]) == [1, 1]) + self.assertEqual(sess.run([size, isize]), [1, 1]) # We can now obtain 'x' and 'f' values associated with key 0 - self.assertTrue( - sess.run([key_xf, get_xf], feed_dict={ - gi: 0 - }) == [0, { + self.assertEqual( + sess.run([key_xf, get_xf], feed_dict={gi: 0}), [0, { 'x': 1, 'f': 2 }]) # Still have 1 complete and 1 incomplete entry - self.assertTrue(sess.run([size, isize]) == [1, 1]) + self.assertEqual(sess.run([size, isize]), [1, 1]) # We can no longer get 'x' and 'f' from key 0 with self.assertRaises(errors.InvalidArgumentError) as cm: @@ -521,40 +515,36 @@ def testPartialDictGetsAndPeeks(self): exc_str = ("Tensor at index '0' for key '0' " 'has already been removed.') - self.assertTrue(exc_str in cm.exception.message) + self.assertIn(exc_str, cm.exception.message) # Obtain 'v' value associated with key 0 - self.assertTrue( - sess.run([key_v, get_v], feed_dict={ - gi: 0 - }) == [0, { + self.assertEqual( + sess.run([key_v, get_v], feed_dict={gi: 0}), [0, { 'v': 1 }]) # 0 complete and 1 incomplete entry - self.assertTrue(sess.run([size, isize]) == [0, 1]) + self.assertEqual(sess.run([size, isize]), [0, 1]) # Now complete key 1 with tuple entry v sess.run(stage_v, feed_dict={pi: 1, v: 1}) # 1 complete and 1 incomplete entry - self.assertTrue(sess.run([size, isize]) == [1, 0]) + self.assertEqual(sess.run([size, isize]), [1, 0]) # Pop without key to obtain 'x' and 'f' values associated with key 1 - self.assertTrue(sess.run([pop_key_xf, pop_xf]) == [1, {'x': 1, 'f': 2}]) + self.assertEqual(sess.run([pop_key_xf, pop_xf]), [1, {'x': 1, 'f': 2}]) # still 1 complete and 1 incomplete entry - self.assertTrue(sess.run([size, isize]) == [1, 0]) + self.assertEqual(sess.run([size, isize]), [1, 0]) # We can now obtain 'x' and 'f' values associated with key 1 - self.assertTrue( - sess.run([pop_key_v, pop_v], feed_dict={ - pi: 1 - }) == [1, { + self.assertEqual( + 
sess.run([pop_key_v, pop_v], feed_dict={pi: 1}), [1, { 'v': 1 }]) # Nothing is left - self.assertTrue(sess.run([size, isize]) == [0, 0]) + self.assertEqual(sess.run([size, isize]), [0, 0]) @test_util.run_deprecated_v1 def testPartialIndexGets(self): - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.float32) f = array_ops.placeholder(dtypes.float32) @@ -572,28 +562,72 @@ def testPartialIndexGets(self): size = stager.size() isize = stager.incomplete_size() - G.finalize() + g.finalize() - with self.session(graph=G) as sess: + with self.session(graph=g) as sess: # Stage complete tuple sess.run(stage_xvf, feed_dict={pi: 0, x: 1, f: 2, v: 3}) - self.assertTrue(sess.run([size, isize]) == [1, 0]) + self.assertEqual(sess.run([size, isize]), [1, 0]) # Partial get using indices - self.assertTrue( - sess.run([key_xf, get_xf], feed_dict={ - gi: 0 - }) == [0, [1, 2]]) + self.assertEqual( + sess.run([key_xf, get_xf], feed_dict={gi: 0}), [0, [1, 2]]) # Still some of key 0 left - self.assertTrue(sess.run([size, isize]) == [1, 0]) + self.assertEqual(sess.run([size, isize]), [1, 0]) # Partial get of remaining index - self.assertTrue(sess.run([key_v, get_v], feed_dict={gi: 0}) == [0, [3]]) + self.assertEqual(sess.run([key_v, get_v], feed_dict={gi: 0}), [0, [3]]) # All gone - self.assertTrue(sess.run([size, isize]) == [0, 0]) + self.assertEqual(sess.run([size, isize]), [0, 0]) + + @test_util.run_deprecated_v1 + def testNonScalarKeyOrderedMap(self): + with ops.Graph().as_default() as g: + x = array_ops.placeholder(dtypes.float32) + v = 2. 
* (array_ops.zeros([128, 128]) + x) + t = data_flow_ops.gen_data_flow_ops.ordered_map_stage( + key=constant_op.constant(value=[1], shape=(1, 3), dtype=dtypes.int64), + indices=np.array([[6]]), + values=[x, v], + dtypes=[dtypes.int64], + capacity=0, + memory_limit=0, + container='container1', + shared_name='', + name=None) + + g.finalize() + + with self.session(graph=g) as sess: + with self.assertRaisesRegex(errors.InvalidArgumentError, + 'key must be an int64 scalar'): + sess.run(t, feed_dict={x: 1}) + + @test_util.run_deprecated_v1 + def testNonScalarKeyUnorderedMap(self): + with ops.Graph().as_default() as g: + x = array_ops.placeholder(dtypes.float32) + v = 2. * (array_ops.zeros([128, 128]) + x) + t = data_flow_ops.gen_data_flow_ops.map_stage( + key=constant_op.constant(value=[1], shape=(1, 3), dtype=dtypes.int64), + indices=np.array([[6]]), + values=[x, v], + dtypes=[dtypes.int64], + capacity=0, + memory_limit=0, + container='container1', + shared_name='', + name=None) + + g.finalize() + + with self.session(graph=g) as sess: + with self.assertRaisesRegex(errors.InvalidArgumentError, + 'key must be an int64 scalar'): + sess.run(t, feed_dict={x: 1}) if __name__ == '__main__': diff --git a/tensorflow/python/kernel_tests/segment_reduction_ops_test.py b/tensorflow/python/kernel_tests/segment_reduction_ops_test.py index 4313fced7323ff..b414e0daa9725c 100644 --- a/tensorflow/python/kernel_tests/segment_reduction_ops_test.py +++ b/tensorflow/python/kernel_tests/segment_reduction_ops_test.py @@ -277,7 +277,7 @@ def testInvalidIds(self): math_ops.segment_prod, ]: with self.cached_session(use_gpu=False): - with self.assertRaises((ValueError, errors_impl.InternalError)): + with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)): s = op(data=np.ones((1, 10, 1)), segment_ids=[1676240524292489355]) self.evaluate(s) @@ -757,6 +757,19 @@ def testSegmentsInvalid7(self): with self.assertRaisesOpError("segment ids must be >= 0"): self.evaluate(s) + 
@test_util.run_deprecated_v1 + def testSegmentsInvalid8(self): + tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32) + ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean] + segment_indices = [2**62 - 1] + tf_indices = [2**62 - 1] + with self.session(use_gpu=False): + for tf_op in ops_list: + s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices) + with self.assertRaisesOpError( + "Encountered overflow when multiplying"): + self.evaluate(s) + def testSegmentWithNumSegmentsValid(self): # Baseline for the test*WithNumSegmentsInvalid* methods below. tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32) diff --git a/tensorflow/python/kernel_tests/shape_ops_test.py b/tensorflow/python/kernel_tests/shape_ops_test.py index cfd216f1d3fbe8..3380d47c88accd 100644 --- a/tensorflow/python/kernel_tests/shape_ops_test.py +++ b/tensorflow/python/kernel_tests/shape_ops_test.py @@ -728,7 +728,7 @@ def testLargeTensor(self): if test_util.is_xla_enabled(): # The following test fails with XLA enabled. 
return - with self.assertRaises(errors_impl.InternalError): + with self.assertRaises(errors_impl.InvalidArgumentError): with self.cached_session(): tiled = array_ops.tile( np.ones((1, 1, 1)), [100000000, 100000000, 100000000]) diff --git a/tensorflow/python/kernel_tests/sparse_ops_test.py b/tensorflow/python/kernel_tests/sparse_ops_test.py index bef02fb6dbb48d..bfaf7f6f03dc8f 100644 --- a/tensorflow/python/kernel_tests/sparse_ops_test.py +++ b/tensorflow/python/kernel_tests/sparse_ops_test.py @@ -786,6 +786,39 @@ def disabledtestSparseReduceSumOrMaxShape(self): self._testSparseReduceShape(sp_t, [-1], 2, keep_dims, do_sum) self._testSparseReduceShape(sp_t, [1, -2], 2, keep_dims, do_sum) + def testIntegerOverflow(self): + with self.cached_session(use_gpu=False): + with self.assertRaises(errors.InvalidArgumentError): + res = sparse_ops.gen_sparse_ops.sparse_reduce_max( + input_indices=[[1, 2], [3, 4]], + input_shape=[2**32, 2**31], + input_values=[1, 3], + reduction_axes=[0], + keep_dims=False, + name=None) + + self.evaluate(res) + with self.assertRaises(errors.InvalidArgumentError): + res = sparse_ops.gen_sparse_ops.sparse_reduce_max_sparse( + input_indices=[[1, 2], [3, 4]], + input_shape=[2**32, 2**31], + input_values=[1, 3], + reduction_axes=[0], + keep_dims=False, + name=None) + + self.evaluate(res) + with self.assertRaises(errors.InvalidArgumentError): + res = sparse_ops.gen_sparse_ops.sparse_reduce_sum( + input_indices=[[1, 2], [3, 4]], + input_shape=[2**32, 2**31], + input_values=[1, 3], + reduction_axes=[0], + keep_dims=False, + name=None) + + self.evaluate(res) + class SparseMathOpsTest(test_util.TensorFlowTestCase): @@ -951,6 +984,25 @@ def testGradient(self): y_tf.values, (nnz,)) self.assertLess(err, 1e-4) + def testIntegerOverflow(self): + with self.cached_session(use_gpu=False): + with self.assertRaises(errors.InvalidArgumentError): + res = sparse_ops.gen_sparse_ops.sparse_softmax( + sp_indices=[[1, 1]], + sp_values=[2.0], + sp_shape=[2**32, 2**31], + 
name=None) + + self.evaluate(res) + + def testReshapeNegativeShape(self): + with self.cached_session(use_gpu=False): + with self.assertRaises(errors.InvalidArgumentError): + res = sparse_ops.gen_sparse_ops.sparse_softmax( + sp_indices=[[1, 1]], sp_values=[2.0], sp_shape=[-1, 1], name=None) + + self.evaluate(res) + class SparseMinimumMaximumTest(test_util.TensorFlowTestCase): diff --git a/tensorflow/python/kernel_tests/sparse_reshape_op_test.py b/tensorflow/python/kernel_tests/sparse_reshape_op_test.py index ab98c9a3deb718..146549859a7bea 100644 --- a/tensorflow/python/kernel_tests/sparse_reshape_op_test.py +++ b/tensorflow/python/kernel_tests/sparse_reshape_op_test.py @@ -103,6 +103,28 @@ def testSameShape(self): self.assertAllEqual(output_val.values, input_val.values) self.assertAllEqual(output_val.dense_shape, input_val.dense_shape) + def testReshapeIntegeroverflow(self): + with self.session(): + with self.assertRaises(errors.InvalidArgumentError): + sp_output = sparse_ops.gen_sparse_ops.sparse_reshape( + input_indices=[[0, 0]], + input_shape=[2**32, 2**31], + new_shape=[1, 1], + name=None) + + self.evaluate(sp_output) + + def testReshapeNegativeShape(self): + with self.session(): + with self.assertRaises(errors.InvalidArgumentError): + sp_output = sparse_ops.gen_sparse_ops.sparse_reshape( + input_indices=[[0, 0]], + input_shape=[1, -1], + new_shape=[1, 1], + name=None) + + self.evaluate(sp_output) + @test_util.run_deprecated_v1 def testFeedSameShape(self): with self.session() as sess: diff --git a/tensorflow/python/kernel_tests/sparse_slice_op_test.py b/tensorflow/python/kernel_tests/sparse_slice_op_test.py index a363f80c2fbf00..3c70f108898b17 100644 --- a/tensorflow/python/kernel_tests/sparse_slice_op_test.py +++ b/tensorflow/python/kernel_tests/sparse_slice_op_test.py @@ -20,6 +20,7 @@ import numpy as np +from tensorflow.python.framework import errors from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from 
tensorflow.python.ops import gradient_checker @@ -261,6 +262,27 @@ def testGradients(self): [sp_input.values], [(nnz_in,)], sp_output.values, (nnz_out,)) self.assertLess(err, 1e-3) + def testNegativeSize(self): + with self.session(use_gpu=False): + with self.assertRaises(errors.InvalidArgumentError): + res = sparse_ops.gen_sparse_ops.sparse_slice( + indices=[[0, 0]], + values=[0], + shape=[1, 1], + start=[10, 10], + size=[-100, 100]) + self.evaluate(res) + + def testLargeSize(self): + with self.session(use_gpu=False): + # Confirm potential integer overflow due to size is handled by op. + res = sparse_ops.gen_sparse_ops.sparse_slice( + indices=[[0, 0]], + values=[0], + shape=[1, 1], + start=[2**62, -1], + size=[2**62, 2**62]) + self.evaluate(res) if __name__ == '__main__': test.main() diff --git a/tensorflow/python/kernel_tests/sparse_split_op_test.py b/tensorflow/python/kernel_tests/sparse_split_op_test.py index 31ef1129f1319c..b5cc3f02d9d4c7 100644 --- a/tensorflow/python/kernel_tests/sparse_split_op_test.py +++ b/tensorflow/python/kernel_tests/sparse_split_op_test.py @@ -257,6 +257,15 @@ def testArgumentErrors(self): with self.assertRaisesRegex(ValueError, 'axis is required'): sparse_ops.sparse_split(num_split=2, sp_input=1) + def testInvalidArgumentError(self): + # Test case for GitHub issue 53660. + axis = [1, 2] + with self.assertRaisesRegexp(errors.InvalidArgumentError, + r'axis should be a scalar'): + self.evaluate( + sparse_ops.sparse_split( + sp_input=self._SparseTensor_4x6(), num_split=3, axis=axis)) + if __name__ == '__main__': test.main() diff --git a/tensorflow/python/ops/image_ops_test.py b/tensorflow/python/ops/image_ops_test.py index 67998d609eab04..1909b6eb1b84fb 100644 --- a/tensorflow/python/ops/image_ops_test.py +++ b/tensorflow/python/ops/image_ops_test.py @@ -2260,7 +2260,7 @@ def testInvalidInput(self): # TODO(b/200850176): test fails with XLA. 
return with self.session(): - with self.assertRaises(errors_impl.InternalError): + with self.assertRaises(errors_impl.InvalidArgumentError): v = image_ops.pad_to_bounding_box( image=np.ones((1, 1, 1)), target_height=5191549470, @@ -3178,7 +3178,7 @@ def testPreserveAspectRatioSquare(self): def testLargeDim(self): with self.session(): - with self.assertRaises(errors.InternalError): + with self.assertRaises(errors.InvalidArgumentError): x = np.ones((5, 1, 1, 2)) v = image_ops.resize_images_v2(x, [1610637938, 1610637938], image_ops.ResizeMethod.BILINEAR) @@ -6051,7 +6051,7 @@ def testImageCropAndResize(self): def DISABLED_testImageCropAndResizeWithInvalidInput(self): with self.session(): - with self.assertRaises((errors.InternalError, ValueError)): + with self.assertRaises((errors.InvalidArgumentError, ValueError)): op = image_ops_impl.crop_and_resize_v2( image=np.ones((1, 1, 1, 1)), boxes=np.ones((11, 4)), diff --git a/tensorflow/python/ops/parallel_for/BUILD b/tensorflow/python/ops/parallel_for/BUILD index 75acc2a9712a40..76565faf917152 100644 --- a/tensorflow/python/ops/parallel_for/BUILD +++ b/tensorflow/python/ops/parallel_for/BUILD @@ -113,6 +113,7 @@ cuda_py_test( srcs = ["control_flow_ops_test.py"], shard_count = 16, tags = [ + "no_oss", "no_rocm", ], deps = [ diff --git a/tensorflow/python/ops/raw_ops_test.py b/tensorflow/python/ops/raw_ops_test.py index 6706ef194b221a..8891edba481fee 100644 --- a/tensorflow/python/ops/raw_ops_test.py +++ b/tensorflow/python/ops/raw_ops_test.py @@ -32,7 +32,6 @@ @test_util.run_all_in_graph_and_eager_modes -@test_util.disable_tfrt class RawOpsTest(test.TestCase, parameterized.TestCase): def testSimple(self): @@ -67,8 +66,9 @@ def testDefaults(self): @parameterized.parameters([[0, 8]], [[-1, 6]]) def testStringNGramsBadDataSplits(self, splits): data = ["aa", "bb", "cc", "dd", "ee", "ff"] - with self.assertRaisesRegex(errors.InvalidArgumentError, - "Invalid split value"): + with self.assertRaisesRegex( + 
errors.InvalidArgumentError, + r"Invalid split value|First split value must be 0"): self.evaluate( gen_string_ops.string_n_grams( data=data, @@ -80,6 +80,37 @@ def testStringNGramsBadDataSplits(self, splits): pad_width=0, preserve_short_sequences=False)) + def testStringSplit(self): + data = ["123456"] + data_splits = [0, 1] + separator = "a" * 15 + ngram_widths = [] + pad_width = -5 + left_pad = right_pad = "" + with self.assertRaisesRegex(errors.InvalidArgumentError, + "Pad width should be >= 0"): + self.evaluate(gen_string_ops.string_n_grams( + data=data, + data_splits=data_splits, + separator=separator, + ngram_widths=ngram_widths, + left_pad=left_pad, + right_pad=right_pad, + pad_width=pad_width, + preserve_short_sequences=True)) + with self.assertRaisesRegex(errors.InvalidArgumentError, + "Pad width could lead to integer overflow"): + self.evaluate( + gen_string_ops.string_n_grams( + data=["000.0", "000.0"], + data_splits=[0, 2], + separator="", + ngram_widths=[2**30, 2**30], + left_pad=" ", + right_pad=" ", + pad_width=-2**30, + preserve_short_sequences=False)) + def testGetSessionHandle(self): if context.executing_eagerly(): with self.assertRaisesRegex( diff --git a/tensorflow/python/saved_model/load_test.py b/tensorflow/python/saved_model/load_test.py index 66245452f172b8..c593a377fbd1d1 100644 --- a/tensorflow/python/saved_model/load_test.py +++ b/tensorflow/python/saved_model/load_test.py @@ -204,8 +204,8 @@ def test_control_outputs(self, cycles): imported_graph.control_outputs) def _make_asset(self, contents): - filename = tempfile.mktemp(prefix=self.get_temp_dir()) - with open(filename, "w") as f: + fd, filename = tempfile.mkstemp(prefix=self.get_temp_dir()) + with os.fdopen(fd, "w") as f: f.write(contents) return filename diff --git a/tensorflow/python/saved_model/load_v1_in_v2.py b/tensorflow/python/saved_model/load_v1_in_v2.py index 8d0160bc3a606b..6f950c425b9eb4 100644 --- a/tensorflow/python/saved_model/load_v1_in_v2.py +++ 
b/tensorflow/python/saved_model/load_v1_in_v2.py @@ -138,7 +138,7 @@ def _extract_signatures(self, wrapped, meta_graph_def): for signature_key, signature_def in meta_graph_def.signature_def.items(): if signature_def.inputs: input_items = sorted( - signature_def.inputs.items(), key=lambda item: item[1].name) + signature_def.inputs.items(), key=lambda item: item[0]) original_input_names, input_specs = zip(*input_items) else: original_input_names = [] diff --git a/tensorflow/python/saved_model/load_v1_in_v2_test.py b/tensorflow/python/saved_model/load_v1_in_v2_test.py index b854e588f71c15..22e212cffa4bd8 100644 --- a/tensorflow/python/saved_model/load_v1_in_v2_test.py +++ b/tensorflow/python/saved_model/load_v1_in_v2_test.py @@ -696,6 +696,33 @@ def test_v1_input_ordered(self): self.assertEqual(imported.signatures["serving_default"].inputs[1].name, "input2:0") + def test_resave_signature(self): + # Tests that signatures saved using TF1 can be resaved with TF2. + # See b/211666001 for context. + export_graph = ops.Graph() + with export_graph.as_default(): + a = array_ops.placeholder( + shape=[None, 1], dtype=dtypes.float32, name="input_2") + b = array_ops.placeholder( + shape=[None, 2], dtype=dtypes.float32, name="input_1") + c = array_ops.identity(a) + with session_lib.Session() as session: + path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid())) + simple_save.simple_save( + session, + path, + inputs={"a": a, "b": b}, + outputs={"c": c}) + imported = load.load(path) + path2 = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid())) + save.save(imported, path2, imported.signatures) + + imported2 = load.load(path2) + self.assertEqual( + imported2.signatures["serving_default"]( + a=constant_op.constant([5.]), + b=constant_op.constant([1., 3.]))["c"].numpy(), 5.) 
+ if __name__ == "__main__": test.main() diff --git a/tensorflow/stream_executor/lib/BUILD b/tensorflow/stream_executor/lib/BUILD index d0f57112471860..4cf31bad7079d2 100644 --- a/tensorflow/stream_executor/lib/BUILD +++ b/tensorflow/stream_executor/lib/BUILD @@ -1,6 +1,6 @@ load("//tensorflow:tensorflow.bzl", "filegroup") load("//tensorflow/core/platform:rules_cc.bzl", "cc_library") -load("//tensorflow:tensorflow.bzl", "if_windows", "tf_cc_test") +load("//tensorflow:tensorflow.bzl", "if_windows") load("//tensorflow/stream_executor:build_defs.bzl", "stream_executor_friends") package( @@ -36,21 +36,10 @@ cc_library( deps = [ "//tensorflow/core:lib", "//tensorflow/core:protos_all_cc", + "//tensorflow/core/platform:statusor", "//tensorflow/stream_executor/platform", "@com_google_absl//absl/strings", "@com_google_absl//absl/strings:str_format", "@com_google_absl//absl/types:span", ], ) - -tf_cc_test( - name = "statusor_test", - size = "small", - srcs = ["statusor_test.cc"], - deps = [ - ":lib", - "//tensorflow/core:lib", - "//tensorflow/core:test", - "//tensorflow/core:test_main", - ], -) diff --git a/tensorflow/stream_executor/lib/statusor.h b/tensorflow/stream_executor/lib/statusor.h index 2243fb1b34aa7d..0d574ce6984456 100644 --- a/tensorflow/stream_executor/lib/statusor.h +++ b/tensorflow/stream_executor/lib/statusor.h @@ -1,4 +1,4 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,384 +13,18 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -// StatusOr is the union of a Status object and a T object. 
StatusOr models -// the concept of an object that is either a value, or an error Status -// explaining why such a value is not present. To this end, StatusOr does not -// allow its Status value to be Status::OK. -// -// The primary use-case for StatusOr is as the return value of a -// function which may fail. -// -// Example client usage for a StatusOr, where T is not a pointer: -// -// StatusOr result = DoBigCalculationThatCouldFail(); -// if (result.ok()) { -// float answer = result.ValueOrDie(); -// printf("Big calculation yielded: %f", answer); -// } else { -// LOG(ERROR) << result.status(); -// } -// -// Example client usage for a StatusOr: -// -// StatusOr result = FooFactory::MakeNewFoo(arg); -// if (result.ok()) { -// std::unique_ptr foo(result.ValueOrDie()); -// foo->DoSomethingCool(); -// } else { -// LOG(ERROR) << result.status(); -// } -// -// Example client usage for a StatusOr>: -// -// StatusOr> result = FooFactory::MakeNewFoo(arg); -// if (result.ok()) { -// std::unique_ptr foo = std::move(result.ValueOrDie()); -// foo->DoSomethingCool(); -// } else { -// LOG(ERROR) << result.status(); -// } -// -// Example factory implementation returning StatusOr: -// -// StatusOr FooFactory::MakeNewFoo(int arg) { -// if (arg <= 0) { -// return tensorflow::InvalidArgument("Arg must be positive"); -// } else { -// return new Foo(arg); -// } -// } -// -// Note that the assignment operators require that destroying the currently -// stored value cannot invalidate the argument; in other words, the argument -// cannot be an alias for the current value, or anything owned by the current -// value. 
#ifndef TENSORFLOW_STREAM_EXECUTOR_LIB_STATUSOR_H_ #define TENSORFLOW_STREAM_EXECUTOR_LIB_STATUSOR_H_ -#include "tensorflow/core/platform/macros.h" +#include "tensorflow/core/platform/statusor.h" #include "tensorflow/stream_executor/lib/status.h" -#include "tensorflow/stream_executor/lib/statusor_internals.h" namespace stream_executor { namespace port { -#if defined(__clang__) -// Only clang supports warn_unused_result as a type annotation. -template -class TF_MUST_USE_RESULT StatusOr; -#endif - -template -class StatusOr : private internal_statusor::StatusOrData, - private internal_statusor::TraitsBase< - std::is_copy_constructible::value, - std::is_move_constructible::value> { - template - friend class StatusOr; - - typedef internal_statusor::StatusOrData Base; - - public: - typedef T element_type; // DEPRECATED: use `value_type`. - typedef T value_type; - - // Constructs a new StatusOr with Status::UNKNOWN status. This is marked - // 'explicit' to try to catch cases like 'return {};', where people think - // StatusOr> will be initialized with an empty vector, - // instead of a Status::UNKNOWN status. - explicit StatusOr(); - - // StatusOr will be copy constructible/assignable if T is copy - // constructible. - StatusOr(const StatusOr&) = default; - StatusOr& operator=(const StatusOr&) = default; - - // StatusOr will be move constructible/assignable if T is move - // constructible. - StatusOr(StatusOr&&) = default; - StatusOr& operator=(StatusOr&&) = default; - - // Conversion copy/move constructor, T must be convertible from U. - template ::value>::type* = nullptr> - StatusOr(const StatusOr& other); - template ::value>::type* = nullptr> - StatusOr(StatusOr&& other); - - // Conversion copy/move assignment operator, T must be convertible from U. - template ::value>::type* = nullptr> - StatusOr& operator=(const StatusOr& other); - template ::value>::type* = nullptr> - StatusOr& operator=(StatusOr&& other); - - // Constructs a new StatusOr with the given value. 
After calling this - // constructor, calls to ValueOrDie() will succeed, and calls to status() will - // return OK. - // - // NOTE: Not explicit - we want to use StatusOr as a return type - // so it is convenient and sensible to be able to do 'return T()' - // when the return type is StatusOr. - // - // REQUIRES: T is copy constructible. - StatusOr(const T& value); - - // Constructs a new StatusOr with the given non-ok status. After calling - // this constructor, calls to ValueOrDie() will CHECK-fail. - // - // NOTE: Not explicit - we want to use StatusOr as a return - // value, so it is convenient and sensible to be able to do 'return - // Status()' when the return type is StatusOr. - // - // REQUIRES: !status.ok(). This requirement is DCHECKed. - // In optimized builds, passing Status::OK() here will have the effect - // of passing tensorflow::error::INTERNAL as a fallback. - StatusOr(const Status& status); - StatusOr& operator=(const Status& status); - - // TODO(b/62186997): Add operator=(T) overloads. - - // Similar to the `const T&` overload. - // - // REQUIRES: T is move constructible. - StatusOr(T&& value); - - // RValue versions of the operations declared above. - StatusOr(Status&& status); - StatusOr& operator=(Status&& status); - - // Returns this->status().ok() - bool ok() const { return this->status_.ok(); } - - // Returns a reference to our status. If this contains a T, then - // returns Status::OK(). - const Status& status() const &; - Status status() &&; - - // Returns a reference to our current value, or CHECK-fails if !this->ok(). 
- // - // Note: for value types that are cheap to copy, prefer simple code: - // - // T value = statusor.ValueOrDie(); - // - // Otherwise, if the value type is expensive to copy, but can be left - // in the StatusOr, simply assign to a reference: - // - // T& value = statusor.ValueOrDie(); // or `const T&` - // - // Otherwise, if the value type supports an efficient move, it can be - // used as follows: - // - // T value = std::move(statusor).ValueOrDie(); - // - // The std::move on statusor instead of on the whole expression enables - // warnings about possible uses of the statusor object after the move. - // C++ style guide waiver for ref-qualified overloads granted in cl/143176389 - // See go/ref-qualifiers for more details on such overloads. - const T& ValueOrDie() const &; - T& ValueOrDie() &; - const T&& ValueOrDie() const &&; - T&& ValueOrDie() &&; - - // Returns a reference to the current value. - // - // REQUIRES: this->ok() == true, otherwise the behavior is undefined. - // - // Use this->ok() or `operator bool()` to verify that there is a current - // value. Alternatively, see ValueOrDie() for a similar API that guarantees - // CHECK-failing if there is no current value. - const T& operator*() const&; - T& operator*() &; - const T&& operator*() const&&; - T&& operator*() &&; - - // Returns a pointer to the current value. - // - // REQUIRES: this->ok() == true, otherwise the behavior is undefined. - // - // Use this->ok() or `operator bool()` to verify that there is a current - // value. - const T* operator->() const; - T* operator->(); - - T ConsumeValueOrDie() { return std::move(ValueOrDie()); } - - // Ignores any errors. This method does nothing except potentially suppress - // complaints from any tools that are checking that errors are not dropped on - // the floor. 
- void IgnoreError() const; -}; - -//////////////////////////////////////////////////////////////////////////////// -// Implementation details for StatusOr - -template -StatusOr::StatusOr() : Base(Status(tensorflow::error::UNKNOWN, "")) {} - -template -StatusOr::StatusOr(const T& value) : Base(value) {} - -template -StatusOr::StatusOr(const Status& status) : Base(status) {} - -template -StatusOr& StatusOr::operator=(const Status& status) { - this->Assign(status); - return *this; -} - -template -StatusOr::StatusOr(T&& value) : Base(std::move(value)) {} - -template -StatusOr::StatusOr(Status&& status) : Base(std::move(status)) {} - -template -StatusOr& StatusOr::operator=(Status&& status) { - this->Assign(std::move(status)); - return *this; -} - -template -template ::value>::type*> -inline StatusOr::StatusOr(const StatusOr& other) - : Base(static_cast::Base&>(other)) {} - -template -template ::value>::type*> -inline StatusOr& StatusOr::operator=(const StatusOr& other) { - if (other.ok()) - this->Assign(other.ValueOrDie()); - else - this->Assign(other.status()); - return *this; -} - -template -template ::value>::type*> -inline StatusOr::StatusOr(StatusOr&& other) - : Base(static_cast::Base&&>(other)) {} - -template -template ::value>::type*> -inline StatusOr& StatusOr::operator=(StatusOr&& other) { - if (other.ok()) { - this->Assign(std::move(other).ValueOrDie()); - } else { - this->Assign(std::move(other).status()); - } - return *this; -} - -template -const Status& StatusOr::status() const & { - return this->status_; -} -template -Status StatusOr::status() && { - // Note that we copy instead of moving the status here so that - // ~StatusOrData() can call ok() without invoking UB. - return ok() ? 
Status::OK() : this->status_; -} - -template -const T& StatusOr::ValueOrDie() const & { - this->EnsureOk(); - return this->data_; -} - -template -T& StatusOr::ValueOrDie() & { - this->EnsureOk(); - return this->data_; -} - -template -const T&& StatusOr::ValueOrDie() const && { - this->EnsureOk(); - return std::move(this->data_); -} - -template -T&& StatusOr::ValueOrDie() && { - this->EnsureOk(); - return std::move(this->data_); -} - -template -const T* StatusOr::operator->() const { - this->EnsureOk(); - return &this->data_; -} - -template -T* StatusOr::operator->() { - this->EnsureOk(); - return &this->data_; -} - -template -const T& StatusOr::operator*() const& { - this->EnsureOk(); - return this->data_; -} - -template -T& StatusOr::operator*() & { - this->EnsureOk(); - return this->data_; -} - -template -const T&& StatusOr::operator*() const&& { - this->EnsureOk(); - return std::move(this->data_); -} - -template -T&& StatusOr::operator*() && { - this->EnsureOk(); - return std::move(this->data_); -} - -template -void StatusOr::IgnoreError() const { - // no-op -} +using tensorflow::StatusOr; } // namespace port - -#define TF_ASSERT_OK_AND_ASSIGN(lhs, rexpr) \ - TF_ASSERT_OK_AND_ASSIGN_IMPL( \ - TF_STATUS_MACROS_CONCAT_NAME(_status_or_value, __COUNTER__), lhs, \ - rexpr); - -#define TF_ASSERT_OK_AND_ASSIGN_IMPL(statusor, lhs, rexpr) \ - auto statusor = (rexpr); \ - ASSERT_TRUE(statusor.status().ok()) << statusor.status(); \ - lhs = std::move(statusor.ValueOrDie()) - -#define TF_STATUS_MACROS_CONCAT_NAME(x, y) TF_STATUS_MACROS_CONCAT_IMPL(x, y) -#define TF_STATUS_MACROS_CONCAT_IMPL(x, y) x##y - -#define TF_ASSIGN_OR_RETURN(lhs, rexpr) \ - TF_ASSIGN_OR_RETURN_IMPL( \ - TF_STATUS_MACROS_CONCAT_NAME(_status_or_value, __COUNTER__), lhs, rexpr) - -#define TF_ASSIGN_OR_RETURN_IMPL(statusor, lhs, rexpr) \ - auto statusor = (rexpr); \ - if (TF_PREDICT_FALSE(!statusor.ok())) { \ - return statusor.status(); \ - } \ - lhs = std::move(statusor.ValueOrDie()) - } // namespace 
stream_executor #endif // TENSORFLOW_STREAM_EXECUTOR_LIB_STATUSOR_H_ diff --git a/tensorflow/tensorflow.bzl b/tensorflow/tensorflow.bzl index 975434f8fb2a2f..37559bf1f8d0a8 100644 --- a/tensorflow/tensorflow.bzl +++ b/tensorflow/tensorflow.bzl @@ -48,7 +48,7 @@ load("@bazel_skylib//rules:common_settings.bzl", "BuildSettingInfo") # not contain rc or alpha, only numbers. # Also update tensorflow/core/public/version.h # and tensorflow/tools/pip_package/setup.py -VERSION = "2.5.2" +VERSION = "2.5.3" VERSION_MAJOR = VERSION.split(".")[0] two_gpu_tags = ["requires-gpu-nvidia:2", "notap", "manual", "no_pip"] diff --git a/tensorflow/tools/ci_build/builds/pip_new.sh b/tensorflow/tools/ci_build/builds/pip_new.sh index a4887adcb4e1f8..601d9332735dc0 100755 --- a/tensorflow/tools/ci_build/builds/pip_new.sh +++ b/tensorflow/tools/ci_build/builds/pip_new.sh @@ -255,6 +255,9 @@ PIP_TEST_ROOT=${TF_PIP_TEST_ROOT:-$DEFAULT_PIP_TEST_ROOT} BUILD_BOTH_GPU_PACKAGES=${TF_BUILD_BOTH_GPU_PACKAGES:-$DEFAULT_BUILD_BOTH_GPU_PACKAGES} BUILD_BOTH_CPU_PACKAGES=${TF_BUILD_BOTH_CPU_PACKAGES:-$DEFAULT_BUILD_BOTH_CPU_PACKAGES} +# Override breaking change in setuptools v60 (https://github.com/pypa/setuptools/pull/2896) +export SETUPTOOLS_USE_DISTUTILS=stdlib + # Local variables PIP_WHL_DIR="${KOKORO_ARTIFACTS_DIR}/tensorflow/${PIP_TEST_ROOT}/whl" mkdir -p "${PIP_WHL_DIR}" diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py index bcf189447655b2..cc8287b7e6cf85 100644 --- a/tensorflow/tools/pip_package/setup.py +++ b/tensorflow/tools/pip_package/setup.py @@ -50,7 +50,7 @@ # result for pip. 
# Also update tensorflow/tensorflow.bzl and # tensorflow/core/public/version.h -_VERSION = '2.5.2' +_VERSION = '2.5.3' # We use the same setup.py for all tensorflow_* packages and for the nightly diff --git a/third_party/icu/data/BUILD.bazel b/third_party/icu/data/BUILD.bazel index 80ea92ce9b47d4..ded85987f911f6 100644 --- a/third_party/icu/data/BUILD.bazel +++ b/third_party/icu/data/BUILD.bazel @@ -19,9 +19,28 @@ exports_files(["LICENSE"]) # $ ICU_DATA_FILTER_FILE=filters.json ./runConfigureICU Linux # $ make clean && make # $ cd data/out/tmp -# $ genccode icudt64l.dat -# $ echo 'U_CAPI const void * U_EXPORT2 uprv_getICUData_conversion() { return icudt64l_dat.bytes; }' >> icudt64l_dat.c -# This creates icudt64l_dat.c, which you can move, rename, gzip, then split. +# $ genccode icudt70l.dat # Note: this number must match version, and below too! +# $ echo 'U_CAPI const void * U_EXPORT2 uprv_getICUData_conversion() { return icudt70l_dat.bytes; }' >> icudt70l_dat.c +# +# This creates icudt70l_dat.c, which you can move, rename, gzip, then split, +# for example (but you can change to other numbers): +# $ cp icudt70l_dat.c icu_conversion_data.c +# $ gzip icu_conversion_data.c +# # Note: make sure you don't forget the last . below! +# $ split -a 3 -b 100000 icu_conversion_data.c.gz icu_conversion_data.c.gz. +# +# Then, copy the generated files to this directory, removing existing ones. +# +# The current files have been generated by this filter (in filters.json): +# { +# "localeFilter": { +# "filterType": "language", +# "includelist": [ +# "en" +# ] +# } +# } +# Please make sure to keep this updated if you change the data files. 
filegroup( name = "conversion_files", srcs = glob(["icu_conversion_data.c.gz.*"]), diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aa b/third_party/icu/data/icu_conversion_data.c.gz.aa deleted file mode 100644 index 543b6615708830..00000000000000 Binary files a/third_party/icu/data/icu_conversion_data.c.gz.aa and /dev/null differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aaa b/third_party/icu/data/icu_conversion_data.c.gz.aaa new file mode 100644 index 00000000000000..b11bc8e1c2b268 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aaa differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aab b/third_party/icu/data/icu_conversion_data.c.gz.aab new file mode 100644 index 00000000000000..87460f63f97cb7 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aab differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aac b/third_party/icu/data/icu_conversion_data.c.gz.aac new file mode 100644 index 00000000000000..57ca5485de4bde Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aac differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aad b/third_party/icu/data/icu_conversion_data.c.gz.aad new file mode 100644 index 00000000000000..a182512aab6a60 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aad differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aae b/third_party/icu/data/icu_conversion_data.c.gz.aae new file mode 100644 index 00000000000000..4527fa522cec12 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aae differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aaf b/third_party/icu/data/icu_conversion_data.c.gz.aaf new file mode 100644 index 00000000000000..e1dc807b347f85 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aaf differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aag 
b/third_party/icu/data/icu_conversion_data.c.gz.aag new file mode 100644 index 00000000000000..ed6946008feec8 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aag differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aah b/third_party/icu/data/icu_conversion_data.c.gz.aah new file mode 100644 index 00000000000000..1a474bca1fe728 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aah differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aai b/third_party/icu/data/icu_conversion_data.c.gz.aai new file mode 100644 index 00000000000000..4a78d2f18c6f8b Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aai differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aaj b/third_party/icu/data/icu_conversion_data.c.gz.aaj new file mode 100644 index 00000000000000..5b40d555fdf22e Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aaj differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aak b/third_party/icu/data/icu_conversion_data.c.gz.aak new file mode 100644 index 00000000000000..e43a5cb2b7b7a2 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aak differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aal b/third_party/icu/data/icu_conversion_data.c.gz.aal new file mode 100644 index 00000000000000..8856e1e2cb49da Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aal differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aam b/third_party/icu/data/icu_conversion_data.c.gz.aam new file mode 100644 index 00000000000000..5d0d5e3fae793f Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aam differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aan b/third_party/icu/data/icu_conversion_data.c.gz.aan new file mode 100644 index 00000000000000..9cbff7140acca4 Binary files /dev/null and 
b/third_party/icu/data/icu_conversion_data.c.gz.aan differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aao b/third_party/icu/data/icu_conversion_data.c.gz.aao new file mode 100644 index 00000000000000..b3e8eab98d0e86 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aao differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aap b/third_party/icu/data/icu_conversion_data.c.gz.aap new file mode 100644 index 00000000000000..a3ec92a470fd8c Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aap differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aaq b/third_party/icu/data/icu_conversion_data.c.gz.aaq new file mode 100644 index 00000000000000..cdcdc42024f386 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aaq differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aar b/third_party/icu/data/icu_conversion_data.c.gz.aar new file mode 100644 index 00000000000000..b3d4a2b8396f8c Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aar differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aas b/third_party/icu/data/icu_conversion_data.c.gz.aas new file mode 100644 index 00000000000000..30dd37ff26925e Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aas differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aat b/third_party/icu/data/icu_conversion_data.c.gz.aat new file mode 100644 index 00000000000000..f3e8330204b4aa Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aat differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aau b/third_party/icu/data/icu_conversion_data.c.gz.aau new file mode 100644 index 00000000000000..bd503d27300027 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aau differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aav b/third_party/icu/data/icu_conversion_data.c.gz.aav new file 
mode 100644 index 00000000000000..7be56870f45656 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aav differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aaw b/third_party/icu/data/icu_conversion_data.c.gz.aaw new file mode 100644 index 00000000000000..40057bbc81905a Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aaw differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aax b/third_party/icu/data/icu_conversion_data.c.gz.aax new file mode 100644 index 00000000000000..e3ec8117d5aa65 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aax differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aay b/third_party/icu/data/icu_conversion_data.c.gz.aay new file mode 100644 index 00000000000000..b0c0b5a171b9a0 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aay differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aaz b/third_party/icu/data/icu_conversion_data.c.gz.aaz new file mode 100644 index 00000000000000..8cdd177cfc5308 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aaz differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.ab b/third_party/icu/data/icu_conversion_data.c.gz.ab deleted file mode 100644 index d8cd5108e62fb0..00000000000000 Binary files a/third_party/icu/data/icu_conversion_data.c.gz.ab and /dev/null differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aba b/third_party/icu/data/icu_conversion_data.c.gz.aba new file mode 100644 index 00000000000000..6a892bd60db59d Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aba differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abb b/third_party/icu/data/icu_conversion_data.c.gz.abb new file mode 100644 index 00000000000000..ce05de8084bf6a Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abb differ diff --git 
a/third_party/icu/data/icu_conversion_data.c.gz.abc b/third_party/icu/data/icu_conversion_data.c.gz.abc new file mode 100644 index 00000000000000..e42ebce1ded76f Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abc differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abd b/third_party/icu/data/icu_conversion_data.c.gz.abd new file mode 100644 index 00000000000000..04be858c2e71fa Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abd differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abe b/third_party/icu/data/icu_conversion_data.c.gz.abe new file mode 100644 index 00000000000000..f27572bf716a88 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abe differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abf b/third_party/icu/data/icu_conversion_data.c.gz.abf new file mode 100644 index 00000000000000..b1cd4256152abd Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abf differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abg b/third_party/icu/data/icu_conversion_data.c.gz.abg new file mode 100644 index 00000000000000..f071eb404cef13 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abg differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abh b/third_party/icu/data/icu_conversion_data.c.gz.abh new file mode 100644 index 00000000000000..fcbe80a605b523 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abh differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abi b/third_party/icu/data/icu_conversion_data.c.gz.abi new file mode 100644 index 00000000000000..07b5626d49f7a4 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abi differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abj b/third_party/icu/data/icu_conversion_data.c.gz.abj new file mode 100644 index 00000000000000..17db0aebcaa848 Binary files /dev/null 
and b/third_party/icu/data/icu_conversion_data.c.gz.abj differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abk b/third_party/icu/data/icu_conversion_data.c.gz.abk new file mode 100644 index 00000000000000..1df6d71755c019 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abk differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abl b/third_party/icu/data/icu_conversion_data.c.gz.abl new file mode 100644 index 00000000000000..19065efa8bc25b Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abl differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abm b/third_party/icu/data/icu_conversion_data.c.gz.abm new file mode 100644 index 00000000000000..97fbe53304eff2 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abm differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abn b/third_party/icu/data/icu_conversion_data.c.gz.abn new file mode 100644 index 00000000000000..8b47b3c94def78 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abn differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abo b/third_party/icu/data/icu_conversion_data.c.gz.abo new file mode 100644 index 00000000000000..9985a2de553270 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abo differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abp b/third_party/icu/data/icu_conversion_data.c.gz.abp new file mode 100644 index 00000000000000..ae0a812b9db095 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abp differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abq b/third_party/icu/data/icu_conversion_data.c.gz.abq new file mode 100644 index 00000000000000..8b071f0e6a858e Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abq differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abr b/third_party/icu/data/icu_conversion_data.c.gz.abr new file 
mode 100644 index 00000000000000..f00c95e9246f74 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abr differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abs b/third_party/icu/data/icu_conversion_data.c.gz.abs new file mode 100644 index 00000000000000..c0571dc9adf4fc Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abs differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abt b/third_party/icu/data/icu_conversion_data.c.gz.abt new file mode 100644 index 00000000000000..f6c75209c83128 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abt differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abu b/third_party/icu/data/icu_conversion_data.c.gz.abu new file mode 100644 index 00000000000000..7c049c5550077b Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abu differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abv b/third_party/icu/data/icu_conversion_data.c.gz.abv new file mode 100644 index 00000000000000..a533067e76125a Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abv differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abw b/third_party/icu/data/icu_conversion_data.c.gz.abw new file mode 100644 index 00000000000000..8ad6abb99516e5 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abw differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abx b/third_party/icu/data/icu_conversion_data.c.gz.abx new file mode 100644 index 00000000000000..54e0515a944a09 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abx differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aby b/third_party/icu/data/icu_conversion_data.c.gz.aby new file mode 100644 index 00000000000000..6be26e2dda1f5f Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aby differ diff --git 
a/third_party/icu/data/icu_conversion_data.c.gz.abz b/third_party/icu/data/icu_conversion_data.c.gz.abz new file mode 100644 index 00000000000000..817dd47d5b973d Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abz differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.ac b/third_party/icu/data/icu_conversion_data.c.gz.ac deleted file mode 100644 index bde21d16f57c16..00000000000000 Binary files a/third_party/icu/data/icu_conversion_data.c.gz.ac and /dev/null differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aca b/third_party/icu/data/icu_conversion_data.c.gz.aca new file mode 100644 index 00000000000000..1fac65927fd443 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aca differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acb b/third_party/icu/data/icu_conversion_data.c.gz.acb new file mode 100644 index 00000000000000..f3e6da1f7d0450 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acb differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acc b/third_party/icu/data/icu_conversion_data.c.gz.acc new file mode 100644 index 00000000000000..1fb0cc49281c37 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acc differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acd b/third_party/icu/data/icu_conversion_data.c.gz.acd new file mode 100644 index 00000000000000..60bfeba83255d6 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acd differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.ace b/third_party/icu/data/icu_conversion_data.c.gz.ace new file mode 100644 index 00000000000000..7b60fe5a3ac8d9 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.ace differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acf b/third_party/icu/data/icu_conversion_data.c.gz.acf new file mode 100644 index 00000000000000..dd8ebff2963c99 Binary files /dev/null 
and b/third_party/icu/data/icu_conversion_data.c.gz.acf differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acg b/third_party/icu/data/icu_conversion_data.c.gz.acg new file mode 100644 index 00000000000000..c5015757d328e7 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acg differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.ach b/third_party/icu/data/icu_conversion_data.c.gz.ach new file mode 100644 index 00000000000000..10c50c1d96a574 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.ach differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aci b/third_party/icu/data/icu_conversion_data.c.gz.aci new file mode 100644 index 00000000000000..75be388aee0c11 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aci differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acj b/third_party/icu/data/icu_conversion_data.c.gz.acj new file mode 100644 index 00000000000000..f55b68e633f400 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acj differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.ack b/third_party/icu/data/icu_conversion_data.c.gz.ack new file mode 100644 index 00000000000000..121d97423eb7ea Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.ack differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acl b/third_party/icu/data/icu_conversion_data.c.gz.acl new file mode 100644 index 00000000000000..eafb3b60b47383 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acl differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acm b/third_party/icu/data/icu_conversion_data.c.gz.acm new file mode 100644 index 00000000000000..f7a3b5617bc8c5 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acm differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acn b/third_party/icu/data/icu_conversion_data.c.gz.acn new file 
mode 100644 index 00000000000000..eff17429e724fd Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acn differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aco b/third_party/icu/data/icu_conversion_data.c.gz.aco new file mode 100644 index 00000000000000..8388dc5c141374 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aco differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acp b/third_party/icu/data/icu_conversion_data.c.gz.acp new file mode 100644 index 00000000000000..1e9a4bc18ed96a Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acp differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acq b/third_party/icu/data/icu_conversion_data.c.gz.acq new file mode 100644 index 00000000000000..51a5737930a6a7 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acq differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acr b/third_party/icu/data/icu_conversion_data.c.gz.acr new file mode 100644 index 00000000000000..96e27c26624b34 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acr differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acs b/third_party/icu/data/icu_conversion_data.c.gz.acs new file mode 100644 index 00000000000000..30b0970756d7e3 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acs differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.act b/third_party/icu/data/icu_conversion_data.c.gz.act new file mode 100644 index 00000000000000..21b9688e5e774e Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.act differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acu b/third_party/icu/data/icu_conversion_data.c.gz.acu new file mode 100644 index 00000000000000..cea7d355d07ab2 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acu differ diff --git 
a/third_party/icu/data/icu_conversion_data.c.gz.acv b/third_party/icu/data/icu_conversion_data.c.gz.acv new file mode 100644 index 00000000000000..8ddf19818ced08 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acv differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acw b/third_party/icu/data/icu_conversion_data.c.gz.acw new file mode 100644 index 00000000000000..c9c2bceaaf1930 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acw differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acx b/third_party/icu/data/icu_conversion_data.c.gz.acx new file mode 100644 index 00000000000000..0ca1d9aaf65aa3 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acx differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acy b/third_party/icu/data/icu_conversion_data.c.gz.acy new file mode 100644 index 00000000000000..fbc2459b6a10ab Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acy differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acz b/third_party/icu/data/icu_conversion_data.c.gz.acz new file mode 100644 index 00000000000000..862436c9459487 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acz differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.ad b/third_party/icu/data/icu_conversion_data.c.gz.ad deleted file mode 100644 index f476988a0b24fb..00000000000000 Binary files a/third_party/icu/data/icu_conversion_data.c.gz.ad and /dev/null differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.ada b/third_party/icu/data/icu_conversion_data.c.gz.ada new file mode 100644 index 00000000000000..6034e047321250 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.ada differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.adb b/third_party/icu/data/icu_conversion_data.c.gz.adb new file mode 100644 index 00000000000000..07b519b21c089a Binary files /dev/null 
and b/third_party/icu/data/icu_conversion_data.c.gz.adb differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.adc b/third_party/icu/data/icu_conversion_data.c.gz.adc new file mode 100644 index 00000000000000..12d52c54e02a9e Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.adc differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.add b/third_party/icu/data/icu_conversion_data.c.gz.add new file mode 100644 index 00000000000000..e9995953c924b3 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.add differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.ade b/third_party/icu/data/icu_conversion_data.c.gz.ade new file mode 100644 index 00000000000000..292d09cfd1d457 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.ade differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.adf b/third_party/icu/data/icu_conversion_data.c.gz.adf new file mode 100644 index 00000000000000..dc2c28d019b7b3 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.adf differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.adg b/third_party/icu/data/icu_conversion_data.c.gz.adg new file mode 100644 index 00000000000000..c152c80b1d1ac0 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.adg differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.adh b/third_party/icu/data/icu_conversion_data.c.gz.adh new file mode 100644 index 00000000000000..9fcb83e56560b1 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.adh differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.ae b/third_party/icu/data/icu_conversion_data.c.gz.ae deleted file mode 100644 index 3388b38c1a2b7a..00000000000000 Binary files a/third_party/icu/data/icu_conversion_data.c.gz.ae and /dev/null differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.af b/third_party/icu/data/icu_conversion_data.c.gz.af deleted 
file mode 100644 index 344e3925f39d60..00000000000000 Binary files a/third_party/icu/data/icu_conversion_data.c.gz.af and /dev/null differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.ag b/third_party/icu/data/icu_conversion_data.c.gz.ag deleted file mode 100644 index 249ffddde77176..00000000000000 Binary files a/third_party/icu/data/icu_conversion_data.c.gz.ag and /dev/null differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.ah b/third_party/icu/data/icu_conversion_data.c.gz.ah deleted file mode 100644 index 8893be204197a0..00000000000000 Binary files a/third_party/icu/data/icu_conversion_data.c.gz.ah and /dev/null differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.ai b/third_party/icu/data/icu_conversion_data.c.gz.ai deleted file mode 100644 index e6251e3a11c5b2..00000000000000 Binary files a/third_party/icu/data/icu_conversion_data.c.gz.ai and /dev/null differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aj b/third_party/icu/data/icu_conversion_data.c.gz.aj deleted file mode 100644 index 3e1dc684c93176..00000000000000 Binary files a/third_party/icu/data/icu_conversion_data.c.gz.aj and /dev/null differ diff --git a/third_party/icu/udata.patch b/third_party/icu/udata.patch index 0b65e4ed3886f1..f31a604c15908a 100644 --- a/third_party/icu/udata.patch +++ b/third_party/icu/udata.patch @@ -41,7 +41,8 @@ diff -ru a/icu4c/source/common/udata.cpp b/icu4c/source/common/udata.cpp } - */ + - #if U_PLATFORM_HAS_WINUWP_API == 0 // Windows UWP Platform does not support dll icu data at this time + #if !defined(ICU_DATA_DIR_WINDOWS) + // When using the Windows system data, we expect only a single data file. 
setCommonICUDataPointer(&U_ICUDATA_ENTRY_POINT, FALSE, pErrorCode); { diff -ru a/icu4c/source/common/unicode/uconfig.h b/icu4c/source/common/unicode/uconfig.h diff --git a/third_party/icu/workspace.bzl b/third_party/icu/workspace.bzl index e4ed9669e0c7ee..c2ebd557f77a50 100644 --- a/third_party/icu/workspace.bzl +++ b/third_party/icu/workspace.bzl @@ -2,14 +2,16 @@ load("//third_party:repo.bzl", "tf_http_archive") +# NOTE: If you upgrade this, generate the data files by following the +# instructions in third_party/icu/data/BUILD def repo(): tf_http_archive( name = "icu", - strip_prefix = "icu-release-64-2", - sha256 = "dfc62618aa4bd3ca14a3df548cd65fe393155edd213e49c39f3a30ccd618fc27", + strip_prefix = "icu-release-69-1", + sha256 = "3144e17a612dda145aa0e4acb3caa27a5dae4e26edced64bc351c43d5004af53", urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/github.com/unicode-org/icu/archive/release-64-2.zip", - "https://github.com/unicode-org/icu/archive/release-64-2.zip", + "https://storage.googleapis.com/mirror.tensorflow.org/github.com/unicode-org/icu/archive/release-69-1.zip", + "https://github.com/unicode-org/icu/archive/release-69-1.zip", ], build_file = "//third_party/icu:BUILD.bazel", system_build_file = "//third_party/icu:BUILD.system",