diff --git a/.github/workflows/update-nightly.yml b/.github/workflows/update-nightly.yml deleted file mode 100644 index 0265ffbebe2ec0..00000000000000 --- a/.github/workflows/update-nightly.yml +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2019 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ - -on: - workflow_dispatch: # Allow manual triggers - schedule: - - cron: 0 4 * * * # 4am UTC is 9pm PDT and 8pm PST -name: Set nightly branch to master HEAD -jobs: - master-to-nightly: - if: github.repository == 'tensorflow/tensorflow' # Don't do this in forks - runs-on: ubuntu-latest - steps: - - uses: zofrex/mirror-branch@v1 - name: Set nightly branch to master HEAD - with: - target-branch: 'nightly' diff --git a/.zenodo.json b/.zenodo.json new file mode 100644 index 00000000000000..7161180c51ae3e --- /dev/null +++ b/.zenodo.json @@ -0,0 +1,13 @@ +{ + "description": "TensorFlow is an end-to-end open source platform for machine learning. It has a comprehensive, flexible ecosystem of tools, libraries, and community resources that lets researchers push the state-of-the-art in ML and developers easily build and deploy ML-powered applications.", + "license": "Apache-2.0", + "title": "TensorFlow", + "upload_type": "software", + "creators": [ + { + "name": "TensorFlow Developers" + } + ], + "access_right": "open", + "notes": "Specific TensorFlow versions can be found in the \"Versions\" list on the right side of this page.
See the full list of authors on GitHub." +} diff --git a/RELEASE.md b/RELEASE.md index 57759c9bc55951..7b4186050d415e 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,24 +1,173 @@ -# Release 2.5.0 - - - -## Breaking Changes - -* -* The `TF_CPP_MIN_VLOG_LEVEL` environment variable has been renamed to to - `TF_CPP_MAX_VLOG_LEVEL` which correctly describes its effect. - -## Known Caveats +# Release 2.5.3 + +This release introduces several vulnerability fixes: + +* Fixes a floating point division by 0 when executing convolution operators ([CVE-2022-21725](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21725)) +* Fixes a heap OOB read in shape inference for `ReverseSequence` ([CVE-2022-21728](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21728)) +* Fixes a heap OOB access in `Dequantize` ([CVE-2022-21726](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21726)) +* Fixes an integer overflow in shape inference for `Dequantize` ([CVE-2022-21727](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21727)) +* Fixes a heap OOB access in `FractionalAvgPoolGrad` ([CVE-2022-21730](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21730)) +* Fixes an overflow and divide by zero in `UnravelIndex` ([CVE-2022-21729](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21729)) +* Fixes a type confusion in shape inference for `ConcatV2` ([CVE-2022-21731](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21731)) +* Fixes an OOM in `ThreadPoolHandle` ([CVE-2022-21732](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21732)) +* Fixes an OOM due to integer overflow in `StringNGrams` ([CVE-2022-21733](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21733)) +* Fixes more issues caused by incomplete validation in boosted trees code ([CVE-2021-41208](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41208)) +* Fixes integer overflows in most sparse component-wise ops ([CVE-2022-23567](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23567)) +* Fixes integer overflows in `AddManySparseToTensorsMap` ([CVE-2022-23568](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23568)) +* Fixes a number of `CHECK`-failures in `MapStage` ([CVE-2022-21734](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21734)) +* Fixes a division by zero in `FractionalMaxPool` ([CVE-2022-21735](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21735)) +* Fixes a number of `CHECK`-fails when building invalid/overflowing tensor shapes ([CVE-2022-23569](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23569)) +* Fixes an undefined behavior in `SparseTensorSliceDataset` ([CVE-2022-21736](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21736)) +* Fixes an assertion failure based denial of service via faulty bin count operations ([CVE-2022-21737](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21737)) +* Fixes a reference binding to null pointer in `QuantizedMaxPool` ([CVE-2022-21739](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21739)) +* Fixes an integer overflow leading to crash in `SparseCountSparseOutput` ([CVE-2022-21738](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21738)) +* Fixes a heap overflow in `SparseCountSparseOutput` ([CVE-2022-21740](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21740)) +* Fixes an FPE in `BiasAndClamp` in TFLite ([CVE-2022-23557](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23557)) +* Fixes an FPE in depthwise convolutions in TFLite
([CVE-2022-21741](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21741)) +* Fixes an integer overflow in TFLite array creation ([CVE-2022-23558](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23558)) +* Fixes an integer overflow in TFLite ([CVE-2022-23559](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23559)) +* Fixes a dangerous OOB write in TFLite ([CVE-2022-23561](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23561)) +* Fixes a vulnerability leading to read and write outside of bounds in TFLite ([CVE-2022-23560](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23560)) +* Fixes a set of vulnerabilities caused by using insecure temporary files ([CVE-2022-23563](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23563)) +* Fixes an integer overflow in Range resulting in undefined behavior and OOM ([CVE-2022-23562](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23562)) +* Fixes a vulnerability where missing validation causes `tf.sparse.split` to crash when `axis` is a tuple ([CVE-2021-41206](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41206)) +* Fixes a `CHECK`-fail when decoding resource handles from proto ([CVE-2022-23564](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23564)) +* Fixes a `CHECK`-fail with repeated `AttrDef` ([CVE-2022-23565](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23565)) +* Fixes a heap OOB write in Grappler ([CVE-2022-23566](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23566)) +* Fixes a `CHECK`-fail when decoding invalid tensors from proto ([CVE-2022-23571](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23571)) +* Fixes an uninitialized variable access in `AssignOp` ([CVE-2022-23573](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23573)) +* Fixes an integer overflow in `OpLevelCostEstimator::CalculateTensorSize` ([CVE-2022-23575](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23575)) +* Fixes an integer overflow in `OpLevelCostEstimator::CalculateOutputSize` ([CVE-2022-23576](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23576)) +* Fixes a null dereference in `GetInitOp` ([CVE-2022-23577](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23577)) +* Fixes a memory leak when a graph node is invalid ([CVE-2022-23578](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23578)) +* Fixes an abort caused by allocating a vector that is too large ([CVE-2022-23580](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23580)) +* Fixes multiple `CHECK`-failures during Grappler's `IsSimplifiableReshape` ([CVE-2022-23581](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23581)) +* Fixes multiple `CHECK`-failures during Grappler's `SafeToRemoveIdentity` ([CVE-2022-23579](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23579)) +* Fixes multiple `CHECK`-failures in `TensorByteSize` ([CVE-2022-23582](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23582)) +* Fixes multiple `CHECK`-failures in binary ops due to type confusion ([CVE-2022-23583](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23583)) +* Fixes a use after free in `DecodePng` kernel ([CVE-2022-23584](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23584)) +* Fixes a memory leak in decoding PNG images ([CVE-2022-23585](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23585)) +* Fixes multiple `CHECK`-fails in `function.cc` ([CVE-2022-23586](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23586)) +* Fixes multiple
`CHECK`-fails due to attempting to build a reference tensor ([CVE-2022-23588](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23588)) +* Fixes an integer overflow in Grappler cost estimation of crop and resize operation ([CVE-2022-23587](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23587)) +* Fixes a null pointer dereference in Grappler's `IsConstant` ([CVE-2022-23589](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23589)) +* Fixes a `CHECK` failure in constant folding ([CVE-2021-41197](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41197)) +* Fixes a stack overflow due to self-recursive function in `GraphDef` ([CVE-2022-23591](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23591)) +* Updates `icu` to `69.1` to handle [CVE-2020-10531](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-10531) + +# Release 2.5.2 + +This release introduces several vulnerability fixes: + +* Fixes a code injection issue in `saved_model_cli` ([CVE-2021-41228](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41228)) +* Fixes a vulnerability due to use of uninitialized value in TensorFlow ([CVE-2021-41225](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41225)) +* Fixes a heap OOB in `FusedBatchNorm` kernels ([CVE-2021-41223](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41223)) +* Fixes an arbitrary memory read in `ImmutableConst` ([CVE-2021-41227](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41227)) +* Fixes a heap OOB in `SparseBinCount` ([CVE-2021-41226](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41226)) +* Fixes a heap OOB in `SparseFillEmptyRows` ([CVE-2021-41224](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41224)) +* Fixes a segfault due to negative splits in `SplitV` ([CVE-2021-41222](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41222)) +* Fixes segfaults and vulnerabilities caused by accesses to invalid memory during shape inference in `Cudnn*` ops ([CVE-2021-41221](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41221)) +* Fixes a null pointer exception when `Exit` node is not preceded by `Enter` op ([CVE-2021-41217](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41217)) +* Fixes an integer division by 0 in `tf.raw_ops.AllToAll` ([CVE-2021-41218](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41218)) +* Fixes an undefined behavior via `nullptr` reference binding in sparse matrix multiplication ([CVE-2021-41219](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41219)) +* Fixes a heap buffer overflow in `Transpose` ([CVE-2021-41216](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41216)) +* Prevents deadlocks arising from mutually recursive `tf.function` objects ([CVE-2021-41213](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41213)) +* Fixes a null pointer exception in `DeserializeSparse` ([CVE-2021-41215](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41215)) +* Fixes an undefined behavior arising from reference binding to `nullptr` in `tf.ragged.cross` ([CVE-2021-41214](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41214)) +* Fixes a heap OOB read in `tf.ragged.cross` ([CVE-2021-41212](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41212)) +* Fixes a heap OOB read in all `tf.raw_ops.QuantizeAndDequantizeV*` ops ([CVE-2021-41205](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41205)) +* Fixes an FPE in `ParallelConcat`
([CVE-2021-41207](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41207)) +* Fixes FPE issues in convolutions with zero size filters ([CVE-2021-41209](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41209)) +* Fixes a heap OOB read in `tf.raw_ops.SparseCountSparseOutput` ([CVE-2021-41210](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41210)) +* Fixes vulnerabilities caused by incomplete validation in boosted trees code ([CVE-2021-41208](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41208)) +* Fixes vulnerabilities caused by incomplete validation of shapes in multiple TF ops ([CVE-2021-41206](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41206)) +* Fixes a segfault produced while copying constant resource tensor ([CVE-2021-41204](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41204)) +* Fixes a vulnerability caused by uninitialized access in `EinsumHelper::ParseEquation` ([CVE-2021-41201](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41201)) +* Fixes several vulnerabilities and segfaults caused by missing validation during checkpoint loading ([CVE-2021-41203](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41203)) +* Fixes an overflow producing a crash in `tf.range` ([CVE-2021-41202](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41202)) +* Fixes an overflow producing a crash in `tf.image.resize` when size is large ([CVE-2021-41199](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41199)) +* Fixes an overflow producing a crash in `tf.tile` when tiling tensor is large ([CVE-2021-41198](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41198)) +* Fixes a vulnerability produced due to incomplete validation in `tf.summary.create_file_writer` ([CVE-2021-41200](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41200)) +* Fixes multiple crashes due to overflow and `CHECK`-fail in ops with large tensor shapes ([CVE-2021-41197](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41197)) +* Fixes a crash in `max_pool3d` when size argument is 0 or negative ([CVE-2021-41196](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41196)) +* Fixes a crash in `tf.math.segment_*` operations ([CVE-2021-41195](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-41195)) +* Updates `curl` to `7.78.0` to handle + [CVE-2021-22922](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22922), + [CVE-2021-22923](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22923), + [CVE-2021-22924](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22924), + [CVE-2021-22925](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22925), + and + [CVE-2021-22926](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22926).
+ +# Release 2.5.1 + +This release introduces several vulnerability fixes: + +* Fixes a heap out of bounds access in sparse reduction operations ([CVE-2021-37635](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37635)) +* Fixes a floating point exception in `SparseDenseCwiseDiv` ([CVE-2021-37636](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37636)) +* Fixes a null pointer dereference in `CompressElement` ([CVE-2021-37637](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37637)) +* Fixes a null pointer dereference in `RaggedTensorToTensor` ([CVE-2021-37638](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37638)) +* Fixes a null pointer dereference and a heap OOB read arising from operations restoring tensors ([CVE-2021-37639](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37639)) +* Fixes an integer division by 0 in sparse reshaping ([CVE-2021-37640](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37640)) +* Fixes a division by 0 in `ResourceScatterDiv` ([CVE-2021-37642](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37642)) +* Fixes a heap OOB in `RaggedGather` ([CVE-2021-37641](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37641)) +* Fixes a `std::abort` raised from `TensorListReserve` ([CVE-2021-37644](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37644)) +* Fixes a null pointer dereference in `MatrixDiagPartOp` ([CVE-2021-37643](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37643)) +* Fixes an integer overflow due to conversion to unsigned ([CVE-2021-37645](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37645)) +* Fixes a bad allocation error in `StringNGrams` caused by integer conversion ([CVE-2021-37646](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37646)) +* Fixes a null pointer dereference in `SparseTensorSliceDataset` ([CVE-2021-37647](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37647)) +* Fixes an incorrect validation of `SaveV2` inputs ([CVE-2021-37648](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37648)) +* Fixes a null pointer dereference in `UncompressElement` ([CVE-2021-37649](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37649)) +* Fixes a segfault and a heap buffer overflow in `{Experimental,}DatasetToTFRecord` ([CVE-2021-37650](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37650)) +* Fixes a heap buffer overflow in `FractionalAvgPoolGrad` ([CVE-2021-37651](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37651)) +* Fixes a use after free in boosted trees creation ([CVE-2021-37652](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37652)) +* Fixes a division by 0 in `ResourceGather` ([CVE-2021-37653](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37653)) +* Fixes a heap OOB and a `CHECK` fail in `ResourceGather` ([CVE-2021-37654](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37654)) +* Fixes a heap OOB in `ResourceScatterUpdate` ([CVE-2021-37655](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37655)) +* Fixes an undefined behavior arising from reference binding to nullptr in `RaggedTensorToSparse` ([CVE-2021-37656](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37656)) +* Fixes an undefined behavior arising from reference binding to nullptr in `MatrixDiagV*` ops ([CVE-2021-37657](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37657)) +* Fixes an undefined behavior arising from reference binding to nullptr in `MatrixSetDiagV*` ops 
([CVE-2021-37658](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37658)) +* Fixes an undefined behavior arising from reference binding to nullptr and heap OOB in binary cwise ops ([CVE-2021-37659](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37659)) +* Fixes a division by 0 in inplace operations ([CVE-2021-37660](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37660)) +* Fixes a crash caused by integer conversion to unsigned ([CVE-2021-37661](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37661)) +* Fixes an undefined behavior arising from reference binding to nullptr in boosted trees ([CVE-2021-37662](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37662)) +* Fixes a heap OOB in boosted trees ([CVE-2021-37664](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37664)) +* Fixes vulnerabilities arising from incomplete validation in `QuantizeV2` ([CVE-2021-37663](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37663)) +* Fixes vulnerabilities arising from incomplete validation in MKL requantization ([CVE-2021-37665](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37665)) +* Fixes an undefined behavior arising from reference binding to nullptr in `RaggedTensorToVariant` ([CVE-2021-37666](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37666)) +* Fixes an undefined behavior arising from reference binding to nullptr in unicode encoding ([CVE-2021-37667](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37667)) +* Fixes an FPE in `tf.raw_ops.UnravelIndex` ([CVE-2021-37668](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37668)) +* Fixes a crash in NMS ops caused by integer conversion to unsigned ([CVE-2021-37669](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37669)) +* Fixes a heap OOB in `UpperBound` and `LowerBound` ([CVE-2021-37670](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37670)) +* Fixes an undefined behavior arising from reference binding to nullptr in map operations ([CVE-2021-37671](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37671)) +* Fixes a heap OOB in `SdcaOptimizerV2` ([CVE-2021-37672](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37672)) +* Fixes a `CHECK`-fail in `MapStage` ([CVE-2021-37673](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37673)) +* Fixes a vulnerability arising from incomplete validation in `MaxPoolGrad` ([CVE-2021-37674](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37674)) +* Fixes an undefined behavior arising from reference binding to nullptr in shape inference ([CVE-2021-37676](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37676)) +* Fixes a division by 0 in most convolution operators ([CVE-2021-37675](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37675)) +* Fixes vulnerabilities arising from missing validation in shape inference for `Dequantize` ([CVE-2021-37677](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37677)) +* Fixes an arbitrary code execution due to YAML deserialization ([CVE-2021-37678](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37678)) +* Fixes a heap OOB in nested `tf.map_fn` with `RaggedTensor`s ([CVE-2021-37679](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37679)) +* Fixes a division by zero in TFLite ([CVE-2021-37680](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37680)) +* Fixes an NPE in TFLite ([CVE-2021-37681](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37681)) +* Fixes a vulnerability arising from use of 
uninitialized value in TFLite ([CVE-2021-37682](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37682)) +* Fixes an FPE in TFLite division operations ([CVE-2021-37683](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37683)) +* Fixes an FPE in TFLite pooling operations ([CVE-2021-37684](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37684)) +* Fixes an infinite loop in TFLite ([CVE-2021-37686](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37686)) +* Fixes a heap OOB in TFLite ([CVE-2021-37685](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37685)) +* Fixes a heap OOB in TFLite's `Gather*` implementations ([CVE-2021-37687](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37687)) +* Fixes an undefined behavior arising from null pointer dereference in TFLite ([CVE-2021-37688](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37688)) +* Fixes an undefined behavior arising from null pointer dereference in TFLite MLIR optimizations ([CVE-2021-37689](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37689)) +* Fixes an FPE in LSH in TFLite ([CVE-2021-37691](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37691)) +* Fixes a segfault on strings tensors with mismatched dimensions, arising in Go code ([CVE-2021-37692](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37692)) +* Fixes a use after free and a potential segfault in shape inference functions ([CVE-2021-37690](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37690)) +* Updates `curl` to `7.77.0` to handle [CVE-2021-22876](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22876), [CVE-2021-22897](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22897), [CVE-2021-22898](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22898), and [CVE-2021-22901](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22901). -* -* -* +# Release 2.5.0 ## Major Features and Improvements -* -* - +* Support for Python 3.9 has been added. * TPU embedding support * Added `profile_data_directory` to `EmbeddingConfigSpec` in `_tpu_estimator_embedding.py`. This allows embedding lookup statistics @@ -60,31 +209,48 @@ * `tf.distribute.experimental.ParameterServerStrategy` now supports training with Keras `Model.fit` when used with `DatasetCreator`. * PluggableDevice - * Third-party devices can now connect to TensorFlow - [modularly](https://github.com/tensorflow/community/blob/master/rfcs/20190305-modular-tensorflow.md) - through [StreamExecutor C API](https://github.com/tensorflow/community/blob/master/rfcs/20200612-stream-executor-c-api.md). + * Third-party devices can now connect to TensorFlow as plug-ins through + [StreamExecutor C API](https://github.com/tensorflow/community/blob/master/rfcs/20200612-stream-executor-c-api.md) and [PluggableDevice](https://github.com/tensorflow/community/blob/master/rfcs/20200624-pluggable-device-for-tensorflow.md) interface. * Add custom ops and kernels through [kernel and op registration C API](https://github.com/tensorflow/community/blob/master/rfcs/20190814-kernel-and-op-registration.md). * Register custom graph optimization passes with - [graph optimization C API](https://github.com/tensorflow/community/blob/master/rfcs/20201027-modular-tensorflow-graph-c-api.md). + [graph optimization C API](https://github.com/tensorflow/community/blob/master/rfcs/20201027-modular-tensorflow-graph-c-api.md).
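Of the 2.5.0 features listed above, the new `ParameterServerStrategy` support for Keras `Model.fit` via `DatasetCreator` is the most API-visible. The following is a minimal sketch only, not part of this diff: it assumes an already-provisioned chief/worker/ps cluster described by the `TF_CONFIG` environment variable, and the model and data are illustrative placeholders.

```python
import tensorflow as tf

# Sketch: assumes TF_CONFIG already describes a running PS/worker cluster.
cluster_resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()
strategy = tf.distribute.experimental.ParameterServerStrategy(cluster_resolver)

with strategy.scope():
    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
    model.compile(optimizer="sgd", loss="mse")

def dataset_fn(input_context):
    # Each worker invokes this function to build its own input pipeline.
    x = tf.random.uniform((64, 4))
    y = tf.random.uniform((64, 1))
    return tf.data.Dataset.from_tensor_slices((x, y)).repeat().batch(8)

# DatasetCreator defers dataset construction to the workers; with a
# ParameterServerStrategy, fit() requires steps_per_epoch.
model.fit(tf.keras.utils.experimental.DatasetCreator(dataset_fn),
          epochs=1, steps_per_epoch=10)
```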
+* [oneAPI Deep Neural Network Library (oneDNN)](https://github.com/oneapi-src/oneDNN) CPU performance optimizations from [Intel-optimized TensorFlow](https://software.intel.com/content/www/us/en/develop/articles/intel-optimization-for-tensorflow-installation-guide.html) - are now available in the official x86-64 Linux and Windows builds. + are now available in the official x86-64 Linux and Windows builds. * They are off by default. Enable them by setting the environment variable `TF_ENABLE_ONEDNN_OPTS=1`. * We do not recommend using them in GPU systems, as they have not been sufficiently tested with GPUs yet. +* TensorFlow pip packages are now built with CUDA 11.2 and cuDNN 8.1.0. + +## Breaking Changes + +* The `TF_CPP_MIN_VLOG_LEVEL` environment variable has been renamed to + `TF_CPP_MAX_VLOG_LEVEL`, which correctly describes its effect. ## Bug Fixes and Other Changes -* -* -* * `tf.keras`: - * Improvements to Keras preprocessing layers: - * Discretization combiner implemented, with additional arg `epsilon`. + * Preprocessing layers API consistency changes: + * `StringLookup` added `output_mode`, `sparse`, and + `pad_to_max_tokens` arguments with same semantics as + `TextVectorization`. + * `IntegerLookup` added `output_mode`, `sparse`, and + `pad_to_max_tokens` arguments with same semantics as + `TextVectorization`. Renamed `max_values`, `oov_value` and + `mask_value` to `max_tokens`, `oov_token` and `mask_token` to align + with `StringLookup` and `TextVectorization`. + * `TextVectorization` default for `pad_to_max_tokens` switched to + False. + * `CategoryEncoding` no longer supports `adapt`; `IntegerLookup` + now supports equivalent functionality. `max_tokens` argument renamed + to `num_tokens`. + * `Discretization` added `num_bins` argument for learning bins + boundaries through calling `adapt` on a dataset. Renamed `bins` + argument to `bin_boundaries` for specifying bins without `adapt`. * Improvements to model saving/loading: * `model.load_weights` now accepts paths to saved models. * Keras inputs can now be created directly from arbitrary `tf.TypeSpecs`. @@ -108,6 +274,7 @@ the input pipeline to insert sharding transformations. * Make tf.data.Options persistent across `tf.function` and `GraphDef` boundaries. + * XLA compilation: * `tf.function(experimental_compile=True)` has become a stable API, renamed `tf.function(jit_compile=True)`. @@ -157,7 +324,7 @@ ML authoring is generally discouraged. * Add support for static hash tables through `TFLiteConverter.from_saved_model`. - * The Python TF Lite Interpreter bindings now have an option + * The Python TF Lite Interpreter bindings now have an option `experimental_preserve_all_tensors` to aid in debugging conversion. * Quantized x86 execution defaults to Ruy GEMM library for platforms with AVX support. @@ -175,7 +342,7 @@ `tf.GradientTape` inside a `tf.function`. * Changed the default step size in `gradient_checker_v2.compute_gradients` to be exactly representable as binary floating point numbers. This avoids polluting gradient approximations needlessly, which in some cases leads to false negatives in op gradient tests. * Added `tf.config.experimental.get_memory_info`, returning a dict with the - current and peak memory usage. Deprecated + current and peak memory usage. Deprecated `tf.config.experimental.get_memory_usage` in favor of this new function. * Extended `tf.config.experimental.enable_tensor_float_32_execution` to control Tensor-Float-32 evaluation in RNNs.
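Several of the API notes above can be sketched together: the oneDNN opt-in, the `jit_compile=True` rename, and the new `tf.config.experimental.get_memory_info`. A minimal sketch, assuming a GPU named `GPU:0` is visible for the memory query; the shapes and function body are illustrative only.

```python
import os

# oneDNN optimizations (off by default, per the notes above) must be enabled
# via this environment variable before TensorFlow is imported.
os.environ["TF_ENABLE_ONEDNN_OPTS"] = "1"

import tensorflow as tf

# `experimental_compile=True` is now the stable `jit_compile=True`.
@tf.function(jit_compile=True)
def scaled_matmul(a, b):
    return 2.0 * tf.matmul(a, b)

a = tf.random.normal((256, 256))
print(scaled_matmul(a, a).shape)

# New in 2.5: a dict with "current" and "peak" memory usage for the device;
# this replaces the deprecated tf.config.experimental.get_memory_usage.
if tf.config.list_physical_devices("GPU"):
    print(tf.config.experimental.get_memory_info("GPU:0"))
```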
@@ -185,9 +352,9 @@ https://github.com/abseil/abseil-cpp/blob/master/absl/status/status.h * `tf.summary`: - * New `tf.summary.graph` allows manual write of TensorFlow graph - (`tf.Graph` or `tf.compat.v1.GraphDef`) as a summary. This is not a - replacement for the trace-based API. + * New `tf.summary.graph` allows manual write of TensorFlow graph + (`tf.Graph` or `tf.compat.v1.GraphDef`) as a summary. This is not a + replacement for the trace-based API. * Set `/d2ReducedOptimizeHugeFunctions` by default for Windows builds. This provides a big compile-time speedup, and effectively raises the minimum @@ -221,10 +388,149 @@ the MLIR bridge in a \"safe\" mode. This runs the MLIR bridge in a FallbackEnabled mode when an analysis of the graph determines that the graph does not have unsupported features. +* Deterministic Op Functionality: + * Add determinism-unimplemented exception-throwing to the segment-sum ops. + When the environment variable `TF_DETERMINISTIC_OPS` is set to `"true"` + or `"1"` (when op-determinism is expected), an attempt to run the + following ops on a GPU will throw `tf.errors.UnimplementedError` (with an + understandable message) when `data` is a floating-point type, including + complex types (if supported): `tf.math.segment_prod`, + `tf.math.segment_sum`, `tf.math.unsorted_segment_mean`, + `tf.math.unsorted_segment_sqrt_n`, `tf.math.unsorted_segment_prod`, + `tf.math.unsorted_segment_sum`, and therefore also + `tf.convert_to_tensor` when `value` is of type `tf.IndexedSlices` (such + as in the backprop through `tf.gather` into a dense embedding). See + issue [39751](https://github.com/tensorflow/tensorflow/issues/39751) + which this change addresses, but does not solve. This exception-throwing + behavior can be disabled by setting the environment variable + `TF_DISABLE_SEGMENT_REDUCTION_OP_DETERMINISM_EXCEPTIONS` to `"true"` or + `"1"`. For more information about these changes, see the description in + pull request + [47772](https://github.com/tensorflow/tensorflow/pull/47772). + * In previous versions of TensorFlow, when a GPU was available, + `tf.sparse.sparse_dense_matmul` introduced truly random noise in the + forward path for data of type `tf.float32` but not for data of type + `tf.float64` (for which there was no GPU implementation). In this + current release, GPU support for other floating-point types + (`tf.float16`, `tf.float64`, `tf.complex64`, and `tf.complex128`) has + been added for this op. If you were relying on the determinism of the + `tf.float64` CPU implementation being automatically selected because of + the absence of the `tf.float64` GPU implementation, you will either + need to force the op to run on the CPU or use a different data type.
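The determinism notes above imply runtime behavior along the following lines. A minimal sketch, assuming a CUDA GPU is visible (without one, the op simply runs on CPU and no exception is raised); the data values are illustrative.

```python
import os

# Per the notes above, these variables must be set before TensorFlow
# initializes for op-determinism to take effect.
os.environ["TF_DETERMINISTIC_OPS"] = "1"
# Uncomment to opt out of the new exception-throwing behavior:
# os.environ["TF_DISABLE_SEGMENT_REDUCTION_OP_DETERMINISM_EXCEPTIONS"] = "1"

import tensorflow as tf

data = tf.constant([[1.0, 2.0], [3.0, 4.0]])
segment_ids = tf.constant([0, 0])

try:
    with tf.device("GPU:0"):
        print(tf.math.segment_sum(data, segment_ids))
except tf.errors.UnimplementedError as e:
    # Raised for floating-point `data` on GPU when op-determinism is
    # requested, because no deterministic GPU kernel exists yet.
    print("Deterministic GPU kernel not implemented:", e)
```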
+* Security + * Fixes a heap buffer overflow in `RaggedBinCount` ([CVE-2021-29512](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29512)) + * Fixes a heap out of bounds write in `RaggedBinCount` ([CVE-2021-29514](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29514)) + * Fixes a type confusion during tensor casts which leads to dereferencing null pointers ([CVE-2021-29513](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29513)) + * Fixes a reference binding to null pointer in `MatrixDiag*` ops ([CVE-2021-29515](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29515)) + * Fixes a null pointer dereference via invalid Ragged Tensors ([CVE-2021-29516](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29516)) + * Fixes a division by zero in `Conv3D` ([CVE-2021-29517](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29517)) + * Fixes vulnerabilities where session operations in eager mode lead to null pointer dereferences ([CVE-2021-29518](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29518)) + * Fixes a `CHECK`-fail in `SparseCross` caused by type confusion ([CVE-2021-29519](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29519)) + * Fixes a segfault in `SparseCountSparseOutput` ([CVE-2021-29521](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29521)) + * Fixes a heap buffer overflow in `Conv3DBackprop*` ([CVE-2021-29520](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29520)) + * Fixes a division by 0 in `Conv3DBackprop*` ([CVE-2021-29522](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29522)) + * Fixes a `CHECK`-fail in `AddManySparseToTensorsMap` ([CVE-2021-29523](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29523)) + * Fixes a division by 0 in `Conv2DBackpropFilter` ([CVE-2021-29524](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29524)) + * Fixes a division by 0 in `Conv2DBackpropInput` ([CVE-2021-29525](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29525)) + * Fixes a division by 0 in `Conv2D` ([CVE-2021-29526](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29526)) + * Fixes a division by 0 in `QuantizedConv2D` ([CVE-2021-29527](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29527)) + * Fixes a division by 0 in `QuantizedMul` ([CVE-2021-29528](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29528)) + * Fixes vulnerabilities caused by invalid validation in `SparseMatrixSparseCholesky` ([CVE-2021-29530](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29530)) + * Fixes a heap buffer overflow caused by rounding ([CVE-2021-29529](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29529)) + * Fixes a `CHECK`-fail in `tf.raw_ops.EncodePng` ([CVE-2021-29531](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29531)) + * Fixes a heap out of bounds read in `RaggedCross` ([CVE-2021-29532](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29532)) + * Fixes a `CHECK`-fail in `DrawBoundingBoxes` ([CVE-2021-29533](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29533)) + * Fixes a heap buffer overflow in `QuantizedMul` ([CVE-2021-29535](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29535)) + * Fixes a `CHECK`-fail in `SparseConcat` ([CVE-2021-29534](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29534)) + * Fixes a heap buffer overflow in `QuantizedResizeBilinear` ([CVE-2021-29537](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29537)) + * Fixes a heap buffer overflow in `QuantizedReshape` 
([CVE-2021-29536](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29536)) + * Fixes a division by zero in `Conv2DBackpropFilter` ([CVE-2021-29538](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29538)) + * Fixes a heap buffer overflow in `Conv2DBackpropFilter` ([CVE-2021-29540](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29540)) + * Fixes a heap buffer overflow in `StringNGrams` ([CVE-2021-29542](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29542)) + * Fixes a null pointer dereference in `StringNGrams` ([CVE-2021-29541](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29541)) + * Fixes a `CHECK`-fail in `QuantizeAndDequantizeV4Grad` ([CVE-2021-29544](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29544)) + * Fixes a `CHECK`-fail in `CTCGreedyDecoder` ([CVE-2021-29543](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29543)) + * Fixes a heap buffer overflow in `SparseTensorToCSRSparseMatrix` ([CVE-2021-29545](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29545)) + * Fixes a division by 0 in `QuantizedBiasAdd` ([CVE-2021-29546](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29546)) + * Fixes a heap out of bounds in `QuantizedBatchNormWithGlobalNormalization` ([CVE-2021-29547](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29547)) + * Fixes a division by 0 in `QuantizedBatchNormWithGlobalNormalization` ([CVE-2021-29548](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29548)) + * Fixes a division by 0 in `QuantizedAdd` ([CVE-2021-29549](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29549)) + * Fixes a division by 0 in `FractionalAvgPool` ([CVE-2021-29550](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29550)) + * Fixes an OOB read in `MatrixTriangularSolve` ([CVE-2021-29551](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29551)) + * Fixes a heap OOB in `QuantizeAndDequantizeV3` ([CVE-2021-29553](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29553)) + * Fixes a `CHECK`-failure in `UnsortedSegmentJoin` ([CVE-2021-29552](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29552)) + * Fixes a division by 0 in `DenseCountSparseOutput` ([CVE-2021-29554](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29554)) + * Fixes a division by 0 in `FusedBatchNorm` ([CVE-2021-29555](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29555)) + * Fixes a division by 0 in `SparseMatMul` ([CVE-2021-29557](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29557)) + * Fixes a division by 0 in `Reverse` ([CVE-2021-29556](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29556)) + * Fixes a heap buffer overflow in `SparseSplit` ([CVE-2021-29558](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29558)) + * Fixes a heap OOB access in unicode ops ([CVE-2021-29559](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29559)) + * Fixes a heap buffer overflow in `RaggedTensorToTensor` ([CVE-2021-29560](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29560)) + * Fixes a `CHECK`-fail in `LoadAndRemapMatrix` ([CVE-2021-29561](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29561)) + * Fixes a `CHECK`-fail in `tf.raw_ops.IRFFT` ([CVE-2021-29562](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29562)) + * Fixes a `CHECK`-fail in `tf.raw_ops.RFFT` ([CVE-2021-29563](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29563)) + * Fixes a null pointer dereference in `EditDistance` 
([CVE-2021-29564](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29564)) + * Fixes a null pointer dereference in `SparseFillEmptyRows` ([CVE-2021-29565](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29565)) + * Fixes a heap OOB access in `Dilation2DBackpropInput` ([CVE-2021-29566](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29566)) + * Fixes a reference binding to null in `ParameterizedTruncatedNormal` ([CVE-2021-29568](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29568)) + * Fixes a set of vulnerabilities caused by lack of validation in `SparseDenseCwiseMul` ([CVE-2021-29567](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29567)) + * Fixes a heap out of bounds read in `MaxPoolGradWithArgmax` ([CVE-2021-29570](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29570)) + * Fixes a heap out of bounds read in `RequantizationRange` ([CVE-2021-29569](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29569)) + * Fixes a memory corruption in `DrawBoundingBoxesV2` ([CVE-2021-29571](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29571)) + * Fixes a reference binding to nullptr in `SdcaOptimizer` ([CVE-2021-29572](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29572)) + * Fixes an overflow and a denial of service in `tf.raw_ops.ReverseSequence` ([CVE-2021-29575](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29575)) + * Fixes a division by 0 in `MaxPoolGradWithArgmax` ([CVE-2021-29573](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29573)) + * Fixes an undefined behavior in `MaxPool3DGradGrad` ([CVE-2021-29574](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29574)) + * Fixes a heap buffer overflow in `MaxPool3DGradGrad` ([CVE-2021-29576](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29576)) + * Fixes a heap buffer overflow in `AvgPool3DGrad` ([CVE-2021-29577](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29577)) + * Fixes an undefined behavior and a `CHECK`-fail in `FractionalMaxPoolGrad` ([CVE-2021-29580](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29580)) + * Fixes a heap buffer overflow in `FractionalAvgPoolGrad` ([CVE-2021-29578](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29578)) + * Fixes a heap buffer overflow in `MaxPoolGrad` ([CVE-2021-29579](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29579)) + * Fixes a segfault in `CTCBeamSearchDecoder` ([CVE-2021-29581](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29581)) + * Fixes a heap OOB read in `tf.raw_ops.Dequantize` ([CVE-2021-29582](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29582)) + * Fixes a `CHECK`-fail due to integer overflow ([CVE-2021-29584](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29584)) + * Fixes a heap buffer overflow and undefined behavior in `FusedBatchNorm` ([CVE-2021-29583](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29583)) + * Fixes a division by zero in padding computation in TFLite ([CVE-2021-29585](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29585)) + * Fixes a division by zero in optimized pooling implementations in TFLite ([CVE-2021-29586](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29586)) + * Fixes a division by zero in TFLite's implementation of `SpaceToDepth` ([CVE-2021-29587](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29587)) + * Fixes a division by zero in TFLite's implementation of `GatherNd` 
([CVE-2021-29589](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29589)) + * Fixes a division by zero in TFLite's implementation of `TransposeConv` ([CVE-2021-29588](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29588)) + * Fixes a heap OOB read in TFLite's implementation of `Minimum` or `Maximum` ([CVE-2021-29590](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29590)) + * Fixes a null pointer dereference in TFLite's `Reshape` operator ([CVE-2021-29592](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29592)) + * Fixes a stack overflow due to looping TFLite subgraph ([CVE-2021-29591](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29591)) + * Fixes a division by zero in TFLite's implementation of `DepthToSpace` ([CVE-2021-29595](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29595)) + * Fixes a division by zero in TFLite's convolution code ([CVE-2021-29594](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29594)) + * Fixes a division by zero in TFLite's implementation of `EmbeddingLookup` ([CVE-2021-29596](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29596)) + * Fixes a division by zero in TFLite's implementation of `BatchToSpaceNd` ([CVE-2021-29593](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29593)) + * Fixes a division by zero in TFLite's implementation of `SpaceToBatchNd` ([CVE-2021-29597](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29597)) + * Fixes a division by zero in TFLite's implementation of `SVDF` ([CVE-2021-29598](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29598)) + * Fixes a division by zero in TFLite's implementation of `Split` ([CVE-2021-29599](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29599)) + * Fixes a division by zero in TFLite's implementation of `OneHot` ([CVE-2021-29600](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29600)) + * Fixes a division by zero in TFLite's implementation of `DepthwiseConv` ([CVE-2021-29602](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29602)) + * Fixes a division by zero in TFLite's implementation of hashtable lookup ([CVE-2021-29604](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29604)) + * Fixes an integer overflow in TFLite concatenation ([CVE-2021-29601](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29601)) + * Fixes an integer overflow in TFLite memory allocation ([CVE-2021-29605](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29605)) + * Fixes a heap OOB write in TFLite ([CVE-2021-29603](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29603)) + * Fixes a heap OOB read in TFLite ([CVE-2021-29606](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29606)) + * Fixes a heap OOB and null pointer dereference in `RaggedTensorToTensor` ([CVE-2021-29608](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29608)) + * Fixes vulnerabilities caused by incomplete validation in `SparseAdd` ([CVE-2021-29609](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29609)) + * Fixes vulnerabilities caused by incomplete validation in `SparseSparseMinimum` ([CVE-2021-29607](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29607)) + * Fixes vulnerabilities caused by incomplete validation in `SparseReshape` ([CVE-2021-29611](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29611)) + * Fixes vulnerabilities caused by invalid validation in `QuantizeAndDequantizeV2` ([CVE-2021-29610](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29610)) + * Fixes a heap
buffer overflow in `BandedTriangularSolve` ([CVE-2021-29612](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29612)) + * Fixes vulnerabilities caused by incomplete validation in `tf.raw_ops.CTCLoss` ([CVE-2021-29613](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29613)) + * Fixes an interpreter crash from vulnerabilities in `tf.io.decode_raw` ([CVE-2021-29614](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29614)) + * Fixes a stack overflow in `ParseAttrValue` with nested tensors ([CVE-2021-29615](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29615)) + * Fixes a null dereference in Grappler's `TrySimplify` ([CVE-2021-29616](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29616)) + * Fixes a crash in `tf.transpose` with complex inputs ([CVE-2021-29618](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29618)) + * Fixes a crash in `tf.strings.substr` due to `CHECK`-fail ([CVE-2021-29617](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29617)) + * Fixes a segfault in `tf.raw_ops.SparseCountSparseOutput` ([CVE-2021-29619](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29619)) + * Fixes a segfault in `tf.raw_ops.ImmutableConst` ([CVE-2021-29539](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29539)) + * Updates `curl` to `7.76.0` to handle [CVE-2020-8169](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8169), [CVE-2020-8177](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8177), [CVE-2020-8231](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8231), [CVE-2020-8284](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8284), [CVE-2020-8285](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8285) and [CVE-2020-8286](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8286). * Other - * Adding show_debug_info to mlir.convert_graph_def and - mlir.convert_function. + * Added `show_debug_info` to `mlir.convert_graph_def` and + `mlir.convert_function`. * Added [Arm Compute Library (ACL)](https://github.com/ARM-software/ComputeLibrary) support to `--config=mkl_aarch64` build. @@ -232,7 +538,7 @@ This release contains contributions from many people at Google, as well as: -, , , , , +8bitmp3, Aaron S. Mondal, Abhilash Mahendrakar, Abhinav Upadhyay, Abhishek Kulkarni, Abolfazl Shahbazi, Adam Hillier, Aditya Kane, Ag Ramesh, ahmedsabie, Albert Villanova Del Moral, Aleksey Vitebskiy, Alex Hoffman, Alexander Bayandin, Alfie Edwards, Aman Kishore, Amogh Joshi, andreABbauer, Andrew Goodbody, Andrzej Pomirski, Artemiy Ryabinkov, Ashish Jha, ather, Ayan Moitra, Bairen Yi, Bart Ribbers, Bas Aarts, Behzad Abghari, Ben Arnao, Ben Barsdell, Benjamin Klimczak, bhack, Brendan Collins, Can Wang, Cheng Ren, Chris Leary, Chris Olivier, Clemens Giuliani, Cloud Han, Corey Cole, Cui, Yifeng, Cuong V. 
Nguyen, Daniel Moore, Dawid Wojciechowski, Ddavis-2015, Dean Wyatte, Denisa Roberts, dependabot[bot], Dmitry Volodin, Dominic Jack, Duncan Riach, dushuai, Elena Zhelezina, Eli Osherovich, Erik Smistad, ewsn1593, Felix Fent, fo40225, François Chollet, Frederic Bastien, Freedom" Koan-Sin Tan, fsx950223, ganand1, gbaned, Georgiy Manuilov, gerbauz, Guillaume Klein, Guozhong Zhuang, Harry Slatyer, Harsh188, henri, Henri Woodcock, Hiran Sarkar, Hollow Man, Håkon Sandsmark, I Wayan Dharmana, icysapphire, Ikko Ashimine, Jab Hofmeier, Jack Hessel, Jacob Valdez, Jakub Jatczak, James Bernardi, Jared Smolens, Jason Zaman, jedlimlx, Jenny Plunkett, Jens Elofsson, Jerry Shih, jgehw, Jia Fu Low, Jim Fisher, jpodivin, Julien Stephan, Jungsub Lim, Junha Park, Junhyuk So, justkw, Kaixi Hou, kashyapraval, Kasra Bigdeli, Kazuaki Ishizaki, Keith Mok, Kevin Cheng, kopytjuk, Kristian Hartikainen, ksood12345, Kulin Seth, kushanam, latyas, Lequn Chen, Leslie-Fang, Long M. Lưu, Lukas Geiger, machineko, Mahmoud Abuzaina, Manish, Mao Yunfei, Maozhou, Ge, Marcin Juszkiewicz, Marcin Owsiany, Marconi Jiang, Marcos Pereira, Maria Romanenko Vexlard, Maria Vexlard, Marius Brehler, marload, Martin Kubovčík, Matej, Mateusz Holenko, Maxiwell S. Garcia, Mazhar, mazharul, mbhuiyan, mdfaijul, Michael Gielda, Michael Kuchnik, Michal Szutenberg, Mikhail Stepanov, Milan Straka, Mitchel Humpherys, Mohamed Moselhy, Mohamed Nour Abouelseoud, Måns Bermell, Måns Nilsson, Nathan Luehr, Nico Jahn, Niroop Ammbashankar, Oceania2018, Omri Steiner, Orivej Desh, Oskar Flordal, oujiafan, Patrik Laurell, Paul B. Isaac'S, Paul Klinger, Pawel Piskorski, Pedro Marques, Phat Tran, Piotr Zierhoffer, piyushdatta, Pnikam-Cad, Prashant Kumar, Prateek Gupta, PratsBhatt, Pravin Karandikar, qqq.jq, QQ喵, Quintin, Rama Ketineni, ravikyram, Rehan Guha, rhdong, rmothukuru, Roger Cheng, Rohit Santhanam, rposts, Rsanthanam-Amd, rsun, Rsun-Bdti, Ryan Kuester, ryanking13, Saduf2019, Sami Kama, Samuel Marks, Scott Tseng, Sean Moriarity, Sergey Popov, Sergii Khomenko, Sheng, Yang, shwetaoj, Sidong-Wei, Simon Maurer, Simrit Kaur, Srini511, Srinivasan Narayanamoorthy, Stephan, Stephen Matthews, Sungmann Cho, Sunoru, Suraj Sudhir, Suraj Upadhyay, Taebum Kim, Takayoshi Koizumi, Tamas Bela Feher, Teng Lu, Thibaut Goetghebuer-Planchon, Tomwildenhain-Microsoft, Tony, Traun Leyden, Trent Lo, TVLIgnacy, Tzu-Wei Sung, vaibhav, Vignesh Kothapalli, Vikram Dattu, viktprog, Vinayaka Bandishti, Vincent Abriou, Vishakha Agrawal, Vivek Panyam, Vladimir Silyaev, Võ Văn Nghĩa, wamuir, Wang, Yanzhang, wangsiyu, Waqar Hameed, wxinix, Xiao Yang, xiaohong1031, Xiaoming (Jason) Cui, Xinan Jiang, Yair Ehrenwald, Yajush Vyas, Yasir Modak, Yimei Sun, Yong Tang, Yosshi999, youshenmebutuo, yqtianust, Yuan Tang, yuanbopeng, Yuriy Chernyshov, Yuta Fukasawa, Zachary Deane-Mayer, Zeno Gantner, Zhoulong Jiang, zhuyie, zilinzhu, 彭震东 # Release 2.4.1 diff --git a/tensorflow/cc/saved_model/loader.cc b/tensorflow/cc/saved_model/loader.cc index dcd652d9fdf3d6..dcab34cc965e0f 100644 --- a/tensorflow/cc/saved_model/loader.cc +++ b/tensorflow/cc/saved_model/loader.cc @@ -23,6 +23,7 @@ limitations under the License. 
#include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/function.pb.h" #include "tensorflow/core/framework/node_def.pb.h" +#include "tensorflow/core/framework/op_def.pb.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/monitoring/counter.h" @@ -95,6 +96,19 @@ static Status ValidateNode(const NodeDef& node) { return Status::OK(); } +static Status ValidateFunctionNotRecursive(const FunctionDef& function) { + const auto& function_name = function.signature().name(); + for (const auto& node : function.node_def()) { + if (node.op() == function_name) { + return errors::FailedPrecondition( + "Function ", function_name, + " is self recursive and TensorFlow does not support this scenario."); + } + } + + return Status::OK(); +} + static Status ValidateSavedTensors(const GraphDef& graph_def) { for (const auto& node : graph_def.node()) { TF_RETURN_IF_ERROR(ValidateNode(node)); @@ -106,6 +120,10 @@ static Status ValidateSavedTensors(const GraphDef& graph_def) { for (const auto& node : function.node_def()) { TF_RETURN_IF_ERROR(ValidateNode(node)); } + + // Also check that there is no recursivity in the library + // TODO(mihaimaruseac): Do more than self-recursivity + TF_RETURN_IF_ERROR(ValidateFunctionNotRecursive(function)); } } diff --git a/tensorflow/cc/saved_model/loader_util.cc b/tensorflow/cc/saved_model/loader_util.cc index 100cae2291333f..411dc41fd44837 100644 --- a/tensorflow/cc/saved_model/loader_util.cc +++ b/tensorflow/cc/saved_model/loader_util.cc @@ -34,9 +34,14 @@ Status GetInitOp(const string& export_dir, const MetaGraphDef& meta_graph_def, const auto& init_op_sig_it = meta_graph_def.signature_def().find(kSavedModelInitOpSignatureKey); if (init_op_sig_it != sig_def_map.end()) { - *init_op_name = init_op_sig_it->second.outputs() - .find(kSavedModelInitOpSignatureKey) - ->second.name(); + const auto& sig_def_outputs = init_op_sig_it->second.outputs(); + const auto& sig_def_outputs_it = + sig_def_outputs.find(kSavedModelInitOpSignatureKey); + if (sig_def_outputs_it == sig_def_outputs.end()) { + return errors::FailedPrecondition("Could not find output ", + kSavedModelInitOpSignatureKey); + } + *init_op_name = sig_def_outputs_it->second.name(); return Status::OK(); } diff --git a/tensorflow/compiler/mlir/lite/tests/modify_io_nodes.mlir b/tensorflow/compiler/mlir/lite/tests/modify_io_nodes.mlir index 32713012ad4ef6..144ab70baca6e4 100644 --- a/tensorflow/compiler/mlir/lite/tests/modify_io_nodes.mlir +++ b/tensorflow/compiler/mlir/lite/tests/modify_io_nodes.mlir @@ -2,7 +2,7 @@ // RUN: tf-opt %s -tfl-modify-io-nodes -tfl-test-io-types="int8,int8" | FileCheck --check-prefix=INT8 %s // RUN: tf-opt %s -tfl-modify-io-nodes -tfl-test-io-types="uint8,uint8" | FileCheck --check-prefix=UINT8 %s -func @main(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x401408xf32> { +func @modified(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x401408xf32> { %cst = constant dense<[1, 401408]> : tensor<2xi32> %0 = "tfl.quantize"(%arg0) {qtype = tensor<1x224x224x3x!quant.uniform>} : (tensor<1x224x224x3xf32>) -> tensor<1x224x224x3x!quant.uniform> %1 = "tfl.pseudo_qconst"() {qtype = tensor<32x3x3x3x!quant.uniform:f32, 0.021826678373682216:151>>, value = dense<-76> : tensor<32x3x3x3xi8>} : () -> tensor<32x3x3x3x!quant.uniform:f32, 0.021826678373682216>> @@ -13,7 +13,7 @@ func @main(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x401408xf32> { %6 = "tfl.dequantize"(%5) : (tensor<1x401408x!quant.uniform>) -> tensor<1x401408xf32> 
return %6 : tensor<1x401408xf32> -// CHECK-LABEL: func @main(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x401408xf32> { +// CHECK-LABEL: func @modified(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x401408xf32> { // CHECK-NEXT: %[[shape:.*]] = constant dense<[1, 401408]> : tensor<2xi32> // CHECK-NEXT: %[[q:.*]] = "tfl.quantize"(%arg0) {qtype = tensor<1x224x224x3x!quant.uniform>} : (tensor<1x224x224x3xf32>) -> tensor<1x224x224x3x!quant.uniform> // CHECK-NEXT: %[[cst1:.*]] = "tfl.pseudo_qconst"() {qtype = tensor<32x3x3x3x!quant.uniform:f32, 0.021826678373682216:151>>, value = dense<-76> : tensor<32x3x3x3xi8>} : () -> tensor<32x3x3x3x!quant.uniform:f32, 0.021826678373682216>> @@ -24,7 +24,7 @@ func @main(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x401408xf32> { // CHECK-NEXT: %[[dq:.*]] = "tfl.dequantize"(%[[softmax]]) : (tensor<1x401408x!quant.uniform>) -> tensor<1x401408xf32> // CHECK-NEXT: return %[[dq]] : tensor<1x401408xf32> -// INT8-LABEL: @main(%arg0: tensor<1x224x224x3x!quant.uniform>) -> tensor<1x401408x!quant.uniform> { +// INT8-LABEL: @modified(%arg0: tensor<1x224x224x3x!quant.uniform>) -> tensor<1x401408x!quant.uniform> { // INT8-NEXT: %[[shape:.*]] = constant dense<[1, 401408]> : tensor<2xi32> // INT8-NEXT: %[[cst1:.*]] = "tfl.pseudo_qconst"() {qtype = tensor<32x3x3x3x!quant.uniform:f32, 0.021826678373682216:151>>, value = dense<-76> : tensor<32x3x3x3xi8>} : () -> tensor<32x3x3x3x!quant.uniform:f32, 0.021826678373682216>> // INT8-NEXT: %[[cst2:.*]] = "tfl.pseudo_qconst"() {qtype = tensor<32x!quant.uniform>, value = dense<0> : tensor<32xi32>} : () -> tensor<32x!quant.uniform> @@ -33,7 +33,7 @@ func @main(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x401408xf32> { // INT8-NEXT: %[[softmax:.*]] = "tfl.softmax"(%[[reshape]]) {beta = 1.000000e+00 : f32} : (tensor<1x401408x!quant.uniform>) -> tensor<1x401408x!quant.uniform> // INT8-NEXT: return %[[softmax]] : tensor<1x401408x!quant.uniform> -// UINT8-LABEL: func @main(%arg0: tensor<1x224x224x3x!quant.uniform>) -> tensor<1x401408x!quant.uniform> { +// UINT8-LABEL: func @modified(%arg0: tensor<1x224x224x3x!quant.uniform>) -> tensor<1x401408x!quant.uniform> { // UINT8-NEXT: %[[shape:.*]] = constant dense<[1, 401408]> : tensor<2xi32> // UINT8-NEXT: %[[q:.*]] = "tfl.quantize"(%arg0) {qtype = tensor<1x224x224x3x!quant.uniform>} : (tensor<1x224x224x3x!quant.uniform>) -> tensor<1x224x224x3x!quant.uniform> // UINT8-NEXT: %[[cst1:.*]] = "tfl.pseudo_qconst"() {qtype = tensor<32x3x3x3x!quant.uniform:f32, 0.021826678373682216:151>>, value = dense<-76> : tensor<32x3x3x3xi8>} : () -> tensor<32x3x3x3x!quant.uniform:f32, 0.021826678373682216>> @@ -44,3 +44,47 @@ func @main(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x401408xf32> { // UINT8-NEXT: %[[dq:.*]] = "tfl.quantize"(%[[softmax]]) {qtype = tensor<1x401408x!quant.uniform>} : (tensor<1x401408x!quant.uniform>) -> tensor<1x401408x!quant.uniform> // UINT8-NEXT: return %[[dq]] : tensor<1x401408x!quant.uniform> } + +func @not_modified(%arg0: tensor, %arg1: tensor<1x224x224x3xf32>) -> (tensor<1x401408xf32>, tensor<1x224x224x3xf32>) { + %cst = constant dense<[1, 401408]> : tensor<2xi32> + %0 = "tfl.quantize"(%arg1) {qtype = tensor<1x224x224x3x!quant.uniform>} : (tensor<1x224x224x3xf32>) -> tensor<1x224x224x3x!quant.uniform> + %1 = "tfl.pseudo_qconst"() {qtype = tensor<32x3x3x3x!quant.uniform:f32, 0.021826678373682216:151>>, value = dense<-76> : tensor<32x3x3x3xi8>} : () -> tensor<32x3x3x3x!quant.uniform:f32, 0.021826678373682216>> + %2 = "tfl.pseudo_qconst"() {qtype = tensor<32x!quant.uniform>, value = dense<0> 
: tensor<32xi32>} : () -> tensor<32x!quant.uniform> + %3 = "tfl.conv_2d"(%0, %1, %2) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 2 : i32, stride_w = 2 : i32} : (tensor<1x224x224x3x!quant.uniform>, tensor<32x3x3x3x!quant.uniform:f32, 0.021826678373682216>>, tensor<32x!quant.uniform>) -> tensor<1x112x112x32x!quant.uniform> + %4 = "tfl.reshape"(%3, %cst) : (tensor<1x112x112x32x!quant.uniform>, tensor<2xi32>) -> tensor<1x401408x!quant.uniform> + %5 = "tfl.softmax"(%4) {beta = 1.000000e+00 : f32} : (tensor<1x401408x!quant.uniform>) -> tensor<1x401408x!quant.uniform> + %6 = "tfl.dequantize"(%5) : (tensor<1x401408x!quant.uniform>) -> tensor<1x401408xf32> + return %6, %arg1 : tensor<1x401408xf32>, tensor<1x224x224x3xf32> + +// CHECK-LABEL: func @not_modified(%arg0: tensor, %arg1: tensor<1x224x224x3xf32>) -> (tensor<1x401408xf32>, tensor<1x224x224x3xf32>) { +// CHECK-NEXT: %[[shape:.*]] = constant dense<[1, 401408]> : tensor<2xi32> +// CHECK-NEXT: %[[q:.*]] = "tfl.quantize"(%arg1) {qtype = tensor<1x224x224x3x!quant.uniform>} : (tensor<1x224x224x3xf32>) -> tensor<1x224x224x3x!quant.uniform> +// CHECK-NEXT: %[[cst1:.*]] = "tfl.pseudo_qconst"() {qtype = tensor<32x3x3x3x!quant.uniform:f32, 0.021826678373682216:151>>, value = dense<-76> : tensor<32x3x3x3xi8>} : () -> tensor<32x3x3x3x!quant.uniform:f32, 0.021826678373682216>> +// CHECK-NEXT: %[[cst2:.*]] = "tfl.pseudo_qconst"() {qtype = tensor<32x!quant.uniform>, value = dense<0> : tensor<32xi32>} : () -> tensor<32x!quant.uniform> +// CHECK-NEXT: %[[conv:.*]] = "tfl.conv_2d"(%[[q]], %[[cst1]], %[[cst2]]) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 2 : i32, stride_w = 2 : i32} : (tensor<1x224x224x3x!quant.uniform>, tensor<32x3x3x3x!quant.uniform:f32, 0.021826678373682216>>, tensor<32x!quant.uniform>) -> tensor<1x112x112x32x!quant.uniform> +// CHECK-NEXT: %[[reshape:.*]] = "tfl.reshape"(%[[conv]], %[[shape]]) : (tensor<1x112x112x32x!quant.uniform>, tensor<2xi32>) -> tensor<1x401408x!quant.uniform> +// CHECK-NEXT: %[[softmax:.*]] = "tfl.softmax"(%[[reshape]]) {beta = 1.000000e+00 : f32} : (tensor<1x401408x!quant.uniform>) -> tensor<1x401408x!quant.uniform> +// CHECK-NEXT: %[[dq:.*]] = "tfl.dequantize"(%[[softmax]]) : (tensor<1x401408x!quant.uniform>) -> tensor<1x401408xf32> +// CHECK-NEXT: return %[[dq]], %arg1 : tensor<1x401408xf32>, tensor<1x224x224x3xf32> + +// INT8-LABEL: @not_modified(%arg0: tensor, %arg1: tensor<1x224x224x3xf32>) -> (tensor<1x401408x!quant.uniform>, tensor<1x224x224x3xf32>) { +// INT8-NEXT: %[[shape:.*]] = constant dense<[1, 401408]> : tensor<2xi32> +// INT8-NEXT: %[[q:.*]] = "tfl.quantize"(%arg1) {qtype = tensor<1x224x224x3x!quant.uniform>} : (tensor<1x224x224x3xf32>) -> tensor<1x224x224x3x!quant.uniform> +// INT8-NEXT: %[[cst1:.*]] = "tfl.pseudo_qconst"() {qtype = tensor<32x3x3x3x!quant.uniform:f32, 0.021826678373682216:151>>, value = dense<-76> : tensor<32x3x3x3xi8>} : () -> tensor<32x3x3x3x!quant.uniform:f32, 0.021826678373682216>> +// INT8-NEXT: %[[cst2:.*]] = "tfl.pseudo_qconst"() {qtype = tensor<32x!quant.uniform>, value = dense<0> : tensor<32xi32>} : () -> tensor<32x!quant.uniform> +// INT8-NEXT: %[[conv:.*]] = "tfl.conv_2d"(%[[q]], %[[cst1]], %[[cst2]]) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 2 : i32, stride_w = 2 : i32} : (tensor<1x224x224x3x!quant.uniform>, 
tensor<32x3x3x3x!quant.uniform:f32, 0.021826678373682216>>, tensor<32x!quant.uniform>) -> tensor<1x112x112x32x!quant.uniform> +// INT8-NEXT: %[[reshape:.*]] = "tfl.reshape"(%[[conv]], %[[shape]]) : (tensor<1x112x112x32x!quant.uniform>, tensor<2xi32>) -> tensor<1x401408x!quant.uniform> +// INT8-NEXT: %[[softmax:.*]] = "tfl.softmax"(%[[reshape]]) {beta = 1.000000e+00 : f32} : (tensor<1x401408x!quant.uniform>) -> tensor<1x401408x!quant.uniform> +// INT8-NEXT: return %[[softmax]], %arg1 : tensor<1x401408x!quant.uniform>, tensor<1x224x224x3xf32> + +// UINT8-LABEL: func @not_modified(%arg0: tensor, %arg1: tensor<1x224x224x3xf32>) -> (tensor<1x401408x!quant.uniform>, tensor<1x224x224x3xf32>) { +// UINT8-NEXT: %[[shape:.*]] = constant dense<[1, 401408]> : tensor<2xi32> +// UINT8-NEXT: %[[q:.*]] = "tfl.quantize"(%arg1) {qtype = tensor<1x224x224x3x!quant.uniform>} : (tensor<1x224x224x3xf32>) -> tensor<1x224x224x3x!quant.uniform> +// UINT8-NEXT: %[[cst1:.*]] = "tfl.pseudo_qconst"() {qtype = tensor<32x3x3x3x!quant.uniform:f32, 0.021826678373682216:151>>, value = dense<-76> : tensor<32x3x3x3xi8>} : () -> tensor<32x3x3x3x!quant.uniform:f32, 0.021826678373682216>> +// UINT8-NEXT: %[[cst2:.*]] = "tfl.pseudo_qconst"() {qtype = tensor<32x!quant.uniform>, value = dense<0> : tensor<32xi32>} : () -> tensor<32x!quant.uniform> +// UINT8-NEXT: %[[conv:.*]] = "tfl.conv_2d"(%[[q]], %[[cst1]], %[[cst2]]) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 2 : i32, stride_w = 2 : i32} : (tensor<1x224x224x3x!quant.uniform>, tensor<32x3x3x3x!quant.uniform:f32, 0.021826678373682216>>, tensor<32x!quant.uniform>) -> tensor<1x112x112x32x!quant.uniform> +// UINT8-NEXT: %[[reshape:.*]] = "tfl.reshape"(%[[conv]], %[[shape]]) : (tensor<1x112x112x32x!quant.uniform>, tensor<2xi32>) -> tensor<1x401408x!quant.uniform> +// UINT8-NEXT: %[[softmax:.*]] = "tfl.softmax"(%[[reshape]]) {beta = 1.000000e+00 : f32} : (tensor<1x401408x!quant.uniform>) -> tensor<1x401408x!quant.uniform> +// UINT8-NEXT: %[[dq:.*]] = "tfl.quantize"(%[[softmax]]) {qtype = tensor<1x401408x!quant.uniform>} : (tensor<1x401408x!quant.uniform>) -> tensor<1x401408x!quant.uniform> +// UINT8-NEXT: return %[[dq]], %arg1 : tensor<1x401408x!quant.uniform>, tensor<1x224x224x3xf32> +} diff --git a/tensorflow/compiler/mlir/lite/transforms/modify_io_nodes.cc b/tensorflow/compiler/mlir/lite/transforms/modify_io_nodes.cc index 53ac0b051e1490..bcfca0690ec1af 100644 --- a/tensorflow/compiler/mlir/lite/transforms/modify_io_nodes.cc +++ b/tensorflow/compiler/mlir/lite/transforms/modify_io_nodes.cc @@ -135,7 +135,11 @@ LogicalResult ModifyIONodesPass::ModifyInputNodes( quantize_op.erase(); } } else { + // `arg` has multiple uses or the user isn't a quantize op (so we couldn't + // rewrite it to a different type). Make a copy of the `arg` and replace + // its uses.
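+ // A sketch of the shape of the guard that leads here (an illustrative
+ // predicate, not necessarily the pass's exact check): the in-place
+ // retyping above requires a single `tfl.quantize` user, e.g.
+ //
+ //   if (arg.hasOneUse() &&
+ //       llvm::isa<mlir::TFL::QuantizeOp>(*arg.user_begin())) {
+ //     // safe to rewrite the argument type in place
+ //   }
+ //
+ // Several users, or an argument escaping directly through a `return`
+ // (as in the @not_modified test above), take this fallback branch.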
new_arg = block.addArgument(arg_type); + arg.replaceAllUsesWith(new_arg); } block.eraseArgument(0); } diff --git a/tensorflow/compiler/mlir/lite/transforms/optimize.cc b/tensorflow/compiler/mlir/lite/transforms/optimize.cc index 859e57fd178c2f..e0b3d73cf4e863 100644 --- a/tensorflow/compiler/mlir/lite/transforms/optimize.cc +++ b/tensorflow/compiler/mlir/lite/transforms/optimize.cc @@ -65,6 +65,9 @@ constexpr char kRelu6[] = "RELU6"; constexpr char kRelu1[] = "RELU_N1_TO_1"; bool L2NormalizeReduceAxis(Value sq_op, DenseElementsAttr axis) { + if (axis.getNumElements() == 0) { + return false; + } if (sq_op.getType().cast().getRank() - 1 == *axis.getValues().begin() || *axis.getValues().begin() == -1) { diff --git a/tensorflow/compiler/xla/statusor.h b/tensorflow/compiler/xla/statusor.h index a32e2ad9851b0b..da6fa9a19031d6 100644 --- a/tensorflow/compiler/xla/statusor.h +++ b/tensorflow/compiler/xla/statusor.h @@ -21,8 +21,7 @@ limitations under the License. namespace xla { // Use steam_executor's StatusOr so we don't duplicate code. -template -using StatusOr = ::stream_executor::port::StatusOr; +using tensorflow::StatusOr; // TENSORFLOW_STATUS_OK } // namespace xla diff --git a/tensorflow/core/common_runtime/constant_folding.cc b/tensorflow/core/common_runtime/constant_folding.cc index 384ec836cdf9b4..64ed1c398ada30 100644 --- a/tensorflow/core/common_runtime/constant_folding.cc +++ b/tensorflow/core/common_runtime/constant_folding.cc @@ -30,6 +30,7 @@ limitations under the License. #include "tensorflow/core/framework/log_memory.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/types.h" +#include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/subgraph.h" @@ -223,7 +224,8 @@ bool IsConstantFoldable( std::unordered_map>* shape_replacement_map) { if (n->IsConstant()) { - return true; + // Skip constant folding resources as they cannot be deep copied. + return n->output_type(0) != DT_RESOURCE; } if (MaybeReplaceShapeOp(n, shape_map, shape_replacement_map)) { return true; diff --git a/tensorflow/core/common_runtime/immutable_executor_state.cc b/tensorflow/core/common_runtime/immutable_executor_state.cc index 03d12a0e98abd5..6af8c5f74c1eca 100644 --- a/tensorflow/core/common_runtime/immutable_executor_state.cc +++ b/tensorflow/core/common_runtime/immutable_executor_state.cc @@ -131,6 +131,7 @@ Status ImmutableExecutorState::Initialize(const Graph& graph) { Status s = params_.create_kernel(n->properties(), &item->kernel); if (!s.ok()) { + params_.delete_kernel(item->kernel); item->kernel = nullptr; s = AttachDef(s, *n); return s; @@ -315,6 +316,10 @@ Status ImmutableExecutorState::BuildControlFlowInfo(const Graph* g, } else if (IsExit(curr_node)) { // Exit to the parent frame. 
parent = parent_nodes[curr_id]; + if (!parent) { + return errors::InvalidArgument( + "Invalid Exit op: Cannot find a corresponding Enter op."); + } frame_name = cf_info->frame_names[parent->id()]; parent = parent_nodes[parent->id()]; } else { diff --git a/tensorflow/core/common_runtime/shape_refiner.cc b/tensorflow/core/common_runtime/shape_refiner.cc index 375f809b31b369..ec655b2acd0184 100644 --- a/tensorflow/core/common_runtime/shape_refiner.cc +++ b/tensorflow/core/common_runtime/shape_refiner.cc @@ -120,9 +120,26 @@ Status ShapeRefiner::InferShapesForFunctionSubNode( TF_RETURN_IF_ERROR(outer_context->MakeShapeFromShapeProto(proto, &handle)); outer_context->set_output(index, handle); - auto* resource = node_context->input_handle_shapes_and_types(0); + const std::vector* resource = + node_context->input_handle_shapes_and_types(0); if (resource) { - outer_context->set_output_handle_shapes_and_types(index, *resource); + // `ShapesAndType`s contain `ShapeHandle`s. These `ShapeHandle`s point + // to `Shape`s that are owned by a different inference context too. We + // need to copy them to the outer context to prevent them from being + // destroyed before they are used. + std::vector copied_shapes_and_types; + for (auto& shape_and_type : *resource) { + ShapeHandle handle; + TensorShapeProto proto; + node_context->ShapeHandleToProto(shape_and_type.shape, &proto); + TF_RETURN_IF_ERROR( + outer_context->MakeShapeFromShapeProto(proto, &handle)); + copied_shapes_and_types.push_back( + ShapeAndType(handle, shape_and_type.dtype, shape_and_type.specialized_type)); + } + + outer_context->set_output_handle_shapes_and_types( + index, copied_shapes_and_types); } } diff --git a/tensorflow/core/data/compression_utils.cc b/tensorflow/core/data/compression_utils.cc index bbff3a96667d13..40238a05a2614b 100644 --- a/tensorflow/core/data/compression_utils.cc +++ b/tensorflow/core/data/compression_utils.cc @@ -29,9 +29,10 @@ Status CompressElement(const std::vector& element, int64 total_size = 0; for (auto& component : element) { if (DataTypeCanUseMemcpy(component.dtype())) { - // Some datatypes can be memcopied, allowing us to save two copies - // (AsProtoTensorContent and SerializeToArray). 
- total_size += DMAHelper::buffer(&component)->size(); + const TensorBuffer* buffer = DMAHelper::buffer(&component); + if (buffer) { + total_size += buffer->size(); + } } else { non_memcpy_components.emplace_back(); component.AsProtoTensorContent(&non_memcpy_components.back()); @@ -53,8 +54,10 @@ Status CompressElement(const std::vector& element, component.shape().AsProto(metadata->mutable_tensor_shape()); if (DataTypeCanUseMemcpy(component.dtype())) { const TensorBuffer* buffer = DMAHelper::buffer(&component); - memcpy(position, buffer->data(), buffer->size()); - metadata->set_tensor_size_bytes(buffer->size()); + if (buffer) { + memcpy(position, buffer->data(), buffer->size()); + metadata->set_tensor_size_bytes(buffer->size()); + } } else { TensorProto& proto = non_memcpy_components[non_memcpy_component_index++]; proto.SerializeToArray(position, proto.ByteSizeLong()); @@ -94,8 +97,13 @@ Status UncompressElement(const CompressedElement& compressed, if (DataTypeCanUseMemcpy(metadata.dtype())) { out->emplace_back(metadata.dtype(), metadata.tensor_shape()); TensorBuffer* buffer = DMAHelper::buffer(&out->back()); - iov[i].iov_base = buffer->data(); - iov[i].iov_len = buffer->size(); + if (buffer) { + iov[i].iov_base = buffer->data(); + iov[i].iov_len = buffer->size(); + } else { + iov[i].iov_base = nullptr; + iov[i].iov_len = 0; + } } else { // Allocate an empty Tensor. We will fill it out later after // uncompressing into the tensor_proto_str. diff --git a/tensorflow/core/framework/BUILD b/tensorflow/core/framework/BUILD index de44be07c292d2..ba2f759070c177 100644 --- a/tensorflow/core/framework/BUILD +++ b/tensorflow/core/framework/BUILD @@ -705,6 +705,8 @@ cc_library( ":tensor_shape", ":types_proto_cc", "//tensorflow/core/lib/strings:strcat", + "//tensorflow/core/platform:macros", + "//tensorflow/core/platform:statusor", "//tensorflow/core/platform:tensor_coding", "//tensorflow/core/platform:types", "//tensorflow/core/util:managed_stack_trace", @@ -795,6 +797,7 @@ tf_cuda_library( "//tensorflow/core/lib/strings:str_util", "//tensorflow/core/lib/strings:strcat", "//tensorflow/core/platform:abi", + "//tensorflow/core/platform:errors", "//tensorflow/core/platform:logging", "//tensorflow/core/platform:macros", "//tensorflow/core/platform:platform_port", diff --git a/tensorflow/core/framework/attr_value_util.cc b/tensorflow/core/framework/attr_value_util.cc index 76fe36e7f1e2a6..39e3ed888cec5f 100644 --- a/tensorflow/core/framework/attr_value_util.cc +++ b/tensorflow/core/framework/attr_value_util.cc @@ -45,7 +45,7 @@ constexpr int kMaxTensorNestDepth = 100; // not fully defined return -1. int64 TensorByteSize(const TensorProto& t) { // num_elements returns -1 if shape is not fully defined. - int64 num_elems = TensorShape(t.tensor_shape()).num_elements(); + int64 num_elems = PartialTensorShape(t.tensor_shape()).num_elements(); return num_elems < 0 ? 
-1 : num_elems * DataTypeSize(t.dtype()); } diff --git a/tensorflow/core/framework/common_shape_fns.cc b/tensorflow/core/framework/common_shape_fns.cc index ff527e48c7283c..a7f0740c8e8720 100644 --- a/tensorflow/core/framework/common_shape_fns.cc +++ b/tensorflow/core/framework/common_shape_fns.cc @@ -664,6 +664,8 @@ Status Conv2DShapeImpl(shape_inference::InferenceContext* c, if (c->ValueKnown(input_depth_dim) && c->ValueKnown(filter_input_depth_dim)) { int64 input_depth_value = c->Value(input_depth_dim), filter_input_depth_value = c->Value(filter_input_depth_dim); + if (filter_input_depth_value == 0) + return errors::InvalidArgument("Depth of filter must not be 0"); if (input_depth_value % filter_input_depth_value != 0) return errors::InvalidArgument( "Depth of input (", input_depth_value, @@ -673,6 +675,8 @@ Status Conv2DShapeImpl(shape_inference::InferenceContext* c, int64 num_groups = input_depth_value / filter_input_depth_value; if (c->ValueKnown(output_depth_dim)) { int64 output_depth_value = c->Value(output_depth_dim); + if (num_groups == 0) + return errors::InvalidArgument("Number of groups must not be 0"); if (output_depth_value % num_groups != 0) return errors::InvalidArgument( "Depth of output (", output_depth_value, @@ -803,6 +807,8 @@ Status Conv3DShape(shape_inference::InferenceContext* c) { if (c->ValueKnown(input_depth_dim) && c->ValueKnown(filter_input_depth_dim)) { int64 input_depth_value = c->Value(input_depth_dim), filter_input_depth_value = c->Value(filter_input_depth_dim); + if (filter_input_depth_value == 0) + return errors::InvalidArgument("Depth of filter must not be 0"); if (input_depth_value % filter_input_depth_value != 0) return errors::InvalidArgument( "Depth of input (", input_depth_value, @@ -812,6 +818,8 @@ Status Conv3DShape(shape_inference::InferenceContext* c) { int64 num_groups = input_depth_value / filter_input_depth_value; if (c->ValueKnown(output_depth_dim)) { int64 output_depth_value = c->Value(output_depth_dim); + if (num_groups == 0) + return errors::InvalidArgument("Number of groups must not be 0"); if (output_depth_value % num_groups != 0) return errors::InvalidArgument( "Depth of output (", output_depth_value, @@ -1929,7 +1937,7 @@ Status ConcatShapeHelper(InferenceContext* c, int start_value_index, } // Minimum required number of dimensions. - const int min_rank = concat_dim < 0 ? -concat_dim : concat_dim + 1; + const int64 min_rank = concat_dim < 0 ? 
-concat_dim : concat_dim + 1; ShapeHandle output_before; ShapeHandle output_after; @@ -2424,6 +2432,9 @@ Status SparseReduceShapeFn(InferenceContext* c) { int64 ndims = shape_vec.size(); absl::flat_hash_set axes; + if (ndims == 0) + return errors::InvalidArgument( + "Number of dims in shape tensor must not be 0"); for (int i = 0; i < axes_vec.size(); i++) { axes.insert((axes_vec(i) + ndims) % ndims); } diff --git a/tensorflow/core/framework/function.cc b/tensorflow/core/framework/function.cc index b84cfa31157233..ac066da8ba53ad 100644 --- a/tensorflow/core/framework/function.cc +++ b/tensorflow/core/framework/function.cc @@ -177,7 +177,9 @@ class FunctionInstantiationHelper { DataTypeVector dtypes; TF_RETURN_IF_ERROR( ArgNumType(attr_values, arg_def, &is_type_list, &dtypes)); - CHECK_GE(dtypes.size(), size_t{1}); + if (dtypes.size() < size_t{1}) { + return errors::Internal("Expected a list of at least one dtype"); + } int arg_index = result_.nodes.size(); TF_RETURN_IF_ERROR( AddItem(arg_def.name(), {true, arg_index, 0, is_type_list, dtypes})); @@ -185,7 +187,11 @@ class FunctionInstantiationHelper { for (size_t i = 0; i < dtypes.size(); ++i) { TF_RETURN_IF_ERROR(AddItem(strings::StrCat(arg_def.name(), ":", i), {true, arg_index, 0, false, {dtypes[i]}})); - DCHECK_EQ(arg_index, result_.nodes.size()); + if (arg_index != result_.nodes.size()) { + return errors::Internal( + "Expected arg_index to be equal to the number of nodes in result.", + " Got ", arg_index, " and ", result_.nodes.size()); + } string name = arg_def.name(); if (dtypes.size() > 1) { strings::StrAppend(&name, "_", i); diff --git a/tensorflow/core/framework/op_def_util.cc b/tensorflow/core/framework/op_def_util.cc index 486f92b3b20fdb..8500f247bf0712 100644 --- a/tensorflow/core/framework/op_def_util.cc +++ b/tensorflow/core/framework/op_def_util.cc @@ -818,9 +818,10 @@ bool RepeatedAttrDefEqual( const protobuf::RepeatedPtrField& a2) { std::unordered_map a1_set; for (const OpDef::AttrDef& def : a1) { - DCHECK(a1_set.find(def.name()) == a1_set.end()) - << "AttrDef names must be unique, but '" << def.name() - << "' appears more than once"; + if (a1_set.find(def.name()) != a1_set.end()) { + LOG(ERROR) << "AttrDef names must be unique, but '" << def.name() + << "' appears more than once"; + } a1_set[def.name()] = &def; } for (const OpDef::AttrDef& def : a2) { diff --git a/tensorflow/core/framework/resource_handle.cc b/tensorflow/core/framework/resource_handle.cc index e7f4c2afc90a4a..c8306ca5cf23f9 100644 --- a/tensorflow/core/framework/resource_handle.cc +++ b/tensorflow/core/framework/resource_handle.cc @@ -15,14 +15,25 @@ limitations under the License. 
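+// An illustrative usage sketch for the `BuildResourceHandle` factory added
+// below (`proto` stands for an untrusted `ResourceHandleProto`; the calling
+// context is assumed):
+//
+//   ResourceHandle handle;
+//   Status s = ResourceHandle::BuildResourceHandle(proto, &handle);
+//   if (!s.ok()) return s;  // malformed shapes are rejected here instead of
+//                           // CHECK-failing inside the constructor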
#include "tensorflow/core/framework/resource_handle.h" #include "tensorflow/core/framework/resource_handle.pb.h" +#include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/lib/strings/strcat.h" +#include "tensorflow/core/platform/errors.h" +#include "tensorflow/core/platform/macros.h" namespace tensorflow { ResourceHandle::ResourceHandle() {} ResourceHandle::ResourceHandle(const ResourceHandleProto& proto) { - FromProto(proto); + TF_CHECK_OK(FromProto(proto)); +} + +Status ResourceHandle::BuildResourceHandle(const ResourceHandleProto& proto, + ResourceHandle* out) { + if (out == nullptr) + return errors::Internal( + "BuildResourceHandle() was called with nullptr for the output"); + return out->FromProto(proto); } ResourceHandle::~ResourceHandle() {} @@ -40,7 +51,7 @@ void ResourceHandle::AsProto(ResourceHandleProto* proto) const { } } -void ResourceHandle::FromProto(const ResourceHandleProto& proto) { +Status ResourceHandle::FromProto(const ResourceHandleProto& proto) { set_device(proto.device()); set_container(proto.container()); set_name(proto.name()); @@ -49,10 +60,16 @@ void ResourceHandle::FromProto(const ResourceHandleProto& proto) { std::vector dtypes_and_shapes; for (const auto& dtype_and_shape : proto.dtypes_and_shapes()) { DataType dtype = dtype_and_shape.dtype(); - PartialTensorShape shape(dtype_and_shape.shape()); + PartialTensorShape shape; + Status s = PartialTensorShape::BuildPartialTensorShape( + dtype_and_shape.shape(), &shape); + if (!s.ok()) { + return s; + } dtypes_and_shapes.push_back(DtypeAndPartialTensorShape{dtype, shape}); } dtypes_and_shapes_ = std::move(dtypes_and_shapes); + return Status::OK(); } string ResourceHandle::SerializeAsString() const { @@ -63,9 +80,7 @@ string ResourceHandle::SerializeAsString() const { bool ResourceHandle::ParseFromString(const string& s) { ResourceHandleProto proto; - const bool status = proto.ParseFromString(s); - if (status) FromProto(proto); - return status; + return proto.ParseFromString(s) && FromProto(proto).ok(); } string ResourceHandle::DebugString() const { @@ -98,7 +113,9 @@ bool DecodeResourceHandleList(std::unique_ptr d, if (!proto.ParseFromArray(d->Data(sizes[i]), sizes[i])) { return false; } - ps[i].FromProto(proto); + if (!ps[i].FromProto(proto).ok()) { + return false; + } } return true; } diff --git a/tensorflow/core/framework/resource_handle.h b/tensorflow/core/framework/resource_handle.h index 3921d80faf4fe4..cba3b25d4b29f2 100644 --- a/tensorflow/core/framework/resource_handle.h +++ b/tensorflow/core/framework/resource_handle.h @@ -38,6 +38,11 @@ class ResourceHandle { ResourceHandle(const ResourceHandleProto& proto); ~ResourceHandle(); + // Use this factory method if the `proto` comes from user controlled input, to + // prevent a denial of service. + static Status BuildResourceHandle(const ResourceHandleProto& proto, + ResourceHandle* out); + // Unique name for the device containing the resource. 
const std::string& device() const { return device_; } @@ -83,7 +88,7 @@ class ResourceHandle { // Conversion to and from ResourceHandleProto void AsProto(ResourceHandleProto* proto) const; - void FromProto(const ResourceHandleProto& proto); + Status FromProto(const ResourceHandleProto& proto); // Serialization via ResourceHandleProto std::string SerializeAsString() const; diff --git a/tensorflow/core/framework/shape_inference.cc b/tensorflow/core/framework/shape_inference.cc index 721c20b7491aa0..432caaea2792e2 100644 --- a/tensorflow/core/framework/shape_inference.cc +++ b/tensorflow/core/framework/shape_inference.cc @@ -14,6 +14,8 @@ limitations under the License. ==============================================================================*/ #include "tensorflow/core/framework/shape_inference.h" +#include + #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/partial_tensor_shape.h" @@ -779,6 +781,19 @@ Status InferenceContext::InternalMakeShapeFromTensor( return ReturnUnknownShape(out); } const auto num_dims = Value(shape_dim); + // TODO(mihaimaruseac): Should be `TensorShape::MaxDimensions()` as we are + // not able to materialize shapes with more than this number of dimensions + // but then shape inference would fail for operations such as + // `tf.range`/`tf.ones`, etc. where the shape is not really materialized, + // only used during the inference. Hence, just prevent doing a `reserve` + // with a very large argument. + const int64_t max_dimensions = 1 << 25; + if (num_dims >= max_dimensions) { + return errors::Internal( + "Cannot create a tensor with ", num_dims, + " dimensions, as these would be more than maximum of ", + max_dimensions); + } std::vector dims; dims.reserve(num_dims); for (int i = 0; i < num_dims; i++) dims.push_back(UnknownDim()); diff --git a/tensorflow/core/framework/tensor.cc b/tensorflow/core/framework/tensor.cc index e5eb512a6422a9..9e3190cdab5ad2 100644 --- a/tensorflow/core/framework/tensor.cc +++ b/tensorflow/core/framework/tensor.cc @@ -48,6 +48,7 @@ limitations under the License. #include "tensorflow/core/lib/gtl/inlined_vector.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/lib/strings/strcat.h" +#include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/platform/protobuf.h" @@ -530,6 +531,46 @@ TensorBuffer* FromProtoField(Allocator* a, const TensorProto& in, int64 n) { return buf; } +// Separate implementation for `ResourceHandle` to handle the case when the +// proto for the resource is invalid. See `resource_handle.h` constructor and +// static factory builder. +template <> +TensorBuffer* FromProtoField(Allocator* a, + const TensorProto& in, int64_t n) { + CHECK_GT(n, 0); + Buffer* buf = new Buffer(a, n); + ResourceHandle* data = buf->template base(); + if (data == nullptr) { + buf->Unref(); + return nullptr; + } + const int64_t in_n = ProtoHelper::NumElements(in); + if (in_n <= 0) { + std::fill_n(data, n, ResourceHandle()); + } else { + // If tensor shape says we have n < in_n elements in the output tensor + // then make sure to only decode the first n out of the in_n elements in the + // in tensors. In all other cases, we decode all in_n elements of in and set + // the remaining elements up to n to be the default ResourceHandle() value. + const int64_t real_n = n < in_n ? 
n : in_n; + for (int64_t i = 0; i < real_n; ++i) { + Status s = ResourceHandle::BuildResourceHandle(in.resource_handle_val(i), + &data[i]); + if (!s.ok()) { + LOG(ERROR) << "Could not decode resource handle from proto \"" + << in.resource_handle_val(i).ShortDebugString() + << "\", returned status: " << s.ToString(); + buf->Unref(); + return nullptr; + } + } + for (int64_t i = in_n; i < n; ++i) { + data[i] = ResourceHandle(); + } + } + return buf; +} + template <> TensorBuffer* FromProtoField(Allocator* a, const TensorProto& in, int64 n) { @@ -717,11 +758,11 @@ bool Tensor::RefCountIsOne() const { // The macro CASES() expands to a switch statement conditioned on // TYPE_ENUM. Each case expands the STMTS after a typedef for T. #define SINGLE_ARG(...) __VA_ARGS__ -#define CASE(TYPE, STMTS) \ - case DataTypeToEnum::value: { \ - typedef TYPE T; \ - STMTS; \ - break; \ +#define CASE(TYPE, STMTS) \ + case DataTypeToEnum::value: { \ + typedef TF_ATTRIBUTE_UNUSED TYPE T; \ + STMTS; \ + break; \ } #define CASES_WITH_DEFAULT(TYPE_ENUM, STMTS, INVALID, DEFAULT) \ switch (TYPE_ENUM) { \ @@ -757,9 +798,8 @@ bool Tensor::RefCountIsOne() const { } #define CASES(TYPE_ENUM, STMTS) \ - CASES_WITH_DEFAULT(TYPE_ENUM, STMTS, \ - LOG(FATAL) << "Unexpected type: " << TYPE_ENUM; \ - , LOG(FATAL) << "Type not set";) + CASES_WITH_DEFAULT(TYPE_ENUM, STMTS, LOG(FATAL) << "Type not set"; \ + , LOG(FATAL) << "Unexpected type: " << TYPE_ENUM;) Tensor::Tensor(Allocator* a, DataType type, const TensorShape& shape) : shape_(shape), buf_(nullptr) { @@ -789,6 +829,16 @@ Tensor::Tensor(Allocator* a, DataType type, const TensorShape& shape, } } +Status Tensor::BuildTensor(DataType type, const TensorShape& shape, + Tensor* out_tensor) { + // Avoid crashes due to invalid or unsupported types. + CASES_WITH_DEFAULT( + type, {}, return errors::InvalidArgument("Type not set"), + return errors::InvalidArgument("Unexpected type: ", DataType_Name(type))); + *out_tensor = Tensor(type, shape); + return Status::OK(); +} + // NOTE(mrry): The default allocator for a Tensor (when none is specified) is // the default CPU allocator for NUMA zone 0. Accessing that currently involves // acquiring a lock, which guards initialization of the per-NUMA zone @@ -927,6 +977,15 @@ bool Tensor::FromProto(Allocator* a, const TensorProto& proto) { dtype_error = true, dtype_error = true); } if (dtype_error || p == nullptr) return false; + } else { + // Handle the case of empty tensors (N = 0) or tensors with incomplete shape + // (N = -1). All other values of `shape.num_elements()` should be invalid by + // construction. + // Here, we just need to validate that the `proto.dtype()` value is valid. + bool dtype_error = false; + CASES_WITH_DEFAULT(proto.dtype(), break, dtype_error = true, + dtype_error = true); + if (dtype_error) return false; } shape_ = shape; set_dtype(proto.dtype()); diff --git a/tensorflow/core/framework/tensor.h b/tensorflow/core/framework/tensor.h index 33a240d85dc5c9..dd01bfc272010a 100644 --- a/tensorflow/core/framework/tensor.h +++ b/tensorflow/core/framework/tensor.h @@ -170,6 +170,15 @@ class Tensor { /// for details. explicit Tensor(DataType type); + /// \brief Initializes a tensor with the input `type` and `shape`, or returns + /// an error and leaves `out_tensor` unmodified. This factory method should be + /// used instead of the corresponding constructor if calling code cannot + /// validate that the `DataType` is valid and supported. + /// + /// The underlying buffer is allocated using a `CPUAllocator`. 
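+ ///
+ /// A usage sketch (illustrative; assumes an untrusted `TensorProto proto`
+ /// in scope and uses the `TensorShape::BuildTensorShape` factory added
+ /// alongside this change):
+ ///
+ ///   TensorShape shape;
+ ///   TF_RETURN_IF_ERROR(
+ ///       TensorShape::BuildTensorShape(proto.tensor_shape(), &shape));
+ ///   Tensor tensor;
+ ///   TF_RETURN_IF_ERROR(Tensor::BuildTensor(proto.dtype(), shape, &tensor));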
+ static Status BuildTensor(DataType type, const TensorShape& shape, + Tensor* out_tensor); + private: // A tag type for selecting the `Tensor` constructor overload that creates a // scalar tensor in host memory. diff --git a/tensorflow/core/framework/tensor_shape.cc b/tensorflow/core/framework/tensor_shape.cc index 5144577e7aa0f5..117a70b7fa39a4 100644 --- a/tensorflow/core/framework/tensor_shape.cc +++ b/tensorflow/core/framework/tensor_shape.cc @@ -229,7 +229,7 @@ Status TensorShapeBase::InitDims(gtl::ArraySlice dim_sizes) { if (!kIsPartial && !large_size) { for (auto s : dim_sizes) { if (TF_PREDICT_FALSE(s < 0)) { - return errors::Internal( + return errors::InvalidArgument( "Expected shape dimensions to be non-negative, got ", s); } } @@ -411,7 +411,8 @@ template Status TensorShapeBase::AddDimWithStatus(int64 size) { if (!kIsPartial) { if (TF_PREDICT_FALSE(size < 0)) { - return errors::Internal("Expected a non-negative size, got ", size); + return errors::InvalidArgument("Expected a non-negative size, got ", + size); } } @@ -420,7 +421,7 @@ Status TensorShapeBase::AddDimWithStatus(int64 size) { } if (TF_PREDICT_FALSE(ndims_byte() >= MaxDimensions())) { - return errors::Internal("Too many dimensions in tensor"); + return errors::InvalidArgument("Too many dimensions in tensor"); } int64 new_num_elements; @@ -429,9 +430,9 @@ Status TensorShapeBase::AddDimWithStatus(int64 size) { } else { new_num_elements = MultiplyWithoutOverflow(num_elements(), size); if (TF_PREDICT_FALSE(new_num_elements < 0)) { - return errors::Internal("Encountered overflow when multiplying ", - num_elements(), " with ", size, - ", result: ", new_num_elements); + return errors::InvalidArgument("Encountered overflow when multiplying ", + num_elements(), " with ", size, + ", result: ", new_num_elements); } } @@ -521,7 +522,8 @@ template Status TensorShapeBase::InsertDimWithStatus(int d, int64 size) { if (!kIsPartial) { if (TF_PREDICT_FALSE(size < 0)) { - return errors::Internal("Expected a non-negative size, got ", size); + return errors::InvalidArgument("Expected a non-negative size, got ", + size); } } @@ -591,13 +593,14 @@ void TensorShapeBase::set_dim(int d, int64 size) { template Status TensorShapeBase::SetDimWithStatus(int d, int64 size) { if (TF_PREDICT_FALSE(d < 0)) { - return errors::Internal("Index must be non-negative, got ", d); + return errors::InvalidArgument("Index must be non-negative, got ", d); } if (TF_PREDICT_FALSE(d >= dims())) { - return errors::Internal("Index must be less than ", dims(), ", got ", d); + return errors::InvalidArgument("Index must be less than ", dims(), ", got ", + d); } - if (TF_PREDICT_FALSE(size < 0)) { - return errors::Internal("Expected a non-negative size, got ", size); + if (TF_PREDICT_FALSE(!kIsPartial && size < 0)) { + return errors::InvalidArgument("Expected a non-negative size, got ", size); } if (tag() == REP16 && size < kMaxRep16) { diff --git a/tensorflow/core/framework/tensor_shape.h b/tensorflow/core/framework/tensor_shape.h index a690123f0ceaf9..d12994304faf13 100644 --- a/tensorflow/core/framework/tensor_shape.h +++ b/tensorflow/core/framework/tensor_shape.h @@ -359,6 +359,23 @@ class TensorShape : public TensorShapeBase { public: using TensorShapeBase::TensorShapeBase; + // These factory methods should be used instead of the constructors that take + // an array of sizes if calling code cannot validate that the sizes specify a + // valid `TensorShape`. + // The value in `*out` is valid iff the returned value is `Status::OK`. 
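+ //
+ // Sketch of the intended pattern (illustrative; `dims` stands for sizes
+ // computed from untrusted input):
+ //
+ //   TensorShape shape;
+ //   Status s = TensorShape::BuildTensorShape(dims, &shape);
+ //   if (!s.ok()) return s;  // negative sizes, too many dims, or overflow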
+ static Status BuildTensorShape(gtl::ArraySlice dim_sizes, + TensorShape* out) { + return BuildTensorShapeBase(dim_sizes, out); + } + static Status BuildTensorShape(std::initializer_list dim_sizes, + TensorShape* out) { + return BuildTensorShape(gtl::ArraySlice(dim_sizes), out); + } + static Status BuildTensorShape(const TensorShapeProto& proto, + TensorShape* out) { + return BuildTensorShapeBase(proto, out); + } + /// Allow a TensorShape to be used as a PartialTensorShape without copying operator const PartialTensorShape&() const; // NOLINT(runtime/explicit) @@ -508,6 +525,23 @@ class PartialTensorShape : public TensorShapeBase { PartialTensorShape() {} using TensorShapeBase::TensorShapeBase; + // These factory methods should be used instead of the constructors that take + // an array of sizes if calling code cannot validate that the sizes specify a + // valid `PartialTensorShape`. + // The value in `*out` is valid iff the returned value is `Status::OK`. + static Status BuildPartialTensorShape(gtl::ArraySlice dim_sizes, + PartialTensorShape* out) { + return BuildTensorShapeBase(dim_sizes, out); + } + static Status BuildPartialTensorShape( + std::initializer_list dim_sizes, PartialTensorShape* out) { + return BuildPartialTensorShape(gtl::ArraySlice(dim_sizes), out); + } + static Status BuildPartialTensorShape(const TensorShapeProto& proto, + PartialTensorShape* out) { + return BuildTensorShapeBase(proto, out); + } + /// Add a dimension to the end ("inner-most"), returns a new /// PartialTensorShape. /// REQUIRES: `size >= -1`, where -1 means unknown diff --git a/tensorflow/core/framework/tensor_shape_test.cc b/tensorflow/core/framework/tensor_shape_test.cc index f41d00f2a46472..cf087d0647f662 100644 --- a/tensorflow/core/framework/tensor_shape_test.cc +++ b/tensorflow/core/framework/tensor_shape_test.cc @@ -214,7 +214,7 @@ TEST(TensorShapeTest, AddDimWithStatus) { ASSERT_EQ(4, s.dims()); status = s.AddDimWithStatus(-1); - EXPECT_EQ(tensorflow::error::INTERNAL, status.code()); + EXPECT_EQ(tensorflow::error::INVALID_ARGUMENT, status.code()); } TEST(TensorShapeTest, Factory) { @@ -225,7 +225,7 @@ TEST(TensorShapeTest, Factory) { ASSERT_EQ(3, s.dims()); status = TensorShape::BuildTensorShapeBase({-10, 5, 20}, &s); - EXPECT_EQ(tensorflow::error::INTERNAL, status.code()); + EXPECT_EQ(tensorflow::error::INVALID_ARGUMENT, status.code()); } // ----------------------------------------------------------------------- diff --git a/tensorflow/core/framework/tensor_slice.cc b/tensorflow/core/framework/tensor_slice.cc index 975e1e2e24a439..7041b011157434 100644 --- a/tensorflow/core/framework/tensor_slice.cc +++ b/tensorflow/core/framework/tensor_slice.cc @@ -14,7 +14,10 @@ limitations under the License. 
==============================================================================*/ #include "tensorflow/core/framework/tensor_slice.h" + +#include #include + #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/strings/numbers.h" #include "tensorflow/core/lib/strings/str_util.h" @@ -44,6 +47,34 @@ TensorSlice::TensorSlice( } } +Status TensorSlice::BuildTensorSlice(const TensorSliceProto& proto, + TensorSlice* output) { + output->Clear(); + output->starts_.reserve(proto.extent_size()); + output->lengths_.reserve(proto.extent_size()); + for (const auto& e : proto.extent()) { + int64_t l = GetExtentLength(e); + if (e.start() != 0 || l != kFullExtent) { + if (e.start() < 0 || l <= 0) { + return errors::InvalidArgument( + "Expected non-negative start and positive length but got start = ", + e.start(), ", length = ", l, ": extent = ", e.ShortDebugString()); + } + // Calculating the extent end must not cause signed integer overflow. + if (static_cast(e.start()) + static_cast(e.length()) > + std::numeric_limits::max()) { + return errors::InvalidArgument( + "Extent end exceeds the maximum possible size: extent = ", + e.ShortDebugString()); + } + } + output->starts_.push_back(e.start()); + output->lengths_.push_back(l); + } + + return Status::OK(); +} + Status TensorSlice::Parse(const string& str, TensorSlice* slice) { std::vector items = str_util::Split(str, ':', str_util::SkipEmpty()); slice->starts_.reserve(items.size()); diff --git a/tensorflow/core/framework/tensor_slice.h b/tensorflow/core/framework/tensor_slice.h index 82f21fb17eec78..4c2795694564da 100644 --- a/tensorflow/core/framework/tensor_slice.h +++ b/tensorflow/core/framework/tensor_slice.h @@ -47,6 +47,12 @@ class TensorSlice { explicit TensorSlice(const TensorSliceProto& proto); explicit TensorSlice(std::initializer_list> extents); + // This factory method should be used instead of the constructor that takes a + // `TensorSliceProto` if calling code cannot validate that the sizes specify a + // valid `TensorSlice`. + static Status BuildTensorSlice(const TensorSliceProto& proto, + TensorSlice* output); + static Status Parse(const string& str, TensorSlice* output); static TensorSlice ParseOrDie(const string& str) { TensorSlice ret; diff --git a/tensorflow/core/framework/tensor_slice_test.cc b/tensorflow/core/framework/tensor_slice_test.cc index 54e680484e228b..69b7c7cd084e33 100644 --- a/tensorflow/core/framework/tensor_slice_test.cc +++ b/tensorflow/core/framework/tensor_slice_test.cc @@ -15,6 +15,8 @@ limitations under the License. #include "tensorflow/core/framework/tensor_slice.h" +#include + #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/protobuf.h" @@ -123,6 +125,48 @@ TEST(TensorSliceTest, Serialization) { } } +// Testing `BuildTensorSlice` with valid and invalid input protos. +TEST(TensorSliceTest, BuildTensorSlice) { + TensorSliceProto proto; + TensorSlice({{0, -1}, {0, 10}, {14, 1}}).AsProto(&proto); + TensorSlice s; + + // Successful building. + { + TF_ASSERT_OK(TensorSlice::BuildTensorSlice(proto, &s)); + EXPECT_EQ("-:0,10:14,1", s.DebugString()); + } + + // Failed building due to negative extent start. + { + TensorSliceProto invalid_proto = proto; + invalid_proto.mutable_extent(0)->set_start(-1); + EXPECT_FALSE(TensorSlice::BuildTensorSlice(invalid_proto, &s).ok()); + } + + // Failed building due to negative extent length.
+ { + TensorSliceProto invalid_proto = proto; + invalid_proto.mutable_extent(2)->set_length(-1); + EXPECT_FALSE(TensorSlice::BuildTensorSlice(invalid_proto, &s).ok()); + } + + // Failed building due to missing extent length. + { + TensorSliceProto invalid_proto = proto; + invalid_proto.mutable_extent(2)->clear_length(); + EXPECT_FALSE(TensorSlice::BuildTensorSlice(invalid_proto, &s).ok()); + } + + // Failed building due to extent end overflowing. + { + TensorSliceProto invalid_proto = proto; + invalid_proto.mutable_extent(2)->set_length( + std::numeric_limits::max()); + EXPECT_FALSE(TensorSlice::BuildTensorSlice(invalid_proto, &s).ok()); + } +} + // Testing the slice intersection TEST(TensorSliceTest, Intersection) { // "EVERYTHING" intersects with everything diff --git a/tensorflow/core/grappler/costs/BUILD b/tensorflow/core/grappler/costs/BUILD index 9204607f3838e8..80a6648cd5b2cf 100644 --- a/tensorflow/core/grappler/costs/BUILD +++ b/tensorflow/core/grappler/costs/BUILD @@ -182,6 +182,7 @@ tf_cuda_library( "//tensorflow/core:lib_proto_parsing", "//tensorflow/core:protos_all_cc", "//tensorflow/core/grappler:utils", + "//tensorflow/core/util:overflow", "//tensorflow/core/grappler/clusters:utils", ] + tf_protos_grappler(), ) @@ -338,22 +339,11 @@ cc_library( "//tensorflow/core:lib", "//tensorflow/core:protos_all_cc", "//tensorflow/core/grappler/clusters:utils", + "//tensorflow/core/util:overflow", + "//tensorflow/core/platform:statusor", ] + tf_protos_grappler(), ) -tf_cc_test( - name = "op_level_cost_estimator_test", - srcs = ["op_level_cost_estimator_test.cc"], - tags = ["no_oss"], # b/163222310 - deps = [ - ":op_level_cost_estimator", - "//tensorflow/core:framework", - "//tensorflow/core:protos_all_cc", - "//tensorflow/core:test", - "//tensorflow/core:test_main", - ], -) - cc_library( name = "analytical_cost_estimator", srcs = ["analytical_cost_estimator.cc"], diff --git a/tensorflow/core/grappler/costs/graph_properties.cc b/tensorflow/core/grappler/costs/graph_properties.cc index 644efe3326ab9f..441a7524bb4eda 100644 --- a/tensorflow/core/grappler/costs/graph_properties.cc +++ b/tensorflow/core/grappler/costs/graph_properties.cc @@ -1128,7 +1128,12 @@ class SymbolicShapeRefiner { GetUnknownOutputShape(node, output_port); InferenceContext* ctx = GetContext(node); if (ctx == nullptr) { - return errors::InvalidArgument("Missing context"); + return errors::InvalidArgument("SetUnknownShape: Missing context"); + } + if (output_port < 0 || output_port >= ctx->num_outputs()) { + return errors::InvalidArgument( + "SetUnknownShape: output_port must be in [0, ", ctx->num_outputs(), + ") but was ", output_port); } ctx->set_output(output_port, shape); return Status::OK(); diff --git a/tensorflow/core/grappler/costs/op_level_cost_estimator.cc b/tensorflow/core/grappler/costs/op_level_cost_estimator.cc index 009f2471d39fd5..ae6bc399aec544 100644 --- a/tensorflow/core/grappler/costs/op_level_cost_estimator.cc +++ b/tensorflow/core/grappler/costs/op_level_cost_estimator.cc @@ -27,6 +27,7 @@ limitations under the License. 
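+// `MultiplyWithoutOverflow(x, y)` from tensorflow/core/util/overflow.h
+// (included below) returns x * y, or -1 when the product of the two
+// non-negative int64 operands would overflow; callers in this file treat -1
+// as the existing "unknown size" sentinel. A sketch with illustrative names:
+//
+//   int64_t size = MultiplyWithoutOverflow(count, elem_size);
+//   if (size < 0) return -1;  // overflow: fall back to "unknown"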
#include "tensorflow/core/grappler/costs/op_context.h" #include "tensorflow/core/grappler/costs/utils.h" #include "tensorflow/core/platform/errors.h" +#include "tensorflow/core/util/overflow.h" namespace tensorflow { namespace grappler { @@ -1535,7 +1536,14 @@ int64 OpLevelCostEstimator::CalculateTensorElementCount( auto tensor_shape = MaybeGetMinimumShape(tensor.shape(), num_dims, found_unknown_shapes); for (const auto& dim : tensor_shape.dim()) { - tensor_size *= dim.size(); + int64_t new_tensor_size = MultiplyWithoutOverflow(tensor_size, dim.size()); + if (new_tensor_size < 0) { + VLOG(1) << "Overflow encountered when computing element count of a " + "tensor, multiplying " + << tensor_size << " with " << dim.size(); + return -1; + } + tensor_size = new_tensor_size; } return tensor_size; } @@ -1545,7 +1553,13 @@ int64 OpLevelCostEstimator::CalculateTensorSize( int64 count = CalculateTensorElementCount(tensor, found_unknown_shapes); int size = DataTypeSize(BaseType(tensor.dtype())); VLOG(2) << "Count: " << count << " DataTypeSize: " << size; - return count * size; + int64_t tensor_size = MultiplyWithoutOverflow(count, size); + if (tensor_size < 0) { + VLOG(1) << "Overflow encountered when computing tensor size, multiplying " + << count << " with " << size; + return -1; + } + return tensor_size; } int64 OpLevelCostEstimator::CalculateInputSize(const OpInfo& op_info, @@ -1598,7 +1612,14 @@ int64 OpLevelCostEstimator::CalculateOutputSize(const OpInfo& op_info, auto output_shape = MaybeGetMinimumShape(original_output_shape, num_dims, found_unknown_shapes); for (const auto& dim : output_shape.dim()) { - output_size *= dim.size(); + int64_t new_output_size = + MultiplyWithoutOverflow(output_size, dim.size()); + if (new_output_size < 0) { + VLOG(1) << "Overflow encountered when estimating cost, multiplying " + << output_size << " with " << dim.size(); + return -1; + } + output_size = new_output_size; } total_output_size += output_size; VLOG(1) << "Output Size: " << output_size @@ -2121,7 +2142,7 @@ OpInfo::TensorProperties OpLevelCostEstimator::DescribeTensor( } /* static */ -OpLevelCostEstimator::ConvolutionDimensions +StatusOr OpLevelCostEstimator::OpDimensionsFromInputs( const TensorShapeProto& original_image_shape, const OpInfo& op_info, bool* found_unknown_shapes) { @@ -2158,6 +2179,11 @@ OpLevelCostEstimator::OpDimensionsFromInputs( std::vector strides = GetStrides(op_info); int64 sx = strides[x_index]; int64 sy = strides[y_index]; + if (sx == 0 || sy == 0) { + return errors::InvalidArgument( + "Stride must be > 0 for Height and Width, but got (", sy, ", ", sx, + ")"); + } const auto padding = GetPadding(op_info); int64 ox = GetOutputSize(ix, kx, sx, padding); @@ -2174,8 +2200,9 @@ Status OpLevelCostEstimator::PredictMaxPool(const OpContext& op_context, bool found_unknown_shapes = false; const auto& op_info = op_context.op_info; // x: op_info.inputs(0) - ConvolutionDimensions dims = OpDimensionsFromInputs( - op_info.inputs(0).shape(), op_info, &found_unknown_shapes); + TF_ASSIGN_OR_RETURN(ConvolutionDimensions dims, + OpDimensionsFromInputs(op_info.inputs(0).shape(), op_info, + &found_unknown_shapes)); // kx * ky - 1 comparisons per output (kx * xy > 1) // or 1 copy per output (kx * k1 = 1). int per_output_ops = dims.kx * dims.ky == 1 ? 
1 : dims.kx * dims.ky - 1; @@ -2215,8 +2242,9 @@ Status OpLevelCostEstimator::PredictMaxPoolGrad(const OpContext& op_context, op_info.ShortDebugString()); } - ConvolutionDimensions dims = OpDimensionsFromInputs( - op_info.inputs(0).shape(), op_info, &found_unknown_shapes); + TF_ASSIGN_OR_RETURN(ConvolutionDimensions dims, + OpDimensionsFromInputs(op_info.inputs(0).shape(), op_info, + &found_unknown_shapes)); int64 ops = 0; if (dims.kx == 1 && dims.ky == 1) { @@ -2291,8 +2319,9 @@ Status OpLevelCostEstimator::PredictAvgPool(const OpContext& op_context, bool found_unknown_shapes = false; const auto& op_info = op_context.op_info; // x: op_info.inputs(0) - ConvolutionDimensions dims = OpDimensionsFromInputs( - op_info.inputs(0).shape(), op_info, &found_unknown_shapes); + TF_ASSIGN_OR_RETURN(ConvolutionDimensions dims, + OpDimensionsFromInputs(op_info.inputs(0).shape(), op_info, + &found_unknown_shapes)); // kx * ky - 1 additions and 1 multiplication per output. int64 ops = dims.batch * dims.ox * dims.oy * dims.oz * dims.kx * dims.ky; @@ -2348,8 +2377,9 @@ Status OpLevelCostEstimator::PredictAvgPoolGrad(const OpContext& op_context, found_unknown_shapes = true; } - ConvolutionDimensions dims = - OpDimensionsFromInputs(x_shape, op_info, &found_unknown_shapes); + TF_ASSIGN_OR_RETURN( + ConvolutionDimensions dims, + OpDimensionsFromInputs(x_shape, op_info, &found_unknown_shapes)); int64 ops = 0; if (dims.kx <= dims.sx && dims.ky <= dims.sy) { @@ -2375,8 +2405,9 @@ Status OpLevelCostEstimator::PredictFusedBatchNorm( // offset: op_info.inputs(2) // mean: op_info.inputs(3) --> only for inference // variance: op_info.inputs(4) --> only for inference - ConvolutionDimensions dims = OpDimensionsFromInputs( - op_info.inputs(0).shape(), op_info, &found_unknown_shapes); + TF_ASSIGN_OR_RETURN(ConvolutionDimensions dims, + OpDimensionsFromInputs(op_info.inputs(0).shape(), op_info, + &found_unknown_shapes)); const bool is_training = IsTraining(op_info); int64 ops = 0; @@ -2425,8 +2456,9 @@ Status OpLevelCostEstimator::PredictFusedBatchNormGrad( // scale: op_info.inputs(2) // mean: op_info.inputs(3) // variance or inverse of variance: op_info.inputs(4) - ConvolutionDimensions dims = OpDimensionsFromInputs( - op_info.inputs(1).shape(), op_info, &found_unknown_shapes); + TF_ASSIGN_OR_RETURN(ConvolutionDimensions dims, + OpDimensionsFromInputs(op_info.inputs(1).shape(), op_info, + &found_unknown_shapes)); int64 ops = 0; const auto rsqrt_cost = Eigen::internal::functor_traits< @@ -2646,27 +2678,42 @@ Status OpLevelCostEstimator::PredictCropAndResize(const OpContext& op_context, // calculation differs from rough estimate in implementation, as it separates // out cost per box from cost per pixel and cost per element. + // Since crop arguments are user controlled, check for overflow. + int64_t crop_area = MultiplyWithoutOverflow(crop_height, crop_width); + if (crop_area < 0) + return errors::InvalidArgument("Cannot estimate cost, multiplying ", + crop_height, " with ", crop_width, + " would overflow"); + int64_t crop_volume = MultiplyWithoutOverflow(crop_area, num_boxes); + if (crop_volume < 0) + return errors::InvalidArgument("Cannot estimate cost, multiplying ", + crop_area, " with ", num_boxes, + " would overflow"); + int64_t crop_depth = MultiplyWithoutOverflow(crop_height, num_boxes); + if (crop_depth < 0) + return errors::InvalidArgument("Cannot estimate cost, multiplying ", + crop_height, " with ", num_boxes, + " would overflow"); + // Ops for variables height_scale and width_scale. 
int64 ops = (sub_cost * 6 + mul_cost * 2 + div_cost * 2) * num_boxes; // Ops for variable in_y. - ops += (mul_cost * 2 + sub_cost + add_cost) * crop_height * num_boxes; + ops += (mul_cost * 2 + sub_cost + add_cost) * crop_depth; // Ops for variable in_x (same computation across both branches). - ops += (mul_cost * 2 + sub_cost + add_cost) * crop_height * crop_width * - num_boxes; + ops += (mul_cost * 2 + sub_cost + add_cost) * crop_volume; // Specify op_cost based on the method. if (use_bilinear_interp) { // Ops for variables top_y_index, bottom_y_index, y_lerp. - ops += (floor_cost + ceil_cost + sub_cost) * crop_height * num_boxes; + ops += (floor_cost + ceil_cost + sub_cost) * crop_depth; // Ops for variables left_x, right_x, x_lerp; - ops += (floor_cost + ceil_cost + sub_cost) * crop_height * crop_width * - num_boxes; + ops += (floor_cost + ceil_cost + sub_cost) * crop_volume; // Ops for innermost loop across depth. ops += (cast_to_float_cost * 4 + add_cost * 3 + sub_cost * 3 + mul_cost * 3) * output_elements; } else /* method == "nearest" */ { // Ops for variables closest_x_index and closest_y_index. - ops += round_cost * 2 * crop_height * crop_width * num_boxes; + ops += round_cost * 2 * crop_volume; // Ops for innermost loop across depth. ops += cast_to_float_cost * output_elements; } diff --git a/tensorflow/core/grappler/costs/op_level_cost_estimator.h b/tensorflow/core/grappler/costs/op_level_cost_estimator.h index 54382927f7b904..3148de33fa9ba5 100644 --- a/tensorflow/core/grappler/costs/op_level_cost_estimator.h +++ b/tensorflow/core/grappler/costs/op_level_cost_estimator.h @@ -22,6 +22,7 @@ limitations under the License. #include "tensorflow/core/grappler/costs/op_context.h" #include "tensorflow/core/grappler/costs/op_performance_data.pb.h" #include "tensorflow/core/lib/core/status.h" +#include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/util/padding.h" namespace tensorflow { @@ -290,7 +291,7 @@ class OpLevelCostEstimator { bool* found_unknown_shapes); // For Pooling, FusedBatchNorm, and their grad ops. - static ConvolutionDimensions OpDimensionsFromInputs( + static StatusOr OpDimensionsFromInputs( const TensorShapeProto& original_image_shape, const OpInfo& op_info, bool* found_unknown_shapes); diff --git a/tensorflow/core/grappler/costs/op_level_cost_estimator_test.cc b/tensorflow/core/grappler/costs/op_level_cost_estimator_test.cc index 23373d3dc1b629..eda84ec3276001 100644 --- a/tensorflow/core/grappler/costs/op_level_cost_estimator_test.cc +++ b/tensorflow/core/grappler/costs/op_level_cost_estimator_test.cc @@ -24,6 +24,7 @@ limitations under the License. 
#include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/framework/types.h" +#include "tensorflow/core/platform/status_matchers.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/protobuf/device_properties.pb.h" @@ -558,9 +559,10 @@ class OpLevelCostEstimatorTest : public ::testing::Test { } bool found_unknown_shapes; - auto dims = OpLevelCostEstimator::OpDimensionsFromInputs( - op_context.op_info.inputs(0).shape(), op_context.op_info, - &found_unknown_shapes); + TF_ASSERT_OK_AND_ASSIGN( + auto dims, OpLevelCostEstimator::OpDimensionsFromInputs( + op_context.op_info.inputs(0).shape(), op_context.op_info, + &found_unknown_shapes)); Padding padding_enum; if (padding == "VALID") { padding_enum = Padding::VALID; @@ -581,6 +583,38 @@ class OpLevelCostEstimatorTest : public ::testing::Test { EXPECT_EQ(padding_enum, dims.padding); } + StatusOr + CallOpDimensionsFromInputs(const int n, const int h, const int w, const int c, + const int kx, const int ky, const int sx, + const int sy, const string& data_format, + const string& padding) { + OpContext op_context; + + const std::vector x = {n, h, w, c}; + const std::vector ksize = {1, kx, ky, 1}; + std::vector strides; + if (data_format == "NHWC") { + strides = {1, sy, sx, 1}; + } else { + strides = {1, 1, sy, sx}; + } + + auto& op_info = op_context.op_info; + SetCpuDevice(&op_info); + op_info.set_op("MaxPool"); + + DescribeTensor4D(x[0], x[1], x[2], x[3], op_info.add_inputs()); + auto* attr = op_info.mutable_attr(); + SetAttrValue(data_format, &(*attr)["data_format"]); + SetAttrValue(padding, &(*attr)["padding"]); + SetAttrValue(strides, &(*attr)["strides"]); + SetAttrValue(ksize, &(*attr)["ksize"]); + bool found_unknown_shapes; + return OpLevelCostEstimator::OpDimensionsFromInputs( + op_context.op_info.inputs(0).shape(), op_context.op_info, + &found_unknown_shapes); + } + OpLevelCostEstimator estimator_; }; @@ -1383,6 +1417,26 @@ TEST_F(OpLevelCostEstimatorTest, OpDimensionsFromInputs) { } } +TEST_F(OpLevelCostEstimatorTest, OpDimensionsFromInputsError) { + std::vector paddings = {"VALID", "SAME"}; + std::vector formats = {"NHWC", "NCHW"}; + for (const auto& p : paddings) { + for (const auto& f : formats) { + // n, h, w, c, kx, ky, sx, sy, data_format, padding. + ASSERT_THAT( + CallOpDimensionsFromInputs(10, 14, 14, 3840, 3, 3, 0, 2, f, p), + testing::StatusIs( + error::INVALID_ARGUMENT, + "Stride must be > 0 for Height and Width, but got (2, 0)")); + ASSERT_THAT( + CallOpDimensionsFromInputs(10, 14, 14, 3840, 3, 3, 2, 0, f, p), + testing::StatusIs( + error::INVALID_ARGUMENT, + "Stride must be > 0 for Height and Width, but got (0, 2)")); + } + } +} + TEST_F(OpLevelCostEstimatorTest, PredictMaxPool) { auto predict_max_pool = [this](const int n, const int in, const int c, const int k, const int s, diff --git a/tensorflow/core/grappler/costs/utils.cc b/tensorflow/core/grappler/costs/utils.cc index c6bc7555d3d1a3..d48695c0793362 100644 --- a/tensorflow/core/grappler/costs/utils.cc +++ b/tensorflow/core/grappler/costs/utils.cc @@ -45,6 +45,7 @@ limitations under the License. 
#include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/protobuf/config.pb.h" #include "tensorflow/core/util/device_name_utils.h" +#include "tensorflow/core/util/overflow.h" namespace tensorflow { namespace grappler { @@ -217,7 +218,13 @@ int64 CalculateTensorSize(const OpInfo::TensorProperties& prop) { } int64 num_elems = TensorShape(shape).num_elements(); - return num_elems * size; + int64 tensor_size = MultiplyWithoutOverflow(num_elems, size); + if (tensor_size < 0) { + VLOG(1) << "Overflow encountered when computing tensor size, multiplying " + << num_elems << " with " << size; + return -1; + } + return tensor_size; } int64 CalculateOutputSize( diff --git a/tensorflow/core/grappler/costs/utils_test.cc b/tensorflow/core/grappler/costs/utils_test.cc index db5c11f0fe102d..6f6d3b2a14b0d9 100644 --- a/tensorflow/core/grappler/costs/utils_test.cc +++ b/tensorflow/core/grappler/costs/utils_test.cc @@ -202,6 +202,10 @@ TEST(UtilsTest, CalculateTensorSize) { EXPECT_EQ( DataTypeSize(DT_FLOAT) * 1 * 7 * 1 * 99, CalculateTensorSize(ShapeToTensorProperty({-1, 7, -1, 99}, DT_FLOAT))); + + // Test overflow + EXPECT_EQ(-1, CalculateTensorSize(ShapeToTensorProperty( + {4096, 4096, 4096, 33554432}, DT_FLOAT))); } TEST(UtilsTest, CalculateOutputSize) { diff --git a/tensorflow/core/grappler/mutable_graph_view.cc b/tensorflow/core/grappler/mutable_graph_view.cc index 5119acd6141270..4503c90cf466a9 100644 --- a/tensorflow/core/grappler/mutable_graph_view.cc +++ b/tensorflow/core/grappler/mutable_graph_view.cc @@ -68,6 +68,9 @@ bool IsIdentityConsumingSwitch(const MutableGraphView& graph, } NodeDef* input_node = graph.GetNode(tensor_id.node()); + if (input_node == nullptr) { + return false; + } return IsSwitch(*input_node); } return false; diff --git a/tensorflow/core/grappler/optimizers/auto_parallel.cc b/tensorflow/core/grappler/optimizers/auto_parallel.cc index a537fa256babba..e4e8009f9ccb20 100644 --- a/tensorflow/core/grappler/optimizers/auto_parallel.cc +++ b/tensorflow/core/grappler/optimizers/auto_parallel.cc @@ -152,7 +152,7 @@ Status AutoParallel::Initialize(const GrapplerItem& item) { TF_RETURN_IF_ERROR(ComputeTransitiveFanin(graph_, item.fetch, &train_nodes)); LOG(INFO) << "Number of training nodes: " << train_nodes.size(); - const NodeDef* dequeue_node; + const NodeDef* dequeue_node = nullptr; for (const auto& train_node : train_nodes) { if (IsDequeueOp(*train_node)) { dequeue_node = train_node; diff --git a/tensorflow/core/grappler/optimizers/auto_parallel_test.cc b/tensorflow/core/grappler/optimizers/auto_parallel_test.cc index 1c3186f1ee6e68..3af03a09613883 100644 --- a/tensorflow/core/grappler/optimizers/auto_parallel_test.cc +++ b/tensorflow/core/grappler/optimizers/auto_parallel_test.cc @@ -126,6 +126,30 @@ TEST_F(AutoParallelTest, SimpleParallel) { EXPECT_EQ("^AutoParallel-Control-Fetch", node_gradient.input(0)); } +TEST_F(AutoParallelTest, SimpleParallelNoDequeue) { + tensorflow::Scope s = tensorflow::Scope::DisabledShapeInferenceScope(); + Output constant_a = ops::Const(s.WithOpName("constant_a"), 1.0f, {1}); + Output constant_c = ops::Const(s.WithOpName("constant_c"), 1.0f, {1}); + Output constant_b = ops::Const(s.WithOpName("constant_b"), 1, {1}); + Output var = ops::Variable(s.WithOpName("var"), {1}, DT_FLOAT); + Output assign = ops::Assign(s.WithOpName("assign"), {var}, {constant_a}); + Output add = ops::AddN(s.WithOpName("add"), {constant_a, constant_c}); + Output learning_rate = ops::Const(s.WithOpName("learning_rate"), 0.01f, {1}); + Output apply_gradient = 
ops::ApplyGradientDescent( + s.WithOpName("apply_gradient"), {var}, {learning_rate}, {add}); + + GrapplerItem item; + item.init_ops.push_back("assign"); + item.fetch.push_back("apply_gradient"); + TF_CHECK_OK(s.ToGraphDef(&item.graph)); + + AutoParallel parallel(2); + GraphDef output; + Status status = parallel.Optimize(nullptr, item, &output); + TF_EXPECT_OK(status); +} + } // namespace } // namespace grappler } // namespace tensorflow
diff --git a/tensorflow/core/grappler/optimizers/constant_folding.cc b/tensorflow/core/grappler/optimizers/constant_folding.cc index df4cc54757134a..db88130b4afbc5 100644 --- a/tensorflow/core/grappler/optimizers/constant_folding.cc +++ b/tensorflow/core/grappler/optimizers/constant_folding.cc
@@ -1013,7 +1013,12 @@ bool ConstantFolding::IsFoldableUncached( } } for (const auto& output_prop : output_props) {
- const PartialTensorShape output_shape(output_prop.shape());
+ PartialTensorShape output_shape;
+ if (!PartialTensorShape::BuildPartialTensorShape(output_prop.shape(),
+ &output_shape)
+ .ok()) {
+ return false;
+ }
if (output_shape.IsFullyDefined()) { const int64 num_bytes = output_shape.num_elements() * DataTypeSize(output_prop.dtype());
@@ -1350,6 +1355,11 @@ Status ConstantFolding::EvaluateOneFoldable(const NodeDef& node, } TF_RETURN_IF_ERROR(CheckAttrExists(*input_node, "value")); const TensorProto& raw_val = input_node->attr().at("value").tensor();
+ if (IsRefType(raw_val.dtype())) {
+ return errors::InvalidArgument(
+ "Not allowed to construct a tensor with reference dtype, got ",
+ DataTypeString(raw_val.dtype()));
+ }
Tensor* value = new Tensor(raw_val.dtype(), raw_val.tensor_shape()); CHECK(value->FromProto(raw_val)) << "Unable to make Tensor from proto for " << node.name()
@@ -1665,15 +1675,21 @@ Status ConstantFolding::FoldGraph( return Status::OK(); }
-bool ConstantFolding::IsSimplifiableReshape(
+Status ConstantFolding::IsSimplifiableReshape(
const NodeDef& node, const GraphProperties& properties) const { if (!IsReshape(node)) {
- return false;
+ return errors::Internal("Node ", node.name(), " is not a Reshape node");
+ }
+ if (2 > node.input_size()) {
+ return errors::Internal("Node ", node.name(),
+ " must have at least 2 inputs but has ",
+ node.input_size());
}
- CHECK_LE(2, node.input_size());
const NodeDef* new_shape = node_map_->GetNode(node.input(1)); if (!IsReallyConstant(*new_shape)) {
- return false;
+ return errors::Internal("Node ", node.name(), " has shape ",
+ new_shape->DebugString(),
+ " which is not a constant");
}
TensorVector outputs; auto outputs_cleanup = gtl::MakeCleanup([&outputs] {
@@ -1684,22 +1700,29 @@ bool ConstantFolding::IsSimplifiableReshape( Status s = EvaluateNode(*new_shape, TensorVector(), &outputs); if (!s.ok()) {
- return false;
+ return errors::Internal("Could not evaluate node ", node.name());
+ }
+ if (outputs.size() != 1) {
+ return errors::Internal("Node ", node.name(),
+ " must have exactly 1 output but has ",
+ outputs.size());
}
- CHECK_EQ(1, outputs.size());
const std::vector<OpInfo::TensorProperties>& props = properties.GetInputProperties(node.name()); if (props.empty()) {
- return false;
+ return errors::Internal("Node ", node.name(), " has no properties");
}
const OpInfo::TensorProperties& prop = props[0]; if (prop.dtype() == DT_INVALID) {
- return false;
+ return errors::Internal("Node ", node.name(), " has property ",
+ prop.DebugString(), " with invalid dtype");
}
const PartialTensorShape shape(prop.shape()); if (!shape.IsFullyDefined()) {
- return false;
+ return
errors::Internal("Node ", node.name(), " has property ", + prop.DebugString(), " with shape ", + shape.DebugString(), " which is not fully defined"); } PartialTensorShape new_dims; @@ -1709,17 +1732,24 @@ bool ConstantFolding::IsSimplifiableReshape( int32 dim = outputs[0]->flat()(i); shp.push_back(dim); } - TF_CHECK_OK(TensorShapeUtils::MakeShape(shp, &new_dims)); + s = TensorShapeUtils::MakeShape(shp, &new_dims); + if (!s.ok()) return s; } else { std::vector shp; for (int i = 0; i < outputs[0]->NumElements(); ++i) { int64 dim = outputs[0]->flat()(i); shp.push_back(dim); } - TF_CHECK_OK(TensorShapeUtils::MakeShape(shp, &new_dims)); + s = TensorShapeUtils::MakeShape(shp, &new_dims); + if (!s.ok()) return s; } - return shape.IsCompatibleWith(new_dims); + if (!shape.IsCompatibleWith(new_dims)) { + return errors::Internal("Expected shape ", shape.DebugString(), + "to be compatible with ", new_dims.DebugString()); + } + + return Status::OK(); } #define IS_VALUE_CASE(DTYPE, VALUE) \ @@ -2905,7 +2935,7 @@ bool ConstantFolding::SimplifyReduction(GraphDef* optimized_graph, bool ConstantFolding::SimplifyReshape(const GraphProperties& properties, bool use_shape_info, NodeDef* node) { if (!use_shape_info || node->attr().count("T") == 0 || - !IsSimplifiableReshape(*node, properties)) { + !IsSimplifiableReshape(*node, properties).ok()) { return false; } DataType output_type = node->attr().at("T").type(); @@ -3454,6 +3484,9 @@ bool ConstantFolding::MulConvPushDown(GraphDef* optimized_graph, NodeDef* node, NodeDef* mul_left_child = node_map_->GetNode(node->input(0)); NodeDef* mul_right_child = node_map_->GetNode(node->input(1)); + if (mul_left_child == nullptr || mul_right_child == nullptr) { + return false; + } // One child must be constant, and the second must be Conv op. const bool left_child_is_constant = IsReallyConstant(*mul_left_child); const bool right_child_is_constant = IsReallyConstant(*mul_right_child); diff --git a/tensorflow/core/grappler/optimizers/constant_folding.h b/tensorflow/core/grappler/optimizers/constant_folding.h index 8462f002021998..0d16f1ade61c3b 100644 --- a/tensorflow/core/grappler/optimizers/constant_folding.h +++ b/tensorflow/core/grappler/optimizers/constant_folding.h @@ -132,8 +132,8 @@ class ConstantFolding : public GraphOptimizer { Status FoldGraph(const GraphProperties& properties, GraphDef* output, absl::flat_hash_set* nodes_to_not_simplify); - bool IsSimplifiableReshape(const NodeDef& node, - const GraphProperties& properties) const; + Status IsSimplifiableReshape(const NodeDef& node, + const GraphProperties& properties) const; Status SimplifyGraph(bool use_shape_info, GraphDef* optimized_graph, GraphProperties* properties, absl::flat_hash_set* nodes_to_not_simplify); diff --git a/tensorflow/core/grappler/optimizers/dependency_optimizer.cc b/tensorflow/core/grappler/optimizers/dependency_optimizer.cc index 1be7f2692e0f76..0f1bbb729edd23 100644 --- a/tensorflow/core/grappler/optimizers/dependency_optimizer.cc +++ b/tensorflow/core/grappler/optimizers/dependency_optimizer.cc @@ -75,8 +75,10 @@ bool DependencyOptimizer::SafeToRemoveIdentity(const NodeDef& node) const { } const NodeDef* input = node_map_->GetNode(NodeName(node.input(0))); - CHECK(input != nullptr) << "node = " << node.name() - << " input = " << node.input(0); + if (input == nullptr) { + VLOG(1) << "node = " << node.name() << " input = " << node.input(0); + return false; + } // Don't remove Identity nodes corresponding to Variable reads or following // Recv. 
diff --git a/tensorflow/core/kernels/assign_op.h b/tensorflow/core/kernels/assign_op.h index 74f926bdc88bf7..8aa56e2e29ed0b 100644 --- a/tensorflow/core/kernels/assign_op.h +++ b/tensorflow/core/kernels/assign_op.h
@@ -50,6 +50,12 @@ class AssignOp : public OpKernel { // We always return the input ref. context->forward_ref_input_to_ref_output(0, 0);
+ // Prevent copying uninitialized data, to solve harder to debug undefined
+ // behaviors that cannot be traced back to the original tensor.
+ OP_REQUIRES(
+ context, rhs.IsInitialized(),
+ errors::Internal("Right hand side of AssignOp is not initialized"));
+
// We can't always know how this value will be used downstream, so make // conservative assumptions in specifying constraints on the memory // allocation attributes, unless the Grappler graph analysis determined that
diff --git a/tensorflow/core/kernels/bincount_op.cc b/tensorflow/core/kernels/bincount_op.cc index 258266ab29d33f..5c2ee797e62cea 100644 --- a/tensorflow/core/kernels/bincount_op.cc +++ b/tensorflow/core/kernels/bincount_op.cc
@@ -235,6 +235,9 @@ class DenseBincountOp : public OpKernel { const Tensor& size_t = ctx->input(1); const Tensor& weights = ctx->input(2);
+ OP_REQUIRES(ctx, size_t.dims() == 0,
+ errors::InvalidArgument("Shape must be rank 0 but is rank ",
+ size_t.dims()));
Tidx size = size_t.scalar<Tidx>()(); OP_REQUIRES( ctx, size >= 0,
@@ -331,6 +334,9 @@ class SparseBincountOp : public OpKernel { const auto weights = ctx->input(4).flat<T>(); const int64 weights_size = weights.size();
+ OP_REQUIRES(ctx, size_t.dims() == 0,
+ errors::InvalidArgument("Shape must be rank 0 but is rank ",
+ size_t.dims()));
Tidx size = size_t.scalar<Tidx>()(); OP_REQUIRES( ctx, size >= 0,
@@ -364,6 +370,16 @@ class SparseBincountOp : public OpKernel { for (int64 i = 0; i < indices_mat.dimension(0); ++i) { const int64 batch = indices_mat(i, 0); const Tidx bin = values(i);
+ OP_REQUIRES(
+ ctx, batch < out.dimension(0),
+ errors::InvalidArgument("Index out of bound. `batch` (", batch,
+ ") must be less than the dimension size (",
+ out.dimension(0), ")."));
+ OP_REQUIRES(
+ ctx, bin < out.dimension(1),
+ errors::InvalidArgument("Index out of bound. `bin` (", bin,
+ ") must be less than the dimension size (",
+ out.dimension(1), ")."));
if (bin < size) { if (binary_output_) { out(batch, bin) = T(1);
@@ -411,6 +427,9 @@ class RaggedBincountOp : public OpKernel { const auto weights = ctx->input(3).flat<T>(); const int64 weights_size = weights.size();
+ OP_REQUIRES(ctx, size_t.dims() == 0,
+ errors::InvalidArgument("Shape must be rank 0 but is rank ",
+ size_t.dims()));
Tidx size = size_t.scalar<Tidx>()(); OP_REQUIRES( ctx, size >= 0,
diff --git a/tensorflow/core/kernels/boosted_trees/prediction_ops.cc b/tensorflow/core/kernels/boosted_trees/prediction_ops.cc index 008962c33ecb10..3da2efd3530432 100644 --- a/tensorflow/core/kernels/boosted_trees/prediction_ops.cc +++ b/tensorflow/core/kernels/boosted_trees/prediction_ops.cc @@ -37,7 +37,7 @@ limitations under the License.
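The bincount_op.cc hunks above bound `batch` and `bin` before either is used to index the output matrix; without those checks a crafted sparse input produces an out-of-bounds write. The failure mode being closed off, reduced to a sketch (safe_bincount is a hypothetical stand-in, not the kernel code):

    #include <cstdio>
    #include <vector>

    // Counts values into `size` bins, rejecting out-of-range indices up
    // front instead of writing past the end of the output buffer.
    bool safe_bincount(const std::vector<int>& values, int size,
                       std::vector<int>* out) {
      out->assign(size, 0);
      for (int v : values) {
        if (v < 0 || v >= size) return false;  // would be an OOB write
        ++(*out)[v];
      }
      return true;
    }

    int main() {
      std::vector<int> out;
      std::printf("%d\n", safe_bincount({0, 1, 1, 3}, 4, &out));  // 1: ok
      std::printf("%d\n", safe_bincount({0, 7}, 4, &out));        // 0: rejected
    }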
namespace tensorflow { static void ConvertVectorsToMatrices( - const OpInputList bucketized_features_list, + OpKernelContext* const context, const OpInputList bucketized_features_list, std::vector::ConstMatrix>& bucketized_features) { for (const Tensor& tensor : bucketized_features_list) { if (tensor.dims() == 1) { @@ -45,6 +45,10 @@ static void ConvertVectorsToMatrices( bucketized_features.emplace_back( TTypes::ConstMatrix(v.data(), v.size(), 1)); } else { + OP_REQUIRES(context, TensorShapeUtils::IsMatrix(tensor.shape()), + errors::Internal("Cannot use tensor as matrix, expected " + "vector or matrix, received shape ", + tensor.shape().DebugString())); bucketized_features.emplace_back(tensor.matrix()); } } @@ -58,6 +62,9 @@ class BoostedTreesTrainingPredictOp : public OpKernel { public: explicit BoostedTreesTrainingPredictOp(OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr("num_bucketized_features", &num_bucketized_features_)); OP_REQUIRES_OK(context, @@ -76,17 +83,26 @@ class BoostedTreesTrainingPredictOp : public OpKernel { &bucketized_features_list)); std::vector::ConstMatrix> bucketized_features; bucketized_features.reserve(bucketized_features_list.size()); - ConvertVectorsToMatrices(bucketized_features_list, bucketized_features); + ConvertVectorsToMatrices(context, bucketized_features_list, + bucketized_features); const int batch_size = bucketized_features[0].dimension(0); const Tensor* cached_tree_ids_t; OP_REQUIRES_OK(context, context->input("cached_tree_ids", &cached_tree_ids_t)); + OP_REQUIRES(context, TensorShapeUtils::IsVector(cached_tree_ids_t->shape()), + errors::InvalidArgument( + "cached_tree_ids must be a vector, received shape ", + cached_tree_ids_t->shape().DebugString())); const auto cached_tree_ids = cached_tree_ids_t->vec(); const Tensor* cached_node_ids_t; OP_REQUIRES_OK(context, context->input("cached_node_ids", &cached_node_ids_t)); + OP_REQUIRES(context, TensorShapeUtils::IsVector(cached_node_ids_t->shape()), + errors::InvalidArgument( + "cached_node_ids must be a vector, received shape ", + cached_node_ids_t->shape().DebugString())); const auto cached_node_ids = cached_node_ids_t->vec(); // Allocate outputs. @@ -118,9 +134,9 @@ class BoostedTreesTrainingPredictOp : public OpKernel { output_partial_logits.setZero(); } else { output_tree_ids.setConstant(latest_tree); - auto do_work = [&resource, &bucketized_features, &cached_tree_ids, - &cached_node_ids, &output_partial_logits, - &output_node_ids, latest_tree, + auto do_work = [&context, &resource, &bucketized_features, + &cached_tree_ids, &cached_node_ids, + &output_partial_logits, &output_node_ids, latest_tree, this](int64 start, int64 end) { for (int32 i = start; i < end; ++i) { int32 tree_id = cached_tree_ids(i); @@ -138,7 +154,11 @@ class BoostedTreesTrainingPredictOp : public OpKernel { // node's value. The following logic handles both of these cases. 
const auto& node_logits = resource->node_value(tree_id, node_id); if (!node_logits.empty()) { - DCHECK_EQ(node_logits.size(), logits_dimension_); + OP_REQUIRES( + context, node_logits.size() == logits_dimension_, + errors::Internal( + "Expected node_logits.size() == logits_dimension_, got ", + node_logits.size(), " vs ", logits_dimension_)); for (int32 j = 0; j < logits_dimension_; ++j) { partial_tree_logits[j] -= node_logits[j]; } @@ -151,7 +171,11 @@ class BoostedTreesTrainingPredictOp : public OpKernel { while (true) { if (resource->is_leaf(tree_id, node_id)) { const auto& leaf_logits = resource->node_value(tree_id, node_id); - DCHECK_EQ(leaf_logits.size(), logits_dimension_); + OP_REQUIRES( + context, leaf_logits.size() == logits_dimension_, + errors::Internal( + "Expected leaf_logits.size() == logits_dimension_, got ", + leaf_logits.size(), " vs ", logits_dimension_)); // Tree is done const float tree_weight = resource->GetTreeWeight(tree_id); for (int32 j = 0; j < logits_dimension_; ++j) { @@ -201,6 +225,9 @@ class BoostedTreesPredictOp : public OpKernel { public: explicit BoostedTreesPredictOp(OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr("num_bucketized_features", &num_bucketized_features_)); OP_REQUIRES_OK(context, @@ -219,7 +246,8 @@ class BoostedTreesPredictOp : public OpKernel { &bucketized_features_list)); std::vector::ConstMatrix> bucketized_features; bucketized_features.reserve(bucketized_features_list.size()); - ConvertVectorsToMatrices(bucketized_features_list, bucketized_features); + ConvertVectorsToMatrices(context, bucketized_features_list, + bucketized_features); const int batch_size = bucketized_features[0].dimension(0); // Allocate outputs. @@ -236,8 +264,8 @@ class BoostedTreesPredictOp : public OpKernel { } const int32 last_tree = resource->num_trees() - 1; - auto do_work = [&resource, &bucketized_features, &output_logits, last_tree, - this](int64 start, int64 end) { + auto do_work = [&context, &resource, &bucketized_features, &output_logits, + last_tree, this](int64_t start, int64_t end) { for (int32 i = start; i < end; ++i) { std::vector tree_logits(logits_dimension_, 0.0); int32 tree_id = 0; @@ -246,7 +274,11 @@ class BoostedTreesPredictOp : public OpKernel { if (resource->is_leaf(tree_id, node_id)) { const float tree_weight = resource->GetTreeWeight(tree_id); const auto& leaf_logits = resource->node_value(tree_id, node_id); - DCHECK_EQ(leaf_logits.size(), logits_dimension_); + OP_REQUIRES( + context, leaf_logits.size() == logits_dimension_, + errors::Internal( + "Expected leaf_logits.size() == logits_dimension_, got ", + leaf_logits.size(), " vs ", logits_dimension_)); for (int32 j = 0; j < logits_dimension_; ++j) { tree_logits[j] += tree_weight * leaf_logits[j]; } @@ -298,6 +330,9 @@ class BoostedTreesExampleDebugOutputsOp : public OpKernel { explicit BoostedTreesExampleDebugOutputsOp( OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. 
Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr("num_bucketized_features", &num_bucketized_features_)); OP_REQUIRES_OK(context, @@ -319,7 +354,8 @@ class BoostedTreesExampleDebugOutputsOp : public OpKernel { &bucketized_features_list)); std::vector::ConstMatrix> bucketized_features; bucketized_features.reserve(bucketized_features_list.size()); - ConvertVectorsToMatrices(bucketized_features_list, bucketized_features); + ConvertVectorsToMatrices(context, bucketized_features_list, + bucketized_features); const int batch_size = bucketized_features[0].dimension(0); // We need to get the feature ids used for splitting and the logits after @@ -339,14 +375,16 @@ class BoostedTreesExampleDebugOutputsOp : public OpKernel { // features used to split and the associated logits at each point along the // path. Note: feature_ids has one less value than logits_path because the // first value of each logit path will be the bias. - auto do_work = [&resource, &bucketized_features, &output_debug_info, - last_tree](int64 start, int64 end) { + auto do_work = [&context, &resource, &bucketized_features, + &output_debug_info, last_tree](int64_t start, int64_t end) { for (int32 i = start; i < end; ++i) { // Proto to store debug outputs, per example. boosted_trees::DebugOutput example_debug_info; // Initial bias prediction. E.g., prediction based off training mean. const auto& tree_logits = resource->node_value(0, 0); - DCHECK_EQ(tree_logits.size(), 1); + OP_REQUIRES(context, tree_logits.size() == 1, + errors::Internal("Expected tree_logits.size() == 1, got ", + tree_logits.size())); float tree_logit = resource->GetTreeWeight(0) * tree_logits[0]; example_debug_info.add_logits_path(tree_logit); int32 node_id = 0; @@ -372,7 +410,10 @@ class BoostedTreesExampleDebugOutputsOp : public OpKernel { node_id = resource->next_node(tree_id, node_id, i, bucketized_features); const auto& tree_logits = resource->node_value(tree_id, node_id); - DCHECK_EQ(tree_logits.size(), 1); + OP_REQUIRES( + context, tree_logits.size() == 1, + errors::Internal("Expected tree_logits.size() == 1, got ", + tree_logits.size())); tree_logit = resource->GetTreeWeight(tree_id) * tree_logits[0]; // Output logit incorporates sum of leaf logits from prior trees. example_debug_info.add_logits_path(tree_logit + past_trees_logit); diff --git a/tensorflow/core/kernels/boosted_trees/quantile_ops.cc b/tensorflow/core/kernels/boosted_trees/quantile_ops.cc index 0065bdd66aa708..5d4fd8c6778ff8 100644 --- a/tensorflow/core/kernels/boosted_trees/quantile_ops.cc +++ b/tensorflow/core/kernels/boosted_trees/quantile_ops.cc @@ -98,6 +98,9 @@ class BoostedTreesCreateQuantileStreamResourceOp : public OpKernel { explicit BoostedTreesCreateQuantileStreamResourceOp( OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr(kMaxElementsName, &max_elements_)); } @@ -108,6 +111,10 @@ class BoostedTreesCreateQuantileStreamResourceOp : public OpKernel { // disallowed. 
const Tensor* epsilon_t; OP_REQUIRES_OK(context, context->input(kEpsilonName, &epsilon_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(epsilon_t->shape()), + errors::InvalidArgument( + "epsilon must be a scalar, got a tensor of shape ", + epsilon_t->shape().DebugString())); float epsilon = epsilon_t->scalar()(); OP_REQUIRES( context, epsilon > 0, @@ -115,7 +122,14 @@ class BoostedTreesCreateQuantileStreamResourceOp : public OpKernel { const Tensor* num_streams_t; OP_REQUIRES_OK(context, context->input(kNumStreamsName, &num_streams_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(num_streams_t->shape()), + errors::InvalidArgument( + "num_streams must be a scalar, got a tensor of shape ", + num_streams_t->shape().DebugString())); int64 num_streams = num_streams_t->scalar()(); + OP_REQUIRES(context, num_streams >= 0, + errors::InvalidArgument( + "Num_streams input cannot be a negative integer")); auto result = new QuantileStreamResource(epsilon, max_elements_, num_streams); @@ -140,6 +154,9 @@ class BoostedTreesMakeQuantileSummariesOp : public OpKernel { explicit BoostedTreesMakeQuantileSummariesOp( OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr(kNumFeaturesName, &num_features_)); } @@ -153,7 +170,8 @@ class BoostedTreesMakeQuantileSummariesOp : public OpKernel { const Tensor* example_weights_t; OP_REQUIRES_OK(context, context->input(kExampleWeightsName, &example_weights_t)); - DCHECK(float_features_list.size() > 0) << "Got empty feature list"; + OP_REQUIRES(context, float_features_list.size() > 0, + errors::Internal("Got empty feature list")); auto example_weights = example_weights_t->flat(); const int64 weight_size = example_weights.size(); const int64 batch_size = float_features_list[0].flat().size(); @@ -163,6 +181,10 @@ class BoostedTreesMakeQuantileSummariesOp : public OpKernel { "Weights should be a single value or same size as features."))); const Tensor* epsilon_t; OP_REQUIRES_OK(context, context->input(kEpsilonName, &epsilon_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(epsilon_t->shape()), + errors::InvalidArgument( + "epsilon must be a scalar, got a tensor of shape ", + epsilon_t->shape().DebugString())); float epsilon = epsilon_t->scalar()(); OpOutputList summaries_output_list; @@ -187,7 +209,8 @@ class BoostedTreesMakeQuantileSummariesOp : public OpKernel { context, summaries_output_list.allocate( index, - TensorShape({static_cast(summary_entry_list.size()), 4}), + TensorShape( + {static_cast(summary_entry_list.size()), 4}), &output_t)); auto output = output_t->matrix(); for (auto row = 0; row < summary_entry_list.size(); row++) { @@ -220,6 +243,9 @@ class BoostedTreesFlushQuantileSummariesOp : public OpKernel { explicit BoostedTreesFlushQuantileSummariesOp( OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. 
Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr(kNumFeaturesName, &num_features_)); } @@ -279,7 +305,11 @@ class BoostedTreesQuantileStreamResourceAddSummariesOp : public OpKernel { public: explicit BoostedTreesQuantileStreamResourceAddSummariesOp( OpKernelConstruction* const context) - : OpKernel(context) {} + : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; + } void Compute(OpKernelContext* context) override { ResourceHandle handle; @@ -295,7 +325,10 @@ class BoostedTreesQuantileStreamResourceAddSummariesOp : public OpKernel { OP_REQUIRES_OK(context, context->input_list(kSummariesName, &summaries_list)); int32 num_streams = stream_resource->num_streams(); - CHECK_EQ(static_cast(num_streams), summaries_list.size()); + OP_REQUIRES( + context, num_streams == summaries_list.size(), + errors::Internal("Expected num_streams == summaries_list.size(), got ", + num_streams, " vs ", summaries_list.size())); auto do_quantile_add_summary = [&](const int64 begin, const int64 end) { // Iterating all features. @@ -310,7 +343,10 @@ class BoostedTreesQuantileStreamResourceAddSummariesOp : public OpKernel { const auto summary_values = summaries.matrix(); const auto& tensor_shape = summaries.shape(); const int64 entries_size = tensor_shape.dim_size(0); - CHECK_EQ(tensor_shape.dim_size(1), 4); + OP_REQUIRES( + context, tensor_shape.dim_size(1) == 4, + errors::Internal("Expected tensor_shape.dim_size(1) == 4, got ", + tensor_shape.dim_size(1))); std::vector summary_entries; summary_entries.reserve(entries_size); for (int64 i = 0; i < entries_size; i++) { @@ -343,6 +379,9 @@ class BoostedTreesQuantileStreamResourceDeserializeOp : public OpKernel { explicit BoostedTreesQuantileStreamResourceDeserializeOp( OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr(kNumStreamsName, &num_features_)); } @@ -362,6 +401,12 @@ class BoostedTreesQuantileStreamResourceDeserializeOp : public OpKernel { // Iterating over all streams. for (int64 stream_idx = begin; stream_idx < end; stream_idx++) { const Tensor& bucket_boundaries_t = bucket_boundaries_list[stream_idx]; + OP_REQUIRES( + context, TensorShapeUtils::IsVector(bucket_boundaries_t.shape()), + errors::InvalidArgument("bucket boundaries for each stream must be " + "a vector, received shape ", + bucket_boundaries_t.shape().DebugString(), + " for stream ", stream_idx)); const auto& bucket_boundaries = bucket_boundaries_t.vec(); std::vector result; result.reserve(bucket_boundaries.size()); @@ -393,6 +438,9 @@ class BoostedTreesQuantileStreamResourceFlushOp : public OpKernel { explicit BoostedTreesQuantileStreamResourceFlushOp( OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. 
Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr(kGenerateQuantiles, &generate_quantiles_)); } @@ -409,6 +457,10 @@ class BoostedTreesQuantileStreamResourceFlushOp : public OpKernel { const Tensor* num_buckets_t; OP_REQUIRES_OK(context, context->input(kNumBucketsName, &num_buckets_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(num_buckets_t->shape()), + errors::InvalidArgument( + "num_buckets must be a scalar, got a tensor of shape ", + num_buckets_t->shape().DebugString())); const int64 num_buckets = num_buckets_t->scalar()(); const int64 num_streams = stream_resource->num_streams(); @@ -449,6 +501,9 @@ class BoostedTreesQuantileStreamResourceGetBucketBoundariesOp explicit BoostedTreesQuantileStreamResourceGetBucketBoundariesOp( OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr(kNumFeaturesName, &num_features_)); } @@ -463,7 +518,9 @@ class BoostedTreesQuantileStreamResourceGetBucketBoundariesOp mutex_lock l(*stream_resource->mutex()); const int64 num_streams = stream_resource->num_streams(); - CHECK_EQ(num_features_, num_streams); + OP_REQUIRES(context, num_streams == num_features_, + errors::Internal("Expected num_streams == num_features_, got ", + num_streams, " vs ", num_features_)); OpOutputList bucket_boundaries_list; OP_REQUIRES_OK(context, context->output_list(kBucketBoundariesName, &bucket_boundaries_list)); @@ -473,10 +530,10 @@ class BoostedTreesQuantileStreamResourceGetBucketBoundariesOp for (int64 stream_idx = begin; stream_idx < end; stream_idx++) { const auto& boundaries = stream_resource->boundaries(stream_idx); Tensor* bucket_boundaries_t = nullptr; - OP_REQUIRES_OK(context, - bucket_boundaries_list.allocate( - stream_idx, {static_cast(boundaries.size())}, - &bucket_boundaries_t)); + OP_REQUIRES_OK( + context, bucket_boundaries_list.allocate( + stream_idx, {static_cast(boundaries.size())}, + &bucket_boundaries_t)); auto* quantiles_flat = bucket_boundaries_t->flat().data(); memcpy(quantiles_flat, boundaries.data(), sizeof(float) * boundaries.size()); @@ -507,6 +564,9 @@ class BoostedTreesBucketizeOp : public OpKernel { public: explicit BoostedTreesBucketizeOp(OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr(kNumFeaturesName, &num_features_)); } diff --git a/tensorflow/core/kernels/boosted_trees/resource_ops.cc b/tensorflow/core/kernels/boosted_trees/resource_ops.cc index ac1fb5652da5f9..2e55efb19dc597 100644 --- a/tensorflow/core/kernels/boosted_trees/resource_ops.cc +++ b/tensorflow/core/kernels/boosted_trees/resource_ops.cc @@ -36,23 +36,38 @@ REGISTER_KERNEL_BUILDER( class BoostedTreesCreateEnsembleOp : public OpKernel { public: explicit BoostedTreesCreateEnsembleOp(OpKernelConstruction* context) - : OpKernel(context) {} + : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; + } void Compute(OpKernelContext* context) override { // Get the stamp token. 
const Tensor* stamp_token_t; OP_REQUIRES_OK(context, context->input("stamp_token", &stamp_token_t));
+ OP_REQUIRES(context, TensorShapeUtils::IsScalar(stamp_token_t->shape()),
+ errors::InvalidArgument(
+ "stamp_token must be a scalar, got a tensor of shape ",
+ stamp_token_t->shape().DebugString()));
int64 stamp_token = stamp_token_t->scalar<int64>()(); // Get the tree ensemble proto. const Tensor* tree_ensemble_serialized_t; OP_REQUIRES_OK(context, context->input("tree_ensemble_serialized", &tree_ensemble_serialized_t));
+ OP_REQUIRES(
+ context,
+ TensorShapeUtils::IsScalar(tree_ensemble_serialized_t->shape()),
+ errors::InvalidArgument(
+ "tree_ensemble_serialized must be a scalar, got a tensor of shape ",
+ tree_ensemble_serialized_t->shape().DebugString()));
std::unique_ptr<BoostedTreesEnsembleResource> result( new BoostedTreesEnsembleResource()); if (!result->InitFromSerialized( tree_ensemble_serialized_t->scalar<tstring>()(), stamp_token)) { result->Unref();
+ result.release(); // Needed due to the `->Unref` above, to prevent UAF
OP_REQUIRES( context, false, errors::InvalidArgument("Unable to parse tree ensemble proto."));
@@ -75,7 +90,11 @@ REGISTER_KERNEL_BUILDER(Name("BoostedTreesCreateEnsemble").Device(DEVICE_CPU), class BoostedTreesGetEnsembleStatesOp : public OpKernel { public: explicit BoostedTreesGetEnsembleStatesOp(OpKernelConstruction* context)
- : OpKernel(context) {}
+ : OpKernel(context) {
+ VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use "
+ << "TensorFlow Decision Forests instead "
+ << "(https://github.com/tensorflow/decision-forests).\n";
+ }
void Compute(OpKernelContext* context) override { // Looks up the resource.
@@ -138,7 +157,11 @@ REGISTER_KERNEL_BUILDER( class BoostedTreesSerializeEnsembleOp : public OpKernel { public: explicit BoostedTreesSerializeEnsembleOp(OpKernelConstruction* context)
- : OpKernel(context) {}
+ : OpKernel(context) {
+ VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use "
+ << "TensorFlow Decision Forests instead "
+ << "(https://github.com/tensorflow/decision-forests).\n";
+ }
void Compute(OpKernelContext* context) override { core::RefCountPtr<BoostedTreesEnsembleResource> tree_ensemble_resource;
@@ -165,7 +188,11 @@ REGISTER_KERNEL_BUILDER( class BoostedTreesDeserializeEnsembleOp : public OpKernel { public: explicit BoostedTreesDeserializeEnsembleOp(OpKernelConstruction* context)
- : OpKernel(context) {}
+ : OpKernel(context) {
+ VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use "
+ << "TensorFlow Decision Forests instead "
+ << "(https://github.com/tensorflow/decision-forests).\n";
+ }
void Compute(OpKernelContext* context) override { core::RefCountPtr<BoostedTreesEnsembleResource> tree_ensemble_resource;
@@ -176,12 +203,22 @@ class BoostedTreesDeserializeEnsembleOp : public OpKernel { // Get the stamp token. const Tensor* stamp_token_t; OP_REQUIRES_OK(context, context->input("stamp_token", &stamp_token_t));
+ OP_REQUIRES(context, TensorShapeUtils::IsScalar(stamp_token_t->shape()),
+ errors::InvalidArgument(
+ "stamp_token must be a scalar, got a tensor of shape ",
+ stamp_token_t->shape().DebugString()));
int64 stamp_token = stamp_token_t->scalar<int64>()(); // Get the tree ensemble proto.
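On the result.release() added above: Unref() already destroys the ensemble object once its refcount reaches zero, so the std::unique_ptr must be told to forget the pointer or its destructor frees the same memory a second time. A minimal reproduction of that hazard (toy RefCounted type, not the TensorFlow class):

    #include <cstdio>
    #include <memory>

    // Minimal ref-counted object: Unref() deletes `this` at refcount zero.
    struct RefCounted {
      int refs = 1;
      void Unref() { if (--refs == 0) delete this; }
    };

    int main() {
      std::unique_ptr<RefCounted> r(new RefCounted);
      r->Unref();   // object is destroyed here...
      r.release();  // ...so the unique_ptr must forget it, or its destructor
                    // would free the same memory twice (use-after-free).
      std::printf("no double free\n");
    }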
const Tensor* tree_ensemble_serialized_t; OP_REQUIRES_OK(context, context->input("tree_ensemble_serialized", &tree_ensemble_serialized_t)); + OP_REQUIRES( + context, + TensorShapeUtils::IsScalar(tree_ensemble_serialized_t->shape()), + errors::InvalidArgument( + "tree_ensemble_serialized must be a scalar, got a tensor of shape ", + tree_ensemble_serialized_t->shape().DebugString())); // Deallocate all the previous objects on the resource. tree_ensemble_resource->Reset(); OP_REQUIRES( diff --git a/tensorflow/core/kernels/boosted_trees/stats_ops.cc b/tensorflow/core/kernels/boosted_trees/stats_ops.cc index 851e5b78e847b7..9090876afc8681 100644 --- a/tensorflow/core/kernels/boosted_trees/stats_ops.cc +++ b/tensorflow/core/kernels/boosted_trees/stats_ops.cc @@ -14,6 +14,7 @@ limitations under the License. ==============================================================================*/ #include +#include #include #include "third_party/eigen3/Eigen/Core" @@ -22,6 +23,7 @@ limitations under the License. #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/kernels/boosted_trees/boosted_trees.pb.h" #include "tensorflow/core/kernels/boosted_trees/tree_helper.h" +#include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/logging.h" namespace tensorflow { @@ -43,6 +45,9 @@ class BoostedTreesCalculateBestGainsPerFeatureOp : public OpKernel { explicit BoostedTreesCalculateBestGainsPerFeatureOp( OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr("max_splits", &max_splits_)); OP_REQUIRES_OK(context, context->GetAttr("num_features", &num_features_)); } @@ -51,6 +56,16 @@ class BoostedTreesCalculateBestGainsPerFeatureOp : public OpKernel { // node_id_range const Tensor* node_id_range_t; OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t)); + OP_REQUIRES( + context, node_id_range_t->dims() == 1, + errors::InvalidArgument("node_id_range must be a rank 1 tensor, but " + "given node_id_range has dims of ", + node_id_range_t->dims())); + OP_REQUIRES(context, node_id_range_t->dim_size(0) == 2, + errors::InvalidArgument( + "node_id_range must be a rank 1 tensor with shape=[2], but " + "given node_id_range has shape ", + node_id_range_t->dim_size(0), " on its first dim")); const auto node_id_range = node_id_range_t->vec(); const int32 node_id_first = node_id_range(0); // inclusive const int32 node_id_last = node_id_range(1); // exclusive @@ -60,7 +75,10 @@ class BoostedTreesCalculateBestGainsPerFeatureOp : public OpKernel { &stats_summary_list)); const int64 num_buckets = stats_summary_list[0].dim_size(1); // Check for single logit: 1 gradient + 1 hessian value. 
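On the "1 gradient + 1 hessian" comment above: for logits dimension d, the per-bucket stats hold d gradient values plus either a diagonal (d values) or full (d * d values) hessian, which appears to be why the checks below require the inner dimension to be 2 in the single-logit op and 0 < hessian_dim <= logits_dim * logits_dim in the multi-class ops. A compile-time restatement of that bound (HessianDimOk is illustrative only):

    // Diagonal hessian: h == d; full hessian: h == d * d; anything outside
    // (0, d * d] means the stats tensor layout is inconsistent.
    constexpr bool HessianDimOk(int logits_dim, int hessian_dim) {
      return hessian_dim > 0 && hessian_dim <= logits_dim * logits_dim;
    }
    static_assert(HessianDimOk(3, 3), "diagonal hessian");
    static_assert(HessianDimOk(3, 9), "full hessian");
    static_assert(!HessianDimOk(3, 0), "stats tensor lacks hessian entries");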
- DCHECK_EQ(stats_summary_list[0].dim_size(2), 2);
+ OP_REQUIRES(context, stats_summary_list[0].dim_size(2) == 2,
+ errors::InvalidArgument("stats_summary_list[0] must have a last "
+ "dimension of size 2 (1 gradient + 1 "
+ "hessian), obtained: ",
+ stats_summary_list[0].dim_size(2)));
std::vector<TTypes<float, 3>::ConstTensor> stats_summary; stats_summary.reserve(stats_summary_list.size()); for (const auto& tensor : stats_summary_list) {
@@ -68,17 +86,33 @@ class BoostedTreesCalculateBestGainsPerFeatureOp : public OpKernel { } const Tensor* l1_t; OP_REQUIRES_OK(context, context->input("l1", &l1_t));
+ OP_REQUIRES(
+ context, TensorShapeUtils::IsScalar(l1_t->shape()),
+ errors::InvalidArgument("l1 must be a scalar, got a tensor of shape ",
+ l1_t->shape().DebugString()));
const auto l1 = l1_t->scalar<float>()(); const Tensor* l2_t; OP_REQUIRES_OK(context, context->input("l2", &l2_t));
+ OP_REQUIRES(
+ context, TensorShapeUtils::IsScalar(l2_t->shape()),
+ errors::InvalidArgument("l2 must be a scalar, got a tensor of shape ",
+ l2_t->shape().DebugString()));
const auto l2 = l2_t->scalar<float>()(); const Tensor* tree_complexity_t; OP_REQUIRES_OK(context, context->input("tree_complexity", &tree_complexity_t));
+ OP_REQUIRES(context, TensorShapeUtils::IsScalar(tree_complexity_t->shape()),
+ errors::InvalidArgument(
+ "tree_complexity must be a scalar, got a tensor of shape ",
+ tree_complexity_t->shape().DebugString()));
const auto tree_complexity = tree_complexity_t->scalar<float>()(); const Tensor* min_node_weight_t; OP_REQUIRES_OK(context, context->input("min_node_weight", &min_node_weight_t));
+ OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_node_weight_t->shape()),
+ errors::InvalidArgument(
+ "min_node_weight must be a scalar, got a tensor of shape ",
+ min_node_weight_t->shape().DebugString()));
const auto min_node_weight = min_node_weight_t->scalar<float>()(); // Allocate output lists of tensors:
@@ -236,6 +270,9 @@ class BoostedTreesCalculateBestFeatureSplitOp : public OpKernel { explicit BoostedTreesCalculateBestFeatureSplitOp( OpKernelConstruction* const context) : OpKernel(context) {
+ VLOG(1) << "Boosted Trees kernels in TF are deprecated.
Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr("logits_dimension", &logits_dim_)); OP_REQUIRES_OK(context, context->GetAttr("split_type", &split_type_)); } @@ -244,12 +281,22 @@ class BoostedTreesCalculateBestFeatureSplitOp : public OpKernel { // node_id_range const Tensor* node_id_range_t; OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t)); + OP_REQUIRES( + context, node_id_range_t->NumElements() == 2, + errors::InvalidArgument("node_id_range argument must have shape [2]")); + OP_REQUIRES(context, TensorShapeUtils::IsVector(node_id_range_t->shape()), + errors::InvalidArgument( + "node_id_range must be a vector, received shape ", + node_id_range_t->shape().DebugString())); const auto node_id_range = node_id_range_t->vec(); const int32 node_id_first = node_id_range(0); // inclusive const int32 node_id_last = node_id_range(1); // exclusive const Tensor* stats_summary_t; OP_REQUIRES_OK(context, context->input("stats_summary", &stats_summary_t)); + OP_REQUIRES( + context, stats_summary_t->shape().dims() == 4, + errors::InvalidArgument("stats_summary argument must have rank 4")); TTypes::ConstTensor stats_summary = stats_summary_t->tensor(); const int32 feature_dims = stats_summary_t->dim_size(1); @@ -257,31 +304,51 @@ class BoostedTreesCalculateBestFeatureSplitOp : public OpKernel { const int32 num_buckets = stats_summary_t->dim_size(2) - 1; const int32 logits_dim = logits_dim_; const int32 hessian_dim = stats_summary_t->dim_size(3) - logits_dim; - DCHECK_GT(hessian_dim, 0); - DCHECK_LE(hessian_dim, logits_dim * logits_dim); + OP_REQUIRES(context, hessian_dim > 0, + errors::InvalidArgument("hessian dim should be < 0, got ", + hessian_dim)); + OP_REQUIRES(context, hessian_dim <= logits_dim * logits_dim, + errors::InvalidArgument( + "hessian dim should be <= ", logits_dim * logits_dim, + " but got: ", hessian_dim)); const Tensor* l1_t; OP_REQUIRES_OK(context, context->input("l1", &l1_t)); + OP_REQUIRES(context, l1_t->NumElements() == 1, + errors::InvalidArgument("l1 argument must be a scalar")); const auto l1 = l1_t->scalar()(); - DCHECK_GE(l1, 0); + OP_REQUIRES(context, l1 >= 0, + errors::InvalidArgument("l1 = ", l1, " but it should be >= 0")); if (logits_dim_ > 1) { // Multi-class L1 regularization not supported yet. 
- DCHECK_EQ(l1, 0);
+ OP_REQUIRES(
+ context, l1 == 0,
+ errors::InvalidArgument(
+ "l1 != 0 is not yet supported for multi-class regularization"));
}
const Tensor* l2_t; OP_REQUIRES_OK(context, context->input("l2", &l2_t));
+ OP_REQUIRES(context, l2_t->NumElements() == 1,
+ errors::InvalidArgument("l2 argument must be a scalar"));
const auto l2 = l2_t->scalar<float>()();
- DCHECK_GE(l2, 0);
+ OP_REQUIRES(context, l2 >= 0,
+ errors::InvalidArgument("l2 = ", l2, " but it should be >= 0"));
const Tensor* tree_complexity_t; OP_REQUIRES_OK(context, context->input("tree_complexity", &tree_complexity_t));
+ OP_REQUIRES(
+ context, tree_complexity_t->NumElements() == 1,
+ errors::InvalidArgument("tree_complexity argument must be a scalar"));
const auto tree_complexity = tree_complexity_t->scalar<float>()(); const Tensor* min_node_weight_t; OP_REQUIRES_OK(context, context->input("min_node_weight", &min_node_weight_t));
+ OP_REQUIRES(
+ context, min_node_weight_t->NumElements() == 1,
+ errors::InvalidArgument("min_node_weight argument must be a scalar"));
const auto min_node_weight = min_node_weight_t->scalar<float>()(); std::vector<int32> output_node_ids;
@@ -290,7 +357,7 @@ std::vector<int32> output_thresholds; std::vector<float> output_left_node_contribs; std::vector<float> output_right_node_contribs;
- std::vector<string> output_split_types;
+ std::vector<std::string> output_split_types;
// TODO(tanzheny) parallelize the computation. // Iterate each node and find the best gain per node.
@@ -559,6 +626,9 @@ class BoostedTreesCalculateBestFeatureSplitV2 : public OpKernel { explicit BoostedTreesCalculateBestFeatureSplitV2( OpKernelConstruction* const context) : OpKernel(context) {
+ VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use "
+ << "TensorFlow Decision Forests instead "
+ << "(https://github.com/tensorflow/decision-forests).\n";
OP_REQUIRES_OK(context, context->GetAttr("logits_dimension", &logits_dim_)); OP_REQUIRES_OK(context, context->GetAttr("num_features", &num_features_)); }
@@ -567,6 +637,20 @@ // node_id_range const Tensor* node_id_range_t; OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t));
+ OP_REQUIRES(context, TensorShapeUtils::IsVector(node_id_range_t->shape()),
+ errors::InvalidArgument(
+ "node_id_range must be a vector, received shape ",
+ node_id_range_t->shape().DebugString()));
+ OP_REQUIRES(
+ context, node_id_range_t->dims() == 1,
+ errors::InvalidArgument("node_id_range must be a rank 1 tensor, but "
+ "given node_id_range has dims of ",
+ node_id_range_t->dims()));
+ OP_REQUIRES(context, node_id_range_t->dim_size(0) == 2,
+ errors::InvalidArgument(
+ "node_id_range must be a rank 1 tensor with shape=[2], but "
+ "given node_id_range has shape ",
+ node_id_range_t->dim_size(0), " on its first dim"));
const auto node_id_range = node_id_range_t->vec<int32>(); const int32 node_id_first = node_id_range(0); // Inclusive. const int32 node_id_last = node_id_range(1); // Exclusive.
@@ -577,19 +661,30 @@ OpInputList stats_summaries_list; OP_REQUIRES_OK(context, context->input_list("stats_summaries_list", &stats_summaries_list)); // Infer dimensions of a stats_summary.
- DCHECK_GT(stats_summaries_list.size(), 0);
+ OP_REQUIRES(
+ context, stats_summaries_list.size() > 0,
+ errors::InvalidArgument("Got an empty list for stats_summaries_list"));
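A note on the emptiness guard just above: a `size() >= 0` comparison can never fail, since a size is never negative (and for standard unsigned containers the comparison is tautological by type), so only `size() > 0` or `!empty()` actually catches the empty-list case. The pitfall in miniature:

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> xs;
      // size() returns an unsigned type, so `xs.size() >= 0` is true even
      // for an empty vector and rejects nothing; an emptiness check has to
      // be `!xs.empty()` or `xs.size() > 0`.
      std::printf("%d %d\n", int(xs.size() >= 0), int(!xs.empty()));  // 1 0
    }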
const int32 feature_dims = stats_summaries_list[0].dim_size(1); // The last bucket is for default/missing value. const int32 num_buckets = stats_summaries_list[0].dim_size(2) - 1; const int32 logits_dim = logits_dim_; const int32 hessian_dim = stats_summaries_list[0].dim_size(3) - logits_dim;
- DCHECK_GT(hessian_dim, 0);
- DCHECK_LE(hessian_dim, logits_dim * logits_dim);
+ OP_REQUIRES(context, hessian_dim > 0,
+ errors::InvalidArgument("hessian dim should be > 0, got ",
+ hessian_dim));
+ OP_REQUIRES(context, hessian_dim <= logits_dim * logits_dim,
+ errors::InvalidArgument(
+ "hessian dim should be <= ", logits_dim * logits_dim,
+ " but got: ", hessian_dim));
// Vector of stats_summaries; each element is stats for feature of shape // [max_splits, feature_dim, num_buckets, logits_dim + hessian_dim]. std::vector<TTypes<float, 4>::ConstTensor> stats_summaries;
- DCHECK_EQ(stats_summaries_list.size(), num_features_);
+ OP_REQUIRES(context, stats_summaries_list.size() == num_features_,
+ errors::InvalidArgument(
+ "Invalid stats_summaries_list size, got ",
+ stats_summaries_list.size(),
+ " but expected to match num_features ", num_features_));
stats_summaries.reserve(num_features_); for (const auto& tensor : stats_summaries_list) { stats_summaries.emplace_back(tensor.tensor<float, 4>()); }
@@ -598,8 +693,15 @@ // Split types. const Tensor* split_types_t; OP_REQUIRES_OK(context, context->input("split_types", &split_types_t));
+ OP_REQUIRES(
+ context, TensorShapeUtils::IsVector(split_types_t->shape()),
+ errors::InvalidArgument("split_types must be a vector, received shape ",
+ split_types_t->shape().DebugString()));
const auto split_types = split_types_t->vec<tstring>();
- DCHECK_EQ(split_types.size(), num_features_);
+ OP_REQUIRES(context, split_types.size() == num_features_,
+ errors::InvalidArgument(
+ "Invalid split_types size, got ", split_types.size(),
+ " but expected to match num_features ", num_features_));
// Validate. for (int i = 0; i < num_features_; ++i) { if (!(split_types(i) == kInequalitySplit ||
@@ -614,29 +716,59 @@ const Tensor* candidate_feature_ids_t; OP_REQUIRES_OK(context, context->input("candidate_feature_ids", &candidate_feature_ids_t));
+ OP_REQUIRES(context,
+ TensorShapeUtils::IsVector(candidate_feature_ids_t->shape()),
+ errors::InvalidArgument(
+ "candidate_feature_ids must be a vector, received shape ",
+ candidate_feature_ids_t->shape().DebugString()));
const auto candidate_feature_ids = candidate_feature_ids_t->vec<int32>();
- DCHECK_EQ(candidate_feature_ids.size(), num_features_);
+ OP_REQUIRES(context, candidate_feature_ids.size() == num_features_,
+ errors::InvalidArgument(
+ "Invalid candidate_feature_ids size, got ",
+ candidate_feature_ids.size(),
+ " but expected to match num_features ", num_features_));
// L1, L2, tree_complexity, min_node_weight. const Tensor* l1_t; OP_REQUIRES_OK(context, context->input("l1", &l1_t));
+ OP_REQUIRES(
+ context, TensorShapeUtils::IsScalar(l1_t->shape()),
+ errors::InvalidArgument("l1 must be a scalar, got a tensor of shape ",
+ l1_t->shape().DebugString()));
const auto l1 = l1_t->scalar<float>()();
- DCHECK_GE(l1, 0);
+ OP_REQUIRES(context, l1 >= 0,
+ errors::InvalidArgument("l1 = ", l1, " but it should be >= 0"));
if (logits_dim_ > 1) { // Multi-class L1 regularization not supported yet.
- DCHECK_EQ(l1, 0); + OP_REQUIRES( + context, l1 == 0, + errors::InvalidArgument( + "l1 != 0 is not yet supported for multi-class regularization")); } const Tensor* l2_t; OP_REQUIRES_OK(context, context->input("l2", &l2_t)); + OP_REQUIRES( + context, TensorShapeUtils::IsScalar(l2_t->shape()), + errors::InvalidArgument("l2 must be a scalar, got a tensor of shape ", + l2_t->shape().DebugString())); const auto l2 = l2_t->scalar()(); - DCHECK_GE(l2, 0); + OP_REQUIRES(context, l2 >= 0, + errors::InvalidArgument("l2 = ", l2, " but it should be >= 0")); const Tensor* tree_complexity_t; OP_REQUIRES_OK(context, context->input("tree_complexity", &tree_complexity_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(tree_complexity_t->shape()), + errors::InvalidArgument( + "tree_complexity must be a scalar, got a tensor of shape ", + tree_complexity_t->shape().DebugString())); const auto tree_complexity = tree_complexity_t->scalar()(); const Tensor* min_node_weight_t; OP_REQUIRES_OK(context, context->input("min_node_weight", &min_node_weight_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_node_weight_t->shape()), + errors::InvalidArgument( + "min_node_weight must be a scalar, got a tensor of shape ", + min_node_weight_t->shape().DebugString())); const auto min_node_weight = min_node_weight_t->scalar()(); std::vector output_node_ids; @@ -948,6 +1080,9 @@ class BoostedTreesSparseCalculateBestFeatureSplitOp : public OpKernel { explicit BoostedTreesSparseCalculateBestFeatureSplitOp( OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; // TODO(crawles): Using logits_dim_ for multi-class split. OP_REQUIRES_OK(context, context->GetAttr("logits_dimension", &logits_dim_)); // TODO(tanzheny): Using this for equality split. 
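The sparse best-split kernel below receives its stats as separate indices/values/shape tensors, so each component is shape-checked before any element is read. A generic sketch of that consistency validation for COO-style input (ValidateCoo is a hypothetical helper, not the kernel code):

    #include <cstdint>
    #include <vector>
    #include "absl/status/status.h"
    #include "absl/strings/str_cat.h"

    // COO sparse tensor: `indices` is [nnz][rank], `values` is [nnz],
    // `shape` is [rank]. All components must agree before dereferencing.
    absl::Status ValidateCoo(const std::vector<std::vector<int64_t>>& indices,
                             const std::vector<float>& values,
                             const std::vector<int64_t>& shape) {
      if (indices.size() != values.size())
        return absl::InvalidArgumentError(
            absl::StrCat("got ", indices.size(), " index rows but ",
                         values.size(), " values"));
      for (size_t i = 0; i < indices.size(); ++i) {
        if (indices[i].size() != shape.size())
          return absl::InvalidArgumentError("index rank != shape rank");
        for (size_t d = 0; d < shape.size(); ++d)
          if (indices[i][d] < 0 || indices[i][d] >= shape[d])
            return absl::InvalidArgumentError(
                absl::StrCat("index out of bounds at row ", i, ", dim ", d));
      }
      return absl::OkStatus();
    }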
@@ -958,44 +1093,83 @@ class BoostedTreesSparseCalculateBestFeatureSplitOp : public OpKernel { // node_id_range const Tensor* node_id_range_t; OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t));
+ OP_REQUIRES(context, TensorShapeUtils::IsVector(node_id_range_t->shape()),
+ errors::InvalidArgument(
+ "node_id_range must be a vector, got a tensor of shape ",
+ node_id_range_t->shape().DebugString()));
const auto node_id_range = node_id_range_t->vec<int32>();
+ OP_REQUIRES(
+ context, node_id_range.size() == 2,
+ errors::InvalidArgument("node_id_range should have 2 entries, got: ",
+ node_id_range.size()));
const int32 node_id_first = node_id_range(0); // inclusive const int32 node_id_last = node_id_range(1); // exclusive const Tensor* stats_summary_indices_t; OP_REQUIRES_OK(context, context->input("stats_summary_indices", &stats_summary_indices_t));
+ OP_REQUIRES(
+ context, TensorShapeUtils::IsMatrix(stats_summary_indices_t->shape()),
+ errors::InvalidArgument(
+ "stats_summary_indices must be a matrix, got a tensor of shape ",
+ stats_summary_indices_t->shape().DebugString()));
const auto stats_summary_indices = stats_summary_indices_t->matrix<int32>(); const int32 num_sparse_entries = stats_summary_indices_t->dim_size(0); const Tensor* stats_summary_values_t; OP_REQUIRES_OK(context, context->input("stats_summary_values", &stats_summary_values_t));
+ OP_REQUIRES(
+ context, TensorShapeUtils::IsVector(stats_summary_values_t->shape()),
+ errors::InvalidArgument(
+ "stats_summary_values must be a vector, got a tensor of shape ",
+ stats_summary_values_t->shape().DebugString()));
const auto stats_summary_values = stats_summary_values_t->vec<float>(); const Tensor* stats_summary_shape_t; OP_REQUIRES_OK( context, context->input("stats_summary_shape", &stats_summary_shape_t));
+ OP_REQUIRES(
+ context, TensorShapeUtils::IsVector(stats_summary_shape_t->shape()),
+ errors::InvalidArgument(
+ "stats_summary_shape must be a vector, got a tensor of shape ",
+ stats_summary_shape_t->shape().DebugString()));
const auto stats_summary_shape = stats_summary_shape_t->vec<int32>(); const int32 num_buckets = stats_summary_shape(2) - 1; const int32 stats_dims = stats_summary_shape(3); const Tensor* l1_t; OP_REQUIRES_OK(context, context->input("l1", &l1_t));
+ OP_REQUIRES(
+ context, TensorShapeUtils::IsScalar(l1_t->shape()),
+ errors::InvalidArgument("l1 must be a scalar, got a tensor of shape ",
+ l1_t->shape().DebugString()));
const auto l1 = l1_t->scalar<float>()(); const Tensor* l2_t; OP_REQUIRES_OK(context, context->input("l2", &l2_t));
+ OP_REQUIRES(
+ context, TensorShapeUtils::IsScalar(l2_t->shape()),
+ errors::InvalidArgument("l2 must be a scalar, got a tensor of shape ",
+ l2_t->shape().DebugString()));
const auto l2 = l2_t->scalar<float>()(); const Tensor* tree_complexity_t; OP_REQUIRES_OK(context, context->input("tree_complexity", &tree_complexity_t));
+ OP_REQUIRES(context, TensorShapeUtils::IsScalar(tree_complexity_t->shape()),
+ errors::InvalidArgument(
+ "tree_complexity must be a scalar, got a tensor of shape ",
+ tree_complexity_t->shape().DebugString()));
const auto tree_complexity = tree_complexity_t->scalar<float>()(); const Tensor* min_node_weight_t; OP_REQUIRES_OK(context, context->input("min_node_weight", &min_node_weight_t));
+ OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_node_weight_t->shape()),
+ errors::InvalidArgument(
+ "min_node_weight must be a scalar, got a tensor of shape ",
+ min_node_weight_t->shape().DebugString()));
const auto min_node_weight = min_node_weight_t->scalar<float>()(); std::vector<int32>
output_node_ids; @@ -1020,11 +1194,25 @@ class BoostedTreesSparseCalculateBestFeatureSplitOp : public OpKernel { f_map.clear(); } previous_node_id = node_id; - DCHECK_LE(node_id_first, node_id); - DCHECK_LT(node_id, node_id_last); + OP_REQUIRES( + context, node_id_first <= node_id && node_id < node_id_last, + errors::InvalidArgument("node_id = ", node_id, " which is not in [", + node_id_first, ", ", node_id_last, ")")); const int32 feature_dim = stats_summary_indices(idx, 1); const int32 bucket_id = stats_summary_indices(idx, 2); const int32 stat_dim = stats_summary_indices(idx, 3); + OP_REQUIRES(context, stat_dim < stats_dims, + errors::InvalidArgument( + "Stat dim, the sum of logits dim and hessian dim in " + "stats_summary_indices, cannot be greater than stats " + "dims, the last value in stats_summary_shape, which was ", + stats_dims, ". At index (", idx, + ", 4), stats_summary_indices contains value ", stat_dim)); + OP_REQUIRES(context, stat_dim >= 0, + errors::InvalidArgument( + "Stat dim, the sum of logits dim and hessian dim in " + "stats_summary_indices, should be >= 0, which was ", + stat_dim, " at index ", idx)); std::pair const& f_insert_result = f_map.insert( FeatureMapIterator::value_type(feature_dim, BucketMap())); auto& b_map = f_insert_result.first->second; @@ -1243,6 +1431,9 @@ class BoostedTreesMakeStatsSummaryOp : public OpKernel { public: explicit BoostedTreesMakeStatsSummaryOp(OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr("max_splits", &max_splits_)); OP_REQUIRES_OK(context, context->GetAttr("num_buckets", &num_buckets_)); OP_REQUIRES_OK(context, context->GetAttr("num_features", &num_features_)); @@ -1252,15 +1443,39 @@ class BoostedTreesMakeStatsSummaryOp : public OpKernel { // node_ids const Tensor* node_ids_t; OP_REQUIRES_OK(context, context->input("node_ids", &node_ids_t)); + OP_REQUIRES(context, TensorShapeUtils::IsVector(node_ids_t->shape()), + errors::InvalidArgument( + "node_ids must be a vector, got a tensor of shape ", + node_ids_t->shape().DebugString())); const auto node_ids = node_ids_t->vec(); // gradients const Tensor* gradients_t; OP_REQUIRES_OK(context, context->input("gradients", &gradients_t)); + OP_REQUIRES(context, TensorShapeUtils::IsMatrix(gradients_t->shape()), + errors::InvalidArgument( + "gradients must be a matrix, got a tensor of shape ", + gradients_t->shape().DebugString())); const auto gradients = gradients_t->matrix(); + OP_REQUIRES( + context, node_ids.size() == gradients.dimension(0), + errors::InvalidArgument( + "node_ids size should match 0th dim of gradients. node ids " + "size: ", + node_ids.size(), ", gradients dim0: ", gradients.dimension(0))); // hessians const Tensor* hessians_t; OP_REQUIRES_OK(context, context->input("hessians", &hessians_t)); + OP_REQUIRES(context, TensorShapeUtils::IsMatrix(hessians_t->shape()), + errors::InvalidArgument( + "hessians must be a matrix, got a tensor of shape ", + hessians_t->shape().DebugString())); const auto hessians = hessians_t->matrix(); + OP_REQUIRES( + context, node_ids.size() == hessians.dimension(0), + errors::InvalidArgument( + "node_ids size should match 0th dim of hessians. 
node ids " + "size: ", + node_ids.size(), ", hessians dim0: ", hessians.dimension(0))); // bucketized_features OpInputList bucketized_features_list; OP_REQUIRES_OK(context, context->input_list("bucketized_features_list", @@ -1280,6 +1495,11 @@ class BoostedTreesMakeStatsSummaryOp : public OpKernel { // Partition by node, and then bucketize. for (int feature_idx = 0; feature_idx < num_features_; ++feature_idx) { const auto& features = bucketized_features_list[feature_idx].vec(); + OP_REQUIRES( + context, features.size() == node_ids.size(), + errors::InvalidArgument("feature ", feature_idx, + " should have same size as node_ids, got ", + features.size(), " and ", node_ids.size())); for (int i = 0; i < batch_size; ++i) { const int32 node = node_ids(i); const int32 bucket = features(i); @@ -1311,6 +1531,9 @@ class BoostedTreesAggregateStatsOp : public OpKernel { public: explicit BoostedTreesAggregateStatsOp(OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr("max_splits", &max_splits_)); OP_REQUIRES_OK(context, context->GetAttr("num_buckets", &num_buckets_)); } @@ -1319,21 +1542,44 @@ class BoostedTreesAggregateStatsOp : public OpKernel { // node_ids. const Tensor* node_ids_t; OP_REQUIRES_OK(context, context->input("node_ids", &node_ids_t)); + OP_REQUIRES(context, TensorShapeUtils::IsVector(node_ids_t->shape()), + errors::InvalidArgument( + "node_ids must be a vector, got a tensor of shape ", + node_ids_t->shape().DebugString())); const auto node_ids = node_ids_t->vec(); // gradients. const Tensor* gradients_t; OP_REQUIRES_OK(context, context->input("gradients", &gradients_t)); + OP_REQUIRES(context, TensorShapeUtils::IsMatrix(gradients_t->shape()), + errors::InvalidArgument( + "gradients must be a matrix, got a tensor of shape ", + gradients_t->shape().DebugString())); const auto gradients = gradients_t->matrix(); + OP_REQUIRES( + context, node_ids.size() == gradients.dimension(0), + errors::InvalidArgument( + "node_ids size should match 0th dim of gradients. node ids " + "size: ", + node_ids.size(), ", gradients dim0: ", gradients.dimension(0))); + // hessians. const Tensor* hessians_t; OP_REQUIRES_OK(context, context->input("hessians", &hessians_t)); + OP_REQUIRES(context, TensorShapeUtils::IsMatrix(hessians_t->shape()), + errors::InvalidArgument( + "hessians must be a matrix, got a tensor of shape ", + hessians_t->shape().DebugString())); const auto hessians = hessians_t->matrix(); // feature. const Tensor* feature_t; OP_REQUIRES_OK(context, context->input("feature", &feature_t)); + OP_REQUIRES(context, TensorShapeUtils::IsMatrix(feature_t->shape()), + errors::InvalidArgument( + "feature must be a matrix, got a tensor of shape ", + feature_t->shape().DebugString())); const auto feature = feature_t->matrix(); // Infer batch size, feature dimension and stats dimension. 
@@ -1356,6 +1602,9 @@ class BoostedTreesAggregateStatsOp : public OpKernel { for (int i = 0; i < batch_size; ++i) { const int32 node = node_ids(i); + OP_REQUIRES(context, node >= 0, + errors::InvalidArgument( + "node_ids ", i, "th entry should be >=0, got: ", node)); for (int feature_dim = 0; feature_dim < feature_dims; ++feature_dim) { const int32 feature_value = feature(i, feature_dim); const int32 bucket = @@ -1486,7 +1735,8 @@ static void AddInstanceStatsToMap(const int32 instance, const int32 feature_dim, // Add statistics to StatsPartitionMap for bucket_id ranging from // (start_instance, start_feature_dim) to (end_instance, end_feature_dim), // inclusive on start and end instances, exclusive on end feature dim. -static void AddRangeStats(const int start_instance, const int end_instance, +static void AddRangeStats(OpKernelContext* const context, + const int start_instance, const int end_instance, const int start_feature_dim, const int end_feature_dim, StatsPartitionMap* stats_map, @@ -1495,9 +1745,15 @@ static void AddRangeStats(const int start_instance, const int end_instance, const TTypes::ConstVec& node_ids, const int32 feature_dims, const int32 bucket_id, const int32 logits_dims, const int32 stats_dims) { - DCHECK_LE(start_instance, end_instance); + OP_REQUIRES(context, start_instance <= end_instance, + errors::InvalidArgument( + "start_instance = ", start_instance, + " which is not at most end_instance=", end_instance)); if (start_instance == end_instance) { - DCHECK_LT(start_feature_dim, end_feature_dim); + OP_REQUIRES(context, start_feature_dim < end_feature_dim, + errors::InvalidArgument( + "start_feature_dim = ", start_feature_dim, + " which is not at most end_feature_dim=", end_feature_dim)); } for (int32 instance = start_instance; instance <= end_instance; ++instance) { const int32 start_f_dim = @@ -1516,6 +1772,9 @@ class BoostedTreesSparseAggregateStatsOp : public OpKernel { explicit BoostedTreesSparseAggregateStatsOp( OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr("max_splits", &max_splits_)); OP_REQUIRES_OK(context, context->GetAttr("num_buckets", &num_buckets_)); } @@ -1524,29 +1783,71 @@ class BoostedTreesSparseAggregateStatsOp : public OpKernel { // node_ids. const Tensor* node_ids_t; OP_REQUIRES_OK(context, context->input("node_ids", &node_ids_t)); + OP_REQUIRES( + context, TensorShapeUtils::IsVector(node_ids_t->shape()), + errors::InvalidArgument("node_ids must be a vector, received shape ", + node_ids_t->shape().DebugString())); const auto node_ids = node_ids_t->vec(); + const auto num_nodes = node_ids_t->NumElements(); + for (int i = 0; i < num_nodes; ++i) { + OP_REQUIRES( + context, node_ids(i) <= max_splits_, + errors::InvalidArgument( + "Nodes in node_ids must be at most max_splits. Node ", i, " is ", + node_ids(i), " which is greater than ", max_splits_)); + } // gradients. const Tensor* gradients_t; OP_REQUIRES_OK(context, context->input("gradients", &gradients_t)); + OP_REQUIRES( + context, TensorShapeUtils::IsMatrix(gradients_t->shape()), + errors::InvalidArgument("gradients must be a matrix, received shape ", + gradients_t->shape().DebugString())); const auto gradients = gradients_t->matrix(); // hessians. 
const Tensor* hessians_t; OP_REQUIRES_OK(context, context->input("hessians", &hessians_t)); + OP_REQUIRES( + context, TensorShapeUtils::IsMatrix(hessians_t->shape()), + errors::InvalidArgument("hessians must be a matrix, received shape ", + hessians_t->shape().DebugString())); const auto hessians = hessians_t->matrix(); // feature indices. const Tensor* feature_indices_t; OP_REQUIRES_OK(context, context->input("feature_indices", &feature_indices_t)); + OP_REQUIRES(context, TensorShapeUtils::IsMatrix(feature_indices_t->shape()), + errors::InvalidArgument( + "feature_indices must be a matrix, received shape ", + feature_indices_t->shape().DebugString())); + OP_REQUIRES( + context, feature_indices_t->shape().dim_size(1) == 2, + errors::InvalidArgument( + "feature_indices must be a matrix of shape [?, 2], received shape ", + feature_indices_t->shape().DebugString())); const auto feature_indices = feature_indices_t->matrix(); // feature values. const Tensor* feature_values_t; OP_REQUIRES_OK(context, context->input("feature_values", &feature_values_t)); + OP_REQUIRES(context, TensorShapeUtils::IsVector(feature_values_t->shape()), + errors::InvalidArgument( + "feature_values must be a vector, received shape ", + feature_values_t->shape().DebugString())); const auto feature_values = feature_values_t->vec(); + const auto num_features = feature_values_t->NumElements(); + for (int i = 0; i < num_features; ++i) { + OP_REQUIRES( + context, feature_values(i) <= num_buckets_, + errors::InvalidArgument( + "Features in feature_values must be at most num_buckets. Node ", + i, " is ", feature_values(i), " which is greater than ", + num_buckets_)); + } // feature shape. const Tensor* feature_shape_t; @@ -1563,7 +1864,26 @@ class BoostedTreesSparseAggregateStatsOp : public OpKernel { const int64 stats_dims = logits_dims + hessians_dims; const int64 num_sparse_entries = feature_indices_t->dim_size(0); const int32 feature_dims = feature_shape(1); - DCHECK_LE(num_sparse_entries, batch_size * feature_dims); + OP_REQUIRES(context, num_features == num_sparse_entries, + errors::InvalidArgument( + "Number of elements in feature_values must match number of " + "sparse entries in feature_indices. Got ", + num_features, " and ", num_sparse_entries)); + for (int i = 0; i < num_sparse_entries; ++i) { + const int32_t f_dim = feature_indices(i, 1); + OP_REQUIRES( + context, f_dim <= feature_dims, + errors::InvalidArgument( + "Got invalid feature index feature_indices(", i, "1) = ", f_dim, + " which is above ", feature_dims, + " (from feature_shape: ", feature_shape_t->DebugString(), ")")); + } + OP_REQUIRES(context, num_sparse_entries <= batch_size * feature_dims, + errors::InvalidArgument( + "feature_indices dim0 should be <= gradients dim0 * " + "feature_shape[1]. features_indices dim0: ", + num_sparse_entries, " gradients dim0: ", batch_size, + ", feature_shape[1]: ", feature_dims)); // Aggregate statistics info to map. 
    StatsPartitionMap stats_map;
@@ -1571,14 +1891,35 @@ class BoostedTreesSparseAggregateStatsOp : public OpKernel {
     int prev_instance = 0;
     int prev_f_dim = -1;

+    if (num_sparse_entries > 0) {
+      OP_REQUIRES(
+          context, feature_indices(0, 0) >= 0,
+          errors::InvalidArgument("feature_indices should be non-negative but "
+                                  "got feature_indices(0, 0)=",
+                                  feature_indices(0, 0)));
+    }
+
     for (int i = 0; i < num_sparse_entries; ++i) {
       // the instance number within a batch
       const int32 instance = feature_indices(i, 0);
-      DCHECK_LE(instance, batch_size);
-      DCHECK_GE(instance, prev_instance);
+      OP_REQUIRES(context, instance <= batch_size,
+                  errors::InvalidArgument("feature_indices(", i,
+                                          ", 0) should be at most batch size (",
+                                          batch_size, ") but got ", instance));
+      OP_REQUIRES(
+          context, instance >= prev_instance,
+          errors::InvalidArgument(
+              "feature_indices should be increasing but got feature_indices(",
+              i, ", 0) < ", prev_instance, " (feature_indices(", i - 1,
+              ", 0))"));
       // the node id within a tree.
+      // We don't need the node id here, we just validate that the `instance`
+      // is a valid index as this is needed later in the code.
-      const int32 node_id = node_ids(instance);
-      DCHECK_LE(node_id, max_splits_);
+      OP_REQUIRES(context, instance < num_nodes,
+                  errors::InvalidArgument("feature_indices(", i,
+                                          ", 0) is not a valid index in the "
+                                          "node_ids vector (must be less than ",
+                                          num_nodes, ", got ", instance, ")"));
       // the feature dimension.
       const int32 f_dim = feature_indices(i, 1);
       DCHECK_LE(f_dim, feature_dims);
@@ -1589,8 +1930,8 @@ class BoostedTreesSparseAggregateStatsOp : public OpKernel {
         // Add statistics for the missing entries into default bucket.
         // The last bucket is default bucket.
         const int missing_entry_bucket = num_buckets_;
-        AddRangeStats(prev_instance, instance, prev_f_dim, f_dim, &stats_map,
-                      gradients, hessians, node_ids, feature_dims,
+        AddRangeStats(context, prev_instance, instance, prev_f_dim, f_dim,
+                      &stats_map, gradients, hessians, node_ids, feature_dims,
                       missing_entry_bucket, logits_dims, stats_dims);
         prev_instance = instance;
         prev_f_dim = f_dim;
@@ -1599,9 +1940,9 @@ class BoostedTreesSparseAggregateStatsOp : public OpKernel {
       AddInstanceStatsToMap(instance, f_dim, bucket_id, logits_dims,
                             stats_dims, &stats_map, gradients, hessians,
                             node_ids);
     }
-    AddRangeStats(prev_instance, batch_size - 1, prev_f_dim, feature_dims,
-                  &stats_map, gradients, hessians, node_ids, feature_dims,
-                  num_buckets_, logits_dims, stats_dims);
+    AddRangeStats(context, prev_instance, batch_size - 1, prev_f_dim,
+                  feature_dims, &stats_map, gradients, hessians, node_ids,
+                  feature_dims, num_buckets_, logits_dims, stats_dims);

     // Serialize statistics info map to tensor output.
     const int64 num_slots = stats_map.size() * stats_dims;
diff --git a/tensorflow/core/kernels/boosted_trees/training_ops.cc b/tensorflow/core/kernels/boosted_trees/training_ops.cc
index e91677740e7694..b7ef1e7dbbc2a7 100644
--- a/tensorflow/core/kernels/boosted_trees/training_ops.cc
+++ b/tensorflow/core/kernels/boosted_trees/training_ops.cc
@@ -35,6 +35,9 @@ class BoostedTreesUpdateEnsembleOp : public OpKernel {
  public:
   explicit BoostedTreesUpdateEnsembleOp(OpKernelConstruction* const context)
       : OpKernel(context) {
+    VLOG(1) << "Boosted Trees kernels in TF are deprecated. 
Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr("num_features", &num_features_)); int32 pruning_index; @@ -68,14 +71,26 @@ class BoostedTreesUpdateEnsembleOp : public OpKernel { const Tensor* feature_ids_t; OP_REQUIRES_OK(context, context->input("feature_ids", &feature_ids_t)); + OP_REQUIRES( + context, TensorShapeUtils::IsVector(feature_ids_t->shape()), + errors::InvalidArgument("feature_ids must be a vector, received shape ", + feature_ids_t->shape().DebugString())); const auto feature_ids = feature_ids_t->vec(); const Tensor* max_depth_t; OP_REQUIRES_OK(context, context->input("max_depth", &max_depth_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_depth_t->shape()), + errors::InvalidArgument( + "max_depth must be a scalar, got a tensor of shape ", + max_depth_t->shape().DebugString())); const auto max_depth = max_depth_t->scalar()(); const Tensor* learning_rate_t; OP_REQUIRES_OK(context, context->input("learning_rate", &learning_rate_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(learning_rate_t->shape()), + errors::InvalidArgument( + "learning_rate must be a scalar, got a tensor of shape ", + learning_rate_t->shape().DebugString())); const auto learning_rate = learning_rate_t->scalar()(); // Op does not support multi-class, the V2 op below does however. int32 logits_dimension = 1; @@ -176,11 +191,50 @@ class BoostedTreesUpdateEnsembleOp : public OpKernel { std::map* best_split_per_node) { // Find best split per node going through every feature candidate. for (int64 feature_idx = 0; feature_idx < num_features_; ++feature_idx) { + OP_REQUIRES( + context, + TensorShapeUtils::IsVector(node_ids_list[feature_idx].shape()), + errors::InvalidArgument( + "Each node_id in node_ids_list must be a vector, received shape ", + node_ids_list[feature_idx].shape().DebugString(), " at index ", + feature_idx)); const auto& node_ids = node_ids_list[feature_idx].vec(); + OP_REQUIRES( + context, TensorShapeUtils::IsVector(gains_list[feature_idx].shape()), + errors::InvalidArgument( + "Each gain in gains_list must be a vector, received shape ", + gains_list[feature_idx].shape().DebugString(), " at index ", + feature_idx)); const auto& gains = gains_list[feature_idx].vec(); + OP_REQUIRES( + context, + TensorShapeUtils::IsVector(thresholds_list[feature_idx].shape()), + errors::InvalidArgument( + "Each threshold in thresholds_list must be a vector, received " + "shape ", + thresholds_list[feature_idx].shape().DebugString(), " at index ", + feature_idx)); const auto& thresholds = thresholds_list[feature_idx].vec(); + OP_REQUIRES( + context, + TensorShapeUtils::IsMatrix( + left_node_contribs_list[feature_idx].shape()), + errors::InvalidArgument( + "Each left_node_contribs in left_node_contribs_list must be a " + "matrix, received shape ", + left_node_contribs_list[feature_idx].shape().DebugString(), + " at index ", feature_idx)); const auto& left_node_contribs = left_node_contribs_list[feature_idx].matrix(); + OP_REQUIRES( + context, + TensorShapeUtils::IsMatrix( + right_node_contribs_list[feature_idx].shape()), + errors::InvalidArgument( + "Each right_node_contribs in right_node_contribs_list must be a " + "matrix, received shape ", + right_node_contribs_list[feature_idx].shape().DebugString(), + " at index ", feature_idx)); const auto& right_node_contribs = right_node_contribs_list[feature_idx].matrix(); @@ -234,6 +288,9 @@ class BoostedTreesUpdateEnsembleV2Op : public OpKernel { 
public: explicit BoostedTreesUpdateEnsembleV2Op(OpKernelConstruction* const context) : OpKernel(context) { + VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use " + << "TensorFlow Decision Forests instead " + << "(https://github.com/tensorflow/decision-forests).\n"; OP_REQUIRES_OK(context, context->GetAttr("logits_dimension", &logits_dim_)); OP_REQUIRES_OK(context, context->GetAttr("num_groups", &num_groups_)); } @@ -274,14 +331,26 @@ class BoostedTreesUpdateEnsembleV2Op : public OpKernel { const Tensor* max_depth_t; OP_REQUIRES_OK(context, context->input("max_depth", &max_depth_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_depth_t->shape()), + errors::InvalidArgument( + "max_depth must be a scalar, got a tensor of shape ", + max_depth_t->shape().DebugString())); const auto max_depth = max_depth_t->scalar()(); const Tensor* learning_rate_t; OP_REQUIRES_OK(context, context->input("learning_rate", &learning_rate_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(learning_rate_t->shape()), + errors::InvalidArgument( + "learning_rate must be a scalar, got a tensor of shape ", + learning_rate_t->shape().DebugString())); const auto learning_rate = learning_rate_t->scalar()(); const Tensor* pruning_mode_t; OP_REQUIRES_OK(context, context->input("pruning_mode", &pruning_mode_t)); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(pruning_mode_t->shape()), + errors::InvalidArgument( + "pruning_mode must be a scalar, got a tensor of shape ", + pruning_mode_t->shape().DebugString())); const auto pruning_mode = static_cast(pruning_mode_t->scalar()()); // Find best splits for each active node. @@ -327,7 +396,7 @@ class BoostedTreesUpdateEnsembleV2Op : public OpKernel { boosted_trees::SplitTypeWithDefault split_type_with_default; bool parsed = boosted_trees::SplitTypeWithDefault_Parse( split_type, &split_type_with_default); - DCHECK(parsed); + OP_REQUIRES(context, parsed, errors::Internal("Parse failed")); if (split_type_with_default == boosted_trees::EQUALITY_DEFAULT_RIGHT) { // Add equality split to the node. ensemble_resource->AddCategoricalSplitNode(current_tree, split_entry, @@ -396,15 +465,75 @@ class BoostedTreesUpdateEnsembleV2Op : public OpKernel { std::map* best_split_per_node) { // Find best split per node going through every feature candidate. 
    for (int64 group_idx = 0; group_idx < num_groups_; ++group_idx) {
+      OP_REQUIRES(
+          context, TensorShapeUtils::IsVector(node_ids_list[group_idx].shape()),
+          errors::InvalidArgument(
+              "Each node_id in node_ids_list must be a vector, received shape ",
+              node_ids_list[group_idx].shape().DebugString(), " at index ",
+              group_idx));
       const auto& node_ids = node_ids_list[group_idx].vec<int32>();
+      OP_REQUIRES(
+          context, TensorShapeUtils::IsVector(gains_list[group_idx].shape()),
+          errors::InvalidArgument(
+              "Each gain in gains_list must be a vector, received shape ",
+              gains_list[group_idx].shape().DebugString(), " at index ",
+              group_idx));
       const auto& gains = gains_list[group_idx].vec<float>();
+      OP_REQUIRES(
+          context,
+          TensorShapeUtils::IsVector(feature_ids_list[group_idx].shape()),
+          errors::InvalidArgument(
+              "Each feature_id in feature_ids_list must be a vector, received "
+              "shape ",
+              feature_ids_list[group_idx].shape().DebugString(), " at index ",
+              group_idx));
       const auto& feature_ids = feature_ids_list[group_idx].vec<int32>();
+      OP_REQUIRES(
+          context,
+          TensorShapeUtils::IsVector(thresholds_list[group_idx].shape()),
+          errors::InvalidArgument(
+              "Each threshold in thresholds_list must be a vector, received "
+              "shape ",
+              thresholds_list[group_idx].shape().DebugString(), " at index ",
+              group_idx));
       const auto& thresholds = thresholds_list[group_idx].vec<int32>();
+      OP_REQUIRES(
+          context,
+          TensorShapeUtils::IsVector(dimension_ids_list[group_idx].shape()),
+          errors::InvalidArgument(
+              "Each dimension_id in dimension_ids_list must be a vector, "
+              "received shape ",
+              dimension_ids_list[group_idx].shape().DebugString(), " at index ",
+              group_idx));
       const auto& dimension_ids = dimension_ids_list[group_idx].vec<int32>();
+      OP_REQUIRES(context,
+                  TensorShapeUtils::IsMatrix(
+                      left_node_contribs_list[group_idx].shape()),
+                  errors::InvalidArgument(
+                      "Each left_node_contribs in left_node_contribs_list "
+                      "must be a matrix, received shape ",
+                      left_node_contribs_list[group_idx].shape().DebugString(),
+                      " at index ", group_idx));
       const auto& left_node_contribs =
           left_node_contribs_list[group_idx].matrix<float>();
+      OP_REQUIRES(context,
+                  TensorShapeUtils::IsMatrix(
+                      right_node_contribs_list[group_idx].shape()),
+                  errors::InvalidArgument(
+                      "Each right_node_contribs in right_node_contribs_list "
+                      "must be a matrix, received shape ",
+                      right_node_contribs_list[group_idx].shape().DebugString(),
+                      " at index ", group_idx));
       const auto& right_node_contribs =
           right_node_contribs_list[group_idx].matrix<float>();
+      OP_REQUIRES(
+          context,
+          TensorShapeUtils::IsVector(split_types_list[group_idx].shape()),
+          errors::InvalidArgument(
+              "Each split_type in split_types_list must be a vector, received "
+              "shape ",
+              split_types_list[group_idx].shape().DebugString(), " at index ",
+              group_idx));
       const auto& split_types = split_types_list[group_idx].vec<tstring>();

       for (size_t candidate_idx = 0; candidate_idx < node_ids.size();
@@ -457,7 +586,11 @@ REGISTER_KERNEL_BUILDER(Name("BoostedTreesUpdateEnsembleV2").Device(DEVICE_CPU),
 class BoostedTreesCenterBiasOp : public OpKernel {
  public:
   explicit BoostedTreesCenterBiasOp(OpKernelConstruction* const context)
-      : OpKernel(context) {}
+      : OpKernel(context) {
+    VLOG(1) << "Boosted Trees kernels in TF are deprecated. Please use "
+            << "TensorFlow Decision Forests instead "
+            << "(https://github.com/tensorflow/decision-forests).\n";
+  }

   void Compute(OpKernelContext* const context) override {
     // Get decision tree ensemble.
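The hunk below applies the same discipline to the scalar inputs `l1` and `l2` of `BoostedTreesCenterBiasOp`: calling `tensor.scalar<T>()()` assumes the tensor is rank 0, so the shape has to be proven before the dereference. A toy sketch of shape-before-access; the `Tensor` struct here is a hypothetical stand-in, not `tensorflow::Tensor`:

    #include <iostream>
    #include <string>
    #include <vector>

    // Toy stand-in for tensorflow::Tensor: just a shape plus flat storage.
    struct Tensor {
      std::vector<int> dims;
      std::vector<float> data;
      bool IsScalar() const { return dims.empty(); }
    };

    // Mirrors the patched kernels: verify rank 0 *before* reading, instead
    // of assuming the caller passed a well-formed scalar.
    bool ReadScalar(const Tensor& t, const std::string& name, float* out) {
      if (!t.IsScalar() || t.data.size() != 1) {
        std::cerr << name << " must be a scalar, got rank " << t.dims.size()
                  << "\n";
        return false;
      }
      *out = t.data[0];
      return true;
    }

    int main() {
      Tensor learning_rate{{}, {0.1f}};    // well-formed scalar
      Tensor bogus{{3}, {1.f, 2.f, 3.f}};  // rank-1 tensor posing as a scalar
      float v = 0.f;
      ReadScalar(learning_rate, "learning_rate", &v);  // ok, v == 0.1
      ReadScalar(bogus, "l1", &v);                     // rejected, no OOB read
      return 0;
    }
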
@@ -479,9 +612,17 @@ class BoostedTreesCenterBiasOp : public OpKernel { // Get the regularization options. const Tensor* l1_t; OP_REQUIRES_OK(context, context->input("l1", &l1_t)); + OP_REQUIRES( + context, TensorShapeUtils::IsScalar(l1_t->shape()), + errors::InvalidArgument("l1 must be a scalar, got a tensor of shape ", + l1_t->shape().DebugString())); const auto l1 = l1_t->scalar()(); const Tensor* l2_t; OP_REQUIRES_OK(context, context->input("l2", &l2_t)); + OP_REQUIRES( + context, TensorShapeUtils::IsScalar(l2_t->shape()), + errors::InvalidArgument("l2 must be a scalar, got a tensor of shape ", + l2_t->shape().DebugString())); const auto l2 = l2_t->scalar()(); // For now, assume 1-dimensional weight on leaves. @@ -489,7 +630,8 @@ class BoostedTreesCenterBiasOp : public OpKernel { float unused_gain; // TODO(crawles): Support multiclass. - DCHECK_EQ(logits_dim, 1); + OP_REQUIRES(context, logits_dim == 1, + errors::Internal("Expected logits_dim == 1, got ", logits_dim)); Eigen::VectorXf gradients_mean(1); Eigen::VectorXf hessians_mean(1); gradients_mean[0] = mean_gradients_t->flat()(0); @@ -506,7 +648,9 @@ class BoostedTreesCenterBiasOp : public OpKernel { current_bias = logits; } else { const auto& current_biases = ensemble_resource->node_value(0, 0); - DCHECK_EQ(current_biases.size(), 1); + OP_REQUIRES(context, current_biases.size() == 1, + errors::Internal("Expected current_biases.size() == 1, got ", + current_biases.size())); current_bias = current_biases[0]; continue_centering = std::abs(logits / current_bias) > kMinDeltaForCenterBias; diff --git a/tensorflow/core/kernels/conv_ops.cc b/tensorflow/core/kernels/conv_ops.cc index 9bacebe7d265dc..3c4860ba45f059 100644 --- a/tensorflow/core/kernels/conv_ops.cc +++ b/tensorflow/core/kernels/conv_ops.cc @@ -271,6 +271,12 @@ struct LaunchConv2DOp { " vs ", patch_depth)); return; } + if (filter.NumElements() <= 0) { + ctx->SetStatus( + errors::InvalidArgument("filter must not have zero elements " + "(i.e. all dimensions must be non-zero)")); + return; + } const int64 num_groups = in_depth / patch_depth; if (num_groups <= 0) { @@ -322,6 +328,10 @@ struct LaunchConv2DOp { "attempted to be run because the input depth of ", in_depth, " does not match the filter input depth of ", filter.dim_size(2))); + OP_REQUIRES( + ctx, filter.NumElements() > 0, + errors::InvalidArgument("filter must not have zero elements " + "(i.e. all dimensions must be non-zero)")); for (int64 explicit_padding : explicit_paddings) { if (!FastBoundsCheck(explicit_padding, std::numeric_limits::max())) { @@ -796,6 +806,11 @@ void LaunchConv2DOp::operator()( const int64 patch_cols = filter.dim_size(1); const int64 patch_depths = filter.dim_size(2); + OP_REQUIRES( + ctx, filter.NumElements() > 0, + errors::InvalidArgument("filter must not have zero elements " + "(i.e. all dimensions must be non-zero)")); + // If the filter in-depth (patch_depths) is 1 and smaller than the input // depth, it's a depthwise convolution. More generally, if the filter in-depth // divides but is smaller than the input depth, it is a grouped convolution. 
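The `filter.NumElements() > 0` guards above close a divide-by-zero path: with an empty filter, `patch_depth` can be 0, and `in_depth / patch_depth` faults before any shape error is reported (the convolution division-by-zero listed as CVE-2022-21725 in the release notes). A minimal sketch of the guarded computation, using plain integers for the dimension sizes (illustration only, not the kernel code):

    #include <cstdint>
    #include <iostream>
    #include <optional>

    // Mirrors LaunchConv2DOp's group computation: reject a zero-sized filter
    // depth before dividing, rather than crashing on in_depth / patch_depth.
    std::optional<int64_t> NumGroups(int64_t in_depth, int64_t patch_depth) {
      if (patch_depth <= 0) {
        std::cerr << "filter must not have zero elements "
                     "(i.e. all dimensions must be non-zero)\n";
        return std::nullopt;
      }
      if (in_depth % patch_depth != 0) {
        std::cerr << "Input depth must be evenly divisible by filter depth: "
                  << in_depth << " vs " << patch_depth << "\n";
        return std::nullopt;
      }
      return in_depth / patch_depth;  // safe: patch_depth > 0
    }

    int main() {
      if (auto g = NumGroups(8, 2)) std::cout << "num_groups = " << *g << "\n";
      NumGroups(8, 0);  // rejected up front instead of a SIGFPE at the division
      return 0;
    }
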
diff --git a/tensorflow/core/kernels/conv_ops_3d.cc b/tensorflow/core/kernels/conv_ops_3d.cc index 505c55c7e6feaa..dc275bfb8281ef 100644 --- a/tensorflow/core/kernels/conv_ops_3d.cc +++ b/tensorflow/core/kernels/conv_ops_3d.cc @@ -153,6 +153,10 @@ class Conv3DOp : public BinaryOp { errors::InvalidArgument( "Input depth must be evenly divisible by filter depth: ", in_depth, " vs ", filter_depth)); + OP_REQUIRES( + context, filter.NumElements() > 0, + errors::InvalidArgument("filter must not have zero elements " + "(i.e. all dimensions must be non-zero)")); // Dimension order for these arrays is: z, y, x. std::array input_size = { diff --git a/tensorflow/core/kernels/count_ops.cc b/tensorflow/core/kernels/count_ops.cc index 40aa1fe458c1ee..74929d8bc18ce4 100644 --- a/tensorflow/core/kernels/count_ops.cc +++ b/tensorflow/core/kernels/count_ops.cc @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ +#include + #include "absl/container/flat_hash_map.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" @@ -23,6 +25,9 @@ limitations under the License. namespace tensorflow { +// Don't allocate too large `BatchedMap` objects +static int kMaxBatches = std::numeric_limits::max(); + template using BatchedMap = std::vector>; @@ -185,6 +190,44 @@ class SparseCount : public OpKernel { errors::InvalidArgument( "Input indices must be a 2-dimensional tensor. Got: ", indices.shape().DebugString())); + OP_REQUIRES(context, TensorShapeUtils::IsVector(values.shape()), + errors::InvalidArgument("Input values must be a vector. Got: ", + values.shape().DebugString())); + OP_REQUIRES(context, TensorShapeUtils::IsVector(shape.shape()), + errors::InvalidArgument("Input shape must be a vector. Got: ", + shape.shape().DebugString())); + OP_REQUIRES(context, + values.shape().dim_size(0) == indices.shape().dim_size(0), + errors::InvalidArgument( + "Number of values must match first dimension of indices.", + "Got ", values.shape().dim_size(0), + " values, indices shape: ", indices.shape().DebugString())); + OP_REQUIRES( + context, shape.shape().dim_size(0) == indices.shape().dim_size(1), + errors::InvalidArgument( + "Number of dimensions must match second dimension of indices.", + "Got ", shape.shape().dim_size(0), + " dimensions, indices shape: ", indices.shape().DebugString())); + OP_REQUIRES(context, shape.NumElements() > 0, + errors::InvalidArgument( + "The shape argument requires at least one element.")); + // Validate indices: each index must be valid for the corresponding + // dimension. This could be possibly done better. 
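The loop that follows in the hunk implements exactly the check the comment above describes: every coordinate in the sparse `indices` matrix must lie inside the dense shape before it is used to address memory. A standalone sketch of the same per-dimension bounds check, with flat vectors in place of the Eigen tensor maps (illustration only):

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // `indices` is a row-major [num_values x rank] matrix, as in SparseCount.
    // Returns false (after printing why) if any coordinate escapes the bounds
    // given by dense_shape, mirroring the OP_REQUIRES in the hunk below.
    bool ValidateSparseIndices(const std::vector<int64_t>& indices,
                               int64_t num_values, int64_t rank,
                               const std::vector<int64_t>& dense_shape) {
      for (int64_t i = 0; i < num_values; ++i) {
        for (int64_t j = 0; j < rank; ++j) {
          const int64_t v = indices[i * rank + j];
          if (v < 0 || v >= dense_shape[j]) {
            std::cerr << "Invalid index value at " << i << ": dimension " << j
                      << " has value " << v << " which is not in [0, "
                      << dense_shape[j] << ")\n";
            return false;
          }
        }
      }
      return true;
    }

    int main() {
      // Two rank-2 entries against a 4x3 dense shape; the second is invalid.
      const std::vector<int64_t> indices = {0, 1, 5, 2};
      std::cout << ValidateSparseIndices(indices, 2, 2, {4, 3}) << "\n";  // 0
      return 0;
    }
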
+ const auto indices_values = indices.matrix(); + const auto shape_vector = shape.vec(); + int num_values = values.NumElements(); // same as first dim of indices + int rank = indices.shape().dim_size(1); + for (int i = 0; i < num_values; ++i) { + for (int j = 0; j < rank; ++j) { + OP_REQUIRES( + context, + indices_values(i, j) >= 0 && indices_values(i, j) < shape_vector(j), + errors::InvalidArgument( + "Invalid index value at ", i, ": dimension ", j, " has value ", + indices_values(i, j), " which is not in [0, ", shape_vector(j), + ") (as given by dense shape ", shape.DebugString())); + } + } if (use_weights) { OP_REQUIRES( @@ -195,14 +238,12 @@ class SparseCount : public OpKernel { "; values shape: ", values.shape().DebugString())); } - OP_REQUIRES(context, shape.NumElements() != 0, - errors::InvalidArgument( - "The shape argument requires at least one element.")); - bool is_1d = shape.NumElements() == 1; - auto shape_vector = shape.flat(); int num_batches = is_1d ? 1 : shape_vector(0); - int num_values = values.NumElements(); + OP_REQUIRES( + context, 0 < num_batches && num_batches < kMaxBatches, + errors::InvalidArgument("Cannot allocate ", num_batches, + " batches, is the dense shape too wide?")); for (int b = 0; b < shape_vector.size(); b++) { OP_REQUIRES(context, shape_vector(b) >= 0, @@ -217,7 +258,6 @@ class SparseCount : public OpKernel { "Got ", num_values, " values, indices shape: ", indices.shape().DebugString())); - const auto indices_values = indices.matrix(); const auto values_values = values.flat(); const auto weight_values = weights.flat(); diff --git a/tensorflow/core/kernels/cwise_ops_common.h b/tensorflow/core/kernels/cwise_ops_common.h index 9adc628421d046..27fcbf2f33973c 100644 --- a/tensorflow/core/kernels/cwise_ops_common.h +++ b/tensorflow/core/kernels/cwise_ops_common.h @@ -87,7 +87,17 @@ class BinaryOp : public BinaryOpShared { void Compute(OpKernelContext* ctx) override { const Tensor& input_0 = ctx->input(0); + OP_REQUIRES(ctx, input_0.dtype() == DataTypeToEnum::v(), + errors::InvalidArgument( + "Expected tensor of type ", + DataTypeString(DataTypeToEnum::v()), " but got type ", + DataTypeString(input_0.dtype()))); const Tensor& input_1 = ctx->input(1); + OP_REQUIRES(ctx, input_1.dtype() == DataTypeToEnum::v(), + errors::InvalidArgument( + "Expected tensor of type ", + DataTypeString(DataTypeToEnum::v()), " but got type ", + DataTypeString(input_1.dtype()))); const Device& eigen_device = ctx->eigen_device(); bool error = false; bool* const error_ptr = Functor::has_errors ? 
&error : nullptr; @@ -265,6 +275,11 @@ class SimpleBinaryOp : public OpKernel { void Compute(OpKernelContext* ctx) override { const Tensor& in0 = ctx->input(0); const Tensor& in1 = ctx->input(1); + OP_REQUIRES( + ctx, in0.NumElements() == in1.NumElements(), + errors::InvalidArgument("The two arguments to a cwise op must have " + "same number of elements, got ", + in0.NumElements(), " and ", in1.NumElements())); auto in0_flat = in0.flat(); auto in1_flat = in1.flat(); const Device& eigen_device = ctx->eigen_device(); diff --git a/tensorflow/core/kernels/data/experimental/compression_ops.cc b/tensorflow/core/kernels/data/experimental/compression_ops.cc index efa7018acb6293..8cc214671bd742 100644 --- a/tensorflow/core/kernels/data/experimental/compression_ops.cc +++ b/tensorflow/core/kernels/data/experimental/compression_ops.cc @@ -48,6 +48,11 @@ void UncompressElementOp::Compute(OpKernelContext* ctx) { Tensor tensor = ctx->input(0); const Variant& variant = tensor.scalar()(); const CompressedElement* compressed = variant.get(); + OP_REQUIRES( + ctx, compressed != nullptr, + errors::InvalidArgument( + "Input does not contain a compressed element. Instead got tensor ", + tensor.DebugString())); std::vector components; OP_REQUIRES_OK(ctx, UncompressElement(*compressed, &components)); diff --git a/tensorflow/core/kernels/data/experimental/io_ops.cc b/tensorflow/core/kernels/data/experimental/io_ops.cc index 7d976aa4d1f588..57e55c6e5cdfd0 100644 --- a/tensorflow/core/kernels/data/experimental/io_ops.cc +++ b/tensorflow/core/kernels/data/experimental/io_ops.cc @@ -253,7 +253,11 @@ class LoadDatasetOp::Dataset : public DatasetBase { explicit Iterator(const Params& params) : DatasetIterator(params) {} - ~Iterator() override { input_->Unref(); } + ~Iterator() override { + if (input_) { + input_->Unref(); + } + } Status Initialize(IteratorContext* ctx) override { mutex_lock l(mu_); @@ -331,7 +335,7 @@ class LoadDatasetOp::Dataset : public DatasetBase { } mutex mu_; - DatasetBase* input_ TF_GUARDED_BY(mu_); + DatasetBase* input_ TF_GUARDED_BY(mu_) = nullptr; std::unique_ptr input_impl_ TF_GUARDED_BY(mu_); std::unique_ptr instantiated_captured_func_; }; diff --git a/tensorflow/core/kernels/data/experimental/threadpool_dataset_op.cc b/tensorflow/core/kernels/data/experimental/threadpool_dataset_op.cc index 464c049743a76b..1a7da98987fba9 100644 --- a/tensorflow/core/kernels/data/experimental/threadpool_dataset_op.cc +++ b/tensorflow/core/kernels/data/experimental/threadpool_dataset_op.cc @@ -39,6 +39,22 @@ namespace experimental { PrivateThreadPoolDatasetOp::kDatasetType; /* static */ constexpr const char* const PrivateThreadPoolDatasetOp::kDatasetOp; +namespace { +// To prevent integer overflow issues when allocating threadpool memory for an +// unreasonable number of threads. 
+constexpr int kThreadLimit = 65536; + +Status ValidateNumThreads(int32_t num_threads) { + if (num_threads < 0) { + return errors::InvalidArgument("`num_threads` must be >= 0"); + } + if (num_threads >= kThreadLimit) { + return errors::InvalidArgument("`num_threads` must be < ", kThreadLimit); + } + return Status::OK(); +} +} // namespace + class ThreadPoolResource : public ResourceBase { public: ThreadPoolResource(Env* env, const ThreadOptions& thread_options, @@ -83,9 +99,7 @@ class ThreadPoolHandleOp : public OpKernel { OP_REQUIRES_OK(ctx, ctx->GetAttr("num_threads", &num_threads_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("max_intra_op_parallelism", &max_intra_op_parallelism_)); - OP_REQUIRES( - ctx, num_threads_ > 0, - errors::InvalidArgument("`num_threads` must be greater than zero.")); + OP_REQUIRES_OK(ctx, ValidateNumThreads(num_threads_)); } // The resource is deleted from the resource manager only when it is private @@ -530,8 +544,7 @@ void PrivateThreadPoolDatasetOp::MakeDatasetFromOptions(OpKernelContext* ctx, DatasetBase* input, int32 num_threads, DatasetBase** output) { - OP_REQUIRES(ctx, num_threads >= 0, - errors::InvalidArgument("`num_threads` must be >= 0")); + OP_REQUIRES_OK(ctx, ValidateNumThreads(num_threads)); *output = new Dataset(ctx, DatasetContext(DatasetContext::Params( {PrivateThreadPoolDatasetOp::kDatasetType, @@ -545,8 +558,7 @@ void PrivateThreadPoolDatasetOp::MakeDataset(OpKernelContext* ctx, int64 num_threads = 0; OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, "num_threads", &num_threads)); - OP_REQUIRES(ctx, num_threads >= 0, - errors::InvalidArgument("`num_threads` must be >= 0")); + OP_REQUIRES_OK(ctx, ValidateNumThreads(num_threads)); *output = new Dataset(ctx, input, num_threads); } diff --git a/tensorflow/core/kernels/data/experimental/to_tf_record_op.cc b/tensorflow/core/kernels/data/experimental/to_tf_record_op.cc index bfa894cd473b40..56401bb91f5753 100644 --- a/tensorflow/core/kernels/data/experimental/to_tf_record_op.cc +++ b/tensorflow/core/kernels/data/experimental/to_tf_record_op.cc @@ -16,6 +16,7 @@ limitations under the License. 
 #include "tensorflow/core/framework/function_handle_cache.h"
 #include "tensorflow/core/framework/op_kernel.h"
 #include "tensorflow/core/framework/resource_mgr.h"
+#include "tensorflow/core/framework/types.h"
 #include "tensorflow/core/kernels/data/dataset_utils.h"
 #include "tensorflow/core/kernels/ops_util.h"
 #include "tensorflow/core/lib/core/threadpool.h"
@@ -87,8 +88,20 @@ class ToTFRecordOp : public AsyncOpKernel {
         TF_RETURN_IF_ERROR(dataset->MakeIterator(
             &iter_ctx, /*parent=*/nullptr, "ToTFRecordOpIterator", &iterator));
+        const int num_output_dtypes = dataset->output_dtypes().size();
+        if (num_output_dtypes != 1) {
+          return errors::InvalidArgument(
+              "ToTFRecordOp currently only supports datasets with a single "
+              "column, ",
+              "but got ", num_output_dtypes);
+        }
+        const DataType dt = dataset->output_dtypes()[0];
+        if (dt != DT_STRING) {
+          return errors::InvalidArgument(
+              "ToTFRecordOp currently only supports DT_STRING datatypes, "
+              "but got ",
+              DataTypeString(dt));
+        }
         std::vector<Tensor> components;
-        components.reserve(dataset->output_dtypes().size());
+        components.reserve(num_output_dtypes);
         bool end_of_sequence;
         do {
           TF_RETURN_IF_ERROR(
diff --git a/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc b/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc
index 9efc9fddf58981..e0bf02ff3564e0 100644
--- a/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc
+++ b/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc
@@ -238,17 +238,29 @@ class SparseTensorSliceDatasetOp : public DatasetOpKernel {
     OP_REQUIRES_OK(ctx, ctx->input("dense_shape", &dense_shape));

     OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(indices->shape()),
-                errors::InvalidArgument(
-                    "Input indices should be a matrix but received shape ",
-                    indices->shape().DebugString()));
+                errors::InvalidArgument("Input indices must be a matrix. Got: ",
+                                        indices->shape().DebugString()));
     OP_REQUIRES(ctx, TensorShapeUtils::IsVector(values->shape()),
-                errors::InvalidArgument(
-                    "Input values should be a vector but received shape ",
-                    indices->shape().DebugString()));
+                errors::InvalidArgument("Input values must be a vector. Got: ",
+                                        values->shape().DebugString()));
     OP_REQUIRES(ctx, TensorShapeUtils::IsVector(dense_shape->shape()),
+                errors::InvalidArgument("Input shape must be a vector. Got: ",
+                                        dense_shape->shape().DebugString()));
+    OP_REQUIRES(
+        ctx, values->shape().dim_size(0) == indices->shape().dim_size(0),
+        errors::InvalidArgument(
+            "Number of values must match first dimension of indices. ", "Got ",
+            values->shape().dim_size(0),
+            " values, indices shape: ", indices->shape().DebugString()));
+    OP_REQUIRES(
+        ctx, dense_shape->shape().dim_size(0) == indices->shape().dim_size(1),
+        errors::InvalidArgument(
+            "Number of dimensions must match second dimension of indices. ",
+            "Got ", dense_shape->shape().dim_size(0),
+            " dimensions, indices shape: ", indices->shape().DebugString()));
+    OP_REQUIRES(ctx, dense_shape->NumElements() > 0,
                 errors::InvalidArgument(
-                    "Input shape should be a vector but received shape ",
-                    dense_shape->shape().DebugString()));
+                    "The shape argument requires at least one element."));

     // We currently ensure that `sparse_tensor` is ordered in the
     // batch dimension.
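The next hunk swaps the plain `TensorShape` constructor, which `CHECK`-fails on a negative or overflowing dimension and kills the process, for `TensorShape::BuildTensorShape`, which reports failure as a `Status` the kernel can return (the `CHECK`-fail class of bugs listed as CVE-2022-23569). A toy version of a status-returning shape builder, with a simplified `Status` type rather than the real TensorFlow implementation:

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    struct Status {  // simplified stand-in for tensorflow::Status
      bool ok;
      std::string message;
    };

    // Toy version of TensorShape::BuildTensorShape: validate every dimension
    // and the running element count, and report failure as a value instead
    // of aborting the process the way a CHECK would.
    Status BuildShape(const std::vector<int64_t>& dims,
                      std::vector<int64_t>* shape) {
      int64_t num_elements = 1;
      for (int64_t d : dims) {
        if (d < 0) return {false, "negative dimension " + std::to_string(d)};
        if (d > 0 && num_elements > (int64_t{1} << 62) / d) {
          return {false, "shape overflows int64 element count"};
        }
        num_elements *= d;
        shape->push_back(d);
      }
      return {true, ""};
    }

    int main() {
      std::vector<int64_t> shape;
      const Status s = BuildShape({2, -1, 3}, &shape);
      if (!s.ok) std::cout << s.message << "\n";  // error value, not a crash
      return 0;
    }
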
@@ -267,11 +279,12 @@ class SparseTensorSliceDatasetOp : public DatasetOpKernel { previous_batch_index = next_batch_index; } gtl::InlinedVector std_order(dense_shape->NumElements(), 0); + TensorShape shape; + OP_REQUIRES_OK(ctx, TensorShape::BuildTensorShape( + dense_shape->vec(), &shape)); sparse::SparseTensor tensor; - OP_REQUIRES_OK( - ctx, sparse::SparseTensor::Create( - *indices, *values, TensorShape(dense_shape->vec()), - std_order, &tensor)); + OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create(*indices, *values, shape, + std_order, &tensor)); *output = new Dataset(ctx, std::move(tensor)); } diff --git a/tensorflow/core/kernels/dequantize_op.cc b/tensorflow/core/kernels/dequantize_op.cc index 7a90e0c340b093..ddb70167b58852 100644 --- a/tensorflow/core/kernels/dequantize_op.cc +++ b/tensorflow/core/kernels/dequantize_op.cc @@ -94,6 +94,11 @@ class DequantizeOp : public OpKernel { const Tensor& input_min_tensor = ctx->input(1); const Tensor& input_max_tensor = ctx->input(2); + OP_REQUIRES( + ctx, axis_ < input.dims(), + errors::InvalidArgument("Axis must be less than input dimension(", + input.dims(), "), got ", axis_)); + int num_slices = 1; if (axis_ > -1) { num_slices = input.dim_size(axis_); diff --git a/tensorflow/core/kernels/fractional_avg_pool_op.cc b/tensorflow/core/kernels/fractional_avg_pool_op.cc index 0452638a066795..818a5086ea7f46 100644 --- a/tensorflow/core/kernels/fractional_avg_pool_op.cc +++ b/tensorflow/core/kernels/fractional_avg_pool_op.cc @@ -271,6 +271,18 @@ class FractionalAvgPoolGradOp : public OpKernel { const int64 in_rows = orig_input_tensor_shape_flat(1); const int64 in_cols = orig_input_tensor_shape_flat(2); const int64 in_depth = orig_input_tensor_shape_flat(3); + OP_REQUIRES( + context, in_batch != 0, + errors::InvalidArgument("Batch dimension of input must not be 0")); + OP_REQUIRES( + context, in_rows != 0, + errors::InvalidArgument("Rows dimension of input must not be 0")); + OP_REQUIRES( + context, in_cols != 0, + errors::InvalidArgument("Columns dimension of input must not be 0")); + OP_REQUIRES( + context, in_depth != 0, + errors::InvalidArgument("Depth dimension of input must not be 0")); constexpr int tensor_in_and_out_dims = 4; // Transform orig_input_tensor_shape into TensorShape @@ -302,12 +314,22 @@ class FractionalAvgPoolGradOp : public OpKernel { int64 in_row_end = overlapping_ ? row_seq_tensor_flat(r + 1) : row_seq_tensor_flat(r + 1) - 1; in_row_end = std::min(in_row_end, in_max_row_index); + OP_REQUIRES(context, in_row_start >= 0 && in_row_end >= 0, + errors::InvalidArgument( + "Row sequence tensor values must not be negative, got ", + row_seq_tensor_flat)); + for (int64 c = 0; c < out_cols; ++c) { const int64 in_col_start = col_seq_tensor_flat(c); int64 in_col_end = overlapping_ ? 
col_seq_tensor_flat(c + 1)
                                          : col_seq_tensor_flat(c + 1) - 1;
         in_col_end = std::min(in_col_end, in_max_col_index);
+        OP_REQUIRES(
+            context, in_col_start >= 0 && in_col_end >= 0,
+            errors::InvalidArgument(
+                "Column sequence tensor values must not be negative, got ",
+                col_seq_tensor_flat));
         const int64 num_elements_in_pooling_cell =
             (in_row_end - in_row_start + 1) * (in_col_end - in_col_start + 1);
         const int64 out_index = (b * out_rows + r) * out_cols + c;
diff --git a/tensorflow/core/kernels/fractional_max_pool_op.cc b/tensorflow/core/kernels/fractional_max_pool_op.cc
index 1a2a783d135c54..b17463c5127fa0 100644
--- a/tensorflow/core/kernels/fractional_max_pool_op.cc
+++ b/tensorflow/core/kernels/fractional_max_pool_op.cc
@@ -83,6 +83,13 @@ class FractionalMaxPoolOp : public OpKernel {
     std::vector<int> output_size(tensor_in_and_out_dims);
     for (int i = 0; i < tensor_in_and_out_dims; ++i) {
       input_size[i] = tensor_in.dim_size(i);
+
+      OP_REQUIRES(
+          context, input_size[i] >= pooling_ratio_[i],
+          errors::InvalidArgument("Pooling ratio is higher than input "
+                                  "dimension size for dimension ",
+                                  i, ". Input dim size: ", input_size[i],
+                                  " pooling ratio: ", pooling_ratio_[i]));
     }
     // Output size.
     for (int i = 0; i < tensor_in_and_out_dims; ++i) {
diff --git a/tensorflow/core/kernels/fused_batch_norm_op.cc b/tensorflow/core/kernels/fused_batch_norm_op.cc
index 7b0932d953261c..8d99d25ccba6dd 100644
--- a/tensorflow/core/kernels/fused_batch_norm_op.cc
+++ b/tensorflow/core/kernels/fused_batch_norm_op.cc
@@ -1293,18 +1293,20 @@ class FusedBatchNormOpBase : public OpKernel {
                 errors::InvalidArgument("offset must have the same number of elements "
                                         "as the channels of x, got ",
                                         offset.NumElements(), " and ", num_channels));
-    if (estimated_mean.NumElements() != 0) {
+    if (!is_training_ || exponential_avg_factor_ != 1.) {
+      std::string prefix_msg = is_training_
+                                   ? "When exponential_avg_factor != 1"
+                                   : "When is_training=false";
       OP_REQUIRES(context, estimated_mean.NumElements() == num_channels,
                   errors::InvalidArgument(
-                      "mean must be empty or have the same number of "
-                      "elements as the channels of x, got ",
+                      prefix_msg,
+                      ", mean must have the same number "
+                      "of elements as the channels of x, got ",
                       estimated_mean.NumElements(), " and ", num_channels));
-    }
-    if (estimated_variance.NumElements() != 0) {
       OP_REQUIRES(context, estimated_variance.NumElements() == num_channels,
                   errors::InvalidArgument(
-                      "variance must be empty or have the same number of "
-                      "elements as the channels of x, got ",
+                      prefix_msg,
+                      ", variance must have the same "
+                      "number of elements as the channels of x, got ",
                       estimated_variance.NumElements(), " and ", num_channels));
     }
@@ -1454,6 +1456,11 @@ class FusedBatchNormGradOpBase : public OpKernel {
                 errors::InvalidArgument(
                     "saved variance must be 1-dimensional",
                     saved_maybe_inv_var_or_pop_var.shape().DebugString()));
+    OP_REQUIRES(
+        context, x.shape() == y_backprop.shape(),
+        errors::InvalidArgument(
+            "x and y_backprop must have same shape, but x has shape ",
+            x.shape(), " and y_backprop has shape ", y_backprop.shape()));
     bool use_reshape = (x.dims() == 5);
     auto x_shape = x.shape();
     TensorShape dest_shape;
@@ -1471,6 +1478,23 @@ class FusedBatchNormGradOpBase : public OpKernel {
                 errors::InvalidArgument("Error during tensor copy."));
     }
+    const auto num_channels = GetTensorDim(x, tensor_format_, 'C');
+    OP_REQUIRES(
+        context, scale.NumElements() == num_channels,
+        errors::InvalidArgument("scale must have the same number of elements "
+                                "as the channels of x, got ",
+                                scale.NumElements(), " and ", num_channels));
+    OP_REQUIRES(
+        context, saved_mean_or_pop_mean.NumElements() == num_channels,
+        errors::InvalidArgument("reserve_space_1 must have the same number of "
+                                "elements as the channels of x, got ",
+                                saved_mean_or_pop_mean.NumElements(), " and ",
+                                num_channels));
+    OP_REQUIRES(
+        context, saved_maybe_inv_var_or_pop_var.NumElements() == num_channels,
+        errors::InvalidArgument("reserve_space_2 must have the same number of "
+                                "elements as the channels of x, got ",
+                                saved_maybe_inv_var_or_pop_var.NumElements(),
+                                " and ", num_channels));
+
     Tensor* x_backprop = nullptr;
     auto alloc_shape = use_reshape ? 
dest_shape : x_shape; OP_REQUIRES_OK(context, diff --git a/tensorflow/core/kernels/image/attention_ops.cc b/tensorflow/core/kernels/image/attention_ops.cc index 6e5e07a9fb1b3c..100be63d98e44d 100644 --- a/tensorflow/core/kernels/image/attention_ops.cc +++ b/tensorflow/core/kernels/image/attention_ops.cc @@ -85,11 +85,12 @@ class ExtractGlimpseOp : public OpKernel { "input must be a vector of size 2 (height, width)", window_size.shape().DebugString())); - const int64 output_height = window_size.tensor()(0); - const int64 output_width = window_size.tensor()(1); + const int64_t output_height = window_size.tensor()(0); + const int64_t output_width = window_size.tensor()(1); + TensorShape output_shape = input_shape; - output_shape.set_dim(1, output_height); - output_shape.set_dim(2, output_width); + OP_REQUIRES_OK(context, output_shape.SetDimWithStatus(1, output_height)); + OP_REQUIRES_OK(context, output_shape.SetDimWithStatus(2, output_width)); const Tensor& offsets = context->input(2); OP_REQUIRES(context, offsets.shape().dims() == 2, diff --git a/tensorflow/core/kernels/image/crop_and_resize_op.cc b/tensorflow/core/kernels/image/crop_and_resize_op.cc index 4efc4ae8846d17..65f972e1730752 100644 --- a/tensorflow/core/kernels/image/crop_and_resize_op.cc +++ b/tensorflow/core/kernels/image/crop_and_resize_op.cc @@ -169,14 +169,15 @@ class CropAndResizeOp : public AsyncOpKernel { context, crop_height > 0 && crop_width > 0, errors::InvalidArgument("crop dimensions must be positive"), done); + TensorShape shape; + OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(num_boxes), done); + OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(crop_height), done); + OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(crop_width), done); + OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(depth), done); // Allocate output tensor. Tensor* output = nullptr; - OP_REQUIRES_OK_ASYNC( - context, - context->allocate_output( - 0, TensorShape({num_boxes, crop_height, crop_width, depth}), - &output), - done); + OP_REQUIRES_OK_ASYNC(context, context->allocate_output(0, shape, &output), + done); auto compute_callback = [this, context, output]() { const Tensor& image = context->input(0); @@ -407,14 +408,15 @@ class CropAndResizeGradImageOp : public AsyncOpKernel { context, grads.dim_size(3) == depth, errors::InvalidArgument("image_size and grads are incompatible"), done); + TensorShape shape; + OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(batch_size), done); + OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(image_height), done); + OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(image_width), done); + OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(depth), done); // Allocate output tensor. Tensor* output = nullptr; - OP_REQUIRES_OK_ASYNC( - context, - context->allocate_output( - 0, TensorShape({batch_size, image_height, image_width, depth}), - &output), - done); + OP_REQUIRES_OK_ASYNC(context, context->allocate_output(0, shape, &output), + done); auto compute_callback = [this, context, output]() { const Tensor& grads = context->input(0); diff --git a/tensorflow/core/kernels/image/decode_image_op.cc b/tensorflow/core/kernels/image/decode_image_op.cc index 61b126fb81e3cb..242cc3ea0eb095 100644 --- a/tensorflow/core/kernels/image/decode_image_op.cc +++ b/tensorflow/core/kernels/image/decode_image_op.cc @@ -18,6 +18,8 @@ limitations under the License. 
#include #include +#include "tensorflow/core/lib/gtl/cleanup.h" + #define EIGEN_USE_THREADS #include "absl/strings/escaping.h" @@ -326,6 +328,16 @@ class DecodeImageV2Op : public OpKernel { context, png::CommonInitDecode(input, channels_, channel_bits, &decode), errors::InvalidArgument("Invalid PNG. Failed to initialize decoder.")); + // If we reach this point, then there is data in `decode` which must be + // freed by the time we end execution in this function. We cannot call + // `png::CommonFreeDecode()` before an `OP_REQUIRES` because if + // `OP_REQUIRES` constraint is satisfied then the data would be freed + // prematurely. Instead, let's use a `Cleanup` object. + auto cleanup = gtl::MakeCleanup([&decode]() { + std::cerr << "Cleanup called...\n"; + png::CommonFreeDecode(&decode); + }); + // Verify that width and height are not too large: // - verify width and height don't overflow int. // - width can later be multiplied by channels_ and sizeof(uint16), so @@ -339,22 +351,24 @@ class DecodeImageV2Op : public OpKernel { if (width != static_cast(decode.width) || width <= 0 || width >= (1LL << 27) || height != static_cast(decode.height) || height <= 0 || height >= (1LL << 27) || total_size >= (1LL << 29)) { - png::CommonFreeDecode(&decode); OP_REQUIRES(context, false, errors::InvalidArgument("PNG size too large for int: ", decode.width, " by ", decode.height)); } Tensor* output = nullptr; - Status status; // By the existing API, we support decoding PNG with `DecodeGif` op. // We need to make sure to return 4-D shapes when using `DecodeGif`. if (op_type_ == "DecodeGif") { - status = context->allocate_output( - 0, TensorShape({1, height, width, decode.channels}), &output); + OP_REQUIRES_OK( + context, + context->allocate_output( + 0, TensorShape({1, height, width, decode.channels}), &output)); } else { - status = context->allocate_output( - 0, TensorShape({height, width, decode.channels}), &output); + OP_REQUIRES_OK( + context, + context->allocate_output( + 0, TensorShape({height, width, decode.channels}), &output)); } if (op_type_ == "DecodeBmp") { @@ -374,9 +388,6 @@ class DecodeImageV2Op : public OpKernel { "detected PNG.")); } - if (!status.ok()) png::CommonFreeDecode(&decode); - OP_REQUIRES_OK(context, status); - if (data_type_ == DataType::DT_UINT8) { OP_REQUIRES( context, diff --git a/tensorflow/core/kernels/image/non_max_suppression_op.cc b/tensorflow/core/kernels/image/non_max_suppression_op.cc index 5cb721ed7105fa..1ec4c853f5f5b6 100644 --- a/tensorflow/core/kernels/image/non_max_suppression_op.cc +++ b/tensorflow/core/kernels/image/non_max_suppression_op.cc @@ -169,6 +169,8 @@ void DoNonMaxSuppressionOp(OpKernelContext* context, const Tensor& scores, bool pad_to_max_output_size = false, int* ptr_num_valid_outputs = nullptr) { const int output_size = max_output_size.scalar()(); + OP_REQUIRES(context, output_size >= 0, + errors::InvalidArgument("output size must be non-negative")); std::vector scores_data(num_boxes); std::copy_n(scores.flat().data(), num_boxes, scores_data.begin()); @@ -768,6 +770,9 @@ class NonMaxSuppressionV4Op : public OpKernel { context, scores, num_boxes, max_output_size, iou_threshold_val, score_threshold_val, dummy_soft_nms_sigma, similarity_fn, return_scores_tensor_, pad_to_max_output_size_, &num_valid_outputs); + if (!context->status().ok()) { + return; + } // Allocate scalar output tensor for number of indices computed. 
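Worth noting from the `decode_image_op.cc` hunk above: once `png::CommonInitDecode` succeeds, every early `OP_REQUIRES` exit has to release the decoder state, and the patch uses a `gtl::MakeCleanup` scope guard rather than hand-placing `png::CommonFreeDecode` before each check. A minimal hand-rolled version of the same scope-exit idiom (requires C++17; a simplified sketch, not the `gtl` implementation):

    #include <iostream>
    #include <utility>

    // Minimal scope-exit guard in the spirit of gtl::MakeCleanup /
    // absl::Cleanup: runs the callback when the enclosing scope unwinds,
    // no matter which early return was taken.
    template <typename F>
    class Cleanup {
     public:
      explicit Cleanup(F f) : f_(std::move(f)) {}
      ~Cleanup() { f_(); }
      Cleanup(const Cleanup&) = delete;
      Cleanup& operator=(const Cleanup&) = delete;

     private:
      F f_;
    };

    template <typename F>
    Cleanup<F> MakeCleanup(F f) {  // C++17 guaranteed elision makes this legal
      return Cleanup<F>(std::move(f));
    }

    bool DecodeLikeFunction(bool width_ok, bool alloc_ok) {
      // Imagine decoder state was just initialized here.
      auto cleanup = MakeCleanup([] { std::cout << "decoder state freed\n"; });
      if (!width_ok) return false;  // cleanup still runs
      if (!alloc_ok) return false;  // cleanup still runs
      return true;                  // ...and here too
    }

    int main() {
      DecodeLikeFunction(false, true);
      DecodeLikeFunction(true, true);
      return 0;
    }
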
Tensor* num_outputs_t = nullptr; @@ -845,6 +850,9 @@ class NonMaxSuppressionV5Op : public OpKernel { context, scores, num_boxes, max_output_size, iou_threshold_val, score_threshold_val, soft_nms_sigma_val, similarity_fn, return_scores_tensor_, pad_to_max_output_size_, &num_valid_outputs); + if (!context->status().ok()) { + return; + } // Allocate scalar output tensor for number of indices computed. Tensor* num_outputs_t = nullptr; @@ -930,6 +938,8 @@ class CombinedNonMaxSuppressionOp : public OpKernel { errors::InvalidArgument("max_size_per_class must be 0-D, got shape ", max_output_size.shape().DebugString())); const int max_size_per_class = max_output_size.scalar()(); + OP_REQUIRES(context, max_size_per_class > 0, + errors::InvalidArgument("max_size_per_class must be positive")); // max_total_size: scalar const Tensor& max_total_size = context->input(3); OP_REQUIRES( diff --git a/tensorflow/core/kernels/immutable_constant_op.cc b/tensorflow/core/kernels/immutable_constant_op.cc index 19aa865c1fbe4d..df0d76ce633e9b 100644 --- a/tensorflow/core/kernels/immutable_constant_op.cc +++ b/tensorflow/core/kernels/immutable_constant_op.cc @@ -100,6 +100,9 @@ void ImmutableConstantOp::Compute(OpKernelContext* ctx) { OP_REQUIRES_OK(ctx, allocator->InitializeFromRegion(region_name_, ctx->env())); + OP_REQUIRES(ctx, dtype_ != DT_STRING, + errors::Unimplemented("Sorry, DT_STRING is not currently " + "supported for ImmutableConstOp.")); ctx->set_output(0, Tensor(allocator.get(), dtype_, shape_)); OP_REQUIRES_OK(ctx, allocator->allocation_status()); // Allocator is owned by the tensor from this point. diff --git a/tensorflow/core/kernels/immutable_constant_op_test.cc b/tensorflow/core/kernels/immutable_constant_op_test.cc index d52a8b55a35d79..40ce8918a39ade 100644 --- a/tensorflow/core/kernels/immutable_constant_op_test.cc +++ b/tensorflow/core/kernels/immutable_constant_op_test.cc @@ -146,7 +146,8 @@ TEST(ImmutableConstantOpTest, ExecutionError) { error::INTERNAL); } -Status CreateTempFile(Env* env, float value, uint64 size, string* filename) { +Status CreateTempFileFloat(Env* env, float value, uint64 size, + string* filename) { const string dir = testing::TmpDir(); *filename = io::JoinPath(dir, strings::StrCat("file_", value)); std::unique_ptr file; @@ -166,8 +167,8 @@ TEST(ImmutableConstantOpTest, FromFile) { auto root = Scope::NewRootScope().ExitOnError(); string two_file, three_file; - TF_ASSERT_OK(CreateTempFile(env, 2.0f, 1000, &two_file)); - TF_ASSERT_OK(CreateTempFile(env, 3.0f, 1000, &three_file)); + TF_ASSERT_OK(CreateTempFileFloat(env, 2.0f, 1000, &two_file)); + TF_ASSERT_OK(CreateTempFileFloat(env, 3.0f, 1000, &three_file)); auto node1 = ops::ImmutableConst(root, DT_FLOAT, kFileTensorShape, two_file); auto node2 = ops::ImmutableConst(root, DT_FLOAT, kFileTensorShape, three_file); @@ -190,5 +191,39 @@ TEST(ImmutableConstantOpTest, FromFile) { EXPECT_EQ(outputs.front().flat()(2), 2.0f * 3.0f); } +Status CreateTempFileBadString(Env* env, char value, uint64 size, + const string suffix, string* filename) { + const string dir = testing::TmpDir(); + *filename = io::JoinPath(dir, strings::StrCat("file_", suffix)); + std::unique_ptr file; + TF_RETURN_IF_ERROR(env->NewWritableFile(*filename, &file)); + TF_RETURN_IF_ERROR(file->Append(std::string(size, value))); + TF_RETURN_IF_ERROR(file->Close()); + return Status::OK(); +} + +TEST(ImmutableConstantOpTest, FromFileStringUnimplmented) { + const TensorShape kFileTensorShape({1}); + Env* env = Env::Default(); + auto root = 
Scope::NewRootScope().ExitOnError(); + + string bad_file; + TF_ASSERT_OK(CreateTempFileBadString(env, '\xe2', 128, "bad_e2", &bad_file)); + auto result = + ops::ImmutableConst(root, DT_STRING, kFileTensorShape, bad_file); + GraphDef graph_def; + TF_ASSERT_OK(root.ToGraphDef(&graph_def)); + SessionOptions session_options; + session_options.env = Env::Default(); + std::unique_ptr session(NewSession(session_options)); + ASSERT_TRUE(session != nullptr) << "Failed to create session"; + TF_ASSERT_OK(session->Create(graph_def)) << "Can't create test graph"; + std::vector outputs; + // Check that the run returned error. + EXPECT_EQ( + session->Run({}, {result.node()->name() + ":0"}, {}, &outputs).code(), + error::UNIMPLEMENTED); +} + } // namespace } // namespace tensorflow diff --git a/tensorflow/core/kernels/inplace_ops.cc b/tensorflow/core/kernels/inplace_ops.cc index 1849cb42883099..e72732e99b24f2 100644 --- a/tensorflow/core/kernels/inplace_ops.cc +++ b/tensorflow/core/kernels/inplace_ops.cc @@ -71,6 +71,15 @@ class ParallelConcatUpdate : public OpKernel { void Compute(OpKernelContext* ctx) override { auto value = ctx->input(0); + // Value should be at least rank 1. Also the 0th dimension should be + // at least loc_. + OP_REQUIRES(ctx, value.dims() >= 1, + errors::InvalidArgument("value should be at least rank 1.")); + OP_REQUIRES( + ctx, value.dim_size(0) > loc_, + errors::InvalidArgument("0th dimension of value = ", value.dim_size(0), + " is less than loc_=", loc_)); + auto update = ctx->input(1); OP_REQUIRES( @@ -225,7 +234,7 @@ class InplaceOpBase : public OpKernel { Tensor y = x; // This creates an alias intentionally. // Skip processing if tensors are empty. - if (x.NumElements() > 0 || v.NumElements() > 0) { + if (x.NumElements() > 0 && v.NumElements() > 0) { OP_REQUIRES_OK(ctx, DoCompute(ctx, i, v, &y)); } ctx->set_output(0, y); diff --git a/tensorflow/core/kernels/linalg/einsum_op_impl.h b/tensorflow/core/kernels/linalg/einsum_op_impl.h index ba01d108453b7c..ed7839457d2d49 100644 --- a/tensorflow/core/kernels/linalg/einsum_op_impl.h +++ b/tensorflow/core/kernels/linalg/einsum_op_impl.h @@ -153,6 +153,7 @@ struct EinsumHelper { input_has_ellipsis->resize(num_inputs); for (int i = 0; i < num_inputs; ++i) { input_label_counts->at(i).resize(num_labels); + input_has_ellipsis->at(i) = false; for (const int label : input_labels->at(i)) { if (label != kEllipsisLabel) input_label_counts->at(i)[label] += 1; @@ -161,6 +162,7 @@ struct EinsumHelper { } } output_label_counts->resize(num_labels); + *output_has_ellipsis = false; for (const int label : *output_labels) { if (label != kEllipsisLabel) output_label_counts->at(label) += 1; diff --git a/tensorflow/core/kernels/linalg/matrix_diag_op.cc b/tensorflow/core/kernels/linalg/matrix_diag_op.cc index d4eb589836a859..c797afa455675b 100644 --- a/tensorflow/core/kernels/linalg/matrix_diag_op.cc +++ b/tensorflow/core/kernels/linalg/matrix_diag_op.cc @@ -73,6 +73,9 @@ class MatrixDiagPartOp : public OpKernel { errors::InvalidArgument( "diag_index must be a scalar or vector, received shape: ", diag_index.shape().DebugString())); + OP_REQUIRES(context, diag_index.NumElements() > 0, + errors::InvalidArgument( + "Expected diag_index to have at least 1 element")); lower_diag_index = diag_index.flat()(0); upper_diag_index = lower_diag_index; if (TensorShapeUtils::IsVector(diag_index.shape())) { @@ -86,7 +89,10 @@ class MatrixDiagPartOp : public OpKernel { upper_diag_index = diag_index.flat()(1); } } - padding_value = context->input(2).flat()(0); + const 
Tensor& padding_in = context->input(2);
+    OP_REQUIRES(context, padding_in.NumElements() == 1,
+                errors::InvalidArgument("Padding must be scalar."));
+    padding_value = padding_in.flat<T>()(0);
   }

   const TensorShape& input_shape = input.shape();
@@ -179,6 +185,9 @@ class MatrixDiagOp : public OpKernel {
                 errors::InvalidArgument(
                     "diag_index must be a scalar or vector, received shape: ",
                     diag_index.shape().DebugString()));
+    OP_REQUIRES(context, diag_index.NumElements() > 0,
+                errors::InvalidArgument(
+                    "Expected diag_index to have at least 1 element"));
     lower_diag_index = diag_index.flat<int32>()(0);
     upper_diag_index = lower_diag_index;
     if (TensorShapeUtils::IsVector(diag_index.shape())) {
diff --git a/tensorflow/core/kernels/linalg/matrix_set_diag_op.cc b/tensorflow/core/kernels/linalg/matrix_set_diag_op.cc
index df32228d0f21bb..19cbc371f318ae 100644
--- a/tensorflow/core/kernels/linalg/matrix_set_diag_op.cc
+++ b/tensorflow/core/kernels/linalg/matrix_set_diag_op.cc
@@ -70,6 +70,9 @@ class MatrixSetDiagOp : public OpKernel {
                 errors::InvalidArgument(
                     "diag_index must be a scalar or vector, received shape: ",
                     diag_index.shape().DebugString()));
+    OP_REQUIRES(
+        context, diag_index.NumElements() > 0,
+        errors::InvalidArgument("diag_index must have at least one element"));
     lower_diag_index = diag_index.flat<int32>()(0);
     upper_diag_index = lower_diag_index;
     if (TensorShapeUtils::IsVector(diag_index.shape())) {
diff --git a/tensorflow/core/kernels/linalg/matrix_solve_op.cc b/tensorflow/core/kernels/linalg/matrix_solve_op.cc
index 70f02bddf9b785..aeb0203b4a337d 100644
--- a/tensorflow/core/kernels/linalg/matrix_solve_op.cc
+++ b/tensorflow/core/kernels/linalg/matrix_solve_op.cc
@@ -143,15 +143,22 @@ class MatrixSolveOpGpu : public AsyncOpKernel {
                       done);
     OP_REQUIRES_ASYNC(
         context, input.dim_size(ndims - 2) == n,
-        errors::InvalidArgument("Input matrices must be squares, got",
+        errors::InvalidArgument("Input matrices must be square, got ",
                                 input.dim_size(ndims - 2), " != ", n),
         done);
     OP_REQUIRES_ASYNC(context, rhs.dim_size(ndims - 2) == n,
                       errors::InvalidArgument(
                           "Input matrix and right-hand side must have the "
-                          "same number of rows, got",
+                          "same number of rows, got ",
                           n, " != ", rhs.dim_size(ndims - 2)),
                       done);
+    for (int dim = 0; dim < ndims - 2; dim++) {
+      OP_REQUIRES_ASYNC(
+          context, input.dim_size(dim) == rhs.dim_size(dim),
+          errors::InvalidArgument(
+              "All input tensors must have the same outer dimensions."),
+          done);
+    }

     // Allocate output.
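Both the `matrix_solve_op.cc` loop above and the `ValidateInputTensor` helper in the tridiagonal hunk that follows enforce the same invariant: every batch (outer) dimension must agree across operands before the solver starts indexing batches. A standalone sketch of that check over plain dimension vectors (illustration only, not the kernel code):

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Shapes as plain dim vectors; the last two dims are the matrix, the
    // rest are batch dims that must match across operands, as enforced for
    // `input` vs `rhs` in MatrixSolveOpGpu.
    bool SameOuterDims(const std::vector<int64_t>& lhs,
                       const std::vector<int64_t>& rhs) {
      if (lhs.size() != rhs.size() || lhs.size() < 2) return false;
      for (size_t dim = 0; dim + 2 < lhs.size(); ++dim) {
        if (lhs[dim] != rhs[dim]) {
          std::cerr << "All input tensors must have the same outer "
                       "dimensions.\n";
          return false;
        }
      }
      return true;
    }

    int main() {
      std::cout << SameOuterDims({5, 3, 3}, {5, 3, 1}) << "\n";  // 1: batch 5
      SameOuterDims({5, 3, 3}, {4, 3, 1});  // rejected: batch dims 5 vs 4
      return 0;
    }
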
Tensor* output; diff --git a/tensorflow/core/kernels/linalg/tridiagonal_matmul_op_gpu.cu.cc b/tensorflow/core/kernels/linalg/tridiagonal_matmul_op_gpu.cu.cc index a65db40d822abc..1f59d4311c3ab4 100644 --- a/tensorflow/core/kernels/linalg/tridiagonal_matmul_op_gpu.cu.cc +++ b/tensorflow/core/kernels/linalg/tridiagonal_matmul_op_gpu.cu.cc @@ -66,6 +66,12 @@ class TridiagonalMatMulOpGpu : public OpKernel { const Tensor& rhs = context->input(3); const int ndims = rhs.dims(); + OP_REQUIRES( + context, ndims >= 2, + errors::InvalidArgument("Input must have rank >= 2, but got ", ndims)); + OP_REQUIRES_OK(context, ValidateInputTensor(superdiag, "superdiag", rhs)); + OP_REQUIRES_OK(context, ValidateInputTensor(maindiag, "maindiag", rhs)); + OP_REQUIRES_OK(context, ValidateInputTensor(subdiag, "subdiag", rhs)); int64 batch_size = 1; for (int i = 0; i < ndims - 2; i++) { batch_size *= rhs.dim_size(i); @@ -85,6 +91,39 @@ class TridiagonalMatMulOpGpu : public OpKernel { maindiag.flat().data(), subdiag.flat().data(), rhs.flat().data(), output->flat().data())); } + + private: + Status ValidateInputTensor(const Tensor& tensor, + const std::string& tensor_name, + const Tensor& rhs) { + const int ndims = rhs.dims(); + if (tensor.dims() != ndims) { + return errors::InvalidArgument(tensor_name, + " must have same rank as rhs, but got ", + tensor.dims(), " and ", ndims); + } + for (int i = 0; i < ndims - 2; i++) { + if (tensor.dim_size(i) != rhs.dim_size(i)) { + return errors::InvalidArgument( + tensor_name, + " must have same outer dimensions as rhs, but for index ", i, + ", got ", tensor.dim_size(i), " and ", rhs.dim_size(i)); + } + } + if (tensor.dim_size(ndims - 2) != 1) { + return errors::InvalidArgument( + tensor_name, "'s second-to-last dimension must be 1, but got ", + tensor.dim_size(ndims - 2)); + } + if (tensor.dim_size(ndims - 1) != rhs.dim_size(ndims - 2)) { + return errors::InvalidArgument(tensor_name, + "'s last dimension size must be rhs's " + "second-to-last dimension size, but got ", + tensor.dim_size(ndims - 1), " and ", + rhs.dim_size(ndims - 2)); + } + return Status::OK(); + } }; REGISTER_LINALG_OP_GPU("TridiagonalMatMul", (TridiagonalMatMulOpGpu), diff --git a/tensorflow/core/kernels/list_kernels.cc b/tensorflow/core/kernels/list_kernels.cc index 9a2f373f5ce0cf..488e02337f707b 100644 --- a/tensorflow/core/kernels/list_kernels.cc +++ b/tensorflow/core/kernels/list_kernels.cc @@ -302,6 +302,10 @@ class TensorListReserve : public OpKernel { PartialTensorShape element_shape; OP_REQUIRES_OK(c, TensorShapeFromTensor(c->input(0), &element_shape)); int32 num_elements = c->input(1).scalar()(); + OP_REQUIRES(c, num_elements >= 0, + errors::InvalidArgument("The num_elements to reserve must be a " + "non negative number, but got ", + num_elements)); TensorList output; output.element_shape = element_shape; output.element_dtype = element_dtype_; diff --git a/tensorflow/core/kernels/map_stage_op.cc b/tensorflow/core/kernels/map_stage_op.cc index 89b760ea4d0c37..6cc5af6fb35664 100644 --- a/tensorflow/core/kernels/map_stage_op.cc +++ b/tensorflow/core/kernels/map_stage_op.cc @@ -210,9 +210,9 @@ class StagingMap : public ResourceBase { const OptionalTuple& tuple) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { if (tuple[index].has_value()) { - return Status(errors::InvalidArgument( + return errors::InvalidArgument( "The tensor for index '", index, "' for key '", key.scalar()(), - "' was already initialized '", dtypes_.size(), "'.")); + "' was already initialized '", dtypes_.size(), "'."); } return Status::OK(); @@ 
-220,6 +220,10 @@ class StagingMap : public ResourceBase { // Check that the indices are strictly ordered Status check_index_ordering(const Tensor& indices) { + if (indices.NumElements() == 0) { + return errors::InvalidArgument("Indices are empty"); + } + auto findices = indices.flat(); for (std::size_t i = 0; i < findices.dimension(0) - 1; ++i) { @@ -227,8 +231,7 @@ class StagingMap : public ResourceBase { continue; } - return Status( - errors::InvalidArgument("Indices are not strictly ordered")); + return errors::InvalidArgument("Indices are not strictly ordered"); } return Status::OK(); @@ -238,10 +241,10 @@ class StagingMap : public ResourceBase { Status check_memory_limit(std::size_t bytes) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { if (has_memory_limit() && bytes > memory_limit_) { - return Status(errors::ResourceExhausted( + return errors::ResourceExhausted( "Attempted to insert tensors with combined size of '", bytes, "' bytes into Staging Area with a memory limit of '", memory_limit_, - "'.")); + "'."); } return Status::OK(); @@ -527,6 +530,13 @@ class MapStageOp : public OpKernel { OP_REQUIRES_OK(ctx, ctx->input("key", &key_tensor)); OP_REQUIRES_OK(ctx, ctx->input("indices", &indices_tensor)); OP_REQUIRES_OK(ctx, ctx->input_list("values", &values_tensor)); + OP_REQUIRES(ctx, key_tensor->NumElements() > 0, + errors::InvalidArgument("key must not be empty")); + + OP_REQUIRES(ctx, key_tensor->NumElements() == 1, + errors::InvalidArgument( + "key must be an int64 scalar, got tensor with shape: ", + key_tensor->shape())); // Create copy for insertion into Staging Area Tensor key(*key_tensor); diff --git a/tensorflow/core/kernels/maxpooling_op.cc b/tensorflow/core/kernels/maxpooling_op.cc index 01f303eb26980f..7e3085e8d4f570 100644 --- a/tensorflow/core/kernels/maxpooling_op.cc +++ b/tensorflow/core/kernels/maxpooling_op.cc @@ -74,6 +74,7 @@ static void SpatialMaxPoolWithArgMaxHelper( errors::Internal("SpatialMaxPoolWithArgMaxHelper requires Targmax " "to be int64 when input_backprop != nullptr")); } + if (tensor_in.NumElements() == 0 || output->NumElements() == 0) return; typedef Eigen::Map> ConstEigenMatrixMap; @@ -949,6 +950,10 @@ class MaxPoolingWithArgmaxOp : public OpKernel { void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); + OP_REQUIRES(context, tensor_in.dims() == 4, + errors::InvalidArgument("tensor_in must be 4-dimensional (2)")); + OP_REQUIRES(context, tensor_in.NumElements() > 0, + errors::InvalidArgument("tensor_in must not be empty (2)")); PoolParameters params{context, ksize_, diff --git a/tensorflow/core/kernels/mkl/mkl_requantization_range_per_channel_op.cc b/tensorflow/core/kernels/mkl/mkl_requantization_range_per_channel_op.cc index 24dabb07ca067a..a38df2450d1942 100644 --- a/tensorflow/core/kernels/mkl/mkl_requantization_range_per_channel_op.cc +++ b/tensorflow/core/kernels/mkl/mkl_requantization_range_per_channel_op.cc @@ -57,6 +57,20 @@ class MklRequantizationRangePerChannelOp : public OpKernel { ctx, input_max.dim_size(0) == depth, errors::InvalidArgument("input_max has incorrect size, expected ", depth, " was ", input_max.dim_size(0))); + OP_REQUIRES( + ctx, input_min.NumElements() == depth, + errors::InvalidArgument("input_min must have the same number of " + "elements as input_max, got ", + input_min.NumElements(), " and ", depth)); + OP_REQUIRES(ctx, input.NumElements() > 0, + errors::InvalidArgument("input must not be empty")); + OP_REQUIRES(ctx, input.dims() == 4, + errors::InvalidArgument("input must be in NHWC 
format")); + OP_REQUIRES( + ctx, input.dim_size(3) == depth, + errors::InvalidArgument( + "input must have same number of channels as length of input_min: ", + input.dim_size(3), " vs ", depth)); const float* input_min_data = input_min.flat().data(); const float* input_max_data = input_max.flat().data(); diff --git a/tensorflow/core/kernels/mkl/mkl_requantize_per_channel_op.cc b/tensorflow/core/kernels/mkl/mkl_requantize_per_channel_op.cc index c0f9845cd4b084..6ffbd09b44f543 100644 --- a/tensorflow/core/kernels/mkl/mkl_requantize_per_channel_op.cc +++ b/tensorflow/core/kernels/mkl/mkl_requantize_per_channel_op.cc @@ -49,35 +49,45 @@ class MklRequantizePerChannelOp : public OpKernel { void Compute(OpKernelContext* ctx) override { try { const Tensor& input = ctx->input(kInputTensorIndex); + OP_REQUIRES( + ctx, input.dims() == 4, + errors::InvalidArgument("Current RequantizePerChannel operator" + "supports 4D tensors only.")); + const Tensor& input_min_vec = ctx->input(kInputMinVecIndex); + size_t depth = input_min_vec.NumElements(); float* input_min_vec_data = (float*)const_cast( static_cast(input_min_vec.flat().data())); + const Tensor& input_max_vec = ctx->input(kInputMaxVecIndex); + OP_REQUIRES( + ctx, input_max_vec.NumElements() == depth, + errors::InvalidArgument("input_max has incorrect size, expected ", + depth, " was ", input_max_vec.NumElements())); float* input_max_vec_data = (float*)const_cast( static_cast(input_max_vec.flat().data())); const Tensor& input_requested_min = ctx->input(this->kRequestMinIndex); + OP_REQUIRES( + ctx, input_requested_min.NumElements() == 1, + errors::InvalidArgument("requested_output_min must be a scalar")); const float input_requested_min_float = input_requested_min.flat()(0); + const Tensor& input_requested_max = ctx->input(this->kRequestMaxIndex); + OP_REQUIRES( + ctx, input_requested_min.NumElements() == 1, + errors::InvalidArgument("requested_output_max must be a scalar")); const float input_requested_max_float = input_requested_max.flat()(0); - size_t depth = input_min_vec.NumElements(); - OP_REQUIRES( - ctx, input.dims() == 4, - errors::InvalidArgument("Current RequantizePerChannel operator" - "supports 4D tensors only.")); - OP_REQUIRES( - ctx, input_min_vec.dim_size(0) == depth, - errors::InvalidArgument("input_min has incorrect size, expected ", - depth, " was ", input_min_vec.dim_size(0))); - OP_REQUIRES( - ctx, input_max_vec.dim_size(0) == depth, - errors::InvalidArgument("input_max has incorrect size, expected ", - depth, " was ", input_max_vec.dim_size(0))); - - if (out_type_ == DT_QINT8) DCHECK(input_requested_min_float < 0.0f); + if (out_type_ == DT_QINT8) { + OP_REQUIRES(ctx, input_requested_min_float < 0.0f, + errors::InvalidArgument( + "If out_type is QINT8, requested_output_max must be " + "non negative, got ", + input_requested_min_float)); + } const float factor = (out_type_ == DT_QINT8) ? 
127.0f : 255.0f; const float requested_min_max = diff --git a/tensorflow/core/kernels/pad_op.cc b/tensorflow/core/kernels/pad_op.cc index 4a1d0cfc3e2374..b59db5a59a7bd3 100644 --- a/tensorflow/core/kernels/pad_op.cc +++ b/tensorflow/core/kernels/pad_op.cc @@ -84,8 +84,10 @@ class PadOp : public OpKernel { OP_REQUIRES(context, before_d >= 0 && after_d >= 0, errors::InvalidArgument("Paddings must be non-negative: ", before_d, " ", after_d)); - const int64 size_d = in0.dim_size(d); - output_shape.AddDim(before_d + size_d + after_d); + const int64_t size_d = in0.dim_size(d); + OP_REQUIRES_OK( + context, output_shape.AddDimWithStatus(before_d + size_d + after_d)); + } // If there is no padding to be done, forward the input to output. diff --git a/tensorflow/core/kernels/pooling_ops_3d.cc b/tensorflow/core/kernels/pooling_ops_3d.cc index 56a55bc2ec87bf..2faf90bc072cad 100644 --- a/tensorflow/core/kernels/pooling_ops_3d.cc +++ b/tensorflow/core/kernels/pooling_ops_3d.cc @@ -141,6 +141,11 @@ class Pooling3DOp : public UnaryOp { OP_REQUIRES(context, ksize_.size() == 5, errors::InvalidArgument("Sliding window ksize field must " "specify 5 dimensions")); + bool non_negative = + std::all_of(ksize_.begin(), ksize_.end(), [](int k) { return k > 0; }); + OP_REQUIRES(context, non_negative, + errors::InvalidArgument("Sliding window ksize field must " + "have non-negative dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window stride field must " diff --git a/tensorflow/core/kernels/pooling_ops_common.cc b/tensorflow/core/kernels/pooling_ops_common.cc index a0c07f31b3d872..6d0a176625f612 100644 --- a/tensorflow/core/kernels/pooling_ops_common.cc +++ b/tensorflow/core/kernels/pooling_ops_common.cc @@ -170,6 +170,8 @@ PoolParameters::PoolParameters(OpKernelContext* context, pad_depth = 0; out_depth = depth; } else { + OP_REQUIRES(context, depth_window > 0, + errors::InvalidArgument("depth_window must not be 0")); // Our current version of depthwise max pooling does not support // any padding, and expects the depth_window to equal the // depth_stride (no overlapping). diff --git a/tensorflow/core/kernels/pooling_ops_common.h b/tensorflow/core/kernels/pooling_ops_common.h index dacbb872cf04a7..642fb4b413a0ef 100644 --- a/tensorflow/core/kernels/pooling_ops_common.h +++ b/tensorflow/core/kernels/pooling_ops_common.h @@ -194,6 +194,9 @@ class MaxPoolingOp : public OpKernel { void SpatialMaxPool(OpKernelContext* context, Tensor* output, const Tensor& tensor_in, const PoolParameters& params, const Padding& padding) { + if (output->NumElements() == 0) { + return; + } // On GPU, use Eigen's Spatial Max Pooling. On CPU, use an // EigenMatrix version that is currently faster than Eigen's // Spatial MaxPooling implementation. @@ -447,6 +450,9 @@ class MaxPoolingV2Op : public OpKernel { void SpatialMaxPool(OpKernelContext* context, Tensor* output, const Tensor& tensor_in, const PoolParameters& params, const Padding& padding) { + if (output->NumElements() == 0) { + return; + } // On GPU, use Eigen's Spatial Max Pooling. On CPU, use an // EigenMatrix version that is currently faster than Eigen's // Spatial MaxPooling implementation. 
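The SpatialMaxPool guards above short-circuit on empty outputs, whose window arithmetic would otherwise divide by zero or index out of bounds. A compilable sketch of the pattern, with NumElements and SpatialPoolSketch as illustrative stand-ins rather than the kernel's own code:

#include <cstdint>
#include <iostream>
#include <numeric>
#include <vector>

int64_t NumElements(const std::vector<int64_t>& dims) {
  return std::accumulate(dims.begin(), dims.end(), int64_t{1},
                         [](int64_t a, int64_t d) { return a * d; });
}

// Stand-in for a pooling Compute body: bail out before any window
// arithmetic when there is nothing to write.
void SpatialPoolSketch(const std::vector<int64_t>& output_dims) {
  if (NumElements(output_dims) == 0) return;  // empty output: nothing to pool
  std::cout << "pooling " << NumElements(output_dims) << " elements\n";
}

int main() {
  SpatialPoolSketch({2, 0, 7, 3});  // prints nothing
  SpatialPoolSketch({1, 2, 2, 1});  // pooling 4 elements
}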
@@ -564,6 +570,9 @@ template void SpatialAvgPool(OpKernelContext* context, Tensor* output, const Tensor& input, const PoolParameters& params, const Padding& padding) { + if (output->NumElements() == 0) { + return; + } typedef Eigen::Map> ConstEigenMatrixMap; typedef Eigen::Map> diff --git a/tensorflow/core/kernels/quantize_and_dequantize_op.cc b/tensorflow/core/kernels/quantize_and_dequantize_op.cc index 540d900f9f8696..d63a49a04be621 100644 --- a/tensorflow/core/kernels/quantize_and_dequantize_op.cc +++ b/tensorflow/core/kernels/quantize_and_dequantize_op.cc @@ -158,6 +158,13 @@ class QuantizeAndDequantizeV4GradientOp : public OpKernel { Tensor* input_backprop = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, input.shape(), &input_backprop)); + OP_REQUIRES( + ctx, axis_ >= -1, + errors::InvalidArgument("Axis must be at least -1. Found ", axis_)); + OP_REQUIRES(ctx, (axis_ == -1 || axis_ < input.shape().dims()), + errors::InvalidArgument( + "Axis should be -1 or 0 or a positive value less than ", + input.shape().dims(), "but given axis value was ", axis_)); OP_REQUIRES( ctx, input.IsSameSize(gradient), diff --git a/tensorflow/core/kernels/quantize_op.cc b/tensorflow/core/kernels/quantize_op.cc index a523c4b9cd0249..098991e4f436d8 100644 --- a/tensorflow/core/kernels/quantize_op.cc +++ b/tensorflow/core/kernels/quantize_op.cc @@ -113,7 +113,50 @@ class QuantizeV2Op : public OpKernel { int num_slices = 1; if (axis_ > -1) { + OP_REQUIRES( + ctx, input.dims() > axis_, + errors::InvalidArgument( + "Axis is on a zero-based index, so its value must always be less " + "than number of input's dims, but given axis value was ", + axis_, " and input's dims was ", input.dims())); num_slices = input.dim_size(axis_); + OP_REQUIRES(ctx, input_min_range.dims() == 1, + errors::InvalidArgument( + "If axis is specified, min_range must be a 1-D tensor " + "whose size matches the axis dimension of the input and " + "output tensors, but min_range dims are ", + input_min_range.dims())); + OP_REQUIRES(ctx, input_min_range.dim_size(0) == num_slices, + errors::InvalidArgument( + "If axis is specified, min_range must be a 1-D tensor " + "whose size matches the axis dimension of the input and " + "output tensors, but min_range is a 1-D tensor of size ", + input_min_range.dim_size(0), + " and input's axis dimension is of size ", num_slices)); + OP_REQUIRES(ctx, input_max_range.dims() == 1, + errors::InvalidArgument( + "If axis is specified, max_range must be a 1-D tensor " + "whose size matches the axis dimension of the input and " + "output tensors, but max_range dims are ", + input_max_range.dims())); + OP_REQUIRES(ctx, input_max_range.dim_size(0) == num_slices, + errors::InvalidArgument( + "If axis is specified, max_range must be a 1-D tensor " + "whose size matches the axis dimension of the input and " + "output tensors, but max_range is a 1-D tensor of size ", + input_max_range.dim_size(0), + " and input's axis dimension is of size ", num_slices)); + } else { + OP_REQUIRES(ctx, input_min_range.NumElements() == 1, + errors::InvalidArgument( + "If axis is not specified, min_range must contain a " + "single float element, but it contains ", + input_min_range.NumElements(), " elements")); + OP_REQUIRES(ctx, input_max_range.NumElements() == 1, + errors::InvalidArgument( + "If axis is not specified, max_range must contain a " + "single float element, but it contains ", + input_max_range.NumElements(), " elements")); } const TensorShape& minmax_shape = ctx->input(1).shape(); diff --git 
a/tensorflow/core/kernels/quantized_pooling_ops.cc b/tensorflow/core/kernels/quantized_pooling_ops.cc index 663ceb0641e202..8f042e88f7aad2 100644 --- a/tensorflow/core/kernels/quantized_pooling_ops.cc +++ b/tensorflow/core/kernels/quantized_pooling_ops.cc @@ -15,6 +15,8 @@ limitations under the License. // See docs in ../ops/nn_ops.cc. +#include "tensorflow/core/framework/op_requires.h" +#include "tensorflow/core/platform/errors.h" #define EIGEN_USE_THREADS #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" @@ -117,6 +119,18 @@ class QuantizedMaxPoolingOp : public MaxPoolingOp { : MaxPoolingOp(context) {} void Compute(OpKernelContext* context) override { + auto min_input_tensor = context->input(1); + auto max_input_tensor = context->input(2); + OP_REQUIRES( + context, min_input_tensor.NumElements() == 1, + errors::InvalidArgument( + "min_input must be a scalar float value, got tensor with shape ", + min_input_tensor.shape())); + OP_REQUIRES( + context, max_input_tensor.NumElements() == 1, + errors::InvalidArgument( + "max_input must be a scalar float value, got tensor with shape ", + max_input_tensor.shape())); const float min_input = context->input(1).flat()(0); const float max_input = context->input(2).flat()(0); MaxPoolingOp::Compute(context); diff --git a/tensorflow/core/kernels/ragged_gather_op.cc b/tensorflow/core/kernels/ragged_gather_op.cc index 3bf82cba050e3b..d6d51c770bbb7a 100644 --- a/tensorflow/core/kernels/ragged_gather_op.cc +++ b/tensorflow/core/kernels/ragged_gather_op.cc @@ -58,15 +58,21 @@ class RaggedGatherOpBase : public OpKernel { void Compute(OpKernelContext* context) override { // Get the input Tensors. + OpInputList params_nested_splits_in; OP_REQUIRES_OK(context, context->input_list("params_nested_splits", ¶ms_nested_splits_in)); + OP_REQUIRES( + context, params_nested_splits_in.size() > 0, + errors::InvalidArgument("params_nested_splits must be non empty")); + const Tensor& params_dense_values_in = context->input(params_nested_splits_in.size()); const Tensor& indices_in = context->input(params_nested_splits_in.size() + 1); - DCHECK_GT(params_nested_splits_in.size(), 0); // Enforced by REGISTER_OP. + OP_REQUIRES(context, params_nested_splits_in[0].dims() > 0, + errors::InvalidArgument("Split tensors must not be scalars")); SPLITS_TYPE num_params = params_nested_splits_in[0].dim_size(0) - 1; OP_REQUIRES_OK(context, ValidateIndices(indices_in, num_params)); diff --git a/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc b/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc index d9993bb6d3907a..c481d90638e4e2 100644 --- a/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc +++ b/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc @@ -174,7 +174,23 @@ Status NestedStackRaggedTensors( auto output_values_flat = output_ragged->mutable_values()->flat_outer_dims(); int values_index = 0; + + TensorShape expected_value_shape = component_values_shape; + expected_value_shape.RemoveDim(0); + for (int i = 0; i < ragged_components.size(); i++) { + // Check that the flat_values tensor shape is compatible. + TensorShape value_shape = ragged_components[i].values().shape(); + value_shape.RemoveDim(0); + if (value_shape != expected_value_shape) { + return errors::InvalidArgument( + "All flat_values must have compatible shapes. Shape at index 0: ", + expected_value_shape, ". Shape at index ", i, ": ", value_shape, + ". 
If you are using tf.map_fn, then you may need to specify an " + "explicit fn_output_signature with appropriate ragged_rank, and/or " + "convert output tensors to RaggedTensors."); + } + auto component_values_flat = ragged_components[i].values().flat_outer_dims(); int num_inner_elements = ragged_components[i].values().NumElements(); diff --git a/tensorflow/core/kernels/ragged_tensor_to_sparse_kernel.cc b/tensorflow/core/kernels/ragged_tensor_to_sparse_kernel.cc index 39b530f4a15ead..336a38fa58fc8b 100644 --- a/tensorflow/core/kernels/ragged_tensor_to_sparse_kernel.cc +++ b/tensorflow/core/kernels/ragged_tensor_to_sparse_kernel.cc @@ -21,6 +21,7 @@ limitations under the License. #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" +#include "tensorflow/core/platform/errors.h" namespace tensorflow { @@ -38,7 +39,8 @@ class RaggedTensorToSparseOp : public OpKernel { OP_REQUIRES_OK( context, context->input_list("rt_nested_splits", &rt_nested_splits_in)); const int rt_nested_splits_len = rt_nested_splits_in.size(); - DCHECK_GT(rt_nested_splits_len, 0); // Enforced by REGISTER_OP. + OP_REQUIRES(context, rt_nested_splits_len > 0, + errors::InvalidArgument("rt_nested_splits must be non empty")); std::vector rt_nested_splits; rt_nested_splits.reserve(rt_nested_splits_len); for (int i = 0; i < rt_nested_splits_len; ++i) { @@ -161,6 +163,14 @@ class RaggedTensorToSparseOp : public OpKernel { if (rt_nested_splits[i](0) != 0) { return InvalidArgument("First value of ragged splits must be 0."); } + for (int j = 1; j < rt_nested_splits[i].size(); ++j) { + if (rt_nested_splits[i](j) < rt_nested_splits[i](j - 1)) { + return InvalidArgument( + "Ragged splits should be non decreasing, but we got ", + rt_nested_splits[i](j - 1), " followed by ", + rt_nested_splits[i](j)); + } + } if (i > 0) { SPLITS_TYPE last_split = rt_nested_splits[i - 1](rt_nested_splits[i - 1].size() - 1); diff --git a/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc b/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc index b79a07e67ba913..1749a6e24784d6 100644 --- a/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc +++ b/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc @@ -348,6 +348,9 @@ class RaggedTensorToTensorBaseOp : public OpKernel { Status GetFirstDimensionSize(OpKernelContext* context, INDEX_TYPE* result) { const Tensor first_partition_tensor = context->input(kFirstPartitionInputIndex); + if (row_partition_types_.empty()) { + return errors::InvalidArgument("No row_partition_types given."); + } const RowPartitionType first_partition_type = row_partition_types_[0]; switch (first_partition_type) { case RowPartitionType::FIRST_DIM_SIZE: diff --git a/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc b/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc index 687289cd38077a..ab86863e3a987f 100644 --- a/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc +++ b/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc @@ -157,6 +157,12 @@ class RaggedTensorToVariantOp : public OpKernel { return; } + // Checked here instead of at input in case batched_input_ is false + OP_REQUIRES(context, ragged_nested_splits_len > 0, + errors::InvalidArgument( + "rt_nested_splits must be a list of one or more, but " + "received rt_nested_splits of length 0.")); + // Unbatch the Ragged Tensor and encode the components. 
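The check above enforces that every component ragged tensor carries flat_values of the same shape once the leading row dimension is stripped. A self-contained sketch of that comparison on plain shape vectors (CompatibleFlatValues is illustrative, not the kernel's code):

#include <cstdint>
#include <iostream>
#include <vector>

using Shape = std::vector<int64_t>;

Shape DropLeadingDim(Shape s) {
  if (!s.empty()) s.erase(s.begin());
  return s;
}

// All components must agree once the leading (ragged) dimension is removed.
bool CompatibleFlatValues(const std::vector<Shape>& component_shapes) {
  if (component_shapes.empty()) return true;
  const Shape expected = DropLeadingDim(component_shapes[0]);
  for (const Shape& s : component_shapes) {
    if (DropLeadingDim(s) != expected) return false;
  }
  return true;
}

int main() {
  std::cout << CompatibleFlatValues({{4, 3}, {7, 3}}) << "\n";  // 1
  std::cout << CompatibleFlatValues({{4, 3}, {7, 2}}) << "\n";  // 0
}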
std::vector unbatched_ragged_input; auto batched_splits_top_vec = diff --git a/tensorflow/core/kernels/reshape_util.cc b/tensorflow/core/kernels/reshape_util.cc index d54c081ec90175..e2cc218d63e42d 100644 --- a/tensorflow/core/kernels/reshape_util.cc +++ b/tensorflow/core/kernels/reshape_util.cc @@ -23,8 +23,10 @@ limitations under the License. #include #include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" +#include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/gtl/inlined_vector.h" @@ -99,7 +101,9 @@ void ReshapeSparseTensor(OpKernelContext *context, target_shape_in.shape().DebugString())); const int64 output_rank = target_shape_in.NumElements(); - const TensorShape input_shape(input_shape_in.vec()); + TensorShape input_shape; + OP_REQUIRES_OK(context, TensorShape::BuildTensorShape( + input_shape_in.vec(), &input_shape)); const int64 dense_size = input_shape.num_elements(); const int64 nnz = input_indices_in.shape().dim_size(0); @@ -174,6 +178,12 @@ void ReshapeSparseTensor(OpKernelContext *context, TensorShape({nnz, output_rank}), &result_indices)); if (nnz > 0) { + OP_REQUIRES( + context, dense_size > 0 && product > 0, + errors::InvalidArgument( + "Input tensor has ", nnz, " non zero elements but input shape (", + input_shape.DebugString(), ") or output shape (", + output_shape.DebugString(), ") is empty")); OP_REQUIRES_OK(context, functor::ReshapeSparseTensorFunctor()( context, input_shape, output_shape, input_indices_in.matrix(), diff --git a/tensorflow/core/kernels/resource_variable_ops.cc b/tensorflow/core/kernels/resource_variable_ops.cc index 9f61bd6341a420..dc999c516c7706 100644 --- a/tensorflow/core/kernels/resource_variable_ops.cc +++ b/tensorflow/core/kernels/resource_variable_ops.cc @@ -667,6 +667,11 @@ class ResourceGatherOp : public OpKernel { OP_REQUIRES( c, TensorShapeUtils::IsVectorOrHigher(params.shape()), errors::InvalidArgument("params must be at least 1 dimensional")); + OP_REQUIRES( + c, params.shape().dims() >= batch_dims_, + errors::InvalidArgument("params must have at least ", batch_dims_, + " (batch_dims) dimensions but it has shape ", + params.shape().DebugString())); // Check that we have enough index space const int64 N = indices.NumElements(); @@ -712,7 +717,8 @@ class ResourceGatherOp : public OpKernel { copy_functor(c->eigen_device(), tmp_indices.flat(), indices.flat()); - AddBatchOffsets(&tmp_indices, params); + AddBatchOffsets(c, &tmp_indices, params); + if (!c->status().ok()) return; op_indices = &tmp_indices; } @@ -744,11 +750,17 @@ class ResourceGatherOp : public OpKernel { // Example: batch_dims = 1, indices = [[0, 1, 2], [0, 1, 2]] // If indexing into a params dimension of size 4, then the indices will become // [0, 1, 2, 4, 5, 6] - void AddBatchOffsets(Tensor* indices, const Tensor& params) { + void AddBatchOffsets(OpKernelContext* ctx, Tensor* indices, + const Tensor& params) { int64 batch_size = 1; // The size of all batch dimensions. for (int idx = 0; idx < batch_dims_; ++idx) { batch_size *= params.dim_size(idx); } + OP_REQUIRES( + ctx, batch_size != 0, + errors::InvalidArgument( + "Inner size of indices would result in batch_size of 0 and a ", + "division by 0 in the implementation. 
This is illegal")); auto indices_flat = indices->flat(); int64 const index_inner_size = indices->NumElements() / batch_size; @@ -868,6 +880,35 @@ TF_CALL_GPU_NUMBER_TYPES(REGISTER_GATHER_ND_GPU); #undef REGISTER_GATHER_ND_ALL_INDICES #undef REGISTER_GATHER_ND_FULL +namespace { + +template +bool isCPUDevice() { + return false; +} + +template <> +bool isCPUDevice() { + return true; +} + +template +bool ValidateInput(const Tensor& updates) { + const auto updates_flat = updates.flat(); + const T zero(0); + for (int i = 0; i < updates.NumElements(); i++) { + if (updates_flat(i) == zero) return false; + } + return true; +} + +template <> +bool ValidateInput(const Tensor& updates) { + return true; +} + +} // namespace + template class ResourceScatterUpdateOp : public OpKernel { public: @@ -934,6 +975,12 @@ class ResourceScatterUpdateOp : public OpKernel { " indexing: ", params->dim_size(0), " > ", std::numeric_limits::max())); + // Prevent division by 0 + if (isCPUDevice() && op == tensorflow::scatter_op::UpdateOp::DIV) { + OP_REQUIRES(c, ValidateInput(updates), + errors::InvalidArgument("updates must not contain 0")); + } + if (N > 0) { auto indices_flat = indices.flat(); auto params_flat = params->flat_outer_dims(); @@ -950,11 +997,12 @@ class ResourceScatterUpdateOp : public OpKernel { params->dim_size(0), ")")); } else { int64 num_updates = updates.NumElements(); - OP_REQUIRES(c, num_updates % N == 0, - errors::InvalidArgument( - "shape of indices (", indices.shape().DebugString(), - ") is not compatible with the shape of updates (", - updates.shape().DebugString(), ")")); + OP_REQUIRES( + c, TensorShapeUtils::StartsWith(updates.shape(), indices.shape()), + errors::InvalidArgument( + "The shape of indices (", indices.shape().DebugString(), + ") must be a prefix of the shape of updates (", + updates.shape().DebugString(), ")")); auto updates_flat = updates.shaped({N, num_updates / N}); functor::ScatterFunctor functor; diff --git a/tensorflow/core/kernels/save_restore_tensor.cc b/tensorflow/core/kernels/save_restore_tensor.cc index 020e38c3462610..6cc3a28437feeb 100644 --- a/tensorflow/core/kernels/save_restore_tensor.cc +++ b/tensorflow/core/kernels/save_restore_tensor.cc @@ -151,11 +151,18 @@ void RestoreTensor(OpKernelContext* context, context, size == 1, errors::InvalidArgument( "Input 0 (file_pattern) must be a string scalar; got a tensor of ", - size, "elements")); + size, " elements")); } const string& file_pattern = file_pattern_t.flat()(0); const Tensor& tensor_name_t = context->input(1); + { + const int64_t size = tensor_name_t.NumElements(); + OP_REQUIRES(context, size > restore_index, + errors::InvalidArgument( + "Input 1 (tensor_names) must have at least ", + restore_index + 1, " elements")); + } const string& tensor_name = tensor_name_t.flat()(restore_index); // If we cannot find a cached reader we will allocate our own. diff --git a/tensorflow/core/kernels/save_restore_v2_ops.cc b/tensorflow/core/kernels/save_restore_v2_ops.cc index 44738d0f0cca3c..809a26030b850e 100644 --- a/tensorflow/core/kernels/save_restore_v2_ops.cc +++ b/tensorflow/core/kernels/save_restore_v2_ops.cc @@ -98,6 +98,7 @@ class SaveV2 : public OpKernel { const Tensor& shape_and_slices = context->input(2); ValidateInputs(true /* is save op */, context, prefix, tensor_names, shape_and_slices); + if (!context->status().ok()) return; const int kFixedInputs = 3; // Prefix, tensor names, shape_and_slices. 
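The `if (!context->status().ok()) return;` line added above is the whole SaveV2 fix: ValidateInputs already recorded the error, but the kernel previously kept executing on invalid inputs. A minimal sketch of the validate-then-bail pattern with a toy Status type (not TF's class, which stores the error on the OpKernelContext instead of returning it):

#include <iostream>
#include <string>

// Tiny stand-in for a status type; only what the pattern needs.
struct Status {
  bool ok_ = true;
  std::string msg_;
  bool ok() const { return ok_; }
};

Status ValidateInputsSketch(int num_tensors) {
  if (num_tensors <= 0) return {false, "expected at least one tensor"};
  return {};
}

void ComputeSketch(int num_tensors) {
  Status s = ValidateInputsSketch(num_tensors);
  if (!s.ok()) return;  // the fix: stop instead of running on bad inputs
  std::cout << "saving " << num_tensors << " tensors\n";
}

int main() {
  ComputeSketch(0);  // silently rejected
  ComputeSketch(2);  // saving 2 tensors
}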
const int num_tensors = static_cast(tensor_names.NumElements()); @@ -177,6 +178,7 @@ class RestoreV2 : public OpKernel { " expected dtypes.")); ValidateInputs(false /* not save op */, context, prefix, tensor_names, shape_and_slices); + if (!context->status().ok()) return; const string& prefix_string = prefix.scalar()(); diff --git a/tensorflow/core/kernels/sdca_internal.cc b/tensorflow/core/kernels/sdca_internal.cc index 11a3be8bf46a76..ed7149bf8365d8 100644 --- a/tensorflow/core/kernels/sdca_internal.cc +++ b/tensorflow/core/kernels/sdca_internal.cc @@ -380,6 +380,11 @@ Status Examples::Initialize(OpKernelContext* const context, const Tensor* example_labels_t; TF_RETURN_IF_ERROR(context->input("example_labels", &example_labels_t)); auto example_labels = example_labels_t->flat(); + if (example_labels.size() != num_examples) { + return errors::InvalidArgument("Expected ", num_examples, + " example labels but got ", + example_labels.size()); + } OpInputList dense_features_inputs; TF_RETURN_IF_ERROR( diff --git a/tensorflow/core/kernels/searchsorted_op.cc b/tensorflow/core/kernels/searchsorted_op.cc index 01e221dc471c4d..5f075a6a540e9f 100644 --- a/tensorflow/core/kernels/searchsorted_op.cc +++ b/tensorflow/core/kernels/searchsorted_op.cc @@ -86,6 +86,10 @@ class UpperBoundOp : public OpKernel { const Tensor& sorted_inputs_t = ctx->input(0); const Tensor& values_t = ctx->input(1); + // inputs must be at least a matrix + OP_REQUIRES( + ctx, sorted_inputs_t.shape().dims() >= 2, + errors::InvalidArgument("sorted input argument must be a matrix")); // must have same batch dim_size for both OP_REQUIRES(ctx, sorted_inputs_t.dim_size(0) == values_t.dim_size(0), Status(error::INVALID_ARGUMENT, @@ -127,6 +131,10 @@ class LowerBoundOp : public OpKernel { const Tensor& sorted_inputs_t = ctx->input(0); const Tensor& values_t = ctx->input(1); + // inputs must be at least a matrix + OP_REQUIRES( + ctx, sorted_inputs_t.shape().dims() >= 2, + errors::InvalidArgument("sorted input argument must be a matrix")); // must have same batch dim_size for both OP_REQUIRES(ctx, sorted_inputs_t.dim_size(0) == values_t.dim_size(0), Status(error::INVALID_ARGUMENT, diff --git a/tensorflow/core/kernels/segment_reduction_ops_impl.h b/tensorflow/core/kernels/segment_reduction_ops_impl.h index 81c9ba869ab50c..fdb70ed3257721 100644 --- a/tensorflow/core/kernels/segment_reduction_ops_impl.h +++ b/tensorflow/core/kernels/segment_reduction_ops_impl.h @@ -18,6 +18,10 @@ limitations under the License. #ifndef TENSORFLOW_CORE_KERNELS_SEGMENT_REDUCTION_OPS_IMPL_H_ #define TENSORFLOW_CORE_KERNELS_SEGMENT_REDUCTION_OPS_IMPL_H_ +#include + +#include "tensorflow/core/framework/op_requires.h" +#include "tensorflow/core/platform/types.h" #define EIGEN_USE_THREADS #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define EIGEN_USE_GPU @@ -111,7 +115,9 @@ class SegmentReductionOp : public OpKernel { errors::InvalidArgument("segment ids must be >= 0")); TensorShape output_shape = input.shape(); - output_shape.set_dim(0, output_rows); + // Since we're changing the first dimension of the shape, we need to make + // sure the new shape won't overflow. + OP_REQUIRES_OK(context, output_shape.SetDimWithStatus(0, output_rows)); // Note that we do not initialize the output buffer with a default value, so // we need to explicitly set missing indices to the default value. 
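Both segment-reduction hunks swap a raw `set_dim` for `SetDimWithStatus` so that resizing dimension 0 cannot silently overflow the element count. An overflow-checked recomputation in plain C++, assuming int64 element counts as TensorShape uses (CheckedNumElements is a stand-in, not the TF routine):

#include <cstdint>
#include <iostream>
#include <limits>
#include <optional>
#include <vector>

// Recompute the element count, refusing negative dims and int64 overflow.
std::optional<int64_t> CheckedNumElements(const std::vector<int64_t>& dims) {
  int64_t n = 1;
  for (int64_t d : dims) {
    if (d < 0) return std::nullopt;
    if (d != 0 && n > std::numeric_limits<int64_t>::max() / d)
      return std::nullopt;  // product would overflow
    n *= d;
  }
  return n;
}

int main() {
  // Two huge dims whose product exceeds int64: rejected instead of wrapping.
  std::cout << CheckedNumElements({1LL << 40, 1LL << 40}).has_value() << "\n";  // 0
}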
@@ -287,7 +293,10 @@ class SegmentReductionGPUOp : public AsyncOpKernel { done); TensorShape output_shape = input.shape(); - output_shape.set_dim(0, output_rows); + // Since we're changing the first dimension of the shape, we need to make + // sure the new shape won't overflow. + OP_REQUIRES_OK_ASYNC(context, + output_shape.SetDimWithStatus(0, output_rows), done); Tensor* output = nullptr; OP_REQUIRES_OK_ASYNC( @@ -455,6 +464,7 @@ class SparseSegmentReductionOpBase : public OpKernel { bool is_mean, bool is_sqrtn, bool has_num_segments, T default_value) : OpKernel(context), + dtidx_(DataTypeToEnum::v()), is_mean_(is_mean), is_sqrtn_(is_sqrtn), has_num_segments_(has_num_segments), @@ -494,10 +504,20 @@ class SparseSegmentReductionOpBase : public OpKernel { const auto segment_vec = segment_ids.vec(); // Note that the current implementation assumes that segment_vec values are // sorted. + const SegmentId last_segment_id = + num_indices > 0 ? segment_vec(num_indices - 1) : 0; + int64_t limit = dtidx_ == DataType::DT_INT32 ? kint32max : kint64max; + + OP_REQUIRES( + context, last_segment_id < limit, + errors::InvalidArgument("Last segment id must be < kintmax, got ", + last_segment_id, " limit ", limit)); + const SegmentId last_segment_id_plus_one = num_indices > 0 ? internal::SubtleMustCopy(segment_vec(num_indices - 1)) + 1 : 0; + if (has_num_segments_) { OP_REQUIRES( context, output_rows >= last_segment_id_plus_one, @@ -509,7 +529,7 @@ class SparseSegmentReductionOpBase : public OpKernel { errors::InvalidArgument("segment ids must be >= 0")); TensorShape output_shape = input.shape(); - output_shape.set_dim(0, output_rows); + OP_REQUIRES_OK(context, output_shape.SetDimWithStatus(0, output_rows)); // Note that we do not initialize the output buffer with a default value, so // we need to explicitly set missing indices to the default value. @@ -596,6 +616,7 @@ class SparseSegmentReductionOpBase : public OpKernel { } private: + const DataType dtidx_; template using EnableIfBfloat16 = typename std::enable_if::value, int>::type; diff --git a/tensorflow/core/kernels/sequence_ops.cc b/tensorflow/core/kernels/sequence_ops.cc index d15f95125e0ac3..7ae61e7b6a47c4 100644 --- a/tensorflow/core/kernels/sequence_ops.cc +++ b/tensorflow/core/kernels/sequence_ops.cc @@ -71,13 +71,23 @@ class RangeOp : public OpKernel { errors::InvalidArgument( "Requires start >= limit when delta < 0: ", start, "/", limit)); } - int64 size = (std::is_integral::value - ? ((std::abs(limit - start) + std::abs(delta) - 1) / - std::abs(delta)) - : std::ceil(std::abs((limit - start) / delta))); + auto size_auto = (std::is_integral::value + ? 
(Eigen::numext::abs(limit - start) + + Eigen::numext::abs(delta) - T(1)) / + Eigen::numext::abs(delta) + : Eigen::numext::ceil( + Eigen::numext::abs((limit - start) / delta))); + OP_REQUIRES( + context, size_auto <= std::numeric_limits::max(), + errors::InvalidArgument("Requires ((limit - start) / delta) <= ", + std::numeric_limits::max())); + + int64 size = static_cast(size_auto); + + TensorShape shape; + OP_REQUIRES_OK(context, shape.AddDimWithStatus(size)); Tensor* out = nullptr; - OP_REQUIRES_OK(context, - context->allocate_output(0, TensorShape({size}), &out)); + OP_REQUIRES_OK(context, context->allocate_output(0, shape, &out)); auto flat = out->flat(); T val = start; for (int64 i = 0; i < size; ++i) { diff --git a/tensorflow/core/kernels/serialize_sparse_op.cc b/tensorflow/core/kernels/serialize_sparse_op.cc index 07cc6c86a1735e..de0ef96aa87c6a 100644 --- a/tensorflow/core/kernels/serialize_sparse_op.cc +++ b/tensorflow/core/kernels/serialize_sparse_op.cc @@ -23,9 +23,11 @@ limitations under the License. #include "tensorflow/core/common_runtime/dma_helper.h" #include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor.pb.h" +#include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/variant.h" @@ -364,7 +366,10 @@ class SerializeManySparseOp : public OpKernel { errors::InvalidArgument( "Rank of input SparseTensor should be > 1, but saw rank: ", rank)); - TensorShape tensor_input_shape(input_shape->vec()); + TensorShape tensor_input_shape; + OP_REQUIRES_OK(context, + TensorShape::BuildTensorShape(input_shape->vec(), + &tensor_input_shape)); gtl::InlinedVector std_order(rank); std::iota(std_order.begin(), std_order.end(), 0); SparseTensor input_st; diff --git a/tensorflow/core/kernels/set_kernels.cc b/tensorflow/core/kernels/set_kernels.cc index fd6c7040637f51..ea91f82eec1e6c 100644 --- a/tensorflow/core/kernels/set_kernels.cc +++ b/tensorflow/core/kernels/set_kernels.cc @@ -35,6 +35,7 @@ limitations under the License. #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/platform/env.h" +#include "tensorflow/core/platform/errors.h" #include "tensorflow/core/util/sparse/sparse_tensor.h" namespace tensorflow { @@ -67,8 +68,9 @@ Status SparseTensorFromContext(OpKernelContext* ctx, const int32 base_index, bool validate_indices, sparse::SparseTensor* tensor) { // Assume row-major order. - const TensorShape shape = - TensorShape(ctx->input(base_index + 2).vec()); + TensorShape shape; + TF_RETURN_IF_ERROR(TensorShape::BuildTensorShape( + ctx->input(base_index + 2).vec(), &shape)); CheckRankAtLeast2(ctx, shape); std::vector order(shape.dims()); std::iota(order.begin(), order.end(), 0); diff --git a/tensorflow/core/kernels/sparse/sparse_tensor_to_csr_sparse_matrix_op.cc b/tensorflow/core/kernels/sparse/sparse_tensor_to_csr_sparse_matrix_op.cc index 2548ceaa57cc63..dcdad591438f25 100644 --- a/tensorflow/core/kernels/sparse/sparse_tensor_to_csr_sparse_matrix_op.cc +++ b/tensorflow/core/kernels/sparse/sparse_tensor_to_csr_sparse_matrix_op.cc @@ -76,10 +76,18 @@ class SparseTensorToCSRSparseMatrixCPUOp : public OpKernel { const int64 total_nnz = values.NumElements(); // Allocate output Tensors. 
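The RangeOp hunk above moves the size computation into a wide type and validates it before allocating the output shape. A hedged standalone equivalent for the floating-point case only (RangeSize is illustrative; the kernel also handles integral T with ceiling division):

#include <cmath>
#include <cstdint>
#include <iostream>
#include <limits>
#include <optional>

// Compute the range length in double, validate, then narrow to int64.
std::optional<int64_t> RangeSize(double start, double limit, double delta) {
  if (delta == 0.0) return std::nullopt;
  double size = std::ceil(std::fabs((limit - start) / delta));
  if (size > static_cast<double>(std::numeric_limits<int64_t>::max()))
    return std::nullopt;  // too large for a tensor dimension
  return static_cast<int64_t>(size);
}

int main() {
  std::cout << RangeSize(0.0, 10.0, 3.0).value() << "\n";          // 4
  std::cout << RangeSize(0.0, 1e300, 1e-300).has_value() << "\n";  // 0
}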
- Tensor batch_ptr(cpu_allocator(), DT_INT32, TensorShape({batch_size + 1})); - Tensor csr_col_ind(cpu_allocator(), DT_INT32, TensorShape({total_nnz})); - Tensor csr_row_ptr(cpu_allocator(), DT_INT32, - TensorShape({(num_rows + 1) * batch_size})); + TensorShape batch_ptr_shape; + OP_REQUIRES_OK( + ctx, TensorShape::BuildTensorShape({batch_size + 1}, &batch_ptr_shape)); + Tensor batch_ptr(cpu_allocator(), DT_INT32, batch_ptr_shape); + TensorShape csr_col_ind_shape; + OP_REQUIRES_OK( + ctx, TensorShape::BuildTensorShape({total_nnz}, &csr_col_ind_shape)); + Tensor csr_col_ind(cpu_allocator(), DT_INT32, csr_col_ind_shape); + TensorShape csr_row_ind_shape; + OP_REQUIRES_OK(ctx, TensorShape::BuildTensorShape( + {(num_rows + 1) * batch_size}, &csr_row_ind_shape)); + Tensor csr_row_ptr(cpu_allocator(), DT_INT32, csr_row_ind_shape); // Fill the row pointers with zeros. functor::SetZeroFunctor set_zero; diff --git a/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc b/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc index dac4a3d3e6bfcd..6e3f4969bcf14e 100644 --- a/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc +++ b/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc @@ -78,15 +78,30 @@ class SparseDenseBinaryOpShared : public OpKernel { "but received shapes: ", values_t->shape().DebugString(), " and ", shape_t->shape().DebugString())); + OP_REQUIRES( + ctx, TensorShapeUtils::IsVector(shape_t->shape()), + errors::InvalidArgument("Input sp_shape must be a vector. Got: ", + shape_t->shape().DebugString())); OP_REQUIRES( ctx, values_t->dim_size(0) == indices_t->dim_size(0), errors::InvalidArgument( "The first dimension of values and indices should match. (", values_t->dim_size(0), " vs. ", indices_t->dim_size(0), ")")); + OP_REQUIRES( + ctx, shape_t->shape().dim_size(0) == indices_t->shape().dim_size(1), + errors::InvalidArgument( + "Number of dimensions must match second dimension of indices. ", + "Got ", shape_t->shape().dim_size(0), + " dimensions, indices shape: ", indices_t->shape().DebugString())); + OP_REQUIRES(ctx, shape_t->NumElements() > 0, + errors::InvalidArgument( + "The shape argument requires at least one element.")); const auto indices_mat = indices_t->matrix(); const auto shape_vec = shape_t->vec(); - const auto lhs_dims = BCast::FromShape(TensorShape(shape_vec)); + TensorShape lhs_shape; + OP_REQUIRES_OK(ctx, TensorShape::BuildTensorShape(shape_vec, &lhs_shape)); + const auto lhs_dims = BCast::FromShape(lhs_shape); const auto rhs_dims = BCast::FromShape(dense_t->shape()); BCast b(lhs_dims, rhs_dims, false); // false for keeping the same num dims. @@ -114,7 +129,10 @@ class SparseDenseBinaryOpShared : public OpKernel { OP_REQUIRES_OK( ctx, ctx->allocate_temp(DataTypeToEnum::value, TensorShape({nnz}), &dense_gathered)); - + bool op_is_div = false; + if (absl::StrContains(ctx->op_kernel().type_string_view(), "Div")) { + op_is_div = true; + } // Pulls relevant entries from the dense side, with reshape and broadcasting // *of the dense side* taken into account. Use a TensorRef to avoid blowing // up memory. @@ -143,6 +161,12 @@ class SparseDenseBinaryOpShared : public OpKernel { errors::InvalidArgument("Provided indices are out-of-bounds w.r.t. 
" \ "dense side with broadcasted shape")); \ dense_gathered_flat(i) = rhs_ref.coeff(idx); \ + if (op_is_div) { \ + OP_REQUIRES(ctx, dense_gathered_flat(i) != 0, \ + errors::InvalidArgument( \ + "SparseDenseCwiseDiv cannot divide by zero," \ + "but input dense tensor contains zero ")); \ + } \ } \ break; \ } diff --git a/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc b/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc index af3201463344ad..1dfb0bad2969eb 100644 --- a/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc +++ b/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc @@ -24,11 +24,13 @@ limitations under the License. #include #include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/gtl/inlined_vector.h" +#include "tensorflow/core/platform/errors.h" #include "tensorflow/core/util/sparse/sparse_tensor.h" namespace tensorflow { @@ -211,6 +213,10 @@ class SparseFillEmptyRowsOp : public OpKernel { OP_REQUIRES(context, TensorShapeUtils::IsVector(values_t.shape()), errors::InvalidArgument("values must be a vector, saw: ", values_t.shape().DebugString())); + OP_REQUIRES(context, indices_t.dim_size(0) == values_t.dim_size(0), + errors::InvalidArgument("The length of `values` (", values_t.dim_size(0), + ") must match the first dimension of `indices` (", + indices_t.dim_size(0), ").")); OP_REQUIRES(context, TensorShapeUtils::IsScalar(default_value_t.shape()), errors::InvalidArgument("default_value must be a scalar, saw: ", default_value_t.shape().DebugString())); diff --git a/tensorflow/core/kernels/sparse_matmul_op.cc b/tensorflow/core/kernels/sparse_matmul_op.cc index a02afafa33e3ad..6bf9dfa3d8bb75 100644 --- a/tensorflow/core/kernels/sparse_matmul_op.cc +++ b/tensorflow/core/kernels/sparse_matmul_op.cc @@ -32,6 +32,7 @@ limitations under the License. #include "tensorflow/core/kernels/fill_functor.h" #include "tensorflow/core/lib/core/blocking_counter.h" #include "tensorflow/core/lib/core/threadpool.h" +#include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/platform/mutex.h" @@ -980,9 +981,18 @@ class SparseMatMulOp : public OpKernel { errors::InvalidArgument( "Matrix size incompatible: a: ", a.shape().DebugString(), ", b: ", b.shape().DebugString())); + OP_REQUIRES(ctx, m >= 0 && n >= 0 && k >= 0, + errors::InvalidArgument( + "Matrix dimensions cannot be negative: a: ", + a.shape().DebugString(), ", b: ", b.shape().DebugString())); Tensor* output = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({m, n}), &output)); + // Return early if at least one of the output dimension size is 0. + if (m == 0 || n == 0) { + return; + } + if (k == 0) { // If the inner dimension k in the matrix multiplication is zero, we fill // the output with zeros. diff --git a/tensorflow/core/kernels/sparse_reduce_op.cc b/tensorflow/core/kernels/sparse_reduce_op.cc index b65f31e5eb174e..ef0d89069ce6fa 100644 --- a/tensorflow/core/kernels/sparse_reduce_op.cc +++ b/tensorflow/core/kernels/sparse_reduce_op.cc @@ -18,8 +18,10 @@ limitations under the License. 
#define EIGEN_USE_THREADS #include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" +#include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/util/sparse/sparse_tensor.h" @@ -172,10 +174,13 @@ class SparseReduceOp : public OpKernel { // making deep copies here. Remove this if/when we change Reorder()'s // semantics. const auto shape_vec = shape_t->vec(); + TensorShape shape; + OP_REQUIRES_OK(ctx, TensorShape::BuildTensorShape(shape_vec, &shape)); + SparseTensor sp; OP_REQUIRES_OK(ctx, SparseTensor::Create( tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t), - TensorShape(shape_vec), &sp)); + shape, &sp)); ReduceDetails reduction = SparseTensorReduceHelper( sp, reduction_axes_t->flat(), keep_dims_); @@ -219,7 +224,20 @@ class SparseReduceOp : public OpKernel { sp.Reorder(reduction.reorder_dims); for (const auto &g : sp.group(reduction.group_by_dims)) { Op::template Run(ctx, reduced_val, g.template values()); + OP_REQUIRES(ctx, + output_strides.empty() || + (g.group().size() == output_strides.size()), + errors::Internal( + "Expected group size and output_strides size to match", + ", but got ", g.group().size(), " and ", + output_strides.size())); const int64 idx = CoordinatesToFlatIndex(g.group(), output_strides); + OP_REQUIRES(ctx, + idx >= 0 && idx < out_flat.size(), + errors::Internal( + "Obtained a write index of ", idx, + " which is outside of bounds of [0, ", + out_flat.size(), ")")); out_flat(idx) = reduced_val(); VLOG(2) << "coords: " << absl::StrJoin(g.group(), ",") << "; idx: " << idx << "; group " << Op::Name() << ": " @@ -262,10 +280,13 @@ class SparseReduceSparseOp : public OpKernel { OP_REQUIRES_OK(ctx, ValidateInputs(shape_t, reduction_axes_t)); + TensorShape shape; + OP_REQUIRES_OK(ctx, TensorShape::BuildTensorShape(shape_t->vec(), + &shape)); SparseTensor sp; OP_REQUIRES_OK(ctx, SparseTensor::Create(tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t), - TensorShape(shape_t->vec()), &sp)); + shape, &sp)); ReduceDetails reduction = SparseTensorReduceHelper( sp, reduction_axes_t->flat(), keep_dims_); diff --git a/tensorflow/core/kernels/sparse_slice_op.cc b/tensorflow/core/kernels/sparse_slice_op.cc index 6aaf4fd88fbe89..373fb87d028ef7 100644 --- a/tensorflow/core/kernels/sparse_slice_op.cc +++ b/tensorflow/core/kernels/sparse_slice_op.cc @@ -18,6 +18,7 @@ limitations under the License. 
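The bounds check added to SparseReduce above refuses to scatter a reduced value through an out-of-range flat index, turning a memory-corrupting write into a checked error. The core of the pattern, as a sketch (SafeWrite is illustrative):

#include <cstdint>
#include <iostream>
#include <vector>

// Refuse any write whose computed flat index falls outside the buffer.
bool SafeWrite(std::vector<float>& out, int64_t idx, float value) {
  if (idx < 0 || idx >= static_cast<int64_t>(out.size())) return false;
  out[idx] = value;
  return true;
}

int main() {
  std::vector<float> out(4, 0.f);
  std::cout << SafeWrite(out, 2, 1.5f) << "\n";  // 1
  std::cout << SafeWrite(out, 9, 1.5f) << "\n";  // 0: rejected, no OOB write
}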
#include #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" +#include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/util/sparse/sparse_tensor.h" namespace tensorflow { @@ -67,27 +68,38 @@ class SparseSliceOp : public OpKernel { " but got length ", input_size.NumElements())); sparse::SparseTensor sparse_tensor; + TensorShape sparse_tensor_shape; OP_REQUIRES_OK(context, - sparse::SparseTensor::Create( - input_indices, input_values, - TensorShape(input_shape.vec()), &sparse_tensor)); + TensorShapeBase::BuildTensorShapeBase( + input_shape.vec(), &sparse_tensor_shape)); + OP_REQUIRES_OK(context, sparse::SparseTensor::Create( + input_indices, input_values, + sparse_tensor_shape, &sparse_tensor)); const gtl::ArraySlice start(input_start.flat().data(), input_dims); const gtl::ArraySlice size(input_size.flat().data(), input_dims); - const sparse::SparseTensor output = + const StatusOr output_or = sparse::SparseTensor::Slice(sparse_tensor, start, size); + OP_REQUIRES_OK(context, output_or.status()); + auto output = output_or.ValueOrDie(); context->set_output(0, output.indices()); context->set_output(1, output.values()); - const TensorShape output_shape(output.shape()); + TensorShape output_shape; + OP_REQUIRES_OK(context, TensorShapeBase::BuildTensorShapeBase( + output.shape(), &output_shape)); + + TensorShape allocated_shape; + OP_REQUIRES_OK(context, TensorShapeBase::BuildTensorShapeBase( + {output_shape.dims()}, &allocated_shape)); Tensor* shape = nullptr; OP_REQUIRES_OK(context, - context->allocate_output(2, {output_shape.dims()}, &shape)); + context->allocate_output(2, allocated_shape, &shape)); for (int dim = 0; dim < output_shape.dims(); ++dim) { shape->vec()(dim) = output_shape.dim_size(dim); } diff --git a/tensorflow/core/kernels/sparse_softmax_op.cc b/tensorflow/core/kernels/sparse_softmax_op.cc index 548080b8b13738..0fdb8acf963eaf 100644 --- a/tensorflow/core/kernels/sparse_softmax_op.cc +++ b/tensorflow/core/kernels/sparse_softmax_op.cc @@ -21,6 +21,7 @@ limitations under the License. 
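SparseSlice above now consumes `SparseTensor::Slice` through a StatusOr so slicing failures surface as checked errors rather than crashes. A toy version of the value-or-error calling convention (StatusOrSketch is a stand-in, and the range rule here is a simplification, not TF's slicing semantics):

#include <iostream>
#include <optional>
#include <string>

// Tiny value-or-error type standing in for StatusOr.
template <typename T>
struct StatusOrSketch {
  std::optional<T> value;
  std::string error;
  bool ok() const { return value.has_value(); }
};

// A fallible slice: the window must fit inside the dimension.
StatusOrSketch<int> SliceLenSketch(int dim, int start, int size) {
  if (start < 0 || size < 0 || start + size > dim)
    return {std::nullopt, "slice out of range"};
  return {size, ""};
}

int main() {
  auto r = SliceLenSketch(10, 8, 5);
  if (!r.ok()) std::cout << r.error << "\n";  // slice out of range
}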
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_util.h" @@ -62,14 +63,16 @@ class SparseSoftmaxOp : public OpKernel { errors::InvalidArgument( "Input should have rank >= 2, but received shape: ", shape_t->SummarizeValue(3))); + TensorShape shape; + OP_REQUIRES_OK(context, TensorShape::BuildTensorShape( + shape_t->flat(), &shape)); const int64 nnz = indices_t->dim_size(0); const int rank = static_cast(indices_t->dim_size(1)); SparseTensor st; OP_REQUIRES_OK( - context, SparseTensor::Create( - tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t), - TensorShape(shape_t->flat()), &st)); + context, SparseTensor::Create(tensor::DeepCopy(*indices_t), + tensor::DeepCopy(*values_t), shape, &st)); Tensor *output_values = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape({nnz}), diff --git a/tensorflow/core/kernels/sparse_split_op.cc b/tensorflow/core/kernels/sparse_split_op.cc index 3b88a8ca2bf6ee..dfc572fe5a0f37 100644 --- a/tensorflow/core/kernels/sparse_split_op.cc +++ b/tensorflow/core/kernels/sparse_split_op.cc @@ -30,11 +30,15 @@ class SparseSplitOp : public OpKernel { } void Compute(OpKernelContext* context) override { - const int64 axis_input = context->input(0).scalar()(); + const Tensor& input_axis = context->input(0); const Tensor& input_indices = context->input(1); const Tensor& input_values = context->input(2); const Tensor& input_shape = context->input(3); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(input_axis.shape()), + errors::InvalidArgument( + "Input axis should be a scalar but received shape ", + input_axis.shape().DebugString())); OP_REQUIRES(context, TensorShapeUtils::IsMatrix(input_indices.shape()), errors::InvalidArgument( "Input indices should be a matrix but received shape ", @@ -48,7 +52,8 @@ class SparseSplitOp : public OpKernel { "Input shape should be a vector but received shape ", input_shape.shape().DebugString())); - const int64 input_rank = input_shape.vec().size(); + const int64 axis_input = input_axis.scalar()(); + const int64 input_rank = input_shape.vec().size(); const int64 axis = (axis_input < 0) ? input_rank + axis_input : axis_input; OP_REQUIRES( diff --git a/tensorflow/core/kernels/sparse_tensors_map_ops.cc b/tensorflow/core/kernels/sparse_tensors_map_ops.cc index 5ea5fca544d3e9..8b13cf5d5ce48f 100644 --- a/tensorflow/core/kernels/sparse_tensors_map_ops.cc +++ b/tensorflow/core/kernels/sparse_tensors_map_ops.cc @@ -234,16 +234,29 @@ class AddManySparseToTensorsMapOp : public SparseTensorAccessingOp { errors::InvalidArgument( "Input indices should be a matrix but received shape ", input_indices->shape().DebugString())); - OP_REQUIRES(context, TensorShapeUtils::IsVector(input_values->shape()), errors::InvalidArgument( "Input values should be a vector but received shape ", input_values->shape().DebugString())); - OP_REQUIRES(context, TensorShapeUtils::IsVector(input_shape->shape()), errors::InvalidArgument( "Input shape should be a vector but received shape ", input_shape->shape().DebugString())); + OP_REQUIRES( + context, + input_values->shape().dim_size(0) == input_indices->shape().dim_size(0), + errors::InvalidArgument( + "Number of values must match first dimension of indices. 
", "Got ", + input_values->shape().dim_size(0), + " values, indices shape: ", input_indices->shape().DebugString())); + OP_REQUIRES( + context, + input_shape->shape().dim_size(0) == input_indices->shape().dim_size(1), + errors::InvalidArgument( + "Number of dimensions must match second dimension of indices. ", + "Got ", input_shape->shape().dim_size(0), + " dimensions, indices shape: ", + input_indices->shape().DebugString())); int rank = input_shape->NumElements(); @@ -253,21 +266,10 @@ class AddManySparseToTensorsMapOp : public SparseTensorAccessingOp { "Rank of input SparseTensor should be > 1, but saw rank: ", rank)); auto input_shape_vec = input_shape->vec(); - int new_num_elements = 1; - bool overflow_ocurred = false; - for (int i = 0; i < input_shape_vec.size(); i++) { - new_num_elements = - MultiplyWithoutOverflow(new_num_elements, input_shape_vec(i)); - if (new_num_elements < 0) { - overflow_ocurred = true; - } - } - - OP_REQUIRES( - context, !overflow_ocurred, - errors::Internal("Encountered overflow from large input shape.")); - TensorShape tensor_input_shape(input_shape_vec); + TensorShape tensor_input_shape; + OP_REQUIRES_OK(context, TensorShape::BuildTensorShape(input_shape_vec, + &tensor_input_shape)); gtl::InlinedVector std_order(rank); std::iota(std_order.begin(), std_order.end(), 0); SparseTensor input_st; diff --git a/tensorflow/core/kernels/split_v_op.cc b/tensorflow/core/kernels/split_v_op.cc index fc070610877d5f..3497f95f13d306 100644 --- a/tensorflow/core/kernels/split_v_op.cc +++ b/tensorflow/core/kernels/split_v_op.cc @@ -138,8 +138,17 @@ class SplitVOpBase : public OpKernel { (*split_sizes_vec)[neg_one_dim] = input_size_split_dim - determined_size; } - // Special case 2: split along the 1st dimension. We can share the - // underlying buffer. + for (int i = 0; i < split_sizes_vec->size(); ++i) { + const Tlen& split_size = (*split_sizes_vec)[i]; + OP_REQUIRES(context, split_size >= Tlen(0), + errors::InvalidArgument("Split size at index ", i, + " must be >= 0. Got: ", split_size)); + } + + // Special case 2: split along the 1st dimension. The requirements are that + // either we are splitting the outer dimension of two or more such that + // every outer subpart is aligned or that the split sizes mean that they are + // always aligned. In these cases, we can share the underlying buffer. // // Apply this optimization conservatively: if input is aligned, // the resulting tensors must be aligned. It's conservative diff --git a/tensorflow/core/kernels/string_ngrams_op.cc b/tensorflow/core/kernels/string_ngrams_op.cc index 7008a1d766af25..7b004983ba6823 100644 --- a/tensorflow/core/kernels/string_ngrams_op.cc +++ b/tensorflow/core/kernels/string_ngrams_op.cc @@ -13,13 +13,17 @@ See the License for the specific language governing permissions and limitations under the License. 
==============================================================================*/ +#include #include #include #include "absl/strings/ascii.h" #include "absl/strings/str_cat.h" #include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/platform/errors.h" +#include "tensorflow/core/platform/types.h" +#include "tensorflow/core/platform/statusor.h" namespace tensorflow { namespace text { @@ -47,12 +51,24 @@ class StringNGramsOp : public tensorflow::OpKernel { ngram_width - 1); } - int get_num_ngrams(const int length, const int ngram_width) const { + StatusOr<int> get_num_ngrams(const int length, const int ngram_width) const { + int64 limit = kint32max; int pad_width = get_pad_width(ngram_width); + if (pad_width > limit / 2 - length) { + return errors::InvalidArgument( + "Pad width could lead to integer overflow, got pad_width = ", + pad_width); + } return std::max(0, ((length + 2 * pad_width) - ngram_width) + 1); } void Compute(tensorflow::OpKernelContext* context) override { + for (int ngram_width : ngram_widths_) { + OP_REQUIRES( + context, ngram_width > 0, + errors::InvalidArgument("ngram_widths must contain positive values")); + } + const tensorflow::Tensor* data; OP_REQUIRES_OK(context, context->input("data", &data)); const auto& input_data = data->flat<tstring>().data(); @@ -106,8 +122,11 @@ class StringNGramsOp : public tensorflow::OpKernel { for (int i = 1; i <= num_batch_items; ++i) { int length = splits_vec(i) - splits_vec(i - 1); int num_ngrams = 0; - for (int ngram_width : ngram_widths_) - num_ngrams += get_num_ngrams(length, ngram_width); + for (int ngram_width : ngram_widths_) { + auto ngrams_or = get_num_ngrams(length, ngram_width); + OP_REQUIRES_OK(context, ngrams_or.status()); + num_ngrams += ngrams_or.ValueOrDie(); + } if (preserve_short_ && length > 0 && num_ngrams == 0) { num_ngrams = 1; } @@ -127,7 +146,9 @@ class StringNGramsOp : public tensorflow::OpKernel { for (int ngram_width : ngram_widths_) { auto output_start = &ngrams_data[output_start_idx]; int length = splits_vec(i + 1) - splits_vec(i); - int num_ngrams = get_num_ngrams(length, ngram_width); + auto ngrams_or = get_num_ngrams(length, ngram_width); + OP_REQUIRES_OK(context, ngrams_or.status()); + int num_ngrams = ngrams_or.ValueOrDie(); CreateNgrams(data_start, output_start, num_ngrams, ngram_width); output_start_idx += num_ngrams; } @@ -146,6 +167,16 @@ class StringNGramsOp : public tensorflow::OpKernel { // We don't have to worry about dynamic padding sizes here: if padding // was dynamic, every sequence would have had sufficient padding to // generate at least one ngram. + + // If reached here, pad_width should be > 0, pad_width_ = -1, + // which indicates max(ngram_widths) - 1 cannot be used here since + // ngram_width is not known. 
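The `get_num_ngrams` change above replaces a raw `int` return with `StatusOr<int>`, so callers must check for overflow before trusting the count. Below is a minimal standalone sketch of the same guard, using `std::optional` in place of `StatusOr` and a hypothetical `SafeNumNgrams` helper that is not part of this patch:

```cpp
#include <cstdint>
#include <iostream>
#include <optional>

// Hypothetical stand-in for the guarded count: reject pad widths that would
// push (length + 2 * pad_width) past INT32_MAX instead of letting the
// addition wrap around to a negative ngram count.
std::optional<int> SafeNumNgrams(int length, int pad_width, int ngram_width) {
  const int64_t limit = INT32_MAX;
  if (length < 0 || pad_width < 0) return std::nullopt;
  if (pad_width > limit / 2 - length) return std::nullopt;  // would overflow
  int num = (length + 2 * pad_width) - ngram_width + 1;
  return num > 0 ? num : 0;
}

int main() {
  std::cout << SafeNumNgrams(3, 1, 2).value_or(-1) << "\n";             // 4
  std::cout << SafeNumNgrams(3, INT32_MAX / 2, 2).has_value() << "\n";  // 0
}
```

The key point is that `pad_width > limit / 2 - length` is evaluated before any addition that could wrap, so the rejection happens while the arithmetic is still exact.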
+ OP_REQUIRES( + context, pad_width_ >= 0, + errors::InvalidArgument("Pad width should be >= 0 when " + "preserve_short_sequences is True and " + "ngram_widths are not provided, got ", + pad_width_)); int ngram_width = data_length + 2 * pad_width_; auto output_start = &ngrams_data[output_start_idx]; int num_ngrams = 1; diff --git a/tensorflow/core/kernels/summary_kernels.cc b/tensorflow/core/kernels/summary_kernels.cc index 7f888da69d6c7f..6764deee21d286 100644 --- a/tensorflow/core/kernels/summary_kernels.cc +++ b/tensorflow/core/kernels/summary_kernels.cc @@ -38,12 +38,20 @@ class CreateSummaryFileWriterOp : public OpKernel { void Compute(OpKernelContext* ctx) override { const Tensor* tmp; OP_REQUIRES_OK(ctx, ctx->input("logdir", &tmp)); + OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(tmp->shape()), + errors::InvalidArgument("logdir must be a scalar")); const string logdir = tmp->scalar<tstring>()(); OP_REQUIRES_OK(ctx, ctx->input("max_queue", &tmp)); - const int32 max_queue = tmp->scalar<int32>()(); + OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(tmp->shape()), + errors::InvalidArgument("max_queue must be a scalar")); + const int32_t max_queue = tmp->scalar<int32>()(); OP_REQUIRES_OK(ctx, ctx->input("flush_millis", &tmp)); - const int32 flush_millis = tmp->scalar<int32>()(); + OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(tmp->shape()), + errors::InvalidArgument("flush_millis must be a scalar")); + const int32_t flush_millis = tmp->scalar<int32>()(); OP_REQUIRES_OK(ctx, ctx->input("filename_suffix", &tmp)); + OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(tmp->shape()), + errors::InvalidArgument("filename_suffix must be a scalar")); const string filename_suffix = tmp->scalar<tstring>()(); core::RefCountPtr<SummaryWriterInterface> s; diff --git a/tensorflow/core/kernels/tile_ops.cc b/tensorflow/core/kernels/tile_ops.cc index 7d967d8761077f..4cc6557247d272 100644 --- a/tensorflow/core/kernels/tile_ops.cc +++ b/tensorflow/core/kernels/tile_ops.cc @@ -188,7 +188,8 @@ class TileOp : public OpKernel { context, multiples_array[i] >= 0, errors::InvalidArgument("Expected multiples[", i, "] >= 0, but got ", multiples_array[i])); - output_shape.AddDim(input.dim_size(i) * multiples_array[i]); + OP_REQUIRES_OK(context, output_shape.AddDimWithStatus( + input.dim_size(i) * multiples_array[i])); } if (output_shape == input.shape()) { context->set_output(0, input); diff --git a/tensorflow/core/kernels/unicode_ops.cc b/tensorflow/core/kernels/unicode_ops.cc index e6c8f4dfc42284..ab09dbe1d54293 100644 --- a/tensorflow/core/kernels/unicode_ops.cc +++ b/tensorflow/core/kernels/unicode_ops.cc @@ -533,6 +533,10 @@ class UnicodeEncodeOp : public OpKernel { const Tensor& input_splits = context->input(1); const auto input_splits_flat = input_splits.flat<SPLITS_TYPE>(); + OP_REQUIRES( + context, input_splits.NumElements() > 0, + errors::InvalidArgument("Input_splits should contain elements, but " + "given input_values has 0 elements")); // Operation will treat first argument in input_splits as if it were zero // regardless of its actual value since splits should begin with zero and // end with the length of the input values vector. diff --git a/tensorflow/core/kernels/unravel_index_op.cc b/tensorflow/core/kernels/unravel_index_op.cc index 11d9dac70f7046..c9e2b33f3f0b6d 100644 --- a/tensorflow/core/kernels/unravel_index_op.cc +++ b/tensorflow/core/kernels/unravel_index_op.cc @@ -13,6 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. 
==============================================================================*/ +#include + +#include "tensorflow/core/framework/types.pb.h" +#include "tensorflow/core/platform/types.h" #define EIGEN_USE_THREADS #include "tensorflow/core/framework/op_kernel.h" @@ -35,7 +39,8 @@ typedef Eigen::ThreadPoolDevice CPUDevice; template <typename Tidx> class UnravelIndexOp : public OpKernel { public: - explicit UnravelIndexOp(OpKernelConstruction* ctx) : OpKernel(ctx) {} + explicit UnravelIndexOp(OpKernelConstruction* ctx) + : OpKernel(ctx), dtidx_(DataTypeToEnum<Tidx>::v()) {} void Compute(OpKernelContext* ctx) override { const Tensor& indices_tensor = ctx->input(0); @@ -53,6 +58,33 @@ class UnravelIndexOp : public OpKernel { dims_tensor.shape().DebugString(), "\"")); auto dims = dims_tensor.vec<Tidx>(); + // Make sure dims does not contain a zero + double prod = 1; + uint64_t limit; + if (dtidx_ == DataType::DT_INT64) { + limit = kint64max; + } else { + limit = kint32max; + } + + for (int i = 0; i < dims.size(); i++) { + OP_REQUIRES( + ctx, dims(i) != 0, + errors::InvalidArgument("Input dims cannot contain a dim of zero, " + "but dims contains zero at index ", + i)); + OP_REQUIRES(ctx, dims(i) > 0, + errors::InvalidArgument( + "Input dims cannot be negative. Got dim = ", dims(i), + " at index ", i)); + // Check integer overflow + OP_REQUIRES( + ctx, prod <= limit / dims(i), + errors::InvalidArgument("Input dims product is causing integer " + "overflow: (", + dims, ")")); + prod = (prod * dims(i)); + } // Check to make sure indices is not out of boundary Eigen::Tensor<Tidx, 0, Eigen::RowMajor> dims_prod_eigen = dims.prod(); @@ -124,6 +156,7 @@ class UnravelIndexOp : public OpKernel { strides_shifted.reshape(reshape).broadcast(bcast); } } + const DataType dtidx_; }; #define REGISTER_KERNEL(type) \ diff --git a/tensorflow/core/kernels/xent_op.cc b/tensorflow/core/kernels/xent_op.cc index 0e826274f2ebd3..56c3fd9881bea8 100644 --- a/tensorflow/core/kernels/xent_op.cc +++ b/tensorflow/core/kernels/xent_op.cc @@ -44,7 +44,8 @@ class SoftmaxXentWithLogitsOp : public OpKernel { TensorShape shape_in = logits_in.shape(); BCast bcast(BCast::FromShape(logits_in.shape()), - BCast::FromShape(labels_in.shape())); + BCast::FromShape(labels_in.shape()), + /*fewer_dims_optimization=*/false); if (!logits_in.IsSameSize(labels_in)) { OP_REQUIRES(context, bcast.IsValid(), errors::InvalidArgument( @@ -76,20 +77,12 @@ class SoftmaxXentWithLogitsOp : public OpKernel { {0}, 1, shape_in, &back_out)); if (shape_in.dim_size(0) > 0) { functor::XentFunctor<Device, T> functor; - if (logits_in.IsSameSize(labels_in)) { - functor(context->eigen_device<Device>(), shape_in.AsEigenDSizes<2>(), - Eigen::array<Eigen::DenseIndex, 2>{1, 1}, - Eigen::array<Eigen::DenseIndex, 2>{1, 1}, logits_in.matrix<T>(), - labels_in.matrix<T>(), scratch.matrix<T>(), loss_out->vec<T>(), - back_out->matrix<T>()); - } else { - functor(context->eigen_device<Device>(), shape_in.AsEigenDSizes<2>(), - BCast::ToIndexArray<2>(bcast.x_bcast()), - BCast::ToIndexArray<2>(bcast.y_bcast()), - logits_in.template shaped<T, 2>(bcast.x_reshape()), - labels_in.template shaped<T, 2>(bcast.y_reshape()), - scratch.matrix<T>(), loss_out->vec<T>(), back_out->matrix<T>()); - } + functor(context->eigen_device<Device>(), shape_in.AsEigenDSizes<2>(), + BCast::ToIndexArray<2>(bcast.x_bcast()), + BCast::ToIndexArray<2>(bcast.y_bcast()), + logits_in.template shaped<T, 2>(bcast.x_reshape()), + labels_in.template shaped<T, 2>(bcast.y_reshape()), + scratch.matrix<T>(), loss_out->vec<T>(), back_out->matrix<T>()); } } }; diff --git a/tensorflow/core/ops/array_ops.cc b/tensorflow/core/ops/array_ops.cc index 8b2ded5e4f7a45..04d727441179ae 100644 --- 
a/tensorflow/core/ops/array_ops.cc +++ b/tensorflow/core/ops/array_ops.cc @@ -24,6 +24,7 @@ limitations under the License. #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/lib/core/errors.h" +#include "tensorflow/core/platform/types.h" #include "tensorflow/core/util/mirror_pad_mode.h" #include "tensorflow/core/util/padding.h" #include "tensorflow/core/util/strided_slice_op.h" @@ -168,7 +169,7 @@ Status TransposeShapeFn(InferenceContext* c) { for (int32 i = 0; i < rank; ++i) { int64 in_idx = data[i]; - if (in_idx >= rank) { + if (in_idx >= rank || in_idx <= -rank) { return errors::InvalidArgument("perm dim ", in_idx, " is out of range of input rank ", rank); } @@ -680,6 +681,12 @@ REGISTER_OP("SplitV") if (data[i] == -1 && c->ValueKnown(split_dim_size)) { size = split_dim_size - total_size; } + // If we have a negative known size (either explicit, or computed + // via -1), then the split sizes are invalid. + if (size < -1 || (size == -1 && c->ValueKnown(split_dim_size))) { + return errors::InvalidArgument("Split size at index ", i, + " must be >= 0. Got: ", size); + } TF_RETURN_IF_ERROR( c->ReplaceDim(input, split_dim, c->MakeDim(size), &output_shape)); c->set_output(i, output_shape); @@ -1631,11 +1638,21 @@ REGISTER_OP("ReverseSequence") return errors::InvalidArgument( "batch_dim must be < input rank: ", batch_dim, " vs. ", input_rank); } + if (seq_dim >= input_rank) { return errors::InvalidArgument( "seq_dim must be < input rank: ", seq_dim, " vs. ", input_rank); } + // To prevent out of bound access when calling c->Dim(input, batch_dim), + // batch_dim range [-1 * input rank, input rank) is allowed. However, + // the op implementation has a stricter bound for batch_dim requiring >= 0 + // value. Thus, perform strict check here. 
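The stricter `batch_dim` check that the comment above describes matters because `InferenceContext::Dim` itself tolerates negative indices, while the ReverseSequence kernel later uses `batch_dim` as a raw non-negative index. A toy model of that mismatch, using plain C++ stand-ins rather than the `InferenceContext` API:

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Mimics c->Dim's negative-index handling: -1 resolves to the last dimension,
// which is legal for shape inference but not for the kernel's batch_dim.
int64_t ResolveDim(int dim, const std::vector<int64_t>& shape) {
  int rank = static_cast<int>(shape.size());
  return shape[dim < 0 ? dim + rank : dim];
}

int main() {
  std::vector<int64_t> input = {2, 3, 5};
  std::cout << ResolveDim(-1, input) << "\n";  // 5: fine for inference...
  // ...but a kernel that uses batch_dim as a plain array index would read out
  // of bounds with -1, hence the explicit `batch_dim < 0` rejection above.
}
```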
+ if (batch_dim < 0) { + return errors::InvalidArgument("batch_dim must be >=0, got ", + batch_dim); + } + DimensionHandle batch_dim_dim = c->Dim(input, batch_dim); TF_RETURN_IF_ERROR( c->Merge(batch_dim_dim, c->Dim(seq_lens_shape, 0), &batch_dim_dim)); @@ -2841,7 +2858,10 @@ REGISTER_OP("QuantizeAndDequantizeV2") ShapeHandle minmax; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), minmax_rank, &minmax)); TF_RETURN_IF_ERROR(c->Merge(c->input(2), minmax, &minmax)); - if (axis != -1) { + if (axis < -1) { + return errors::InvalidArgument("axis should be at least -1, got ", + axis); + } else if (axis != -1) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), axis + 1, &input)); DimensionHandle depth; @@ -2873,7 +2893,10 @@ REGISTER_OP("QuantizeAndDequantizeV4") ShapeHandle minmax; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), minmax_rank, &minmax)); TF_RETURN_IF_ERROR(c->Merge(c->input(2), minmax, &minmax)); - if (axis != -1) { + if (axis < -1) { + return errors::InvalidArgument("axis should be at least -1, got ", + axis); + } else if (axis != -1) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), axis + 1, &input)); DimensionHandle depth; @@ -2901,7 +2924,10 @@ REGISTER_OP("QuantizeAndDequantizeV4Grad") ShapeHandle minmax; TF_RETURN_IF_ERROR(c->WithRank(c->input(2), minmax_rank, &minmax)); TF_RETURN_IF_ERROR(c->Merge(c->input(3), minmax, &minmax)); - if (axis != -1) { + if (axis < -1) { + return errors::InvalidArgument("axis should be at least -1, got ", + axis); + } else if (axis != -1) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), axis + 1, &input)); DimensionHandle depth; @@ -2934,7 +2960,10 @@ REGISTER_OP("QuantizeAndDequantizeV3") ShapeHandle minmax; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), minmax_rank, &minmax)); TF_RETURN_IF_ERROR(c->Merge(c->input(2), minmax, &minmax)); - if (axis != -1) { + if (axis < -1) { + return errors::InvalidArgument("axis should be at least -1, got ", + axis); + } else if (axis != -1) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), axis + 1, &input)); DimensionHandle depth; @@ -3001,6 +3030,16 @@ REGISTER_OP("Dequantize") if (!s.ok() && s.code() != error::NOT_FOUND) { return s; } + if (axis < -1) { + return errors::InvalidArgument("axis should be at least -1, got ", + axis); + } + auto input_dims = c->Rank(c->input(0)); + if (axis > input_dims) { + return errors::InvalidArgument( + "Axis must be less than input dimension(", input_dims, "), got ", + axis); + } const int minmax_rank = (axis == -1) ? 0 : 1; TF_RETURN_IF_ERROR(shape_inference::UnchangedShape(c)); ShapeHandle minmax; @@ -3008,6 +3047,13 @@ REGISTER_OP("Dequantize") TF_RETURN_IF_ERROR(c->WithRank(c->input(2), minmax_rank, &minmax)); if (axis != -1) { ShapeHandle input; + if (axis >= kint32max) { + // Check int32 max bound for a corner case to prevent integer flow + // when input actually has kint32max rank and above bound check is not + // triggered. 
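The `kint32max` rejection that follows exists because `axis` feeds an `axis + 1` expression a few lines later. A short standalone demonstration (not TF code) of why the bound must be checked before that addition:

```cpp
#include <cstdint>
#include <iostream>
#include <limits>

int main() {
  // With axis == INT32_MAX, the later WithRankAtLeast(..., axis + 1, ...)
  // needs a value one past the int32 range. Computing axis + 1 in 32-bit
  // arithmetic is signed overflow (undefined behavior); widening first
  // shows the value that was actually intended.
  int32_t axis = std::numeric_limits<int32_t>::max();
  int64_t wanted_rank = static_cast<int64_t>(axis) + 1;
  std::cout << wanted_rank << "\n";  // 2147483648, not representable in int32
  // The shape-inference fix therefore rejects axis >= kint32max up front.
}
```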
+ return errors::InvalidArgument( + "Axis cannot be >= kint32max value, got ", axis); + } TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), axis + 1, &input)); DimensionHandle depth; TF_RETURN_IF_ERROR( diff --git a/tensorflow/core/ops/array_ops_test.cc b/tensorflow/core/ops/array_ops_test.cc index 412c926d3863a6..2fe45474bfaa44 100644 --- a/tensorflow/core/ops/array_ops_test.cc +++ b/tensorflow/core/ops/array_ops_test.cc @@ -1363,6 +1363,8 @@ TEST(ArrayOpsTest, QuantizeAndDequantizeV2_ShapeFn) { INFER_ERROR("Shapes must be equal rank, but are 1 and 0", op, "[1,2,?,4,5];[];[1]"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1,2,?,4,5];[1];[1]"); + (*op.node_def.mutable_attr())["axis"].set_i(-2); + INFER_ERROR("axis should be at least -1, got -2", op, "?;?;?"); } TEST(ArrayOpsTest, SpaceToBatch_ShapeFn) { diff --git a/tensorflow/core/ops/count_ops.cc b/tensorflow/core/ops/count_ops.cc index 8de0a2ef95459b..95e8026fd8b663 100644 --- a/tensorflow/core/ops/count_ops.cc +++ b/tensorflow/core/ops/count_ops.cc @@ -41,6 +41,8 @@ Status DenseCountSparseOutputShapeFn(InferenceContext *c) { } Status SparseCountSparseOutputShapeFn(InferenceContext *c) { + ShapeHandle unused; + TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &unused)); auto rank = c->Dim(c->input(0), 1); auto nvals = c->UnknownDim(); c->set_output(0, c->Matrix(nvals, rank)); // out.indices diff --git a/tensorflow/core/ops/cudnn_rnn_ops.cc b/tensorflow/core/ops/cudnn_rnn_ops.cc index 1dd7659e137fe3..ff6d2852a66271 100644 --- a/tensorflow/core/ops/cudnn_rnn_ops.cc +++ b/tensorflow/core/ops/cudnn_rnn_ops.cc @@ -81,11 +81,17 @@ REGISTER_OP("CudnnRNN") .Attr("seed2: int = 0") .Attr("is_training: bool = true") .SetShapeFn([](InferenceContext* c) { + ShapeHandle unused; auto input_shape = c->input(0); auto input_h_shape = c->input(1); + TF_RETURN_IF_ERROR(c->WithRank(input_shape, 3, &unused)); + TF_RETURN_IF_ERROR(c->WithRank(input_h_shape, 3, &unused)); + TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 1, &unused)); + auto seq_length = c->Dim(input_shape, 0); auto batch_size = c->Dim(input_shape, 1); auto num_units = c->Dim(input_h_shape, 2); + string direction; TF_RETURN_IF_ERROR(c->GetAttr("direction", &direction)); string rnn_mode; @@ -124,8 +130,13 @@ REGISTER_OP("CudnnRNNV2") .Attr("seed2: int = 0") .Attr("is_training: bool = true") .SetShapeFn([](InferenceContext* c) { + ShapeHandle unused; auto input_shape = c->input(0); auto input_h_shape = c->input(1); + TF_RETURN_IF_ERROR(c->WithRank(input_shape, 3, &unused)); + TF_RETURN_IF_ERROR(c->WithRank(input_h_shape, 3, &unused)); + TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 1, &unused)); + auto seq_length = c->Dim(input_shape, 0); auto batch_size = c->Dim(input_shape, 1); auto num_units = c->Dim(input_h_shape, 2); @@ -171,16 +182,26 @@ REGISTER_OP("CudnnRNNV3") .Attr("is_training: bool = true") .Attr("time_major: bool = true") .SetShapeFn([](InferenceContext* c) { + ShapeHandle unused; auto input_shape = c->input(0); auto input_h_shape = c->input(1); auto input_c_shape = c->input(2); + TF_RETURN_IF_ERROR(c->WithRank(input_shape, 3, &unused)); + TF_RETURN_IF_ERROR(c->WithRank(input_h_shape, 3, &unused)); + TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 1, &unused)); + TF_RETURN_IF_ERROR(c->WithRank(c->input(4), 1, &unused)); + auto max_seq_length = c->Dim(input_shape, 0); auto batch_size = c->Dim(input_shape, 1); auto num_units = c->Dim(input_h_shape, 2); + string direction; TF_RETURN_IF_ERROR(c->GetAttr("direction", &direction)); string rnn_mode; 
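The `WithRank` calls added to the CudnnRNN shape functions above follow one pattern: pin the rank of an input before any `c->Dim(shape, i)` access can index past it. A toy version of that contract, with illustrative names rather than the real `InferenceContext` API:

```cpp
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

// Illustrative stand-in for a shape; the real API uses ShapeHandle.
using Shape = std::vector<int64_t>;

// Returns an error message if the shape's rank differs from `expected`,
// mirroring the "Shape must be rank N but is rank M" errors in the tests.
std::optional<std::string> WithRank(const Shape& s, int expected) {
  if (static_cast<int>(s.size()) != expected) {
    return "Shape must be rank " + std::to_string(expected) +
           " but is rank " + std::to_string(s.size());
  }
  return std::nullopt;  // OK
}

int main() {
  Shape input_h = {5, 3};  // rank 2, but CudnnRNN expects a rank-3 input_h
  if (auto err = WithRank(input_h, 3)) {
    std::cout << *err << "\n";  // rejected before Dim(input_h, 2) ever runs
  }
}
```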
TF_RETURN_IF_ERROR(c->GetAttr("rnn_mode", &rnn_mode)); + if (rnn_mode == "lstm") { + TF_RETURN_IF_ERROR(c->WithRank(input_c_shape, 3, &unused)); + } int dir_count = (direction == "bidirectional") ? 2 : 1; DimensionHandle output_size; TF_RETURN_IF_ERROR(c->Multiply(num_units, dir_count, &output_size)); diff --git a/tensorflow/core/ops/cudnn_rnn_ops_test.cc b/tensorflow/core/ops/cudnn_rnn_ops_test.cc index 8e8c8193a14a48..91043efa425a0b 100644 --- a/tensorflow/core/ops/cudnn_rnn_ops_test.cc +++ b/tensorflow/core/ops/cudnn_rnn_ops_test.cc @@ -68,6 +68,11 @@ TEST(CudnnRNNOpsTest, ForwardLstm_ShapeFn) { .Attr("direction", "unidirectional") .Finalize(&op.node_def)); INFER_OK(op, input_shapes_desc, output_shapes_desc); + INFER_ERROR("Shape must be rank 3 ", op, "[];[?,?,?];[?,?,?];[?]"); + INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[];[?,?,?];[?]"); + // Disabled because the kernel does not check shape of input_c. + // INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[?,?,?];[?];[?]"); + INFER_ERROR("Shape must be rank 1 ", op, "[?,?,?];[?,?,?];[?,?,?];[]"); } TEST(CudnnRNNOpsTest, ForwardV2Lstm_ShapeFn) { @@ -100,6 +105,11 @@ TEST(CudnnRNNOpsTest, ForwardV2Lstm_ShapeFn) { .Attr("direction", "unidirectional") .Finalize(&op.node_def)); INFER_OK(op, input_shapes_desc, output_shapes_desc); + INFER_ERROR("Shape must be rank 3 ", op, "[];[?,?,?];[?,?,?];[?]"); + INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[];[?,?,?];[?]"); + // Disabled because the kernel does not check shape of input_c. + // INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[?,?,?];[?];[?]"); + INFER_ERROR("Shape must be rank 1 ", op, "[?,?,?];[?,?,?];[?,?,?];[]"); } TEST(CudnnRNNOpsTest, ForwardV3Lstm_ShapeFn) { @@ -137,6 +147,52 @@ TEST(CudnnRNNOpsTest, ForwardV3Lstm_ShapeFn) { .Attr("direction", "unidirectional") .Finalize(&op.node_def)); INFER_OK(op, input_shapes_desc, output_shapes_desc); + INFER_ERROR("Shape must be rank 3 ", op, "[];[?,?,?];[?,?,?];[?];[?]"); + INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[];[?,?,?];[?];[?]"); + INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[?,?,?];[];[?];[?]"); + INFER_ERROR("Shape must be rank 1 ", op, "[?,?,?];[?,?,?];[?,?,?];[];[?]"); + INFER_ERROR("Shape must be rank 1 ", op, "[?,?,?];[?,?,?];[?,?,?];[?];[]"); +} + +TEST(CudnnRNNOpsTest, ForwardV3Gru) { + int max_seq_length = 2; + int batch_size = 3; + int num_units = 4; + int num_layers = 5; + int dir_count = 1; + std::vector input_shape = {max_seq_length, batch_size, num_units}; + std::vector input_h_shape = {num_layers * dir_count, batch_size, + num_units}; + std::vector input_c_shape = {num_layers * dir_count, batch_size, + num_units}; + std::vector output_shape = {max_seq_length, batch_size, + num_units * dir_count}; + std::vector seq_lengths_shape = {batch_size}; + auto shape_to_str = [](const std::vector& v) { + return strings::StrCat("[", absl::StrJoin(v, ","), "]"); + }; + string input_shapes_desc = strings::StrCat( + shape_to_str(input_shape), ";", shape_to_str(input_h_shape), ";", + shape_to_str(input_c_shape), ";", "[?]", ";", + shape_to_str(seq_lengths_shape)); + string output_shapes_desc = "[d0_0,d0_1,d1_2];in1;[];?;?"; + + ShapeInferenceTestOp op("CudnnRNNV3"); + TF_ASSERT_OK(NodeDefBuilder("test", "CudnnRNNV3") + .Input({"input", 0, DT_FLOAT}) + .Input({"input_h", 0, DT_FLOAT}) + .Input({"input_c", 0, DT_FLOAT}) + .Input({"params", 0, DT_FLOAT}) + .Input({"sequence_lengths", 0, DT_INT32}) + .Attr("rnn_mode", "gru") + .Attr("input_mode", "auto_select") + .Attr("direction", "unidirectional") + 
.Finalize(&op.node_def)); + INFER_OK(op, input_shapes_desc, output_shapes_desc); + INFER_ERROR("Shape must be rank 3 ", op, "[];[?,?,?];[];[?];[?]"); + INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[];[];[?];[?]"); + INFER_ERROR("Shape must be rank 1 ", op, "[?,?,?];[?,?,?];[];[];[?]"); + INFER_ERROR("Shape must be rank 1 ", op, "[?,?,?];[?,?,?];[];[?];[]"); } } // end namespace tensorflow diff --git a/tensorflow/core/ops/math_ops.cc b/tensorflow/core/ops/math_ops.cc index f0d85244b7b2d1..8861d22966fa1f 100644 --- a/tensorflow/core/ops/math_ops.cc +++ b/tensorflow/core/ops/math_ops.cc @@ -1449,6 +1449,13 @@ Status RangeSize(const Tensor* start_t, const Tensor* limit_t, Eigen::numext::abs(delta)) : (Eigen::numext::ceil( Eigen::numext::abs((limit - start) / delta)))); + + // Undefined behaviour if size will not fit into int64_t + if (size > std::numeric_limits::max()) { + return errors::InvalidArgument("Requires ((limit - start) / delta) <= ", + std::numeric_limits::max()); + } + c->set_output(0, c->Vector(static_cast(size))); return Status::OK(); } @@ -1656,6 +1663,11 @@ REGISTER_OP("Bincount") return Status::OK(); } + if (size_tensor->dims() != 0) { + return errors::InvalidArgument("Shape must be rank 0 but is rank ", + size_tensor->dims()); + } + // Return `[size]` shape if size is known. int32 size_val = size_tensor->scalar()(); if (size_val < 0) { @@ -1687,6 +1699,10 @@ REGISTER_OP("DenseBincount") c->set_output(0, c->UnknownShape()); return Status::OK(); } + if (size_tensor->dims() != 0) { + return errors::InvalidArgument("Shape must be rank 0 but is rank ", + size_tensor->dims()); + } int64 size_val; DataType dtype; @@ -1728,6 +1744,10 @@ REGISTER_OP("SparseBincount") c->set_output(0, c->UnknownShape()); return Status::OK(); } + if (size_tensor->dims() != 0) { + return errors::InvalidArgument("Shape must be rank 0 but is rank ", + size_tensor->dims()); + } int64 size_val; DataType dtype; diff --git a/tensorflow/core/ops/ragged_array_ops.cc b/tensorflow/core/ops/ragged_array_ops.cc index 4eefa6181c2214..f113c779195002 100644 --- a/tensorflow/core/ops/ragged_array_ops.cc +++ b/tensorflow/core/ops/ragged_array_ops.cc @@ -64,6 +64,7 @@ REGISTER_OP("RaggedCross") .SetShapeFn([](shape_inference::InferenceContext* c) { std::vector ragged_values_types; std::vector ragged_splits_types; + std::vector sparse_values_types; std::vector dense_types; TF_RETURN_IF_ERROR( @@ -71,15 +72,21 @@ REGISTER_OP("RaggedCross") TF_RETURN_IF_ERROR( c->GetAttr("ragged_splits_types", &ragged_splits_types)); TF_RETURN_IF_ERROR(c->GetAttr("dense_types", &dense_types)); + TF_RETURN_IF_ERROR( + c->GetAttr("sparse_values_types", &sparse_values_types)); int num_ragged = ragged_values_types.size(); if (num_ragged != ragged_splits_types.size()) { return errors::InvalidArgument( - "Parameters `values` and `row_splits` must be the same length"); + "ragged values and splits must have the same length."); } int num_sparse; TF_RETURN_IF_ERROR(c->GetAttr("Nsparse", &num_sparse)); + if (num_sparse != sparse_values_types.size()) { + return errors::InvalidArgument( + "sparse indices and values must have the same length"); + } ShapeHandle out_values = c->UnknownShapeOfRank(1); ShapeHandle out_splits = c->UnknownShapeOfRank(1); @@ -99,7 +106,14 @@ REGISTER_OP("RaggedCross") int dense_start = num_ragged * 2 + num_sparse * 3; for (int i = 0; i < dense_types.size(); ++i) { ShapeHandle dense_input = c->input(i + dense_start); - int64 batch_size = c->Value(c->Dim(dense_input, 0)); + int32 rank = c->Rank(dense_input); + if (rank == 
InferenceContext::kUnknownRank) { + continue; + } else if (rank != 2) { + return errors::InvalidArgument( + "tf.ragged.cross only supports inputs with rank=2"); + } + int64_t batch_size = c->Value(c->Dim(dense_input, 0)); if (batch_size != InferenceContext::kUnknownDim) { ShapeHandle row_splits = c->Vector(batch_size + 1); if (!c->Merge(out_splits, row_splits, &out_splits).ok()) { diff --git a/tensorflow/core/ops/sparse_ops.cc b/tensorflow/core/ops/sparse_ops.cc index 906cef1f5ecafe..c79aeebd94bd33 100644 --- a/tensorflow/core/ops/sparse_ops.cc +++ b/tensorflow/core/ops/sparse_ops.cc @@ -16,6 +16,8 @@ limitations under the License. #include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference.h" +#include "tensorflow/core/framework/types.pb.h" +#include "tensorflow/core/platform/errors.h" namespace tensorflow { @@ -158,6 +160,8 @@ REGISTER_OP("DeserializeSparse") .Attr("Tserialized: {string, variant} = DT_STRING") .SetShapeFn([](InferenceContext* c) { // serialized sparse is [?, ..., ?, 3] vector. + ShapeHandle unused_shape; + TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &unused_shape)); DimensionHandle unused; TF_RETURN_IF_ERROR(c->WithValue(c->Dim(c->input(0), -1), 3, &unused)); c->set_output(0, c->Matrix(InferenceContext::kUnknownDim, @@ -619,6 +623,8 @@ REGISTER_OP("SparseFillEmptyRows") DimensionHandle unused_dim; TF_RETURN_IF_ERROR(c->Merge(c->Dim(input_indices, 1), c->Dim(input_shape, 0), &unused_dim)); + if (c->Value(c->NumElements(input_shape)) == 0) + return errors::InvalidArgument("dense_shape must not be empty"); ShapeHandle output_indices = c->Matrix(InferenceContext::kUnknownDim, c->NumElements(input_shape)); ShapeHandle output_values = c->Vector(InferenceContext::kUnknownDim); diff --git a/tensorflow/core/ops/tpu_cross_replica_ops.cc b/tensorflow/core/ops/tpu_cross_replica_ops.cc index 1f10fe3136dd1c..404c3e59eff941 100644 --- a/tensorflow/core/ops/tpu_cross_replica_ops.cc +++ b/tensorflow/core/ops/tpu_cross_replica_ops.cc @@ -32,17 +32,35 @@ REGISTER_OP("AllToAll") .Attr("split_count: int") .SetShapeFn([](InferenceContext* c) { ShapeHandle input = c->input(0); + ShapeHandle group_assignment = c->input(1); + int64 rank; if (c->RankKnown(input)) { rank = c->Rank(input); } else { return errors::InvalidArgument("input's rank is unknown."); } + int concat_dimension; int split_dimension; int split_count; TF_RETURN_IF_ERROR(c->GetAttr("split_count", &split_count)); + if (split_count < 1) { + return errors::InvalidArgument("split_count ", split_count, + " must at least be one."); + } + if (c->RankKnown(group_assignment) && c->Rank(group_assignment) != 2) { + return errors::InvalidArgument("group_assignment must have rank 2."); + } + DimensionHandle num_replicas_per_group = c->Dim(group_assignment, 1); + if (c->ValueKnown(num_replicas_per_group) && + (c->Value(num_replicas_per_group) != split_count)) { + return errors::InvalidArgument( + "split_count ", split_count, + " must equal the size of the second dimension of group_assignment ", + c->Value(num_replicas_per_group)); + } TF_RETURN_IF_ERROR(c->GetAttr("concat_dimension", &concat_dimension)); @@ -66,6 +84,12 @@ REGISTER_OP("AllToAll") dims[i] = c->MakeDim(c->Value(dims[i]) * split_count); } if (i == split_dimension) { + if (c->ValueKnown(dims[i]) && + (c->Value(dims[i]) % split_count != 0)) { + return errors::InvalidArgument( + "input dimension ", c->Value(dims[i]), + " not divisible by split_count ", split_count); + } dims[i] = 
c->MakeDim(c->Value(dims[i]) / split_count); } } diff --git a/tensorflow/core/platform/BUILD b/tensorflow/core/platform/BUILD index 1e0fa66e353675..e5707750eba93a 100644 --- a/tensorflow/core/platform/BUILD +++ b/tensorflow/core/platform/BUILD @@ -667,6 +667,24 @@ cc_library( ], ) +cc_library( + name = "statusor", + srcs = [ + "statusor.cc", + "statusor_internals.h", + ], + hdrs = ["statusor.h"], + deps = [ + ":logging", + ":macros", + ":status", + "//tensorflow/core/lib/core:errors", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/strings:str_format", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "str_util", srcs = ["str_util.cc"], @@ -887,6 +905,18 @@ tf_cc_test( ], ) +tf_cc_test( + name = "statusor_test", + size = "small", + srcs = ["statusor_test.cc"], + deps = [ + ":statusor", + "//tensorflow/core:lib", + "//tensorflow/core:test", + "//tensorflow/core:test_main", + ], +) + # This is a hacky, do-nothing, binary that makes it easy to verify ability to # build, link, and in some cases run all of the libraries under platform. # Realistically, most of this would be covered by tests but at this point @@ -1404,6 +1434,7 @@ filegroup( "stacktrace.h", "stacktrace_handler.h", "status.h", + "statusor.h", "str_util.h", "strcat.h", "stringpiece.h", @@ -1651,6 +1682,9 @@ filegroup( "status.cc", "stack_frame.h", "status.h", + "statusor.h", + "statusor_internals.h", + "statusor.cc", "str_util.cc", "str_util.h", "strcat.cc", diff --git a/tensorflow/core/platform/ctstring_internal.h b/tensorflow/core/platform/ctstring_internal.h index 9524267176c24e..9a01d48fb1e91f 100644 --- a/tensorflow/core/platform/ctstring_internal.h +++ b/tensorflow/core/platform/ctstring_internal.h @@ -63,9 +63,9 @@ static inline uint32_t TF_swap32(uint32_t host_int) { #endif #if TF_TSTRING_LITTLE_ENDIAN -#define TF_le32toh(x) TF_swap32(x) -#else // TF_TSTRING_LITTLE_ENDIAN #define TF_le32toh(x) x +#else // TF_TSTRING_LITTLE_ENDIAN +#define TF_le32toh(x) TF_swap32(x) #endif // TF_TSTRING_LITTLE_ENDIAN static inline size_t TF_align16(size_t i) { return (i + 0xF) & ~0xF; } diff --git a/tensorflow/core/platform/ctstring_test.cc b/tensorflow/core/platform/ctstring_test.cc index 4d82bcd87c327b..8624cc4ee73b49 100644 --- a/tensorflow/core/platform/ctstring_test.cc +++ b/tensorflow/core/platform/ctstring_test.cc @@ -18,6 +18,7 @@ limitations under the License. 
#include #include +#include "tensorflow/core/platform/ctstring_internal.h" #include "tensorflow/core/platform/test.h" static const char kLongString[] = @@ -329,3 +330,29 @@ TEST(TF_CTStringTest, ResizeReserve) { TF_TString_Dealloc(&s70); } } + +TEST(TF_CTStringTest, OffsetType) { + { + TF_TString s71; + + TF_TString_Init(&s71); + size_t header_length = 24; + size_t size = 8; + TF_TString_ResizeUninitialized(&s71, header_length + size); + uint32_t save_size = s71.u.offset.size; + uint32_t save_offset = s71.u.offset.offset; + uint32_t save_count = s71.u.offset.count; + + s71.u.offset.size = TF_TString_ToInternalSizeT(size, TF_TSTR_OFFSET); + s71.u.offset.offset = header_length; + s71.u.offset.count = 0; + EXPECT_EQ(size, TF_TString_GetSize(&s71)); + EXPECT_EQ(TF_TSTR_OFFSET, TF_TString_GetType(&s71)); + + // restore state so string can be deallocated + s71.u.offset.size = save_size; + s71.u.offset.offset = save_offset; + s71.u.offset.count = save_count; + TF_TString_Dealloc(&s71); + } +} diff --git a/tensorflow/stream_executor/lib/statusor.cc b/tensorflow/core/platform/statusor.cc similarity index 89% rename from tensorflow/stream_executor/lib/statusor.cc rename to tensorflow/core/platform/statusor.cc index e0e851f96ef6fe..55d7df37c2b6b5 100644 --- a/tensorflow/stream_executor/lib/statusor.cc +++ b/tensorflow/core/platform/statusor.cc @@ -13,13 +13,12 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#include "tensorflow/stream_executor/lib/statusor.h" +#include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/platform/logging.h" -namespace stream_executor { -namespace port { +namespace tensorflow { namespace internal_statusor { void Helper::HandleInvalidStatusCtorArg(Status* status) { @@ -36,5 +35,4 @@ void Helper::Crash(const Status& status) { } } // namespace internal_statusor -} // namespace port -} // namespace stream_executor +} // namespace tensorflow diff --git a/tensorflow/core/platform/statusor.h b/tensorflow/core/platform/statusor.h new file mode 100644 index 00000000000000..94cd3e484b9fc0 --- /dev/null +++ b/tensorflow/core/platform/statusor.h @@ -0,0 +1,393 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// StatusOr is the union of a Status object and a T object. StatusOr models +// the concept of an object that is either a value, or an error Status +// explaining why such a value is not present. To this end, StatusOr does not +// allow its Status value to be Status::OK. +// +// The primary use-case for StatusOr is as the return value of a +// function which may fail. 
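As the opening paragraph notes, StatusOr never stores an OK status without a value, which is what makes `ok()` trustworthy. A compilable miniature of that invariant, using a simplified stand-in class rather than the real implementation (which DCHECKs and falls back to an INTERNAL error):

```cpp
#include <cassert>
#include <iostream>
#include <utility>

enum class Code { kOk, kFallbackError };

// Simplified stand-in: storing an "OK" status with no value is coerced into
// an error state, mirroring the real class's DCHECK-plus-fallback behavior.
template <typename T>
class MiniStatusOr {
 public:
  MiniStatusOr(Code error) : code_(error) {
    if (code_ == Code::kOk) code_ = Code::kFallbackError;
  }
  MiniStatusOr(T value) : code_(Code::kOk), value_(std::move(value)) {}
  bool ok() const { return code_ == Code::kOk; }
  const T& value() const {
    assert(ok());
    return value_;
  }

 private:
  Code code_;
  T value_{};
};

int main() {
  MiniStatusOr<int> bad(Code::kOk);  // misuse: an "OK" status, no value
  std::cout << bad.ok() << "\n";     // 0 -- coerced into an error state
  MiniStatusOr<int> good(42);
  std::cout << good.ok() << " " << good.value() << "\n";  // 1 42
}
```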
+// +// Example client usage for a StatusOr, where T is not a pointer: +// +// StatusOr result = DoBigCalculationThatCouldFail(); +// if (result.ok()) { +// float answer = result.ValueOrDie(); +// printf("Big calculation yielded: %f", answer); +// } else { +// LOG(ERROR) << result.status(); +// } +// +// Example client usage for a StatusOr: +// +// StatusOr result = FooFactory::MakeNewFoo(arg); +// if (result.ok()) { +// std::unique_ptr foo(result.ValueOrDie()); +// foo->DoSomethingCool(); +// } else { +// LOG(ERROR) << result.status(); +// } +// +// Example client usage for a StatusOr>: +// +// StatusOr> result = FooFactory::MakeNewFoo(arg); +// if (result.ok()) { +// std::unique_ptr foo = std::move(result.ValueOrDie()); +// foo->DoSomethingCool(); +// } else { +// LOG(ERROR) << result.status(); +// } +// +// Example factory implementation returning StatusOr: +// +// StatusOr FooFactory::MakeNewFoo(int arg) { +// if (arg <= 0) { +// return tensorflow::InvalidArgument("Arg must be positive"); +// } else { +// return new Foo(arg); +// } +// } +// +// Note that the assignment operators require that destroying the currently +// stored value cannot invalidate the argument; in other words, the argument +// cannot be an alias for the current value, or anything owned by the current +// value. +#ifndef TENSORFLOW_CORE_PLATFORM_STATUSOR_H_ +#define TENSORFLOW_CORE_PLATFORM_STATUSOR_H_ + +#include "tensorflow/core/platform/macros.h" +#include "tensorflow/core/platform/status.h" +#include "tensorflow/core/platform/statusor_internals.h" + +namespace tensorflow { + +#if defined(__clang__) +// Only clang supports warn_unused_result as a type annotation. +template +class TF_MUST_USE_RESULT StatusOr; +#endif + +template +class StatusOr : private internal_statusor::StatusOrData, + private internal_statusor::TraitsBase< + std::is_copy_constructible::value, + std::is_move_constructible::value> { + template + friend class StatusOr; + + typedef internal_statusor::StatusOrData Base; + + public: + typedef T element_type; // DEPRECATED: use `value_type`. + typedef T value_type; + + // Constructs a new StatusOr with Status::UNKNOWN status. This is marked + // 'explicit' to try to catch cases like 'return {};', where people think + // StatusOr> will be initialized with an empty vector, + // instead of a Status::UNKNOWN status. + explicit StatusOr(); + + // StatusOr will be copy constructible/assignable if T is copy + // constructible. + StatusOr(const StatusOr&) = default; + StatusOr& operator=(const StatusOr&) = default; + + // StatusOr will be move constructible/assignable if T is move + // constructible. + StatusOr(StatusOr&&) = default; + StatusOr& operator=(StatusOr&&) = default; + + // Conversion copy/move constructor, T must be convertible from U. + template ::value>::type* = nullptr> + StatusOr(const StatusOr& other); + template ::value>::type* = nullptr> + StatusOr(StatusOr&& other); + + // Conversion copy/move assignment operator, T must be convertible from U. + template ::value>::type* = nullptr> + StatusOr& operator=(const StatusOr& other); + template ::value>::type* = nullptr> + StatusOr& operator=(StatusOr&& other); + + // Constructs a new StatusOr with the given value. After calling this + // constructor, calls to ValueOrDie() will succeed, and calls to status() will + // return OK. + // + // NOTE: Not explicit - we want to use StatusOr as a return type + // so it is convenient and sensible to be able to do 'return T()' + // when the return type is StatusOr. 
+ // + // REQUIRES: T is copy constructible. + StatusOr(const T& value); + + // Constructs a new StatusOr with the given non-ok status. After calling + // this constructor, calls to ValueOrDie() will CHECK-fail. + // + // NOTE: Not explicit - we want to use StatusOr as a return + // value, so it is convenient and sensible to be able to do 'return + // Status()' when the return type is StatusOr. + // + // REQUIRES: !status.ok(). This requirement is DCHECKed. + // In optimized builds, passing Status::OK() here will have the effect + // of passing tensorflow::error::INTERNAL as a fallback. + StatusOr(const Status& status); + StatusOr& operator=(const Status& status); + + // TODO(b/62186997): Add operator=(T) overloads. + + // Similar to the `const T&` overload. + // + // REQUIRES: T is move constructible. + StatusOr(T&& value); + + // RValue versions of the operations declared above. + StatusOr(Status&& status); + StatusOr& operator=(Status&& status); + + // Returns this->status().ok() + bool ok() const { return this->status_.ok(); } + + // Returns a reference to our status. If this contains a T, then + // returns Status::OK(). + const Status& status() const &; + Status status() &&; + + // Returns a reference to our current value, or CHECK-fails if !this->ok(). + // + // Note: for value types that are cheap to copy, prefer simple code: + // + // T value = statusor.ValueOrDie(); + // + // Otherwise, if the value type is expensive to copy, but can be left + // in the StatusOr, simply assign to a reference: + // + // T& value = statusor.ValueOrDie(); // or `const T&` + // + // Otherwise, if the value type supports an efficient move, it can be + // used as follows: + // + // T value = std::move(statusor).ValueOrDie(); + // + // The std::move on statusor instead of on the whole expression enables + // warnings about possible uses of the statusor object after the move. + // C++ style guide waiver for ref-qualified overloads granted in cl/143176389 + // See go/ref-qualifiers for more details on such overloads. + const T& ValueOrDie() const &; + T& ValueOrDie() &; + const T&& ValueOrDie() const &&; + T&& ValueOrDie() &&; + + // Returns a reference to the current value. + // + // REQUIRES: this->ok() == true, otherwise the behavior is undefined. + // + // Use this->ok() or `operator bool()` to verify that there is a current + // value. Alternatively, see ValueOrDie() for a similar API that guarantees + // CHECK-failing if there is no current value. + const T& operator*() const&; + T& operator*() &; + const T&& operator*() const&&; + T&& operator*() &&; + + // Returns a pointer to the current value. + // + // REQUIRES: this->ok() == true, otherwise the behavior is undefined. + // + // Use this->ok() or `operator bool()` to verify that there is a current + // value. + const T* operator->() const; + T* operator->(); + + T ConsumeValueOrDie() { return std::move(ValueOrDie()); } + + // Ignores any errors. This method does nothing except potentially suppress + // complaints from any tools that are checking that errors are not dropped on + // the floor. 
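The ref-qualified `ValueOrDie` overloads documented above are what make `std::move(statusor).ValueOrDie()` efficient. A self-contained sketch of the same overload set on a toy type (a hypothetical `FakeStatusOr`, not this class):

```cpp
#include <iostream>
#include <string>
#include <utility>

// Hypothetical FakeStatusOr: just enough to show the ref-qualified overloads.
template <typename T>
class FakeStatusOr {
 public:
  explicit FakeStatusOr(T v) : value_(std::move(v)) {}
  const T& ValueOrDie() const& { return value_; }    // lvalue access: borrow
  T&& ValueOrDie() && { return std::move(value_); }  // rvalue access: steal

 private:
  T value_;
};

int main() {
  FakeStatusOr<std::string> statusor(std::string(1000, 'x'));
  std::string copied = statusor.ValueOrDie();            // copies 1000 chars
  std::string moved = std::move(statusor).ValueOrDie();  // steals the buffer
  std::cout << copied.size() << " " << moved.size() << "\n";  // 1000 1000
}
```

Moving the StatusOr object rather than the expression result is what lets use-after-move tooling flag any later access to `statusor`.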
+ void IgnoreError() const; +}; + +//////////////////////////////////////////////////////////////////////////////// +// Implementation details for StatusOr + +template +StatusOr::StatusOr() : Base(Status(tensorflow::error::UNKNOWN, "")) {} + +template +StatusOr::StatusOr(const T& value) : Base(value) {} + +template +StatusOr::StatusOr(const Status& status) : Base(status) {} + +template +StatusOr& StatusOr::operator=(const Status& status) { + this->Assign(status); + return *this; +} + +template +StatusOr::StatusOr(T&& value) : Base(std::move(value)) {} + +template +StatusOr::StatusOr(Status&& status) : Base(std::move(status)) {} + +template +StatusOr& StatusOr::operator=(Status&& status) { + this->Assign(std::move(status)); + return *this; +} + +template +template ::value>::type*> +inline StatusOr::StatusOr(const StatusOr& other) + : Base(static_cast::Base&>(other)) {} + +template +template ::value>::type*> +inline StatusOr& StatusOr::operator=(const StatusOr& other) { + if (other.ok()) + this->Assign(other.ValueOrDie()); + else + this->Assign(other.status()); + return *this; +} + +template +template ::value>::type*> +inline StatusOr::StatusOr(StatusOr&& other) + : Base(static_cast::Base&&>(other)) {} + +template +template ::value>::type*> +inline StatusOr& StatusOr::operator=(StatusOr&& other) { + if (other.ok()) { + this->Assign(std::move(other).ValueOrDie()); + } else { + this->Assign(std::move(other).status()); + } + return *this; +} + +template +const Status& StatusOr::status() const & { + return this->status_; +} +template +Status StatusOr::status() && { + // Note that we copy instead of moving the status here so that + // ~StatusOrData() can call ok() without invoking UB. + return ok() ? Status::OK() : this->status_; +} + +template +const T& StatusOr::ValueOrDie() const & { + this->EnsureOk(); + return this->data_; +} + +template +T& StatusOr::ValueOrDie() & { + this->EnsureOk(); + return this->data_; +} + +template +const T&& StatusOr::ValueOrDie() const && { + this->EnsureOk(); + return std::move(this->data_); +} + +template +T&& StatusOr::ValueOrDie() && { + this->EnsureOk(); + return std::move(this->data_); +} + +template +const T* StatusOr::operator->() const { + this->EnsureOk(); + return &this->data_; +} + +template +T* StatusOr::operator->() { + this->EnsureOk(); + return &this->data_; +} + +template +const T& StatusOr::operator*() const& { + this->EnsureOk(); + return this->data_; +} + +template +T& StatusOr::operator*() & { + this->EnsureOk(); + return this->data_; +} + +template +const T&& StatusOr::operator*() const&& { + this->EnsureOk(); + return std::move(this->data_); +} + +template +T&& StatusOr::operator*() && { + this->EnsureOk(); + return std::move(this->data_); +} + +template +void StatusOr::IgnoreError() const { + // no-op +} + +#define TF_ASSERT_OK_AND_ASSIGN(lhs, rexpr) \ + TF_ASSERT_OK_AND_ASSIGN_IMPL( \ + TF_STATUS_MACROS_CONCAT_NAME(_status_or_value, __COUNTER__), lhs, \ + rexpr); + +#define TF_ASSERT_OK_AND_ASSIGN_IMPL(statusor, lhs, rexpr) \ + auto statusor = (rexpr); \ + ASSERT_TRUE(statusor.status().ok()) << statusor.status(); \ + lhs = std::move(statusor.ValueOrDie()) + +#define TF_STATUS_MACROS_CONCAT_NAME(x, y) TF_STATUS_MACROS_CONCAT_IMPL(x, y) +#define TF_STATUS_MACROS_CONCAT_IMPL(x, y) x##y + +#define TF_ASSIGN_OR_RETURN(lhs, rexpr) \ + TF_ASSIGN_OR_RETURN_IMPL( \ + TF_STATUS_MACROS_CONCAT_NAME(_status_or_value, __COUNTER__), lhs, rexpr) + +#define TF_ASSIGN_OR_RETURN_IMPL(statusor, lhs, rexpr) \ + auto statusor = (rexpr); \ + if 
(TF_PREDICT_FALSE(!statusor.ok())) { \ + return statusor.status(); \ + } \ + lhs = std::move(statusor.ValueOrDie()) + +} // namespace tensorflow + +#endif // TENSORFLOW_CORE_PLATFORM_STATUSOR_H_ diff --git a/tensorflow/stream_executor/lib/statusor_internals.h b/tensorflow/core/platform/statusor_internals.h similarity index 95% rename from tensorflow/stream_executor/lib/statusor_internals.h rename to tensorflow/core/platform/statusor_internals.h index d3a6026f4725c0..ebd48e4c29c988 100644 --- a/tensorflow/stream_executor/lib/statusor_internals.h +++ b/tensorflow/core/platform/statusor_internals.h @@ -13,14 +13,13 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#ifndef TENSORFLOW_STREAM_EXECUTOR_LIB_STATUSOR_INTERNALS_H_ -#define TENSORFLOW_STREAM_EXECUTOR_LIB_STATUSOR_INTERNALS_H_ +#ifndef TENSORFLOW_CORE_PLATFORM_STATUSOR_INTERNALS_H_ +#define TENSORFLOW_CORE_PLATFORM_STATUSOR_INTERNALS_H_ #include "tensorflow/core/platform/macros.h" -#include "tensorflow/stream_executor/lib/status.h" +#include "tensorflow/core/platform/status.h" -namespace stream_executor { -namespace port { +namespace tensorflow { namespace internal_statusor { class Helper { @@ -243,7 +242,6 @@ struct TraitsBase { }; } // namespace internal_statusor -} // namespace port -} // namespace stream_executor +} // namespace tensorflow -#endif // TENSORFLOW_STREAM_EXECUTOR_LIB_STATUSOR_INTERNALS_H_ +#endif // TENSORFLOW_CORE_PLATFORM_STATUSOR_INTERNALS_H_ diff --git a/tensorflow/stream_executor/lib/statusor_test.cc b/tensorflow/core/platform/statusor_test.cc similarity index 99% rename from tensorflow/stream_executor/lib/statusor_test.cc rename to tensorflow/core/platform/statusor_test.cc index 6b59eaa402923f..ba6b2a8c73fbd1 100644 --- a/tensorflow/stream_executor/lib/statusor_test.cc +++ b/tensorflow/core/platform/statusor_test.cc @@ -15,18 +15,17 @@ limitations under the License. // Unit tests for StatusOr -#include "tensorflow/stream_executor/lib/statusor.h" +#include "tensorflow/core/platform/statusor.h" #include #include -#include "tensorflow/core/platform/test.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/platform/macros.h" +#include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" -namespace stream_executor { -namespace port { +namespace tensorflow { namespace { class Base1 { @@ -672,5 +671,4 @@ void BM_StatusOrFactoryFailLongMsg(::testing::benchmark::State& state) { BENCHMARK(BM_StatusOrFactoryFailLongMsg); } // namespace -} // namespace port -} // namespace stream_executor +} // namespace tensorflow diff --git a/tensorflow/core/public/version.h b/tensorflow/core/public/version.h index 48d7971a519660..03b3d08ffd0f34 100644 --- a/tensorflow/core/public/version.h +++ b/tensorflow/core/public/version.h @@ -22,7 +22,7 @@ limitations under the License. // tensorflow/tools/pip_package/setup.py #define TF_MAJOR_VERSION 2 #define TF_MINOR_VERSION 5 -#define TF_PATCH_VERSION 0 +#define TF_PATCH_VERSION 3 // TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. 
"-alpha", "-alpha.1", // "-beta", "-rc", "-rc.1") diff --git a/tensorflow/core/util/image_resizer_state.h b/tensorflow/core/util/image_resizer_state.h index 84459c9447edc2..e554b208e14fff 100644 --- a/tensorflow/core/util/image_resizer_state.h +++ b/tensorflow/core/util/image_resizer_state.h @@ -135,11 +135,16 @@ struct ImageResizerState { void ValidateAndCreateOutput(OpKernelContext* context, const Tensor& input) { ValidateAndCalculateOutputSize(context, input); if (!context->status().ok()) return; - OP_REQUIRES_OK(context, context->allocate_output( - 0, - TensorShape({input.dim_size(0), out_height, - out_width, input.dim_size(3)}), - &output)); + + TensorShape shape; + // Guard against shape overflow + OP_REQUIRES_OK(context, shape.AddDimWithStatus(batch_size)); + + OP_REQUIRES_OK(context, shape.AddDimWithStatus(out_height)); + OP_REQUIRES_OK(context, shape.AddDimWithStatus(out_width)); + OP_REQUIRES_OK(context, shape.AddDimWithStatus(channels)); + + OP_REQUIRES_OK(context, context->allocate_output(0, shape, &output)); } int64 batch_size; diff --git a/tensorflow/core/util/saved_tensor_slice_util.h b/tensorflow/core/util/saved_tensor_slice_util.h index 1f9768f5163d25..27916095bfb1fd 100644 --- a/tensorflow/core/util/saved_tensor_slice_util.h +++ b/tensorflow/core/util/saved_tensor_slice_util.h @@ -59,6 +59,9 @@ Status ParseShapeAndSlice(const string& shape_and_slice, TensorShape* shape, template struct SaveTypeTraits; +template +int TensorProtoDataSize(const TensorProto& t); + template const typename SaveTypeTraits::SavedType* TensorProtoData( const TensorProto& t); @@ -95,6 +98,10 @@ void Fill(T* data, size_t n, TensorProto* t); #define TENSOR_PROTO_EXTRACT_TYPE(TYPE, FIELD, FTYPE) \ TENSOR_PROTO_EXTRACT_TYPE_HELPER(TYPE, FIELD, FTYPE, FTYPE) \ template <> \ + inline int TensorProtoDataSize(const TensorProto& t) { \ + return t.FIELD##_val_size(); \ + } \ + template <> \ inline void Fill(const TYPE* data, size_t n, TensorProto* t) { \ typename protobuf::RepeatedField copy(data, data + n); \ t->mutable_##FIELD##_val()->Swap(©); \ @@ -104,6 +111,10 @@ void Fill(T* data, size_t n, TensorProto* t); #define TENSOR_PROTO_EXTRACT_TYPE_COMPLEX(TYPE, FIELD, FTYPE) \ TENSOR_PROTO_EXTRACT_TYPE_HELPER(TYPE, FIELD, FTYPE, TYPE) \ template <> \ + inline int TensorProtoDataSize(const TensorProto& t) { \ + return t.FIELD##_val_size() / 2; \ + } \ + template <> \ inline void Fill(const TYPE* data, size_t n, TensorProto* t) { \ const FTYPE* sub = reinterpret_cast(data); \ typename protobuf::RepeatedField copy(sub, sub + 2 * n); \ @@ -136,6 +147,11 @@ TENSOR_PROTO_EXTRACT_TYPE(quint16, int, int32); template <> struct SaveTypeTraits : SaveTypeTraits {}; +template <> +inline int TensorProtoDataSize(const TensorProto& t) { + return t.int_val_size(); +} + template <> inline const int32* TensorProtoData(const TensorProto& t) { static_assert(SaveTypeTraits::supported, @@ -158,6 +174,11 @@ struct SaveTypeTraits { typedef protobuf::RepeatedField RepeatedField; }; +template <> +inline int TensorProtoDataSize(const TensorProto& t) { + return t.half_val_size(); +} + template <> inline const int* TensorProtoData(const TensorProto& t) { return t.half_val().data(); @@ -187,6 +208,11 @@ struct SaveTypeTraits { typedef protobuf::RepeatedPtrField RepeatedField; }; +template <> +inline int TensorProtoDataSize(const TensorProto& t) { + return t.string_val_size(); +} + template <> inline const string* const* TensorProtoData(const TensorProto& t) { static_assert(SaveTypeTraits::supported, diff --git 
a/tensorflow/core/util/sparse/sparse_tensor.h b/tensorflow/core/util/sparse/sparse_tensor.h index 341290dbbc6982..be03390fef8ea1 100644 --- a/tensorflow/core/util/sparse/sparse_tensor.h +++ b/tensorflow/core/util/sparse/sparse_tensor.h @@ -30,10 +30,12 @@ limitations under the License. #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/strings/str_util.h" +#include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/util/sparse/dim_comparator.h" #include "tensorflow/core/util/sparse/group_iterator.h" +#include "tensorflow/core/platform/statusor.h" namespace tensorflow { namespace sparse { @@ -177,9 +179,9 @@ class SparseTensor { // element of the array representing one dimension. The start is the start // index at each dimension and the size is the size at each dimension. template - static SparseTensor Slice(const SparseTensor& tensor, - const gtl::ArraySlice& start, - const gtl::ArraySlice& size); + static StatusOr Slice(const SparseTensor& tensor, + const gtl::ArraySlice start, + const gtl::ArraySlice size); // Picks out the dimensions according to `dim_indices`. std::vector PickDims(gtl::ArraySlice dim_indices) const { @@ -577,9 +579,9 @@ inline Status SparseTensor::Split(const SparseTensor& input_tensor, } template -inline SparseTensor SparseTensor::Slice(const SparseTensor& input_tensor, - const gtl::ArraySlice& start, - const gtl::ArraySlice& size) { +inline StatusOr SparseTensor::Slice( + const SparseTensor& input_tensor, const gtl::ArraySlice start, + const gtl::ArraySlice size) { TensorShape output_shape(input_tensor.shape()); const int dims = input_tensor.dims(); @@ -590,15 +592,16 @@ inline SparseTensor SparseTensor::Slice(const SparseTensor& input_tensor, const int64 input_size = output_shape.dim_size(dim); const int64 start_index = start[dim]; const int64 slice_size = size[dim]; - if (start_index + slice_size < input_size) { + if (start_index < input_size - slice_size) { // The entire selection is within input boundaries. - output_shape.set_dim(dim, slice_size); + TF_RETURN_IF_ERROR(output_shape.SetDimWithStatus(dim, slice_size)); } else if (start_index < input_size) { // The selection starts within input boundaries, but goes beyond them. - output_shape.set_dim(dim, input_size - start_index); + TF_RETURN_IF_ERROR( + output_shape.SetDimWithStatus(dim, input_size - start_index)); } else { // The selection is entirely out of input boundaries. - output_shape.set_dim(dim, 0); + TF_RETURN_IF_ERROR(output_shape.SetDimWithStatus(dim, 0)); } } diff --git a/tensorflow/core/util/sparse/sparse_tensor_test.cc b/tensorflow/core/util/sparse/sparse_tensor_test.cc index f898ba586126cd..df1fa6f082ff78 100644 --- a/tensorflow/core/util/sparse/sparse_tensor_test.cc +++ b/tensorflow/core/util/sparse/sparse_tensor_test.cc @@ -24,6 +24,7 @@ limitations under the License. 
#include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/random/simple_philox.h" #include "tensorflow/core/lib/strings/str_util.h" +#include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" @@ -690,7 +691,8 @@ TEST(SparseTensorTest, Slice) { size[0] = 2; size[1] = 3; - SparseTensor slice = SparseTensor::Slice(st, start, size); + TF_ASSERT_OK_AND_ASSIGN(SparseTensor slice, + SparseTensor::Slice(st, start, size)); EXPECT_EQ(TensorShape(slice.shape()), TensorShape({2, 3})); EXPECT_EQ(slice.values().NumElements(), 3); @@ -724,8 +726,9 @@ TEST(SparseTensorTest, SliceReducesOutputDimension) { TF_ASSERT_OK(SparseTensor::Create(ids, vals, TensorShape({num_rows, num_columns}), &st)); - SparseTensor slice = - SparseTensor::Slice(st, {num_rows + 1, 1}, {1, num_columns}); + TF_ASSERT_OK_AND_ASSIGN( + SparseTensor slice, + SparseTensor::Slice(st, {num_rows + 1, 1}, {1, num_columns})); EXPECT_EQ(TensorShape(slice.shape()), TensorShape({0, 1})); } diff --git a/tensorflow/core/util/tensor_slice_reader.cc b/tensorflow/core/util/tensor_slice_reader.cc index 58c4c22ce7a9e2..fd98d1b4041a30 100644 --- a/tensorflow/core/util/tensor_slice_reader.cc +++ b/tensorflow/core/util/tensor_slice_reader.cc @@ -168,9 +168,13 @@ void TensorSliceReader::LoadShard(int shard) const { "checkpoint"); if (!status_.ok()) return; for (const SavedSliceMeta& ssm : sts.meta().tensor()) { - TensorShape ssm_shape(ssm.shape()); + TensorShape ssm_shape; + status_ = TensorShape::BuildTensorShapeBase(ssm.shape(), &ssm_shape); + if (!status_.ok()) return; for (const TensorSliceProto& tsp : ssm.slice()) { - TensorSlice ss_slice(tsp); + TensorSlice ss_slice; + status_ = TensorSlice::BuildTensorSlice(tsp, &ss_slice); + if (!status_.ok()) return; status_ = RegisterTensorSlice(ssm.name(), ssm_shape, ssm.type(), fname, ss_slice, &tensors_); if (!status_.ok()) return; @@ -248,7 +252,9 @@ Status TensorSliceReader::GetTensor( slice = tss->Slices().begin()->second.slice; } - std::unique_ptr t(new tensorflow::Tensor(type, shape)); + std::unique_ptr t(new tensorflow::Tensor); + Status s = tensorflow::Tensor::BuildTensor(type, shape, t.get()); + if (!s.ok()) return s; bool success = false; #define READER_COPY(dt) \ diff --git a/tensorflow/core/util/tensor_slice_reader.h b/tensorflow/core/util/tensor_slice_reader.h index 0fb2e11bf8dd08..bc0a91523fe36c 100644 --- a/tensorflow/core/util/tensor_slice_reader.h +++ b/tensorflow/core/util/tensor_slice_reader.h @@ -181,6 +181,22 @@ bool TensorSliceReader::CopySliceData(const string& name, << slice_s.DebugString() << ": computed key = " << key; return false; } + // Ensure the TensorSlice contains the expected amount of data. 
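The checkpoint-loading hunks above all make the same move: crash-on-bad-input constructors become fallible builders (`BuildTensorShapeBase`, `BuildTensorSlice`, `BuildTensor`) whose Status is checked before anything is allocated. A minimal sketch of that validate-before-construct idea, with simplified stand-in types rather than the `TensorShape` API:

```cpp
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

struct ToyShape {
  std::vector<int64_t> dims;
};

// Fallible builder: refuse negative or overflowing dims from an untrusted
// proto instead of CHECK-crashing inside a constructor.
std::optional<ToyShape> BuildToyShape(const std::vector<int64_t>& proto_dims) {
  ToyShape s;
  int64_t elems = 1;
  for (int64_t d : proto_dims) {
    if (d < 0 || (d != 0 && elems > (int64_t{1} << 62) / d)) {
      return std::nullopt;  // reject before the multiply can overflow
    }
    elems *= d;
    s.dims.push_back(d);
  }
  return s;
}

int main() {
  std::cout << BuildToyShape({4, 5}).has_value() << "\n";   // 1
  std::cout << BuildToyShape({-4, 5}).has_value() << "\n";  // 0
}
```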
+ TensorShape shp_s; + Status s = slice_s.SliceTensorShape(tss->shape(), &shp_s); + if (!s.ok()) { + VLOG(1) << "Failed to slice tensor " << name << ", slice " + << slice_s.DebugString() << ": " << s; + return false; + } + if (checkpoint::TensorProtoDataSize<T>(sts.data().data()) != + shp_s.num_elements()) { + VLOG(1) << "Tensor " << name << ", slice " << slice_s.DebugString() + << " had an unexpected amount of data: expected = " + << shp_s.num_elements() << ", got = " + << checkpoint::TensorProtoDataSize<T>(sts.data().data()); + return false; + } CopyDataFromTensorSliceToTensorSlice( tss->shape(), slice_s, slice, checkpoint::TensorProtoData<T>(sts.data().data()), data); diff --git a/tensorflow/core/util/tensor_slice_reader_test.cc b/tensorflow/core/util/tensor_slice_reader_test.cc index fe617e8e30d67e..eb853886071541 100644 --- a/tensorflow/core/util/tensor_slice_reader_test.cc +++ b/tensorflow/core/util/tensor_slice_reader_test.cc @@ -13,15 +13,20 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#include - #include "tensorflow/core/util/tensor_slice_reader.h" +#include +#include + +#include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/versions.pb.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/core/stringpiece.h" +#include "tensorflow/core/lib/io/iterator.h" #include "tensorflow/core/lib/io/path.h" +#include "tensorflow/core/lib/io/table.h" +#include "tensorflow/core/lib/io/table_builder.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/env.h" @@ -30,6 +35,7 @@ limitations under the License. #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/public/version.h" +#include "tensorflow/core/util/saved_tensor_slice.pb.h" #include "tensorflow/core/util/saved_tensor_slice_util.h" #include "tensorflow/core/util/tensor_slice_reader_cache.h" #include "tensorflow/core/util/tensor_slice_writer.h" @@ -309,6 +315,177 @@ TEST_SIMPLE_INT(int16, int32) TEST_SIMPLE_INT(int8, int32) TEST_SIMPLE_INT(uint8, int32) +// Modifies the SavedTensorSlices messages in a checkpoint to allow creating +// malformed or unsupported checkpoints. +void MutateSavedTensorSlices( + const std::string& fname, + const std::function<std::string(SavedTensorSlices)>& mutator) { + table::Options options; + options.compression = table::kNoCompression; + + // Read all entries from the table. + std::vector<std::pair<std::string, std::string>> entries; + { + std::unique_ptr<RandomAccessFile> file; + TF_CHECK_OK(Env::Default()->NewRandomAccessFile(fname, &file)); + uint64 file_size; + TF_CHECK_OK(Env::Default()->GetFileSize(fname, &file_size)); + table::Table* t; + TF_CHECK_OK(table::Table::Open(options, file.get(), file_size, &t)); + std::unique_ptr<table::Table> table(t); + std::unique_ptr<table::Iterator> it(table->NewIterator()); + for (it->Seek(""); it->Valid(); it->Next()) { + entries.emplace_back(it->key(), it->value()); + } + TF_CHECK_OK(it->status()); + } + + // Rewrite the table, mutating each value.
+ { + std::unique_ptr<WritableFile> file; + TF_CHECK_OK(Env::Default()->NewWritableFile(fname, &file)); + table::TableBuilder builder(options, file.get()); + for (const auto& entry : entries) { + SavedTensorSlices sts; + CHECK(sts.ParseFromString(entry.second)); + builder.Add(entry.first, mutator(std::move(sts))); + } + TF_CHECK_OK(builder.Finish()); + TF_CHECK_OK(file->Close()); + } +} + +TEST(TensorSliceReaderTest, MissingTensorType) { + const string fname = io::JoinPath(testing::TmpDir(), "invalid_checkpoint"); + TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder); + const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; + TensorShape shape({4, 5}); + TensorSlice slice = TensorSlice::ParseOrDie("0,2:-"); + TF_CHECK_OK(writer.Add("test", shape, slice, data)); + TF_CHECK_OK(writer.Finish()); + + MutateSavedTensorSlices(fname, [](SavedTensorSlices sts) { + if (sts.has_meta()) { + for (auto& tensor : *sts.mutable_meta()->mutable_tensor()) { + tensor.clear_type(); + } + } + return sts.SerializeAsString(); + }); + + TensorSliceReader reader(fname, OpenTableTensorSliceReader); + TF_CHECK_OK(reader.status()); + + // The tensor should be present, but loading it should fail due to the + // unset (invalid) type. + EXPECT_TRUE(reader.HasTensor("test", nullptr, nullptr)); + std::unique_ptr<Tensor> tensor; + EXPECT_FALSE(reader.GetTensor("test", &tensor).ok()); +} + +TEST(TensorSliceReaderTest, UnsupportedTensorType) { + const string fname = io::JoinPath(testing::TmpDir(), "int32_ref_checkpoint"); + TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder); + const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; + TensorShape shape({4, 5}); + TensorSlice slice = TensorSlice::ParseOrDie("0,2:-"); + TF_CHECK_OK(writer.Add("test", shape, slice, data)); + TF_CHECK_OK(writer.Finish()); + + MutateSavedTensorSlices(fname, [](SavedTensorSlices sts) { + if (sts.has_meta()) { + for (auto& tensor : *sts.mutable_meta()->mutable_tensor()) { + tensor.set_type(DT_INT32_REF); + } + } + return sts.SerializeAsString(); + }); + + TensorSliceReader reader(fname, OpenTableTensorSliceReader); + TF_CHECK_OK(reader.status()); + + // The tensor should be present, but loading it should fail due to the + // unsupported type. + EXPECT_TRUE(reader.HasTensor("test", nullptr, nullptr)); + std::unique_ptr<Tensor> tensor; + EXPECT_FALSE(reader.GetTensor("test", &tensor).ok()); +} + +TEST(TensorSliceReaderTest, NegativeTensorShapeDimension) { + const string fname = + io::JoinPath(testing::TmpDir(), "negative_dim_checkpoint"); + TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder); + const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; + TF_CHECK_OK(writer.Add("test", TensorShape({4, 5}), + TensorSlice::ParseOrDie("0,2:-"), data)); + TF_CHECK_OK(writer.Finish()); + + MutateSavedTensorSlices(fname, [](SavedTensorSlices sts) { + if (sts.has_meta()) { + for (auto& tensor : *sts.mutable_meta()->mutable_tensor()) { + for (auto& dim : *tensor.mutable_shape()->mutable_dim()) { + dim.set_size(-dim.size()); + } + } + } + return sts.SerializeAsString(); + }); + + TensorSliceReader reader(fname, OpenTableTensorSliceReader); + // The negative dimension should cause loading to fail.
+ EXPECT_FALSE(reader.status().ok()); +} + +TEST(TensorSliceReaderTest, InvalidTensorSlice) { + const string fname = + io::JoinPath(testing::TmpDir(), "invalid_slice_checkpoint"); + TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder); + const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; + TF_CHECK_OK(writer.Add("test", TensorShape({4, 5}), + TensorSlice::ParseOrDie("0,2:-"), data)); + TF_CHECK_OK(writer.Finish()); + + MutateSavedTensorSlices(fname, [](SavedTensorSlices sts) { + if (sts.has_meta()) { + for (auto& tensor : *sts.mutable_meta()->mutable_tensor()) { + tensor.mutable_slice(0)->mutable_extent(0)->set_length(-10); + } + } + return sts.SerializeAsString(); + }); + + TensorSliceReader reader(fname, OpenTableTensorSliceReader); + // The negative extent length should cause loading to fail. + EXPECT_FALSE(reader.status().ok()); +} + +TEST(TensorSliceReaderTest, MissingTensorData) { + const string fname = + io::JoinPath(testing::TmpDir(), "missing_data_checkpoint"); + TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder); + const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; + TF_ASSERT_OK(writer.Add("test", TensorShape({4, 5}), + TensorSlice::ParseOrDie("0,2:-"), data)); + TF_ASSERT_OK(writer.Finish()); + + MutateSavedTensorSlices(fname, [&](SavedTensorSlices sts) { + if (sts.has_data()) { + // Replace the data with only 4 elements. + Fill(data, 4, sts.mutable_data()->mutable_data()); + } + return sts.SerializeAsString(); + }); + + TensorSliceReader reader(fname, OpenTableTensorSliceReader); + TF_ASSERT_OK(reader.status()); + + // The tensor should be present, but loading it should fail due to the missing + // data. + EXPECT_TRUE(reader.HasTensor("test", nullptr, nullptr)); + std::unique_ptr<Tensor> tensor; + EXPECT_FALSE(reader.GetTensor("test", &tensor).ok()); +} + void CachedTensorSliceReaderTesterHelper( const TensorSliceWriter::CreateBuilderFunction& create_function, const TensorSliceReader::OpenTableFunction& open_function) { diff --git a/tensorflow/go/tensor.go b/tensorflow/go/tensor.go index cfb389d472dfab..6c58aeb81ff484 100644 --- a/tensorflow/go/tensor.go +++ b/tensorflow/go/tensor.go @@ -98,9 +98,9 @@ func NewTensor(value interface{}) (*Tensor, error) { raw := tensorData(t.c) - runtime.SetFinalizer(t, func(t *Tensor) { + defer runtime.SetFinalizer(t, func(t *Tensor) { if dataType == String { - t.clearTStrings(raw, nflattened) + t.clearTStrings(raw, int64(nbytes/C.sizeof_TF_TString)) } t.finalize() @@ -111,7 +111,7 @@ func NewTensor(value interface{}) (*Tensor, error) { if isAllArray(val.Type()) { // We have arrays all the way down, or just primitive types. We can // just copy the memory in as it is all contiguous.
- if err := copyPtr(buf, unpackEFace(value).data, int(val.Type().Size())); err != nil { + if _, err := copyPtr(buf, unpackEFace(value).data, int(val.Type().Size())); err != nil { return nil, err } } else { @@ -119,7 +119,10 @@ func NewTensor(value interface{}) (*Tensor, error) { // not be contiguous with the others or in the order we might // expect, so we need to work our way down to each slice of // primitives and copy them individually - if err := encodeTensorWithSlices(buf, val, shape); err != nil { + if n, err := encodeTensorWithSlices(buf, val, shape); err != nil { + // Set nbytes to count of bytes written for deferred call to + // runtime.SetFinalizer + nbytes = uintptr(n) return nil, err } } @@ -486,13 +489,13 @@ func sizeVarUint(v uint64) int { // encodeTensorWithSlices writes v to the specified buffer using the format specified in // c_api.h. Use stringEncoder for String tensors. -func encodeTensorWithSlices(w *bytes.Buffer, v reflect.Value, shape []int64) error { +func encodeTensorWithSlices(w *bytes.Buffer, v reflect.Value, shape []int64) (int, error) { // If current dimension is a slice, verify that it has the expected size // Go's type system makes that guarantee for arrays. if v.Kind() == reflect.Slice { expected := int(shape[0]) if v.Len() != expected { - return fmt.Errorf("mismatched slice lengths: %d and %d", v.Len(), expected) + return 0, fmt.Errorf("mismatched slice lengths: %d and %d", v.Len(), expected) } } else if v.Kind() == reflect.String { s := v.Interface().(string) @@ -501,7 +504,7 @@ func encodeTensorWithSlices(w *bytes.Buffer, v reflect.Value, shape []int64) err ptr := unsafe.Pointer(&tstr) return copyPtr(w, ptr, C.sizeof_TF_TString) } else if v.Kind() != reflect.Array { - return fmt.Errorf("unsupported type %v", v.Type()) + return 0, fmt.Errorf("unsupported type %v", v.Type()) } // Once we have just a single dimension we can just copy the data @@ -514,15 +517,17 @@ func encodeTensorWithSlices(w *bytes.Buffer, v reflect.Value, shape []int64) err return copyPtr(w, ptr, v.Len()*int(elt.Type().Size())) } + n := 0 subShape := shape[1:] for i := 0; i < v.Len(); i++ { - err := encodeTensorWithSlices(w, v.Index(i), subShape) + j, err := encodeTensorWithSlices(w, v.Index(i), subShape) if err != nil { - return err + return n+j, err } + n += j } - return nil + return n, nil } // It isn't safe to use reflect.SliceHeader as it uses a uintptr for Data and @@ -536,15 +541,14 @@ type sliceHeader struct { // copyPtr copies the backing data for a slice or array directly into w. Note // we don't need to worry about byte ordering because we want the natural byte // order for the machine we're running on. 
-func copyPtr(w *bytes.Buffer, ptr unsafe.Pointer, l int) error { +func copyPtr(w *bytes.Buffer, ptr unsafe.Pointer, l int) (int, error) { // Convert our slice header into a []byte so we can call w.Write b := *(*[]byte)(unsafe.Pointer(&sliceHeader{ Data: ptr, Len: l, Cap: l, })) - _, err := w.Write(b) - return err + return w.Write(b) } func bug(format string, args ...interface{}) error { diff --git a/tensorflow/lite/BUILD b/tensorflow/lite/BUILD index 8d16389badccbd..160a3d4e45a8ae 100644 --- a/tensorflow/lite/BUILD +++ b/tensorflow/lite/BUILD @@ -762,6 +762,7 @@ cc_library( copts = tflite_copts_warnings() + tflite_copts(), deps = [ ":kernel_api", + ":macros", "//tensorflow/lite/c:common", "//tensorflow/lite/schema:schema_fbs", ], @@ -787,6 +788,7 @@ cc_test( features = ["-dynamic_link_test_srcs"], # see go/dynamic_link_test_srcs deps = [ ":util", + "//tensorflow/lite/c:c_api_types", "//tensorflow/lite/c:common", "//tensorflow/lite/schema:schema_fbs", "@com_google_googletest//:gtest", diff --git a/tensorflow/lite/c/common.c b/tensorflow/lite/c/common.c index 00dd0260cbcc90..4e35a52ee6b5a3 100644 --- a/tensorflow/lite/c/common.c +++ b/tensorflow/lite/c/common.c @@ -21,7 +21,7 @@ limitations under the License. #include #endif // TF_LITE_STATIC_MEMORY -int TfLiteIntArrayGetSizeInBytes(int size) { +size_t TfLiteIntArrayGetSizeInBytes(int size) { static TfLiteIntArray dummy; return sizeof(dummy) + sizeof(dummy.data[0]) * size; } @@ -45,7 +45,7 @@ int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size, #ifndef TF_LITE_STATIC_MEMORY TfLiteIntArray* TfLiteIntArrayCreate(int size) { - int alloc_size = TfLiteIntArrayGetSizeInBytes(size); + size_t alloc_size = TfLiteIntArrayGetSizeInBytes(size); if (alloc_size <= 0) return NULL; TfLiteIntArray* ret = (TfLiteIntArray*)malloc(alloc_size); if (!ret) return ret; diff --git a/tensorflow/lite/c/common.h b/tensorflow/lite/c/common.h index 56e0f8d54f1ddf..9697310e2cd225 100644 --- a/tensorflow/lite/c/common.h +++ b/tensorflow/lite/c/common.h @@ -94,7 +94,7 @@ typedef struct TfLiteIntArray { // Given the size (number of elements) in a TfLiteIntArray, calculate its size // in bytes. -int TfLiteIntArrayGetSizeInBytes(int size); +size_t TfLiteIntArrayGetSizeInBytes(int size); #ifndef TF_LITE_STATIC_MEMORY // Create a array of a given `size` (uninitialized entries). diff --git a/tensorflow/lite/core/subgraph.cc b/tensorflow/lite/core/subgraph.cc index 7dc45d3faf6285..fedcbf26114047 100644 --- a/tensorflow/lite/core/subgraph.cc +++ b/tensorflow/lite/core/subgraph.cc @@ -649,27 +649,6 @@ TfLiteStatus Subgraph::CheckInputAndOutputForOverlap(const int* input_indices, return kTfLiteOk; } -namespace { -// Multiply two sizes and return true if overflow occurred; -// This is based off tensorflow/overflow.h but is simpler as we already -// have unsigned numbers. It is also generalized to work where sizeof(size_t) -// is not 8. -TfLiteStatus MultiplyAndCheckOverflow(size_t a, size_t b, size_t* product) { - // Multiplying a * b where a and b are size_t cannot result in overflow in a - // size_t accumulator if both numbers have no non-zero bits in their upper - // half. - constexpr size_t size_t_bits = 8 * sizeof(size_t); - constexpr size_t overflow_upper_half_bit_position = size_t_bits / 2; - *product = a * b; - // If neither integers have non-zero bits past 32 bits can't overflow. - // Otherwise check using slow devision. 
- if (TFLITE_EXPECT_FALSE((a | b) >> overflow_upper_half_bit_position != 0)) { - if (a != 0 && *product / a != b) return kTfLiteError; - } - return kTfLiteOk; -} -} // namespace - TfLiteStatus Subgraph::BytesRequired(TfLiteType type, const int* dims, size_t dims_size, size_t* bytes) { TF_LITE_ENSURE(&context_, bytes != nullptr); diff --git a/tensorflow/lite/kernels/depthwise_conv.cc b/tensorflow/lite/kernels/depthwise_conv.cc index fdbcae3607f06c..f277477bc2e3d0 100644 --- a/tensorflow/lite/kernels/depthwise_conv.cc +++ b/tensorflow/lite/kernels/depthwise_conv.cc @@ -115,6 +115,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); TF_LITE_ENSURE_EQ(context, NumDimensions(filter), 4); + TF_LITE_ENSURE(context, params->dilation_height_factor > 0); + TF_LITE_ENSURE(context, params->dilation_width_factor > 0); const TfLiteType data_type = input->type; @@ -176,6 +178,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { if (data_type != kTfLiteFloat32) { TF_LITE_ENSURE_EQ(context, filter->quantization.type, kTfLiteAffineQuantization); + TF_LITE_ENSURE(context, filter->quantization.type != kTfLiteNoQuantization); const auto* affine_quantization = reinterpret_cast<TfLiteAffineQuantization*>( filter->quantization.params); @@ -195,6 +198,7 @@ } if (is_hybrid) { + TF_LITE_ENSURE(context, filter->quantization.type != kTfLiteNoQuantization); const auto* affine_quantization = reinterpret_cast<TfLiteAffineQuantization*>( filter->quantization.params); @@ -495,6 +499,7 @@ TfLiteStatus EvalHybridPerChannel(TfLiteContext* context, TfLiteNode* node, op_params.weights_offset = 0; op_params.float_activation_min = output_activation_min; op_params.float_activation_max = output_activation_max; + TF_LITE_ENSURE(context, filter->quantization.type != kTfLiteNoQuantization); const auto* affine_quantization = reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params); if (kernel_type == kReference) { diff --git a/tensorflow/lite/kernels/div.cc b/tensorflow/lite/kernels/div.cc index f744b4ba1b7f63..51623a969d1b11 100644 --- a/tensorflow/lite/kernels/div.cc +++ b/tensorflow/lite/kernels/div.cc @@ -216,9 +216,23 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); - if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) { + // TODO(b/193904910): This can be written with C++ templates +#define TF_LITE_CHECK_DIV_NON_ZERO(data_type) \ + const auto* input2_data = GetTensorData<data_type>(input2); \ + const size_t input2_elements = input2->bytes / sizeof(data_type); \ + for (size_t i = 0; i < input2_elements; i++) { \ + TF_LITE_ENSURE(context, input2_data[i] != 0); \ + } + + if (output->type == kTfLiteFloat32) { + // Div by zero is OK in this case: just like in TF, infinities are + // returned, so we don't do a check at this point.
+ EvalDiv(context, node, params, data, input1, input2, output); + } else if (output->type == kTfLiteInt32) { + TF_LITE_CHECK_DIV_NON_ZERO(int32_t); EvalDiv(context, node, params, data, input1, input2, output); } else if (output->type == kTfLiteUInt8) { + TF_LITE_CHECK_DIV_NON_ZERO(uint8_t); TF_LITE_ENSURE_OK( context, EvalQuantized(context, node, params, data, input1, input2, output)); @@ -229,6 +243,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { output->type); return kTfLiteError; } +#undef TF_LITE_CHECK_DIV_NON_ZERO return kTfLiteOk; } diff --git a/tensorflow/lite/kernels/embedding_lookup_sparse.cc b/tensorflow/lite/kernels/embedding_lookup_sparse.cc index 4ad1054340c9c3..270ccc929d9bd8 100644 --- a/tensorflow/lite/kernels/embedding_lookup_sparse.cc +++ b/tensorflow/lite/kernels/embedding_lookup_sparse.cc @@ -72,6 +72,7 @@ limitations under the License. #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/tensor_utils.h" #include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/util.h" namespace tflite { namespace ops { @@ -158,6 +159,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 3, &weights)); const TfLiteTensor* value; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 4, &value)); + const size_t values_size = NumElements(value); const int lookup_rank = SizeOfDimension(indices, 1); const int embedding_rank = NumDimensions(value); @@ -175,25 +177,33 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_rank); TF_LITE_ENSURE(context, output_shape != nullptr); int k = 0; - int embedding_size = 1; - int lookup_size = 1; + size_t embedding_size = 1; + size_t lookup_size = 1; for (int i = 0; i < lookup_rank - 1; i++, k++) { - const int dim = dense_shape->data.i32[i]; - lookup_size *= dim; + const size_t dim = dense_shape->data.i32[i]; + TF_LITE_ENSURE_MSG( + context, + MultiplyAndCheckOverflow(lookup_size, dim, &lookup_size) == kTfLiteOk, + "Lookup size overflowed."); output_shape->data[k] = dim; } for (int i = 1; i < embedding_rank; i++, k++) { - const int dim = SizeOfDimension(value, i); - embedding_size *= dim; + const size_t dim = SizeOfDimension(value, i); + TF_LITE_ENSURE_MSG(context, + MultiplyAndCheckOverflow(embedding_size, dim, + &embedding_size) == kTfLiteOk, + "Embedding size overflowed."); output_shape->data[k] = dim; } TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_shape)); - const int output_size = lookup_size * embedding_size; + const size_t output_size = lookup_size * embedding_size; TfLiteTensorRealloc(output_size * sizeof(float), output); float* output_ptr = GetTensorData(output); const float* weights_ptr = GetTensorData(weights); const float* value_ptr = GetTensorData(value); + // Makes sure reallocation was successful. 
+ TF_LITE_ENSURE(context, output_ptr != nullptr); std::fill_n(output_ptr, output_size, 0.0f); @@ -244,6 +254,11 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { current_squares_weight += w * w; current_total_weight += w; for (int k = 0; k < embedding_size; k++) { + // only index if indices are valid + if (current_output_offset + k < 0) continue; + if (current_output_offset + k >= output_size) continue; + if (example_embedding_offset + k < 0) continue; + if (example_embedding_offset + k >= values_size) continue; output_ptr[current_output_offset + k] += value_ptr[example_embedding_offset + k] * w; } diff --git a/tensorflow/lite/kernels/expand_dims.cc b/tensorflow/lite/kernels/expand_dims.cc index 231ba6df8ba735..c8d0270551c192 100644 --- a/tensorflow/lite/kernels/expand_dims.cc +++ b/tensorflow/lite/kernels/expand_dims.cc @@ -37,6 +37,7 @@ TfLiteStatus ExpandTensorDim(TfLiteContext* context, const TfLiteTensor& input, axis = input_dims.size + 1 + axis; } TF_LITE_ENSURE(context, axis <= input_dims.size); + TF_LITE_ENSURE(context, axis >= 0); TfLiteIntArray* output_dims = TfLiteIntArrayCreate(input_dims.size + 1); for (int i = 0; i < output_dims->size; ++i) { diff --git a/tensorflow/lite/kernels/fully_connected.cc b/tensorflow/lite/kernels/fully_connected.cc index 6da963bd4407b8..a97415e3f908dc 100644 --- a/tensorflow/lite/kernels/fully_connected.cc +++ b/tensorflow/lite/kernels/fully_connected.cc @@ -223,6 +223,7 @@ TfLiteStatus PrepareImpl(TfLiteContext* context, TfLiteNode* node) { } TF_LITE_ENSURE_EQ(context, NumDimensions(filter), 2); + TF_LITE_ENSURE(context, filter->dims->data[1] != 0); const int batch_size = input_size / filter->dims->data[1]; const int num_units = filter->dims->data[0]; @@ -878,6 +879,36 @@ TfLiteStatus EvalShuffledQuantized(TfLiteContext* context, TfLiteNode* node, return kTfLiteOk; } +// Verifies that sparsity values are valid given input/weight/output. +bool VerifySparsity(const RuntimeShape& weights_shape, + const RuntimeShape& input_shape, + const RuntimeShape& output_shape, + const TfLiteSparsity* sparsity) { + const int weights_dims_count = weights_shape.DimensionsCount(); + const int output_dims_count = output_shape.DimensionsCount(); + const int w0_size = sparsity->dim_metadata[0].dense_size; + const int accum_depth = weights_shape.Dims(weights_dims_count - 1); + const int output_elements = output_shape.FlatSize(); + const int input_elements = input_shape.FlatSize(); + const int batches = FlatSizeSkipDim(output_shape, output_dims_count - 1); + const int output_depth = MatchingDim(weights_shape, weights_dims_count - 2, + output_shape, output_dims_count - 1); + const int max_batch_index = batches - 1; + const int max_output = max_batch_index * output_depth + w0_size; + const int max_batch_depth = accum_depth * max_batch_index; + + // Verify output size is enough. + if (output_elements < max_output) return false; + + // Verify index from sparse in input is valid. 
+ for (int i = 0; i < sparsity->dim_metadata[1].array_indices->size; ++i) { + if (input_elements <= + max_batch_depth + sparsity->dim_metadata[1].array_indices->data[i]) + return false; + } + return true; +} + template TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node, TfLiteFullyConnectedParams* params, OpData* data, @@ -918,24 +949,32 @@ TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node, "Unsupported sparse fully-connected weight format."); return kTfLiteError; } + const auto& input_shape = GetTensorShape(input); + const auto& filter_shape = GetTensorShape(filter); + const auto& output_shape = GetTensorShape(output); + const auto& bias_shape = GetTensorShape(bias); + if (!VerifySparsity(filter_shape, input_shape, output_shape, &sparsity)) { + TF_LITE_KERNEL_LOG(context, "Invalid sparse fully-connected format."); + return kTfLiteError; + } if (sparsity.dim_metadata_size == kDimMetadataSizeRandomSparse) { // Random sparse. optimized_ops::FullyConnectedSparseWeight( - sparsity, op_params, GetTensorShape(input), - GetTensorData(input), GetTensorShape(filter), - GetTensorData(filter), GetTensorShape(bias), - GetTensorData(bias), GetTensorShape(output), - GetTensorData(output)); + sparsity, op_params, // Disable formatting + input_shape, GetTensorData(input), // Disable formatting + filter_shape, GetTensorData(filter), // Disable formatting + bias_shape, GetTensorData(bias), // Disable formatting + output_shape, GetTensorData(output)); } else if (sparsity.dim_metadata_size == kDimMetadataSizeBlockSparse && sparsity.dim_metadata[2].dense_size == 4) { // Block sparse with block size of 1x4. optimized_ops::FullyConnectedSparseWeight1x4( - sparsity, op_params, GetTensorShape(input), - GetTensorData(input), GetTensorShape(filter), - GetTensorData(filter), GetTensorShape(bias), - GetTensorData(bias), GetTensorShape(output), - GetTensorData(output), + sparsity, op_params, // Disable formatting + input_shape, GetTensorData(input), // Disable formatting + filter_shape, GetTensorData(filter), // Disable formatting + bias_shape, GetTensorData(bias), // Disable formatting + output_shape, GetTensorData(output), CpuBackendContext::GetFromContext(context)); } else { TF_LITE_KERNEL_LOG(context, diff --git a/tensorflow/lite/kernels/gather.cc b/tensorflow/lite/kernels/gather.cc index 9fe94821230c00..bdc2139d0fe7a5 100644 --- a/tensorflow/lite/kernels/gather.cc +++ b/tensorflow/lite/kernels/gather.cc @@ -117,8 +117,20 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { } template -TfLiteStatus Gather(const TfLiteGatherParams& params, const TfLiteTensor* input, - const TfLiteTensor* positions, TfLiteTensor* output) { +TfLiteStatus Gather(TfLiteContext* context, const TfLiteGatherParams& params, + const TfLiteTensor* input, const TfLiteTensor* positions, + TfLiteTensor* output) { + const PositionsT* indexes = GetTensorData(positions); + bool indices_has_only_positive_elements = true; + const size_t num_indices = positions->bytes / sizeof(PositionsT); + for (size_t i = 0; i < num_indices; i++) { + if (indexes[i] < 0) { + indices_has_only_positive_elements = false; + break; + } + } + TF_LITE_ENSURE(context, indices_has_only_positive_elements); + tflite::GatherParams op_params; op_params.axis = params.axis; op_params.batch_dims = params.batch_dims; @@ -134,7 +146,18 @@ TfLiteStatus GatherStrings(TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* positions, TfLiteTensor* output) { DynamicBuffer buffer; + const PositionT* indexes = 
GetTensorData(positions); + bool indices_has_only_positive_elements = true; + const size_t num_indices = positions->bytes / sizeof(PositionT); + for (size_t i = 0; i < num_indices; i++) { + if (indexes[i] < 0) { + indices_has_only_positive_elements = false; + break; + } + } + TF_LITE_ENSURE(context, indices_has_only_positive_elements); + const PositionT num_strings = GetStringCount(input); const int num_indexes = NumElements(positions); @@ -163,19 +186,26 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { if (positions->type == kTfLiteInt32) { switch (input->type) { case kTfLiteFloat32: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteUInt8: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteInt8: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteInt16: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteInt32: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteInt64: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteBool: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteString: return GatherStrings(context, input, positions, output); default: @@ -187,19 +217,26 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { if (positions->type == kTfLiteInt64) { switch (input->type) { case kTfLiteFloat32: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteUInt8: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteInt8: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteInt16: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteInt32: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteInt64: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteBool: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteString: return GatherStrings(context, input, positions, output); default: diff --git a/tensorflow/lite/kernels/gather_nd.cc b/tensorflow/lite/kernels/gather_nd.cc index 3ded771382569e..c39917b478505f 100644 --- a/tensorflow/lite/kernels/gather_nd.cc +++ b/tensorflow/lite/kernels/gather_nd.cc @@ -123,6 +123,17 @@ TfLiteStatus GatherNdString(const TfLiteTensor* params, template TfLiteStatus EvalGatherNd(TfLiteContext* context, const TfLiteTensor* params, const TfLiteTensor* indices, TfLiteTensor* output) { + bool indices_has_only_positive_elements = true; + const auto* indices_values = GetTensorData(indices); + const size_t num_indices = indices->bytes / sizeof(IndicesT); + for (size_t i = 0; i < num_indices; i++) { + if (indices_values[i] < 0) { + indices_has_only_positive_elements = false; + break; + } + } + TF_LITE_ENSURE(context, 
indices_has_only_positive_elements); + switch (params->type) { case kTfLiteFloat32: return GatherNd(params, indices, output); diff --git a/tensorflow/lite/kernels/internal/averagepool_quantized_test.cc b/tensorflow/lite/kernels/internal/averagepool_quantized_test.cc index cbc863645b74b9..fea343ae6b8824 100644 --- a/tensorflow/lite/kernels/internal/averagepool_quantized_test.cc +++ b/tensorflow/lite/kernels/internal/averagepool_quantized_test.cc @@ -40,12 +40,14 @@ void RunOneAveragePoolTest(const PoolParams& params, std::vector optimized_averagePool_output(buffer_size); std::vector reference_averagePool_output(buffer_size); - reference_integer_ops::AveragePool(params, input_shape, input_data, - output_shape, - reference_averagePool_output.data()); - optimized_integer_ops::AveragePool(params, input_shape, input_data, - output_shape, - optimized_averagePool_output.data()); + bool reference_success = reference_integer_ops::AveragePool( + params, input_shape, input_data, output_shape, + reference_averagePool_output.data()); + bool optimized_success = optimized_integer_ops::AveragePool( + params, input_shape, input_data, output_shape, + optimized_averagePool_output.data()); + EXPECT_TRUE(reference_success); + EXPECT_TRUE(optimized_success); for (int i = 0; i < buffer_size; i++) { EXPECT_TRUE(reference_averagePool_output[i] == diff --git a/tensorflow/lite/kernels/internal/common.h b/tensorflow/lite/kernels/internal/common.h index c433fc8817fe53..bd80d68c617db7 100644 --- a/tensorflow/lite/kernels/internal/common.h +++ b/tensorflow/lite/kernels/internal/common.h @@ -75,6 +75,7 @@ float ActivationFunction(float x) { inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size, const float* bias_data, int array_size, float* array_data) { + if (bias_size == 0) return; // Note: see b/132215220: in May 2019 we thought it would be OK to replace // this with the Eigen one-liner: // return (array.colwise() + bias).cwiseMin(clamp_max).cwiseMin(clamp_max). 
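The BiasAndClamp early return above and the AveragePool conversions in the files that follow close the same class of bug: an empty bias or a pooling window lying entirely in the padding region produces a zero divisor (bias_size or filter_count), and the kernel previously divided anyway. A reduced sketch of the guard, using illustrative names rather than the TFLite signatures:

#include <cstddef>

// Average `count` values, refusing to divide by zero. An empty window
// returns false; callers surface that as a kernel error instead of
// executing the division.
bool SafeAverage(const float* values, size_t count, float* out) {
  if (count == 0) return false;  // empty pooling window
  float total = 0.0f;
  for (size_t i = 0; i < count; ++i) total += values[i];
  *out = total / static_cast<float>(count);
  return true;
}

Converting the void AveragePool overloads to bool, as the hunks below do, is what lets the TF_LITE_ENSURE calls in pooling.cc turn that false into kTfLiteError.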
diff --git a/tensorflow/lite/kernels/internal/optimized/integer_ops/pooling.h b/tensorflow/lite/kernels/internal/optimized/integer_ops/pooling.h index 17495135038c65..0a6d63d3fabea6 100644 --- a/tensorflow/lite/kernels/internal/optimized/integer_ops/pooling.h +++ b/tensorflow/lite/kernels/internal/optimized/integer_ops/pooling.h @@ -144,7 +144,7 @@ inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape, } } -inline void AveragePool(const PoolParams& params, +inline bool AveragePool(const PoolParams& params, const RuntimeShape& input_shape, const int8* input_data, const RuntimeShape& output_shape, int8* output_data) { ruy::profiler::ScopeLabel label("AveragePool/8bitWith32bitAccumulator"); @@ -192,6 +192,7 @@ inline void AveragePool(const PoolParams& params, std::min(params.filter_height, input_height - in_y_origin); const int filter_count = (filter_x_end - filter_x_start) * (filter_y_end - filter_y_start); + if (filter_count == 0) return false; memset(acc, 0, tranche_depth * sizeof(acc[0])); const int8* input_ptr = input_data + depth_base + @@ -267,6 +268,7 @@ inline void AveragePool(const PoolParams& params, } } } + return true; } } // namespace optimized_integer_ops diff --git a/tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h b/tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h index 44a553c527ab23..76b8d4c55a5e9b 100644 --- a/tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h +++ b/tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h @@ -3761,7 +3761,7 @@ inline void BroadcastMul(const uint8* input1_data, const Dims<4>& input1_dims, output_data, output_dims); } -inline void AveragePool(const float* input_data, const Dims<4>& input_dims, +inline bool AveragePool(const float* input_data, const Dims<4>& input_dims, int stride_width, int stride_height, int pad_width, int pad_height, int kwidth, int kheight, float output_activation_min, @@ -3776,35 +3776,37 @@ inline void AveragePool(const float* input_data, const Dims<4>& input_dims, params.padding_values.width = pad_width; params.float_activation_min = output_activation_min; params.float_activation_max = output_activation_max; - AveragePool(params, DimsToShape(input_dims), input_data, - DimsToShape(output_dims), output_data); + return AveragePool(params, DimsToShape(input_dims), input_data, + DimsToShape(output_dims), output_data); } // legacy, for compatibility with old checked-in code template -void AveragePool(const float* input_data, const Dims<4>& input_dims, +bool AveragePool(const float* input_data, const Dims<4>& input_dims, int stride_width, int stride_height, int pad_width, int pad_height, int kwidth, int kheight, float* output_data, const Dims<4>& output_dims) { float output_activation_min, output_activation_max; GetActivationMinMax(Ac, &output_activation_min, &output_activation_max); - AveragePool(input_data, input_dims, stride_width, stride_height, pad_width, - pad_height, kwidth, kheight, output_activation_min, - output_activation_max, output_data, output_dims); + return AveragePool(input_data, input_dims, stride_width, stride_height, + pad_width, pad_height, kwidth, kheight, + output_activation_min, output_activation_max, output_data, + output_dims); } // legacy, for compatibility with old checked-in code template -void AveragePool(const float* input_data, const Dims<4>& input_dims, int stride, +bool AveragePool(const float* input_data, const Dims<4>& input_dims, int stride, int pad_width, int pad_height, int filter_width, int filter_height, 
float* output_data, const Dims<4>& output_dims) { - AveragePool(input_data, input_dims, stride, stride, pad_width, pad_height, - filter_width, filter_height, output_data, output_dims); + return AveragePool(input_data, input_dims, stride, stride, pad_width, + pad_height, filter_width, filter_height, output_data, + output_dims); } -inline void AveragePool(const uint8* input_data, const Dims<4>& input_dims, +inline bool AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride_width, int stride_height, int pad_width, int pad_height, int filter_width, int filter_height, int32 output_activation_min, @@ -3819,13 +3821,13 @@ inline void AveragePool(const uint8* input_data, const Dims<4>& input_dims, params.padding_values.width = pad_width; params.quantized_activation_min = output_activation_min; params.quantized_activation_max = output_activation_max; - AveragePool(params, DimsToShape(input_dims), input_data, - DimsToShape(output_dims), output_data); + return AveragePool(params, DimsToShape(input_dims), input_data, + DimsToShape(output_dims), output_data); } // legacy, for compatibility with old checked-in code template -void AveragePool(const uint8* input_data, const Dims<4>& input_dims, +bool AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride_width, int stride_height, int pad_width, int pad_height, int filter_width, int filter_height, int32 output_activation_min, int32 output_activation_max, @@ -3839,21 +3841,23 @@ void AveragePool(const uint8* input_data, const Dims<4>& input_dims, TFLITE_DCHECK_EQ(output_activation_min, 0); TFLITE_DCHECK_EQ(output_activation_max, 255); } - AveragePool(input_data, input_dims, stride_width, stride_height, pad_width, - pad_height, filter_width, filter_height, output_activation_min, - output_activation_max, output_data, output_dims); + return AveragePool(input_data, input_dims, stride_width, stride_height, + pad_width, pad_height, filter_width, filter_height, + output_activation_min, output_activation_max, output_data, + output_dims); } // legacy, for compatibility with old checked-in code template -void AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride, +bool AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride, int pad_width, int pad_height, int filter_width, int filter_height, int32 output_activation_min, int32 output_activation_max, uint8* output_data, const Dims<4>& output_dims) { - AveragePool(input_data, input_dims, stride, stride, pad_width, pad_height, - filter_width, filter_height, output_activation_min, - output_activation_max, output_data, output_dims); + return AveragePool(input_data, input_dims, stride, stride, pad_width, + pad_height, filter_width, filter_height, + output_activation_min, output_activation_max, + output_data, output_dims); } inline void MaxPool(const float* input_data, const Dims<4>& input_dims, diff --git a/tensorflow/lite/kernels/internal/optimized/optimized_ops.h b/tensorflow/lite/kernels/internal/optimized/optimized_ops.h index 09f51c188874fe..eeb58987384dd9 100644 --- a/tensorflow/lite/kernels/internal/optimized/optimized_ops.h +++ b/tensorflow/lite/kernels/internal/optimized/optimized_ops.h @@ -265,7 +265,7 @@ inline void BinaryBroadcastFiveFold(const ArithmeticParams& unswitched_params, // We have broadcast y2*y3*y4 of input2 data y1 times, and now move on. 
input2_data_reset = input2_data_ptr; } - } else { + } else if (input1_data_ptr != nullptr) { // Special case of y4 == 1, in which the innermost loop is a single // element and can be combined with the next (y3) as an inner broadcast. // @@ -3168,7 +3168,7 @@ inline int NodeOffset(int b, int h, int w, int height, int width) { return (b * height + h) * width + w; } -inline void AveragePool(const PoolParams& params, +inline bool AveragePool(const PoolParams& params, const RuntimeShape& input_shape, const float* input_data, const RuntimeShape& output_shape, float* output_data) { @@ -3183,6 +3183,9 @@ inline void AveragePool(const PoolParams& params, const int stride_height = params.stride_height; const int stride_width = params.stride_width; + if (stride_height == 0) return false; + if (stride_width == 0) return false; + // TODO(benoitjacob) make this a proper reference impl without Eigen! const auto in_mat = MapAsMatrixWithLastDimAsRows(input_data, input_shape); auto out_mat = MapAsMatrixWithLastDimAsRows(output_data, output_shape); @@ -3228,9 +3231,11 @@ inline void AveragePool(const PoolParams& params, params.float_activation_min, params.float_activation_max); } + + return true; } -inline void AveragePool(const PoolParams& params, +inline bool AveragePool(const PoolParams& params, const RuntimeShape& input_shape, const uint8* input_data, const RuntimeShape& output_shape, uint8* output_data) { @@ -3279,6 +3284,7 @@ inline void AveragePool(const PoolParams& params, std::min(params.filter_height, input_height - in_y_origin); const int filter_count = (filter_x_end - filter_x_start) * (filter_y_end - filter_y_start); + if (filter_count == 0) return false; memset(acc, 0, tranche_depth * sizeof(acc[0])); const uint8* input_ptr = input_data + depth_base + @@ -3365,6 +3371,7 @@ inline void AveragePool(const PoolParams& params, } } } + return true; } inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape, diff --git a/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h b/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h index 17944bc47dd5d3..2cb4dada8a66eb 100644 --- a/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +++ b/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h @@ -21,7 +21,7 @@ limitations under the License. namespace tflite { namespace reference_integer_ops { -inline void AveragePool(const PoolParams& params, +inline bool AveragePool(const PoolParams& params, const RuntimeShape& input_shape, const int8_t* input_data, const RuntimeShape& output_shape, int8_t* output_data) { @@ -66,6 +66,7 @@ inline void AveragePool(const PoolParams& params, filter_count++; } } + if (filter_count == 0) return false; // Round to the closest integer value. acc = acc > 0 ? (acc + filter_count / 2) / filter_count : (acc - filter_count / 2) / filter_count; @@ -77,6 +78,7 @@ inline void AveragePool(const PoolParams& params, } } } + return true; } inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape, @@ -136,7 +138,7 @@ inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape, } } -inline void AveragePool(const PoolParams& params, +inline bool AveragePool(const PoolParams& params, const RuntimeShape& input_shape, const int16_t* input_data, const RuntimeShape& output_shape, @@ -182,6 +184,7 @@ inline void AveragePool(const PoolParams& params, filter_count++; } } + if (filter_count == 0) return false; // Round to the closest integer value. acc = acc > 0 ? 
(acc + filter_count / 2) / filter_count : (acc - filter_count / 2) / filter_count; @@ -193,6 +196,7 @@ inline void AveragePool(const PoolParams& params, } } } + return true; } inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape, diff --git a/tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h b/tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h index 2d7056fcd855e4..0b65b1f49cf380 100644 --- a/tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h +++ b/tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h @@ -1487,7 +1487,7 @@ void Sub(const T* input1_data, const Dims<4>& input1_dims, const T* input2_data, output_data); } -inline void AveragePool(const float* input_data, const Dims<4>& input_dims, +inline bool AveragePool(const float* input_data, const Dims<4>& input_dims, int stride_width, int stride_height, int pad_width, int pad_height, int kwidth, int kheight, float output_activation_min, @@ -1502,8 +1502,8 @@ inline void AveragePool(const float* input_data, const Dims<4>& input_dims, params.padding_values.width = pad_width; params.float_activation_min = output_activation_min; params.float_activation_max = output_activation_max; - AveragePool(params, DimsToShape(input_dims), input_data, - DimsToShape(output_dims), output_data); + return AveragePool(params, DimsToShape(input_dims), input_data, + DimsToShape(output_dims), output_data); } // Transitional version that will be moved shortly to legacy_reference_ops, as @@ -1562,29 +1562,31 @@ inline void BroadcastMul(const uint8* input1_data, const Dims<4>& input1_dims, // legacy, for compatibility with old checked-in code template -void AveragePool(const float* input_data, const Dims<4>& input_dims, +bool AveragePool(const float* input_data, const Dims<4>& input_dims, int stride_width, int stride_height, int pad_width, int pad_height, int kwidth, int kheight, float* output_data, const Dims<4>& output_dims) { float output_activation_min, output_activation_max; GetActivationMinMax(Ac, &output_activation_min, &output_activation_max); - AveragePool(input_data, input_dims, stride_width, stride_height, pad_width, - pad_height, kwidth, kheight, output_activation_min, - output_activation_max, output_data, output_dims); + return AveragePool(input_data, input_dims, stride_width, stride_height, + pad_width, pad_height, kwidth, kheight, + output_activation_min, output_activation_max, output_data, + output_dims); } // legacy, for compatibility with old checked-in code template -void AveragePool(const float* input_data, const Dims<4>& input_dims, int stride, +bool AveragePool(const float* input_data, const Dims<4>& input_dims, int stride, int pad_width, int pad_height, int filter_width, int filter_height, float* output_data, const Dims<4>& output_dims) { - AveragePool(input_data, input_dims, stride, stride, pad_width, pad_height, - filter_width, filter_height, output_data, output_dims); + return AveragePool(input_data, input_dims, stride, stride, pad_width, + pad_height, filter_width, filter_height, output_data, + output_dims); } -inline void AveragePool(const uint8* input_data, const Dims<4>& input_dims, +inline bool AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride_width, int stride_height, int pad_width, int pad_height, int filter_width, int filter_height, int32 output_activation_min, @@ -1599,13 +1601,13 @@ inline void AveragePool(const uint8* input_data, const Dims<4>& input_dims, params.padding_values.width = pad_width; 
params.quantized_activation_min = output_activation_min; params.quantized_activation_max = output_activation_max; - AveragePool(params, DimsToShape(input_dims), input_data, - DimsToShape(output_dims), output_data); + return AveragePool(params, DimsToShape(input_dims), input_data, + DimsToShape(output_dims), output_data); } // legacy, for compatibility with old checked-in code template -void AveragePool(const uint8* input_data, const Dims<4>& input_dims, +bool AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride_width, int stride_height, int pad_width, int pad_height, int filter_width, int filter_height, int32 output_activation_min, int32 output_activation_max, @@ -1619,21 +1621,23 @@ void AveragePool(const uint8* input_data, const Dims<4>& input_dims, TFLITE_DCHECK_EQ(output_activation_min, 0); TFLITE_DCHECK_EQ(output_activation_max, 255); } - AveragePool(input_data, input_dims, stride_width, stride_height, pad_width, - pad_height, filter_width, filter_height, output_activation_min, - output_activation_max, output_data, output_dims); + return AveragePool(input_data, input_dims, stride_width, stride_height, + pad_width, pad_height, filter_width, filter_height, + output_activation_min, output_activation_max, output_data, + output_dims); } // legacy, for compatibility with old checked-in code template -void AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride, +bool AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride, int pad_width, int pad_height, int filter_width, int filter_height, int32 output_activation_min, int32 output_activation_max, uint8* output_data, const Dims<4>& output_dims) { - AveragePool(input_data, input_dims, stride, stride, pad_width, pad_height, - filter_width, filter_height, output_activation_min, - output_activation_max, output_data, output_dims); + return AveragePool(input_data, input_dims, stride, stride, pad_width, + pad_height, filter_width, filter_height, + output_activation_min, output_activation_max, + output_data, output_dims); } inline void MaxPool(const float* input_data, const Dims<4>& input_dims, diff --git a/tensorflow/lite/kernels/internal/reference/pooling.h b/tensorflow/lite/kernels/internal/reference/pooling.h index 0872f5210c8edb..ee30b8404464ba 100644 --- a/tensorflow/lite/kernels/internal/reference/pooling.h +++ b/tensorflow/lite/kernels/internal/reference/pooling.h @@ -23,7 +23,7 @@ limitations under the License. 
namespace tflite { namespace reference_ops { -inline void AveragePool(const PoolParams& params, +inline bool AveragePool(const PoolParams& params, const RuntimeShape& input_shape, const float* input_data, const RuntimeShape& output_shape, float* output_data) { @@ -66,6 +66,7 @@ inline void AveragePool(const PoolParams& params, filter_count++; } } + if (filter_count == 0) return false; const float average = total / filter_count; output_data[Offset(output_shape, batch, out_y, out_x, channel)] = ActivationFunctionWithMinMax(average, params.float_activation_min, @@ -74,9 +75,10 @@ inline void AveragePool(const PoolParams& params, } } } + return true; } -inline void AveragePool(const PoolParams& params, +inline bool AveragePool(const PoolParams& params, const RuntimeShape& input_shape, const uint8_t* input_data, const RuntimeShape& output_shape, @@ -122,6 +124,7 @@ inline void AveragePool(const PoolParams& params, filter_count++; } } + if (filter_count == 0) return false; acc = (acc + filter_count / 2) / filter_count; acc = std::max(acc, params.quantized_activation_min); acc = std::min(acc, params.quantized_activation_max); @@ -131,6 +134,7 @@ inline void AveragePool(const PoolParams& params, } } } + return true; } inline void L2Pool(const PoolParams& params, const RuntimeShape& input_shape, diff --git a/tensorflow/lite/kernels/kernel_util.cc b/tensorflow/lite/kernels/kernel_util.cc index c8fbea6dc30ded..4f61cbd4b1890d 100644 --- a/tensorflow/lite/kernels/kernel_util.cc +++ b/tensorflow/lite/kernels/kernel_util.cc @@ -119,6 +119,7 @@ TfLiteStatus GetInputSafe(const TfLiteContext* context, const TfLiteNode* node, TfLiteTensor* GetVariableInput(TfLiteContext* context, const TfLiteNode* node, int index) { TfLiteTensor* tensor = GetMutableInput(context, node, index); + if (tensor == nullptr) return nullptr; return tensor->is_variable ? tensor : nullptr; } diff --git a/tensorflow/lite/kernels/lsh_projection.cc b/tensorflow/lite/kernels/lsh_projection.cc index 81f97ecf9a9ce7..92a5ee556f724c 100644 --- a/tensorflow/lite/kernels/lsh_projection.cc +++ b/tensorflow/lite/kernels/lsh_projection.cc @@ -28,7 +28,7 @@ limitations under the License. // // Input: // Tensor[0]: Hash functions. Dim.size == 2, DataType: Float. -// Tensor[0].Dim[0]: Num of hash functions. +// Tensor[0].Dim[0]: Num of hash functions. Must be at least 1. // Tensor[0].Dim[1]: Num of projected output bits generated by // each hash function. // In sparse case, Tensor[0].Dim[1] + ceil( log2(Tensor[0].Dim[0] )) <= 32. 
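The kernel_util.cc and lsh_projection.cc guards above share one shape: validate a pointer or dimension before consuming it. GetVariableInput now propagates a null tensor instead of dereferencing it, and LSH projection rejects a hash-function table with zero rows. A minimal sketch of the null-propagating accessor, with stand-in types rather than the TfLite structs:

// Stand-in for TfLiteTensor, reduced to the one field the sketch needs.
struct VarTensor {
  bool is_variable;
};

// Returns the tensor only when it exists and is a variable. The null
// check comes first, mirroring the GetVariableInput fix, so a missing
// input yields nullptr for the caller to test rather than a crash.
VarTensor* GetVariableTensor(VarTensor* tensor) {
  if (tensor == nullptr) return nullptr;
  return tensor->is_variable ? tensor : nullptr;
}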
@@ -82,6 +82,7 @@ TfLiteStatus Resize(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteTensor* input;
   TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &input));
   TF_LITE_ENSURE(context, NumDimensions(input) >= 1);
+  TF_LITE_ENSURE(context, SizeOfDimension(input, 0) >= 1);
 
   if (NumInputs(node) == 3) {
     const TfLiteTensor* weight;
diff --git a/tensorflow/lite/kernels/pooling.cc b/tensorflow/lite/kernels/pooling.cc
index 474bd3825f4ff2..d54bd89b221511 100644
--- a/tensorflow/lite/kernels/pooling.cc
+++ b/tensorflow/lite/kernels/pooling.cc
@@ -117,117 +117,126 @@ TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) {
 }
 
 template <KernelType kernel_type>
-void AverageEvalFloat(TfLiteContext* context, TfLiteNode* node,
-                      TfLitePoolParams* params, OpData* data,
-                      const TfLiteTensor* input, TfLiteTensor* output) {
+TfLiteStatus AverageEvalFloat(TfLiteContext* context, TfLiteNode* node,
+                              TfLitePoolParams* params, OpData* data,
+                              const TfLiteTensor* input, TfLiteTensor* output) {
   float activation_min, activation_max;
   CalculateActivationRange(params->activation, &activation_min,
                            &activation_max);
-#define TF_LITE_AVERAGE_POOL(type) \
-  tflite::PoolParams op_params; \
-  op_params.stride_height = params->stride_height; \
-  op_params.stride_width = params->stride_width; \
-  op_params.filter_height = params->filter_height; \
-  op_params.filter_width = params->filter_width; \
-  op_params.padding_values.height = data->padding.height; \
-  op_params.padding_values.width = data->padding.width; \
-  op_params.float_activation_min = activation_min; \
-  op_params.float_activation_max = activation_max; \
-  type::AveragePool(op_params, GetTensorShape(input), \
-                    GetTensorData<float>(input), GetTensorShape(output), \
-                    GetTensorData<float>(output))
+#define TF_LITE_AVERAGE_POOL(type) \
+  tflite::PoolParams op_params; \
+  op_params.stride_height = params->stride_height; \
+  op_params.stride_width = params->stride_width; \
+  op_params.filter_height = params->filter_height; \
+  op_params.filter_width = params->filter_width; \
+  op_params.padding_values.height = data->padding.height; \
+  op_params.padding_values.width = data->padding.width; \
+  op_params.float_activation_min = activation_min; \
+  op_params.float_activation_max = activation_max; \
+  TF_LITE_ENSURE(context, type::AveragePool(op_params, GetTensorShape(input), \
+                                            GetTensorData<float>(input), \
+                                            GetTensorShape(output), \
+                                            GetTensorData<float>(output)))
   if (kernel_type == kReference) {
     TF_LITE_AVERAGE_POOL(reference_ops);
   } else {
     TF_LITE_AVERAGE_POOL(optimized_ops);
   }
 #undef TF_LITE_AVERAGE_POOL
+  return kTfLiteOk;
 }
 
 template <KernelType kernel_type>
-void AverageEvalQuantizedUint8(TfLiteContext* context, TfLiteNode* node,
-                               TfLitePoolParams* params, OpData* data,
-                               const TfLiteTensor* input,
-                               TfLiteTensor* output) {
+TfLiteStatus AverageEvalQuantizedUint8(TfLiteContext* context, TfLiteNode* node,
+                                       TfLitePoolParams* params, OpData* data,
+                                       const TfLiteTensor* input,
+                                       TfLiteTensor* output) {
   int32_t activation_min;
   int32_t activation_max;
   (void)CalculateActivationRangeQuantized(context, params->activation, output,
                                           &activation_min, &activation_max);
-#define TF_LITE_AVERAGE_POOL(type) \
-  tflite::PoolParams op_params; \
-  op_params.stride_height = params->stride_height; \
-  op_params.stride_width = params->stride_width; \
-  op_params.filter_height = params->filter_height; \
-  op_params.filter_width = params->filter_width; \
-  op_params.padding_values.height = data->padding.height; \
-  op_params.padding_values.width = data->padding.width; \
-  op_params.quantized_activation_min = activation_min; \
-  op_params.quantized_activation_max = activation_max; \
-  type::AveragePool(op_params, GetTensorShape(input), \
-                    GetTensorData<uint8_t>(input), GetTensorShape(output), \
-                    GetTensorData<uint8_t>(output))
+#define TF_LITE_AVERAGE_POOL(type) \
+  tflite::PoolParams op_params; \
+  op_params.stride_height = params->stride_height; \
+  op_params.stride_width = params->stride_width; \
+  op_params.filter_height = params->filter_height; \
+  op_params.filter_width = params->filter_width; \
+  op_params.padding_values.height = data->padding.height; \
+  op_params.padding_values.width = data->padding.width; \
+  op_params.quantized_activation_min = activation_min; \
+  op_params.quantized_activation_max = activation_max; \
+  TF_LITE_ENSURE(context, type::AveragePool(op_params, GetTensorShape(input), \
+                                            GetTensorData<uint8_t>(input), \
+                                            GetTensorShape(output), \
+                                            GetTensorData<uint8_t>(output)))
   if (kernel_type == kReference) {
     TF_LITE_AVERAGE_POOL(reference_ops);
   } else {
     TF_LITE_AVERAGE_POOL(optimized_ops);
   }
 #undef TF_LITE_AVERAGE_POOL
+  return kTfLiteOk;
 }
 
 template <KernelType kernel_type>
-void AverageEvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node,
-                              TfLitePoolParams* params, OpData* data,
-                              const TfLiteTensor* input, TfLiteTensor* output) {
+TfLiteStatus AverageEvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node,
+                                      TfLitePoolParams* params, OpData* data,
+                                      const TfLiteTensor* input,
+                                      TfLiteTensor* output) {
   int32_t activation_min;
   int32_t activation_max;
   (void)CalculateActivationRangeQuantized(context, params->activation, output,
                                           &activation_min, &activation_max);
-#define TF_LITE_AVERAGE_POOL(type) \
-  tflite::PoolParams op_params; \
-  op_params.stride_height = params->stride_height; \
-  op_params.stride_width = params->stride_width; \
-  op_params.filter_height = params->filter_height; \
-  op_params.filter_width = params->filter_width; \
-  op_params.padding_values.height = data->padding.height; \
-  op_params.padding_values.width = data->padding.width; \
-  op_params.quantized_activation_min = activation_min; \
-  op_params.quantized_activation_max = activation_max; \
-  type::AveragePool(op_params, GetTensorShape(input), \
-                    GetTensorData<int8_t>(input), GetTensorShape(output), \
-                    GetTensorData<int8_t>(output))
+#define TF_LITE_AVERAGE_POOL(type) \
+  tflite::PoolParams op_params; \
+  op_params.stride_height = params->stride_height; \
+  op_params.stride_width = params->stride_width; \
+  op_params.filter_height = params->filter_height; \
+  op_params.filter_width = params->filter_width; \
+  op_params.padding_values.height = data->padding.height; \
+  op_params.padding_values.width = data->padding.width; \
+  op_params.quantized_activation_min = activation_min; \
+  op_params.quantized_activation_max = activation_max; \
+  TF_LITE_ENSURE(context, type::AveragePool(op_params, GetTensorShape(input), \
+                                            GetTensorData<int8_t>(input), \
+                                            GetTensorShape(output), \
+                                            GetTensorData<int8_t>(output)))
   if (kernel_type == kReference) {
     TF_LITE_AVERAGE_POOL(reference_integer_ops);
   } else {
     TF_LITE_AVERAGE_POOL(optimized_integer_ops);
   }
 #undef TF_LITE_AVERAGE_POOL
+  return kTfLiteOk;
 }
 
 template <KernelType kernel_type>
-void AverageEvalQuantizedInt16(TfLiteContext* context, TfLiteNode* node,
-                               TfLitePoolParams* params, OpData* data,
-                               const TfLiteTensor* input,
-                               TfLiteTensor* output) {
+TfLiteStatus AverageEvalQuantizedInt16(TfLiteContext* context, TfLiteNode* node,
+                                       TfLitePoolParams* params, OpData* data,
+                                       const TfLiteTensor* input,
+                                       TfLiteTensor* output) {
   int32_t activation_min;
   int32_t activation_max;
   CalculateActivationRangeQuantized(context, params->activation, output,
                                     &activation_min, &activation_max);
-#define TF_LITE_AVERAGE_POOL(type) \
-  tflite::PoolParams op_params; \
-  op_params.stride_height = params->stride_height; \
-  op_params.stride_width = params->stride_width; \
-  op_params.filter_height = params->filter_height; \
-  op_params.filter_width = params->filter_width; \
-  op_params.padding_values.height = data->padding.height; \
-  op_params.padding_values.width = data->padding.width; \
-  op_params.quantized_activation_min = activation_min; \
-  op_params.quantized_activation_max = activation_max; \
-  type::AveragePool(op_params, GetTensorShape(input), \
-                    GetTensorData<int16_t>(input), GetTensorShape(output), \
-                    GetTensorData<int16_t>(output))
+#define TF_LITE_AVERAGE_POOL(type) \
+  tflite::PoolParams op_params; \
+  op_params.stride_height = params->stride_height; \
+  op_params.stride_width = params->stride_width; \
+  op_params.filter_height = params->filter_height; \
+  op_params.filter_width = params->filter_width; \
+  op_params.padding_values.height = data->padding.height; \
+  op_params.padding_values.width = data->padding.width; \
+  op_params.quantized_activation_min = activation_min; \
+  op_params.quantized_activation_max = activation_max; \
+  TF_LITE_ENSURE(context, type::AveragePool(op_params, GetTensorShape(input), \
+                                            GetTensorData<int16_t>(input), \
+                                            GetTensorShape(output), \
+                                            GetTensorData<int16_t>(output)))
   TF_LITE_AVERAGE_POOL(reference_integer_ops);
 #undef TF_LITE_AVERAGE_POOL
+  return kTfLiteOk;
 }
 
 template
@@ -380,20 +389,17 @@ TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
   switch (input->type) {  // Already know in/out types are same.
     case kTfLiteFloat32:
-      AverageEvalFloat<kernel_type>(context, node, params, data, input, output);
-      break;
+      return AverageEvalFloat<kernel_type>(context, node, params, data, input,
+                                           output);
     case kTfLiteUInt8:
-      AverageEvalQuantizedUint8<kernel_type>(context, node, params, data, input,
-                                             output);
-      break;
+      return AverageEvalQuantizedUint8<kernel_type>(context, node, params, data,
+                                                    input, output);
    case kTfLiteInt8:
-      AverageEvalQuantizedInt8<kernel_type>(context, node, params, data, input,
-                                            output);
-      break;
+      return AverageEvalQuantizedInt8<kernel_type>(context, node, params, data,
+                                                   input, output);
    case kTfLiteInt16:
-      AverageEvalQuantizedInt16<kernel_type>(context, node, params, data, input,
-                                             output);
-      break;
+      return AverageEvalQuantizedInt16<kernel_type>(context, node, params, data,
+                                                    input, output);
    default:
      TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
                         TfLiteTypeGetName(input->type));
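The pooling change above converts the `AverageEval*` helpers from `void` to `TfLiteStatus`, so a failed `TF_LITE_ENSURE` inside the macro now propagates to the caller instead of being silently dropped. A minimal Python sketch of the same pattern (hypothetical names, not the TFLite API):

OK, ERR = "kTfLiteOk", "kTfLiteError"

def average_pool(stride):
    # Stand-in for type::AveragePool; False models a failed internal check.
    return stride > 0

def average_eval_float(stride):
    # Was effectively `void` before the fix; now it returns a status.
    return OK if average_pool(stride) else ERR

def average_eval(stride):
    # Dispatcher mirrors the switch in AverageEval: statuses propagate up.
    return average_eval_float(stride)

print(average_eval(2))  # kTfLiteOk
print(average_eval(0))  # kTfLiteError now reaches the caller
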
diff --git a/tensorflow/lite/kernels/svdf.cc b/tensorflow/lite/kernels/svdf.cc
index 73024fd1e587a2..34622dfbd574b1 100644
--- a/tensorflow/lite/kernels/svdf.cc
+++ b/tensorflow/lite/kernels/svdf.cc
@@ -256,14 +256,21 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
                                                      output_temp_size_array));
 
     // Calculate effective scales.
+    TF_LITE_ENSURE(context, input->quantization.type != kTfLiteNoQuantization);
     auto* input_params =
         reinterpret_cast<TfLiteAffineQuantization*>(input->quantization.params);
+    TF_LITE_ENSURE(context,
+                   weights_feature->quantization.type != kTfLiteNoQuantization);
     auto* weights_feature_params = reinterpret_cast<TfLiteAffineQuantization*>(
         weights_feature->quantization.params);
+    TF_LITE_ENSURE(context, state->quantization.type != kTfLiteNoQuantization);
     auto* state_params =
         reinterpret_cast<TfLiteAffineQuantization*>(state->quantization.params);
+    TF_LITE_ENSURE(context,
+                   weights_time->quantization.type != kTfLiteNoQuantization);
     auto* weight_time_params = reinterpret_cast<TfLiteAffineQuantization*>(
         weights_time->quantization.params);
+    TF_LITE_ENSURE(context, output->quantization.type != kTfLiteNoQuantization);
     auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
         output->quantization.params);
     const double effective_scale_1 = input_params->scale->data[0] *
@@ -299,6 +306,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
                     GetTemporarySafe(context, node, /*index=*/0, &scratch));
 
   TfLiteTensor* state = GetVariableInput(context, node, kStateTensor);
+  TF_LITE_ENSURE(context, state != nullptr);
   TfLiteTensor* output;
   TF_LITE_ENSURE_OK(context,
                     GetOutputSafe(context, node, kOutputTensor, &output));
diff --git a/tensorflow/lite/kernels/unidirectional_sequence_lstm.cc b/tensorflow/lite/kernels/unidirectional_sequence_lstm.cc
index f8bd8c43a670f4..690393300f085c 100644
--- a/tensorflow/lite/kernels/unidirectional_sequence_lstm.cc
+++ b/tensorflow/lite/kernels/unidirectional_sequence_lstm.cc
@@ -62,8 +62,12 @@ TfLiteStatus PopulateQuantizedLstmParams8x8_16(
       context, GetOutputSafe(context, node, lstm::full::kOutputTensor,
                              &output_tensor));
 
+  TF_LITE_ENSURE(context,
+                 cell_state->quantization.type != kTfLiteNoQuantization);
   auto* cell_state_params = static_cast<TfLiteAffineQuantization*>(
       cell_state->quantization.params);
+  TF_LITE_ENSURE(context,
+                 output_tensor->quantization.type != kTfLiteNoQuantization);
   auto* proj_params = static_cast<TfLiteAffineQuantization*>(
       output_tensor->quantization.params);
   if (cell_clip > 0.0) {
@@ -160,6 +164,8 @@ TfLiteStatus PopulateQuantizedLstmParams8x8_16(
     TfLiteTensor* intermediate;
     TF_LITE_ENSURE_OK(context,
                       GetIntermediatesSafe(context, node, i, &intermediate));
+    TF_LITE_ENSURE(context,
+                   intermediate->quantization.type != kTfLiteNoQuantization);
     auto* params = static_cast<TfLiteAffineQuantization*>(
         intermediate->quantization.params);
     intermediate_scale.push_back(params->scale->data[0]);
@@ -174,6 +180,7 @@ TfLiteStatus PopulateQuantizedLstmParams8x8_16(
   // is ignored.
   TfLiteTensor* hidden;
   TF_LITE_ENSURE_OK(context, GetIntermediatesSafe(context, node, 4, &hidden));
+  TF_LITE_ENSURE(context, hidden->quantization.type != kTfLiteNoQuantization);
   auto* hidden_params =
       static_cast<TfLiteAffineQuantization*>(hidden->quantization.params);
   intermediate_scale.push_back(hidden_params->scale->data[0]);
@@ -760,6 +767,8 @@ TfLiteStatus PopulatePrecomputedZPTimesWeightsWithBias(TfLiteContext* context,
   const TfLiteTensor* intermediate =
       &context->tensors[node->intermediates->data[4]];
+  TF_LITE_ENSURE(context,
+                 intermediate->quantization.type != kTfLiteNoQuantization);
   const auto* params =
       static_cast<TfLiteAffineQuantization*>(intermediate->quantization.params);
   const int32_t hidden_zp = params->zero_point->data[0];
diff --git a/tensorflow/lite/schema/upgrade_schema_test.py b/tensorflow/lite/schema/upgrade_schema_test.py
index e55925053e0ae9..99154ccb205b7f 100644
--- a/tensorflow/lite/schema/upgrade_schema_test.py
+++ b/tensorflow/lite/schema/upgrade_schema_test.py
@@ -254,13 +254,13 @@ class TestSchemaUpgrade(test_util.TensorFlowTestCase):
 
   def testNonExistentFile(self):
     converter = upgrade_schema_lib.Converter()
-    non_existent = tempfile.mktemp(suffix=".json")
+    _, non_existent = tempfile.mkstemp(suffix=".json")  # safe to ignore fd
     with self.assertRaisesRegex(IOError, "No such file or directory"):
       converter.Convert(non_existent, non_existent)
 
   def testInvalidExtension(self):
     converter = upgrade_schema_lib.Converter()
-    invalid_extension = tempfile.mktemp(suffix=".foo")
+    _, invalid_extension = tempfile.mkstemp(suffix=".foo")  # safe to ignore fd
     with self.assertRaisesRegex(ValueError, "Invalid extension on input"):
       converter.Convert(invalid_extension, invalid_extension)
 
     with tempfile.NamedTemporaryFile(suffix=".json", mode="w+") as in_json:
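The svdf.cc and unidirectional_sequence_lstm.cc hunks above all add the same guard: verify that a tensor actually carries quantization metadata before casting `quantization.params`, because a crafted model can omit it and the unchecked cast then dereferences garbage. A rough Python sketch of the idea (plain dicts standing in for `TfLiteTensor`; illustrative only, not the TFLite C++ API):

def effective_scale(tensor):
    # Mirrors TF_LITE_ENSURE(context, t->quantization.type != kTfLiteNoQuantization):
    # fail loudly instead of dereferencing absent params.
    if tensor.get("quantization_type", "none") == "none":
        raise ValueError("tensor carries no affine quantization params")
    return tensor["params"]["scale"][0]

print(effective_scale({"quantization_type": "affine",
                       "params": {"scale": [0.5]}}))  # 0.5
# effective_scale({"quantization_type": "none"}) now raises instead of crashing.
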
diff --git a/tensorflow/lite/tools/cmake/modules/abseil-cpp.cmake b/tensorflow/lite/tools/cmake/modules/abseil-cpp.cmake
index 5f362f45c75c86..7124b114ca8099 100644
--- a/tensorflow/lite/tools/cmake/modules/abseil-cpp.cmake
+++ b/tensorflow/lite/tools/cmake/modules/abseil-cpp.cmake
@@ -23,7 +23,7 @@ include(OverridableFetchContent)
 OverridableFetchContent_Declare(
   abseil-cpp
   GIT_REPOSITORY https://github.com/abseil/abseil-cpp
-  GIT_TAG 20200225.2 # TODO: What version does GRPC and TFLite need?
+  GIT_TAG 20200923.2 # TODO: What version does GRPC need?
   GIT_SHALLOW TRUE
   GIT_PROGRESS TRUE
   PREFIX "${CMAKE_BINARY_DIR}"
diff --git a/tensorflow/lite/tools/optimize/sparsity/format_converter.cc b/tensorflow/lite/tools/optimize/sparsity/format_converter.cc
index c5a7778371eb7e..3fabaffd735c47 100644
--- a/tensorflow/lite/tools/optimize/sparsity/format_converter.cc
+++ b/tensorflow/lite/tools/optimize/sparsity/format_converter.cc
@@ -245,10 +245,12 @@ FormatConverter<T>::FormatConverter(const std::vector<int>& shape,
   block_size_.resize(block_map_.size());
   for (int i = 0; i < original_rank; i++) {
     if (block_dim < block_map_.size() && block_map_[block_dim] == i) {
-      int orig_dim = traversal_order_[original_rank + block_dim];
-      block_size_[block_dim] = sparsity.dim_metadata[orig_dim].dense_size;
-      blocked_shape_[i] = shape[i] / sparsity.dim_metadata[orig_dim].dense_size;
-      block_dim++;
+      if (original_rank + block_dim < traversal_order_.size()) {
+        int orig_dim = traversal_order_[original_rank + block_dim];
+        block_size_[block_dim] = sparsity.dim_metadata[orig_dim].dense_size;
+        blocked_shape_[i] = shape[i] / sparsity.dim_metadata[orig_dim].dense_size;
+        block_dim++;
+      }
     } else {
       blocked_shape_[i] = shape[i];
     }
@@ -291,13 +293,15 @@ void FormatConverter<T>::Populate(const T* src_data, std::vector<int> indices,
       Populate(src_data, indices, level + 1, prev_idx * shape_of_level + i,
                src_data_ptr, dest_data);
     }
-  } else {
+  } else if (prev_idx + 1 < dim_metadata_[metadata_idx].size()) {
     const auto& array_segments = dim_metadata_[metadata_idx];
     const auto& array_indices = dim_metadata_[metadata_idx + 1];
     for (int i = array_segments[prev_idx]; i < array_segments[prev_idx + 1];
         i++) {
-      indices[level] = array_indices[i];
-      Populate(src_data, indices, level + 1, i, src_data_ptr, dest_data);
+      if (i < array_indices.size() && level < indices.size()) {
+        indices[level] = array_indices[i];
+        Populate(src_data, indices, level + 1, i, src_data_ptr, dest_data);
+      }
     }
   }
 }
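The `FormatConverter::Populate` guards above bound every read of the sparse `(segments, indices)` metadata, which comes straight from a (possibly malicious) model file. A pure-Python sketch of the traversal with the new checks, using deliberately inconsistent metadata (illustrative only):

# CSR-style metadata: segments[i]..segments[i+1] index into `indices`.
segments = [0, 2, 5]   # claims 5 entries...
indices = [1, 3, 0]    # ...but only 3 exist, as in a malformed model

def walk(prev_idx):
    if prev_idx + 1 >= len(segments):  # new bound on the segment lookup
        return []
    return [indices[i]
            for i in range(segments[prev_idx], segments[prev_idx + 1])
            if i < len(indices)]  # new bound on the indices lookup

print(walk(0))  # [1, 3]
print(walk(1))  # [0] -- entries 3 and 4 are skipped instead of read OOB
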
diff --git a/tensorflow/lite/util.cc b/tensorflow/lite/util.cc
index 84dbc16b6079c0..cb2d1ef73a950a 100644
--- a/tensorflow/lite/util.cc
+++ b/tensorflow/lite/util.cc
@@ -27,6 +27,7 @@ limitations under the License.
 
 #include "tensorflow/lite/builtin_ops.h"
 #include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/core/macros.h"
 #include "tensorflow/lite/schema/schema_generated.h"
 
 namespace tflite {
@@ -176,4 +177,19 @@ bool IsValidationSubgraph(const char* name) {
   // NOLINTNEXTLINE: can't use absl::StartsWith as absl is not allowed.
   return name && std::string(name).find(kValidationSubgraphNamePrefix) == 0;
 }
+
+TfLiteStatus MultiplyAndCheckOverflow(size_t a, size_t b, size_t* product) {
+  // Multiplying a * b where a and b are size_t cannot result in overflow in a
+  // size_t accumulator if both numbers have no non-zero bits in their upper
+  // half.
+  constexpr size_t size_t_bits = 8 * sizeof(size_t);
+  constexpr size_t overflow_upper_half_bit_position = size_t_bits / 2;
+  *product = a * b;
+  // If neither integer has non-zero bits in its upper half, the product can't
+  // overflow. Otherwise, check using slow division.
+  if (TFLITE_EXPECT_FALSE((a | b) >> overflow_upper_half_bit_position != 0)) {
+    if (a != 0 && *product / a != b) return kTfLiteError;
+  }
+  return kTfLiteOk;
+}
 }  // namespace tflite
diff --git a/tensorflow/lite/util.h b/tensorflow/lite/util.h
index d9d7f7a0a8e673..e6a1aefcd9e5b8 100644
--- a/tensorflow/lite/util.h
+++ b/tensorflow/lite/util.h
@@ -99,6 +99,12 @@ constexpr char kValidationSubgraphNamePrefix[] = "VALIDATION:";
 // Checks whether the prefix of the subgraph name indicates the subgraph is a
 // validation subgraph.
 bool IsValidationSubgraph(const char* name);
+
+// Multiplies two sizes and returns kTfLiteError if the product overflowed.
+// This is based on tensorflow/overflow.h but is simpler as we already
+// have unsigned numbers. It is also generalized to work where sizeof(size_t)
+// is not 8.
+TfLiteStatus MultiplyAndCheckOverflow(size_t a, size_t b, size_t* product);
 }  // namespace tflite
 
 #endif  // TENSORFLOW_LITE_UTIL_H_
diff --git a/tensorflow/lite/util_test.cc b/tensorflow/lite/util_test.cc
index 46601b908dc690..9b630d515575d4 100644
--- a/tensorflow/lite/util_test.cc
+++ b/tensorflow/lite/util_test.cc
@@ -22,6 +22,7 @@ limitations under the License.
 #include
 #include
 
+#include "tensorflow/lite/c/c_api_types.h"
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/schema/schema_generated.h"
 
@@ -130,6 +131,13 @@ TEST(ValidationSubgraph, NameIsDetected) {
   EXPECT_TRUE(IsValidationSubgraph("VALIDATION:main"));
 }
 
+TEST(MultiplyAndCheckOverflow, Validate) {
+  size_t res = 0;
+  EXPECT_TRUE(MultiplyAndCheckOverflow(1, 2, &res) == kTfLiteOk);
+  EXPECT_FALSE(MultiplyAndCheckOverflow(static_cast<size_t>(123456789023),
+                                        1223423425, &res) == kTfLiteOk);
+}
+
 }  // namespace
 }  // namespace tflite
diff --git a/tensorflow/python/data/experimental/kernel_tests/io_test.py b/tensorflow/python/data/experimental/kernel_tests/io_test.py
index 8ff6372641b9fd..b77267f838a7e1 100644
--- a/tensorflow/python/data/experimental/kernel_tests/io_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/io_test.py
@@ -17,6 +17,7 @@ from __future__ import division
 from __future__ import print_function
 
+import numpy as np
 import os
 import shutil
 
@@ -111,6 +112,20 @@ def testOptionalElementSpec(self):
     dataset_loaded = io.load(self._test_dir)
     self.assertDatasetsEqual(dataset, dataset_loaded)
 
+  @combinations.generate(test_base.eager_only_combinations())
+  def testRepeatAndPrefetch(self):
+    """This test reproduces github.com/tensorflow/tensorflow/issues/49165."""
+    dataset1 = dataset_ops.Dataset.from_tensor_slices(np.random.rand(16, 32))
+    io.save(dataset1, self._test_dir)
+    dataset = io.load(self._test_dir)
+    dataset = dataset.shuffle(buffer_size=16)
+    dataset = dataset.batch(16)
+    dataset = dataset.repeat()
+    dataset = dataset.prefetch(1)
+    next_element = self.getNext(dataset)
+    for _ in range(30):
+      self.evaluate(next_element())
+
 
 if __name__ == "__main__":
   test.main()
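The `MultiplyAndCheckOverflow` helper above rests on a half-width trick: if neither operand has bits set in the upper half of `size_t`, the product cannot wrap, so the slow division check is only needed on the rare path. A small Python sketch of the same logic, emulating a 64-bit `size_t` with masking (illustration only, not TFLite code):

SIZE_T_BITS = 64
MASK = (1 << SIZE_T_BITS) - 1
HALF = SIZE_T_BITS // 2

def multiply_and_check_overflow(a, b):
    product = (a * b) & MASK      # wraps like a size_t accumulator would
    if (a | b) >> HALF != 0:      # some operand uses the upper half: verify
        if a != 0 and product // a != b:
            return None           # kTfLiteError: the multiplication wrapped
    return product                # kTfLiteOk

print(multiply_and_check_overflow(1, 2))                      # 2
print(multiply_and_check_overflow(123456789023, 1223423425))  # None
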
diff --git a/tensorflow/python/data/kernel_tests/from_sparse_tensor_slices_test.py b/tensorflow/python/data/kernel_tests/from_sparse_tensor_slices_test.py
index 0caa2d48b9bb32..f6ba69cdf5b909 100644
--- a/tensorflow/python/data/kernel_tests/from_sparse_tensor_slices_test.py
+++ b/tensorflow/python/data/kernel_tests/from_sparse_tensor_slices_test.py
@@ -118,6 +118,45 @@ def testEmptySparseTensorSlices(self):
       with self.assertRaises(errors.OutOfRangeError):
         sess.run(get_next)
 
+  @combinations.generate(combinations.combine(tf_api_version=1, mode=["graph"]))
+  def testEmptySparseTensorSlicesInvalid(self):
+    """Test a dataset based on an invalid `tf.sparse.SparseTensor`."""
+    st = array_ops.sparse_placeholder(dtypes.float64)
+    iterator = dataset_ops.make_initializable_iterator(
+        dataset_ops.Dataset.from_sparse_tensor_slices(st))
+    init_op = iterator.initializer
+
+    with self.cached_session() as sess:
+      # Test with an empty sparse tensor but with non-empty values.
+      empty_indices = np.empty((0, 4), dtype=np.int64)
+      non_empty_values = [1, 2, 3, 4]
+      empty_dense_shape = [0, 4, 37, 9]
+      sparse_feed = sparse_tensor.SparseTensorValue(empty_indices,
+                                                    non_empty_values,
+                                                    empty_dense_shape)
+      # Here, we expect the test to fail when running the feed.
+      with self.assertRaises(errors.InvalidArgumentError):
+        sess.run(init_op, feed_dict={st: sparse_feed})
+
+  @combinations.generate(combinations.combine(tf_api_version=1, mode=["graph"]))
+  def testEmptySparseTensorSlicesInvalid2(self):
+    """Test a dataset based on an invalid `tf.sparse.SparseTensor`."""
+    st = array_ops.sparse_placeholder(dtypes.float64)
+    iterator = dataset_ops.make_initializable_iterator(
+        dataset_ops.Dataset.from_sparse_tensor_slices(st))
+    init_op = iterator.initializer
+
+    with self.cached_session() as sess:
+      # Test with a malformed indices structure and empty values.
+      empty_indices = [[]]
+      empty_values = []
+      dense_shape = [1, 1]
+      sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values,
+                                                    dense_shape)
+      # Here, we expect the test to fail when running the feed.
+      with self.assertRaises(errors.InvalidArgumentError):
+        sess.run(init_op, feed_dict={st: sparse_feed})
+
   @combinations.generate(combinations.combine(tf_api_version=2, mode=["eager"]))
   def testFromSparseTensorSlicesError(self):
     with self.assertRaises(AttributeError):
diff --git a/tensorflow/python/debug/cli/curses_ui_test.py b/tensorflow/python/debug/cli/curses_ui_test.py
index 3ffa031923dc53..db844f24a9a82f 100644
--- a/tensorflow/python/debug/cli/curses_ui_test.py
+++ b/tensorflow/python/debug/cli/curses_ui_test.py
@@ -90,8 +90,9 @@ def __init__(self,
 
     # Override the default path to the command history file to avoid test
     # concurrency issues.
+    _, history_file_path = tempfile.mkstemp()  # safe to ignore fd
     self._command_history_store = debugger_cli_common.CommandHistory(
-        history_file_path=tempfile.mktemp())
+        history_file_path=history_file_path)
 
     # Below, override the _screen_ prefixed member methods that interact with the
     # actual terminal, so that the mock can run in a terminal-less environment.
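A recurring fix across the debugger and schema tests in this patch replaces `tempfile.mktemp()`, which only returns a name and leaves a window in which an attacker can create the file first, with `tempfile.mkstemp()`, which creates and opens the file atomically. A minimal sketch of the safe pattern, including the file-descriptor handling the TODO comments discuss:

import os
import tempfile

# mkstemp() atomically creates the file and returns an open fd, closing the
# race window inherent to mktemp().
fd, path = tempfile.mkstemp(suffix=".json")
try:
    with os.fdopen(fd, "w") as f:  # adopt the fd so it is closed for us
        f.write("{}")
finally:
    os.remove(path)
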
diff --git a/tensorflow/python/debug/cli/debugger_cli_common_test.py b/tensorflow/python/debug/cli/debugger_cli_common_test.py index 93df845c4c585d..6d0bd2bbd906b4 100644 --- a/tensorflow/python/debug/cli/debugger_cli_common_test.py +++ b/tensorflow/python/debug/cli/debugger_cli_common_test.py @@ -253,7 +253,9 @@ def testWriteToFileSucceeds(self): font_attr_segs={0: [(0, 5, "red")], 1: [(0, 7, "blue")]}) - file_path = tempfile.mktemp() + fd, file_path = tempfile.mkstemp() + os.close(fd) # file opened exclusively, so we need to close this + # a better fix would be to make the API take a fd screen_output.write_to_file(file_path) with gfile.Open(file_path, "r") as f: @@ -930,12 +932,13 @@ def testDeregisterNonexistentContext(self): class CommandHistoryTest(test_util.TensorFlowTestCase): def setUp(self): - self._history_file_path = tempfile.mktemp() + self._fd, self._history_file_path = tempfile.mkstemp() self._cmd_hist = debugger_cli_common.CommandHistory( limit=3, history_file_path=self._history_file_path) def tearDown(self): if os.path.isfile(self._history_file_path): + os.close(self._fd) os.remove(self._history_file_path) def _restoreFileReadWritePermissions(self, file_path): @@ -1002,13 +1005,6 @@ def testRepeatingCommandsDoNotGetLoggedRepeatedly(self): self.assertEqual(["help"], self._cmd_hist.most_recent_n(2)) - def testCommandHistoryFileIsCreated(self): - self.assertFalse(os.path.isfile(self._history_file_path)) - self._cmd_hist.add_command("help") - self.assertTrue(os.path.isfile(self._history_file_path)) - with open(self._history_file_path, "rt") as f: - self.assertEqual(["help\n"], f.readlines()) - def testLoadingCommandHistoryFileObeysLimit(self): self._cmd_hist.add_command("help 1") self._cmd_hist.add_command("help 2") diff --git a/tensorflow/python/debug/cli/readline_ui_test.py b/tensorflow/python/debug/cli/readline_ui_test.py index 011ba23fc4d63b..64351ceb6b820a 100644 --- a/tensorflow/python/debug/cli/readline_ui_test.py +++ b/tensorflow/python/debug/cli/readline_ui_test.py @@ -35,9 +35,11 @@ class MockReadlineUI(readline_ui.ReadlineUI): """Test subclass of ReadlineUI that bypasses terminal manipulations.""" def __init__(self, on_ui_exit=None, command_sequence=None): + _, config_file_path = tempfile.mkstemp() # safe to ignore fd readline_ui.ReadlineUI.__init__( - self, on_ui_exit=on_ui_exit, - config=cli_config.CLIConfig(config_file_path=tempfile.mktemp())) + self, + on_ui_exit=on_ui_exit, + config=cli_config.CLIConfig(config_file_path=config_file_path)) self._command_sequence = command_sequence self._command_counter = 0 @@ -168,7 +170,7 @@ def callback_for_test(): self.assertTrue(observer["callback_invoked"]) def testIncompleteRedirectWorks(self): - output_path = tempfile.mktemp() + _, output_path = tempfile.mkstemp() # safe to ignore fd ui = MockReadlineUI( command_sequence=["babble -n 2 > %s" % output_path, "exit"]) diff --git a/tensorflow/python/debug/examples/v1/debug_errors.py b/tensorflow/python/debug/examples/v1/debug_errors.py index 5480a9b6f544e0..83c497999e40eb 100644 --- a/tensorflow/python/debug/examples/v1/debug_errors.py +++ b/tensorflow/python/debug/examples/v1/debug_errors.py @@ -44,9 +44,11 @@ def main(_): z = tf.matmul(m, v, name="z") if FLAGS.debug: - config_file_path = ( - tempfile.mktemp(".tfdbg_config") - if FLAGS.use_random_config_path else None) + if FLAGS.use_random_config_path: + # TODO(mihaimaruseac): Safe to ignore fd here? 
+ _, config_file_path = tempfile.mkstemp(".tfdbg_config") + else: + config_file_path = None sess = tf_debug.LocalCLIDebugWrapperSession( sess, ui_type=FLAGS.ui_type, config_file_path=config_file_path) diff --git a/tensorflow/python/debug/examples/v1/debug_keras.py b/tensorflow/python/debug/examples/v1/debug_keras.py index ffc575776c26d2..4f7405a4deea71 100644 --- a/tensorflow/python/debug/examples/v1/debug_keras.py +++ b/tensorflow/python/debug/examples/v1/debug_keras.py @@ -44,9 +44,11 @@ def main(_): sess = tf.Session() if FLAGS.debug: # Use the command-line interface (CLI) of tfdbg. - config_file_path = ( - tempfile.mktemp(".tfdbg_config") - if FLAGS.use_random_config_path else None) + if FLAGS.use_random_config_path: + # TODO(mihaimaruseac): Safe to ignore fd here? + _, config_file_path = tempfile.mkstemp(".tfdbg_config") + else: + config_file_path = None sess = tf_debug.LocalCLIDebugWrapperSession( sess, ui_type=FLAGS.ui_type, config_file_path=config_file_path) elif FLAGS.tensorboard_debug_address: diff --git a/tensorflow/python/debug/examples/v1/debug_mnist_v1.py b/tensorflow/python/debug/examples/v1/debug_mnist_v1.py index cde1fb97ff280d..d2e67e85b41dd0 100644 --- a/tensorflow/python/debug/examples/v1/debug_mnist_v1.py +++ b/tensorflow/python/debug/examples/v1/debug_mnist_v1.py @@ -214,9 +214,11 @@ def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu): "The --debug and --tensorboard_debug_address flags are mutually " "exclusive.") if FLAGS.debug: - config_file_path = ( - tempfile.mktemp(".tfdbg_config") - if FLAGS.use_random_config_path else None) + if FLAGS.use_random_config_path: + # TODO(mihaimaruseac): Safe to ignore fd here? + _, config_file_path = tempfile.mkstemp(".tfdbg_config") + else: + config_file_path = None sess = tf_debug.LocalCLIDebugWrapperSession( sess, ui_type=FLAGS.ui_type, config_file_path=config_file_path) elif FLAGS.tensorboard_debug_address: diff --git a/tensorflow/python/debug/examples/v1/debug_tflearn_iris.py b/tensorflow/python/debug/examples/v1/debug_tflearn_iris.py index 81f41247fd35dc..0de57ef2f81a31 100644 --- a/tensorflow/python/debug/examples/v1/debug_tflearn_iris.py +++ b/tensorflow/python/debug/examples/v1/debug_tflearn_iris.py @@ -62,9 +62,11 @@ def test_input_fn(): "exclusive.") hooks = [] if FLAGS.debug: - config_file_path = ( - tempfile.mktemp(".tfdbg_config") - if FLAGS.use_random_config_path else None) + if FLAGS.use_random_config_path: + # TODO(mihaimaruseac): Safe to ignore fd here? + _, config_file_path = tempfile.mkstemp(".tfdbg_config") + else: + config_file_path = None hooks.append( tf_debug.LocalCLIDebugHook( ui_type=FLAGS.ui_type, diff --git a/tensorflow/python/debug/lib/debug_data_test.py b/tensorflow/python/debug/lib/debug_data_test.py index d7ba5cde1f7dbb..e50c7498c096b7 100644 --- a/tensorflow/python/debug/lib/debug_data_test.py +++ b/tensorflow/python/debug/lib/debug_data_test.py @@ -151,8 +151,7 @@ def testDumpSizeBytesIsNoneForNonexistentFilePath(self): class DebugDumpDirTest(test_util.TensorFlowTestCase): def setUp(self): - self._dump_root = tempfile.mktemp() - os.mkdir(self._dump_root) + self._dump_root = tempfile.mkdtemp() def tearDown(self): # Tear down temporary dump directory. 
@@ -183,7 +182,7 @@ def _makeDataDirWithMultipleDevicesAndDuplicateNodeNames(self): def testDebugDumpDir_nonexistentDumpRoot(self): with self.assertRaisesRegex(IOError, "does not exist"): - debug_data.DebugDumpDir(tempfile.mktemp() + "_foo") + debug_data.DebugDumpDir(tempfile.mkdtemp() + "_foo") def testDebugDumpDir_invalidFileNamingPattern(self): # File name with too few underscores should lead to an exception. diff --git a/tensorflow/python/debug/lib/source_utils_test.py b/tensorflow/python/debug/lib/source_utils_test.py index 366b25e89ac367..ab0dbe616e3fcd 100644 --- a/tensorflow/python/debug/lib/source_utils_test.py +++ b/tensorflow/python/debug/lib/source_utils_test.py @@ -265,8 +265,8 @@ def testCallingAnnotateSourceWithoutPythonGraphRaisesException(self): def testCallingAnnotateSourceOnUnrelatedSourceFileDoesNotError(self): # Create an unrelated source file. - unrelated_source_path = tempfile.mktemp() - with open(unrelated_source_path, "wt") as source_file: + fd, unrelated_source_path = tempfile.mkstemp() + with open(fd, "wt") as source_file: source_file.write("print('hello, world')\n") self.assertEqual({}, @@ -277,8 +277,8 @@ def testCallingAnnotateSourceOnUnrelatedSourceFileDoesNotError(self): os.remove(unrelated_source_path) def testLoadingPythonSourceFileWithNonAsciiChars(self): - source_path = tempfile.mktemp() - with open(source_path, "wb") as source_file: + fd, source_path = tempfile.mkstemp() + with open(fd, "wb") as source_file: source_file.write(u"print('\U0001f642')\n".encode("utf-8")) source_lines, _ = source_utils.load_source(source_path) self.assertEqual(source_lines, [u"print('\U0001f642')", u""]) diff --git a/tensorflow/python/debug/wrappers/local_cli_wrapper.py b/tensorflow/python/debug/wrappers/local_cli_wrapper.py index 4069bdf1f3ffae..1fb2c1c104fe60 100644 --- a/tensorflow/python/debug/wrappers/local_cli_wrapper.py +++ b/tensorflow/python/debug/wrappers/local_cli_wrapper.py @@ -84,7 +84,7 @@ def __init__(self, self, sess, thread_name_filter=thread_name_filter) if not dump_root: - self._dump_root = tempfile.mktemp(prefix=_DUMP_ROOT_PREFIX) + self._dump_root = tempfile.mkdtemp(prefix=_DUMP_ROOT_PREFIX) else: dump_root = os.path.expanduser(dump_root) if os.path.isfile(dump_root): diff --git a/tensorflow/python/debug/wrappers/local_cli_wrapper_test.py b/tensorflow/python/debug/wrappers/local_cli_wrapper_test.py index 0d930b6e7e08a5..a8a6c1c5174d41 100644 --- a/tensorflow/python/debug/wrappers/local_cli_wrapper_test.py +++ b/tensorflow/python/debug/wrappers/local_cli_wrapper_test.py @@ -32,7 +32,6 @@ from tensorflow.python.debug.wrappers import local_cli_wrapper from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes -from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.lib.io import file_io @@ -116,7 +115,7 @@ def _launch_cli(self): config_file_path=os.path.join(tempfile.mkdtemp(), ".tfdbg_config"))) self._register_this_run_info(readline_cli) - while True: + while self._command_pointer < len(self._command_sequence): command = self._command_sequence[self._command_pointer] self._command_pointer += 1 @@ -136,7 +135,7 @@ def _launch_cli(self): class LocalCLIDebugWrapperSessionTest(test_util.TensorFlowTestCase): def setUp(self): - self._tmp_dir = tempfile.mktemp() + self._tmp_dir = tempfile.mkdtemp() self.v = variables.VariableV1(10.0, name="v") self.w = variables.VariableV1(21.0, name="w") @@ -178,15 +177,7 @@ def 
testConstructWrapper(self): local_cli_wrapper.LocalCLIDebugWrapperSession( session.Session(), log_usage=False) - def testConstructWrapperWithExistingEmptyDumpRoot(self): - os.mkdir(self._tmp_dir) - self.assertTrue(os.path.isdir(self._tmp_dir)) - - local_cli_wrapper.LocalCLIDebugWrapperSession( - session.Session(), dump_root=self._tmp_dir, log_usage=False) - def testConstructWrapperWithExistingNonEmptyDumpRoot(self): - os.mkdir(self._tmp_dir) dir_path = os.path.join(self._tmp_dir, "foo") os.mkdir(dir_path) self.assertTrue(os.path.isdir(dir_path)) @@ -197,7 +188,6 @@ def testConstructWrapperWithExistingNonEmptyDumpRoot(self): session.Session(), dump_root=self._tmp_dir, log_usage=False) def testConstructWrapperWithExistingFileDumpRoot(self): - os.mkdir(self._tmp_dir) file_path = os.path.join(self._tmp_dir, "foo") open(file_path, "a").close() # Create the file self.assertTrue(os.path.isfile(file_path)) @@ -533,16 +523,6 @@ def testRuntimeErrorShouldBeCaught(self): tf_error = wrapped_sess.observers["tf_errors"][0] self.assertEqual("y", tf_error.op.name) - def testRuntimeErrorBeforeGraphExecutionIsRaised(self): - # Use an impossible device name to cause an error before graph execution. - with ops.device("/device:GPU:1337"): - w = variables.VariableV1([1.0] * 10, name="w") - - wrapped_sess = LocalCLIDebuggerWrapperSessionForTest( - [["run"]], self.sess, dump_root=self._tmp_dir) - with self.assertRaisesRegex(errors.OpError, r".*[Dd]evice.*1337.*"): - wrapped_sess.run(w) - def testRunTillFilterPassesShouldLaunchCLIAtCorrectRun(self): wrapped_sess = LocalCLIDebuggerWrapperSessionForTest( [["run", "-f", "v_greater_than_twelve"], diff --git a/tensorflow/python/eager/def_function.py b/tensorflow/python/eager/def_function.py index 1c90634095bc31..eb728abaa1d8e6 100644 --- a/tensorflow/python/eager/def_function.py +++ b/tensorflow/python/eager/def_function.py @@ -576,7 +576,7 @@ def __init__(self, ValueError: if `input_signature` is not None and the `python_function`'s argspec has keyword arguments. 
""" - self._lock = threading.Lock() + self._lock = threading.RLock() self._python_function = python_function self._function_spec = function_lib.FunctionSpec.from_function_and_signature( python_function, @@ -617,7 +617,7 @@ def __getstate__(self): def __setstate__(self, state): """Restore from pickled state.""" self.__dict__ = state - self._lock = threading.Lock() + self._lock = threading.RLock() self._descriptor_cache = weakref.WeakKeyDictionary() self._key_for_call_stats = self._get_key_for_call_stats() diff --git a/tensorflow/python/eager/def_function_test.py b/tensorflow/python/eager/def_function_test.py index bc41a070e0bc8d..b932dbda80bd76 100644 --- a/tensorflow/python/eager/def_function_test.py +++ b/tensorflow/python/eager/def_function_test.py @@ -28,6 +28,7 @@ from six.moves import range from tensorflow.python.autograph.core import converter +from tensorflow.python.eager import backprop from tensorflow.python.eager import def_function from tensorflow.python.eager import lift_to_graph from tensorflow.python.framework import constant_op @@ -38,6 +39,7 @@ from tensorflow.python.framework import test_util from tensorflow.python.module import module from tensorflow.python.ops import array_ops +from tensorflow.python.ops import cond_v2 from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops @@ -1000,6 +1002,117 @@ def testDouble(self, a): self.assertAllEqual(obj2.testDouble.experimental_get_tracing_count(), 3) self.assertAllEqual(obj1.testDouble.experimental_get_tracing_count(), 2) + def test_recursive_tf_function(self): + + @def_function.function + def recursive_fn(n): + if n > 0: + return recursive_fn(n - 1) + return 1 + + self.assertEqual(recursive_fn(5).numpy(), 1) + + def test_recursive_tf_function_with_gradients(self): + + @def_function.function + def recursive_fn(n, x): + if n > 0: + return n * recursive_fn(n - 1, x) + else: + return x + + x = variables.Variable(1.0) + with backprop.GradientTape() as tape: + g = recursive_fn(5, x) + + dg_dx = tape.gradient(g, x) + self.assertEqual(dg_dx.numpy(), 120) + + def test_recursive_python_function(self): + + def recursive_py_fn(n): + if n > 0: + return recursive_py_fn(n - 1) + return 1 + + @def_function.function + def recursive_fn(n): + return recursive_py_fn(n) + + self.assertEqual(recursive_fn(5).numpy(), 1) + + def test_recursive_python_function_with_gradients(self): + + def recursive_py_fn(n, x): + if n > 0: + return n * recursive_py_fn(n - 1, x) + return x + + @def_function.function + def recursive_fn(n, x): + return recursive_py_fn(n, x) + + x = variables.Variable(1.0) + with backprop.GradientTape() as tape: + g = recursive_fn(5, x) + + dg_dx = tape.gradient(g, x) + self.assertEqual(dg_dx.numpy(), 120) + + def test_recursive_tf_function_call_each_other(self): + + @def_function.function + def recursive_fn1(n): + if n <= 1: + return 1 + return recursive_fn2(n - 1) + + @def_function.function + def recursive_fn2(n): + if n <= 1: + return 2 + return recursive_fn1(n - 1) + + self.assertEqual(recursive_fn1(5).numpy(), 1) + self.assertEqual(recursive_fn1(6).numpy(), 2) + self.assertEqual(recursive_fn2(5).numpy(), 2) + self.assertEqual(recursive_fn2(6).numpy(), 1) + + def test_recursive_tf_function_call_each_other_with_gradients(self): + + @def_function.function + def recursive_fn1(n, x): + if n <= 1: + return x + return n * recursive_fn2(n - 1, x) + + @def_function.function + def recursive_fn2(n, x): + if n <= 1: + return 2 * x + return n * recursive_fn1(n - 
+  def test_recursive_tf_function_call_each_other_with_gradients(self):
+
+    @def_function.function
+    def recursive_fn1(n, x):
+      if n <= 1:
+        return x
+      return n * recursive_fn2(n - 1, x)
+
+    @def_function.function
+    def recursive_fn2(n, x):
+      if n <= 1:
+        return 2 * x
+      return n * recursive_fn1(n - 1, x)
+
+    x = variables.Variable(1.0)
+    with backprop.GradientTape() as tape:
+      g1 = recursive_fn1(5, x)
+
+    dg1_dx = tape.gradient(g1, x)
+    self.assertEqual(dg1_dx.numpy(), 120)
+
+    with backprop.GradientTape() as tape:
+      g2 = recursive_fn2(5, x)
+
+    dg2_dx = tape.gradient(g2, x)
+    self.assertEqual(dg2_dx.numpy(), 240)
+
+  def test_recursive_tf_function_with_cond(self):
+    @def_function.function(autograph=False)
+    def recursive_fn(n):
+      return cond_v2.cond_v2(n > 0, recursive_fn(n - 1), 1)
+
+    with self.assertRaises(RecursionError):
+      recursive_fn(constant_op.constant(5))
+
 
 if __name__ == '__main__':
   ops.enable_eager_execution()
diff --git a/tensorflow/python/eager/function.py b/tensorflow/python/eager/function.py
index 6a65aca1d2d13f..0af0e78ca43f45 100644
--- a/tensorflow/python/eager/function.py
+++ b/tensorflow/python/eager/function.py
@@ -3007,7 +3007,7 @@ def __init__(self,
       self._hashable_input_signature = _make_input_signature_hashable(
           self.flat_input_signature)
 
-    self._lock = threading.Lock()
+    self._lock = threading.RLock()
     # _descriptor_cache is a map from an instance of a class to an
     # instance-specific `Function`, used to make sure defun-decorated methods
     # create different functions for each instance.
diff --git a/tensorflow/python/framework/test_util.py b/tensorflow/python/framework/test_util.py
index eea6c986238944..4ecb3ba85c1df9 100644
--- a/tensorflow/python/framework/test_util.py
+++ b/tensorflow/python/framework/test_util.py
@@ -2167,14 +2167,13 @@ def testMyOperator(self):
     """
     stream.flush()
     fd = stream.fileno()
-    tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
-    tmp_file = open(tmp_file_path, "w")
+    tmp_file, tmp_file_path = tempfile.mkstemp(dir=self.get_temp_dir())
     orig_fd = os.dup(fd)
-    os.dup2(tmp_file.fileno(), fd)
+    os.dup2(tmp_file, fd)
     try:
       yield CapturedWrites(tmp_file_path)
     finally:
-      tmp_file.close()
+      os.close(tmp_file)
       os.dup2(orig_fd, fd)
 
   def _AssertProtoEquals(self, a, b, msg=None):
diff --git a/tensorflow/python/keras/engine/functional.py b/tensorflow/python/keras/engine/functional.py
index b84672ede11be7..5768d291f0cee0 100644
--- a/tensorflow/python/keras/engine/functional.py
+++ b/tensorflow/python/keras/engine/functional.py
@@ -53,7 +53,7 @@ class Functional(training_lib.Model):
   than with subclassed `Model`s, specifically:
 
   - Model cloning (`keras.models.clone`)
-  - Serialization (`model.get_config()/from_config`, `model.to_json()/to_yaml()`)
+  - Serialization (`model.get_config()/from_config`, `model.to_json()`)
   - Whole-model saving (`model.save()`)
 
   A `Functional` model can be instantiated by passing two arguments to
diff --git a/tensorflow/python/keras/engine/functional_test.py b/tensorflow/python/keras/engine/functional_test.py
index c71345b164b826..61895cb48ce1a1 100644
--- a/tensorflow/python/keras/engine/functional_test.py
+++ b/tensorflow/python/keras/engine/functional_test.py
@@ -47,11 +47,6 @@ from tensorflow.python.platform import test
 from tensorflow.python.training.tracking.util import Checkpoint
 
-try:
-  import yaml  # pylint:disable=g-import-not-at-top
-except ImportError:
-  yaml = None
-
 
 class NetworkConstructionTest(keras_parameterized.TestCase):
 
@@ -627,10 +622,6 @@ def test_multi_input_multi_output_recursion(self):
     json_str = model.to_json()
     models.model_from_json(json_str)
 
-    if yaml is not None:
-      yaml_str = model.to_yaml()
-      models.model_from_yaml(yaml_str)
-
   @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def test_invalid_graphs(self):
     a = layers.Input(shape=(32,), name='input_a')
@@ -1361,10 +1352,6 @@ def test_constant_initializer_with_numpy(self):
     json_str = model.to_json()
     models.model_from_json(json_str)
 
-    if yaml is not None:
-      yaml_str = model.to_yaml()
-      models.model_from_yaml(yaml_str)
-
   def test_subclassed_error_if_init_not_called(self):
 
     class MyNetwork(training_lib.Model):
diff --git a/tensorflow/python/keras/engine/training.py b/tensorflow/python/keras/engine/training.py
index 3285b41273d7ae..bcf7d87f530655 100644
--- a/tensorflow/python/keras/engine/training.py
+++ b/tensorflow/python/keras/engine/training.py
@@ -84,11 +84,6 @@ import h5py
 except ImportError:
   h5py = None
-
-try:
-  import yaml
-except ImportError:
-  yaml = None
 # pylint: enable=g-import-not-at-top
 
@@ -2382,6 +2377,9 @@ def to_json(self, **kwargs):
   def to_yaml(self, **kwargs):
     """Returns a yaml string containing the network configuration.
 
+    Note: Since TF 2.6, this method is no longer supported and will raise a
+    RuntimeError.
+
     To load a network from a yaml save file, use
     `keras.models.model_from_yaml(yaml_string, custom_objects={})`.
 
@@ -2397,12 +2395,12 @@ def to_yaml(self, **kwargs):
       A YAML string.
 
     Raises:
-      ImportError: if yaml module is not found.
+      RuntimeError: always; the method has been removed because it poses a
+        security risk.
     """
-    if yaml is None:
-      raise ImportError(
-          'Requires yaml module installed (`pip install pyyaml`).')
-    return yaml.dump(self._updated_config(), **kwargs)
+    raise RuntimeError(
+        'Method `model.to_yaml()` has been removed due to security risk of '
+        'arbitrary code execution. Please use `model.to_json()` instead.'
+    )
 
   def reset_states(self):
     for layer in self.layers:
diff --git a/tensorflow/python/keras/saving/model_config.py b/tensorflow/python/keras/saving/model_config.py
index efe2a94e4f63c0..1f4309e0c461e8 100644
--- a/tensorflow/python/keras/saving/model_config.py
+++ b/tensorflow/python/keras/saving/model_config.py
@@ -18,18 +18,11 @@ from tensorflow.python.keras.saving.saved_model import json_utils
 from tensorflow.python.util.tf_export import keras_export
 
-# pylint: disable=g-import-not-at-top
-try:
-  import yaml
-except ImportError:
-  yaml = None
-# pylint: enable=g-import-not-at-top
-
 
 @keras_export('keras.models.model_from_config')
 def model_from_config(config, custom_objects=None):
   """Instantiates a Keras model from its config.
-  
+
   Usage:
   ```
   # for a Functional API model
@@ -63,17 +56,8 @@ def model_from_config(config, custom_objects=None):
 def model_from_yaml(yaml_string, custom_objects=None):
   """Parses a yaml model configuration file and returns a model instance.
 
-  Usage:
-
-  >>> model = tf.keras.Sequential([
-  ...     tf.keras.layers.Dense(5, input_shape=(3,)),
-  ...     tf.keras.layers.Softmax()])
-  >>> try:
-  ...   import yaml
-  ...   config = model.to_yaml()
-  ...   loaded_model = tf.keras.models.model_from_yaml(config)
-  ... except ImportError:
-  ...   pass
+  Note: Since TF 2.6, this method is no longer supported and will raise a
+  RuntimeError.
 
   Args:
     yaml_string: YAML string or open file encoding a model configuration.
@@ -85,19 +69,13 @@ def model_from_yaml(yaml_string, custom_objects=None):
     A Keras model instance (uncompiled).
 
   Raises:
-    ImportError: if yaml module is not found.
+    RuntimeError: always; the method has been removed because it poses a
+      security risk.
   """
-  if yaml is None:
-    raise ImportError('Requires yaml module installed (`pip install pyyaml`).')
-  # The method unsafe_load only exists in PyYAML 5.x+, so which branch of the
-  # try block is covered by tests depends on the installed version of PyYAML.
-  try:
-    # PyYAML 5.x+
-    config = yaml.unsafe_load(yaml_string)
-  except AttributeError:
-    config = yaml.load(yaml_string)
-  from tensorflow.python.keras.layers import deserialize  # pylint: disable=g-import-not-at-top
-  return deserialize(config, custom_objects=custom_objects)
+  raise RuntimeError(
+      'Method `model_from_yaml()` has been removed due to security risk of '
+      'arbitrary code execution. Please use `Model.to_json()` and '
+      '`model_from_json()` instead.'
+  )
 
 
 @keras_export('keras.models.model_from_json')
diff --git a/tensorflow/python/kernel_tests/BUILD b/tensorflow/python/kernel_tests/BUILD
index 0f90494a95f68d..1698b991f3102e 100644
--- a/tensorflow/python/kernel_tests/BUILD
+++ b/tensorflow/python/kernel_tests/BUILD
@@ -582,6 +582,9 @@ tf_py_test(
     size = "small",
     srcs = ["fractional_avg_pool_op_test.py"],
     shard_count = 5,
+    tags = [
+        "no_oss",
+    ],
     deps = [
         "//tensorflow/python:array_ops",
         "//tensorflow/python:client_testlib",
@@ -598,6 +601,9 @@ tf_py_test(
     size = "small",
     srcs = ["fractional_max_pool_op_test.py"],
     shard_count = 5,
+    tags = [
+        "no_oss",
+    ],
     deps = [
         "//tensorflow/python:array_ops",
         "//tensorflow/python:client_testlib",
@@ -2359,7 +2365,7 @@ cuda_py_test(
     name = "pad_op_test",
     size = "small",
     srcs = ["pad_op_test.py"],
-    tags = ["no_mac"],  # test is times out on mac b/186262388
+    tags = ["no_oss"],  # test times out on mac b/186262388
     xla_tags = [
         "no_cuda_asan",  # times out
     ],
diff --git a/tensorflow/python/kernel_tests/array_ops/stack_op_test.py b/tensorflow/python/kernel_tests/array_ops/stack_op_test.py
index f0e7db4a5ae127..a3b8f2524d8427 100644
--- a/tensorflow/python/kernel_tests/array_ops/stack_op_test.py
+++ b/tensorflow/python/kernel_tests/array_ops/stack_op_test.py
@@ -20,12 +20,16 @@
 
 import numpy as np
 
+from tensorflow.python import tf2
+from tensorflow.python.eager import def_function
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors
 from tensorflow.python.framework import errors_impl
 from tensorflow.python.framework import ops
 from tensorflow.python.framework import test_util
 from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gen_array_ops
 from tensorflow.python.ops import gradient_checker
 from tensorflow.python.ops import variables
 from tensorflow.python.platform import test
@@ -76,6 +80,19 @@ def testSimpleParallelCPU(self):
       c = array_ops.parallel_stack(xs)
       self.assertAllEqual(c, data)
 
+  def testParallelConcatShapeZero(self):
+    if not tf2.enabled():
+      self.skipTest("only fails in TF2")
+
+    @def_function.function
+    def f():
+      y = gen_array_ops.parallel_concat(values=[["tf"]], shape=0)
+      return y
+
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                r"0th dimension of value .* is less than"):
+      f()
+
   def testSimpleParallelGPU(self):
     # tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default(): diff --git a/tensorflow/python/kernel_tests/array_ops_test.py b/tensorflow/python/kernel_tests/array_ops_test.py index 326385fc7c9176..97ff2f539b1c92 100644 --- a/tensorflow/python/kernel_tests/array_ops_test.py +++ b/tensorflow/python/kernel_tests/array_ops_test.py @@ -1570,11 +1570,25 @@ def testUnravelIndexZeroDim(self): with self.cached_session(): for dtype in [dtypes.int32, dtypes.int64]: with self.assertRaisesRegex(errors.InvalidArgumentError, - "index is out of bound as with dims"): + "dims cannot contain a dim of zero"): indices = constant_op.constant([2, 5, 7], dtype=dtype) dims = constant_op.constant([3, 0], dtype=dtype) self.evaluate(array_ops.unravel_index(indices=indices, dims=dims)) + def testUnravelIndexIntegerOverflow(self): + with self.cached_session(): + for dtype in [dtypes.int32, dtypes.int64]: + with self.assertRaisesRegex( + errors.InvalidArgumentError, + r"Input dims product is causing integer overflow"): + indices = constant_op.constant(-0x100000, dtype=dtype) + if dtype == dtypes.int32: + value = 0x10000000 + else: + value = 0x7FFFFFFFFFFFFFFF + dims = constant_op.constant([value, value], dtype=dtype) + self.evaluate(array_ops.unravel_index(indices=indices, dims=dims)) + class GuaranteeConstOpTest(test_util.TensorFlowTestCase): @@ -1703,6 +1717,21 @@ def f(a): output_grad = gradient_checker_v2.compute_gradient(f, [input_tensor]) self.assertAllClose(output_grad[0], np.zeros([1, 4, 4])) + def testOutOfBoundAxis(self): + input_tensor = constant_op.constant([1., 1.]) + input_min = [0] + input_max = [1] + q_input, _, _ = array_ops.quantize(input_tensor, 0, 1, dtypes.qint32) + error = (errors.InvalidArgumentError, ValueError) + with self.assertRaisesRegex(error, + r".*Axis must be less than input dimension.*"): + self.evaluate( + gen_array_ops.dequantize( + input=q_input, + min_range=input_min, + max_range=input_max, + axis=2**31 - 1)) + @test_util.run_all_in_graph_and_eager_modes class SortedSearchTest(test_util.TensorFlowTestCase): diff --git a/tensorflow/python/kernel_tests/attention_ops_test.py b/tensorflow/python/kernel_tests/attention_ops_test.py index 804a0b20cc9dd4..29a69778e55edc 100644 --- a/tensorflow/python/kernel_tests/attention_ops_test.py +++ b/tensorflow/python/kernel_tests/attention_ops_test.py @@ -22,6 +22,7 @@ from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_image_ops from tensorflow.python.ops import image_ops @@ -301,6 +302,18 @@ def testGlimpseNonNormalizedNonCentered(self): np.asarray([[5, 6, 7], [10, 11, 12], [15, 16, 17]]), self.evaluate(result2)[0, :, :, 0]) + def testGlimpseNegativeInput(self): + img = np.arange(9).reshape([1, 3, 3, 1]) + with self.test_session(): + with self.assertRaises((errors.InvalidArgumentError, ValueError)): + result = image_ops.extract_glimpse_v2( + img, + size=[1023, -63], + offsets=[1023, 63], + centered=False, + normalized=False) + self.evaluate(result) + if __name__ == '__main__': test.main() diff --git a/tensorflow/python/kernel_tests/bincount_op_test.py b/tensorflow/python/kernel_tests/bincount_op_test.py index 4ca81333ab35fb..f08944c865dad6 100644 --- a/tensorflow/python/kernel_tests/bincount_op_test.py +++ b/tensorflow/python/kernel_tests/bincount_op_test.py @@ -332,6 +332,14 @@ def test_invalid_rank(self): gen_math_ops.dense_bincount( input=[[[1, 2, 3], [0, 3, 2]]], weights=[], size=10)) + 
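The `test_size_is_not_scalar` cases that follow all probe the same class of bug: a `size` argument that must be a scalar arriving as a tensor, which previously slipped past shape inference and triggered `CHECK`-failures. A user-level sketch of the now-rejected call, mirroring the dense test (requires a TF build that includes the fix):

import tensorflow as tf

# `size` must be rank 0; a vector is now rejected up front.
try:
    tf.raw_ops.DenseBincount(
        input=[0], size=[1, 1], weights=[3], binary_output=False)
except (ValueError, tf.errors.InvalidArgumentError) as e:
    print("rejected:", e)
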
@test_util.run_in_graph_and_eager_modes + def test_size_is_not_scalar(self): # b/206619828 + with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError), + "Shape must be rank 0 but is rank 1"): + self.evaluate( + gen_math_ops.dense_bincount( + input=[0], size=[1, 1], weights=[3], binary_output=False)) + class SparseBincountOpTest(test_util.TensorFlowTestCase, parameterized.TestCase): @@ -499,6 +507,19 @@ def test_sparse_bincount_col_reduce_binary(self, dtype): weights=[], binary_output=True))) + @test_util.run_in_graph_and_eager_modes + def test_size_is_not_scalar(self): # b/206619828 + with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError), + "Shape must be rank 0 but is rank 1"): + self.evaluate( + gen_math_ops.sparse_bincount( + indices=[[0], [1]], + values=[0, 0], + dense_shape=[1, 1], + size=[1, 1], + weights=[0, 0], + binary_output=False)) + class RaggedBincountOpTest(test_util.TensorFlowTestCase, parameterized.TestCase): @@ -638,6 +659,19 @@ def test_ragged_bincount_binary_np_with_weights(self, dtype): size=size, binary_output=True))) + @test_util.run_in_graph_and_eager_modes + def test_size_is_not_scalar(self): # b/206619828 + with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError), + "Shape must be rank 0 but is rank 1"): + self.evaluate( + gen_math_ops.ragged_bincount( + splits=[0, 0, 1], + values=[1], + size=[1, 1], + weights=[0, 0, 0], + binary_output=False, + name=None)) + if __name__ == "__main__": googletest.main() diff --git a/tensorflow/python/kernel_tests/boosted_trees/stats_ops_test.py b/tensorflow/python/kernel_tests/boosted_trees/stats_ops_test.py index 73098ed3084da6..2af570da73f815 100644 --- a/tensorflow/python/kernel_tests/boosted_trees/stats_ops_test.py +++ b/tensorflow/python/kernel_tests/boosted_trees/stats_ops_test.py @@ -21,9 +21,11 @@ from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import boosted_trees_ops +from tensorflow.python.ops import gen_boosted_trees_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.platform import googletest @@ -1669,6 +1671,199 @@ def testMakeStatsSummaryNumericalPrecisionMegaBatch(self): """Tests numeric precision.""" self._verify_precision(length=50000000) + def testBoostedTreesCalculateBestGainsPerFeatureSecurity(self): + node_id_range = [1, 2] + stats_summary_list = [[[[]]]] + l1 = [1.0] + l2 = [1.0] + tree_complexity = [1.0] + min_node_weight = [1.17] + max_splits = 1 + with self.assertRaises((errors.InvalidArgumentError, ValueError)): + gen_boosted_trees_ops.boosted_trees_calculate_best_gains_per_feature( + node_id_range=node_id_range, + stats_summary_list=stats_summary_list, + l1=l1, + l2=l2, + tree_complexity=tree_complexity, + min_node_weight=min_node_weight, + max_splits=max_splits) + + def testBoostedTreesCalculateBestFeatureSplitSecurity(self): + node_id_range = [1, 2] + stats_summary = [[[[]]]] + split_type = 'equality' + l1 = [1.0] + l2 = [1.0] + tree_complexity = [1.0] + min_node_weight = [1.17] + logits_dimension = 5 + with self.assertRaises((errors.InvalidArgumentError, ValueError)): + gen_boosted_trees_ops.boosted_trees_calculate_best_feature_split( + node_id_range=node_id_range, + stats_summary=stats_summary, + l1=l1, + l2=l2, + tree_complexity=tree_complexity, + min_node_weight=min_node_weight, + logits_dimension=logits_dimension, + 
split_type=split_type) + + def testBoostedTreesCalculateBestFeatureSplitSecurity2(self): + with self.assertRaises((errors.InvalidArgumentError, ValueError)): + gen_boosted_trees_ops.boosted_trees_calculate_best_feature_split( + node_id_range=[0, 8], + stats_summary=[[[[1.0], [2.0], [3.0]]]], + l1=[0.5], + l2=[0.5], + tree_complexity=[0.1], + min_node_weight=[1.0], + logits_dimension=8) + + def testBoostedTreesCalculateBestFeatureSplitV2Security(self): + node_id_range = [1, 2] + stats_summaries_list = [[[[[]]]]] + split_types = ['inequality'] + candidate_feature_ids = [1, 2, 3, 4] + l1 = [1.0] + l2 = [1.0] + tree_complexity = [1.0] + min_node_weight = [1.17] + logits_dimension = 5 + with self.assertRaises((errors.InvalidArgumentError, ValueError)): + gen_boosted_trees_ops.boosted_trees_calculate_best_feature_split_v2( + node_id_range=node_id_range, + stats_summaries_list=stats_summaries_list, + split_types=split_types, + candidate_feature_ids=candidate_feature_ids, + l1=l1, + l2=l2, + tree_complexity=tree_complexity, + min_node_weight=min_node_weight, + logits_dimension=logits_dimension) + + def testBoostedTreesSparseCalculateBestFeatureSplitSecurity(self): + node_id_range = [] + stats_summary_indices = [[]] + stats_summary_values = [1.0] + stats_summary_shape = [1, 1, 1, 1] + l1 = [1.0] + l2 = [1.0] + tree_complexity = [0.5] + min_node_weight = [1.0] + logits_dimension = 3 + split_type = 'inequality' + with self.assertRaises((errors.InvalidArgumentError, ValueError)): + gen_boosted_trees_ops.boosted_trees_sparse_calculate_best_feature_split( + node_id_range=node_id_range, + stats_summary_indices=stats_summary_indices, + stats_summary_values=stats_summary_values, + stats_summary_shape=stats_summary_shape, + l1=l1, + l2=l2, + tree_complexity=tree_complexity, + min_node_weight=min_node_weight, + logits_dimension=logits_dimension, + split_type=split_type) + + def testBoostedTreesSparseCalculateBestFeatureSplitSecurity2(self): + with self.assertRaises((errors.InvalidArgumentError, ValueError)): + gen_boosted_trees_ops.boosted_trees_sparse_calculate_best_feature_split( + node_id_range=[0, 1], + stats_summary_indices=[[0, -1, -1, -1], [1, 0, -1, 0], [1, 0, 0, -1]], + stats_summary_values=[0.1, 0.2, 0.3], + stats_summary_shape=[1, 1, 1, 1], + l1=[0.5], + l2=[0.5], + tree_complexity=[0.1], + min_node_weight=[1.0], + logits_dimension=1) + + def testBoostedTreesMakeStatsSummarySecurity(self): + node_ids = [1, 2] + gradients = [[]] + hessians = [[0.2], [0.1]] + bucketized_features_list = [[1], [2]] + max_splits = 3 + num_buckets = 3 + with self.assertRaises((errors.InvalidArgumentError, ValueError)): + gen_boosted_trees_ops.boosted_trees_make_stats_summary( + node_ids=node_ids, + gradients=gradients, + hessians=hessians, + bucketized_features_list=bucketized_features_list, + max_splits=max_splits, + num_buckets=num_buckets) + + def testBoostedTreesMakeStatsSummarySecurity2(self): + node_ids = [1, 2, 3] + gradients = [[0.1], [0.2]] + hessians = [[0.2], [0.1]] + bucketized_features_list = [[1], [2]] + max_splits = 3 + num_buckets = 3 + with self.assertRaises((errors.InvalidArgumentError, ValueError)): + gen_boosted_trees_ops.boosted_trees_make_stats_summary( + node_ids=node_ids, + gradients=gradients, + hessians=hessians, + bucketized_features_list=bucketized_features_list, + max_splits=max_splits, + num_buckets=num_buckets) + + def testBoostedTreesAggregateStatsSecurity(self): + node_ids = [1, 2] + gradients = [[]] + hessians = [[100.0]] + feature = [[0, 0, 0]] + max_splits = 100 + num_buckets = 100 + 
with self.assertRaises((errors.InvalidArgumentError, ValueError)): + gen_boosted_trees_ops.boosted_trees_aggregate_stats( + node_ids=node_ids, + gradients=gradients, + hessians=hessians, + feature=feature, + max_splits=max_splits, + num_buckets=num_buckets) + + def testBoostedTreesAggregateStatsSecurity2(self): + node_ids = [-10] + gradients = [[0.0, 0.0]] + hessians = [[100.0]] + feature = [[0, 0, 0]] + max_splits = 100 + num_buckets = 100 + with self.assertRaises((errors.InvalidArgumentError, ValueError)): + self.evaluate( + gen_boosted_trees_ops.boosted_trees_aggregate_stats( + node_ids=node_ids, + gradients=gradients, + hessians=hessians, + feature=feature, + max_splits=max_splits, + num_buckets=num_buckets)) + + def testBoostedTreesSparseAggregateStatsSecurity(self): + node_ids = [] + gradients = [[1.0]] + hessians = [[100.0]] + feature_indices = [[0, 0, 0]] + feature_values = [0, 0, 0] + feature_shape = [0, 0, 0] + max_splits = 100 + num_buckets = 100 + with self.assertRaises((errors.InvalidArgumentError, ValueError)): + gen_boosted_trees_ops.boosted_trees_sparse_aggregate_stats( + node_ids=node_ids, + gradients=gradients, + hessians=hessians, + feature_indices=feature_indices, + feature_values=feature_values, + feature_shape=feature_shape, + max_splits=max_splits, + num_buckets=num_buckets) + class BestMultiDimFeatureSplitMultiClassV2Op(StatsOpsTest): """Tests multi-class/multi-regression for best splits using V2 op.""" diff --git a/tensorflow/python/kernel_tests/concat_op_test.py b/tensorflow/python/kernel_tests/concat_op_test.py index da4f4f86b0220d..3f1401baa730ec 100644 --- a/tensorflow/python/kernel_tests/concat_op_test.py +++ b/tensorflow/python/kernel_tests/concat_op_test.py @@ -20,6 +20,7 @@ import numpy as np +from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors_impl @@ -574,6 +575,17 @@ def testConcatInvalidAxis(self): t2 = [2] gen_array_ops.concat_v2([t1, t2], 1).eval() + def testConcatInvalidAxisInTfFunction(self): + + @def_function.function + def concat_wrapper(): + y = gen_array_ops.concat_v2( + values=[[1, 2, 3], [4, 5, 6]], axis=0xb500005b) + return y + + with self.assertRaises(ValueError): + concat_wrapper() + def testConcatNegativeAxis(self): with test_util.use_gpu(): t1 = [[1, 2, 3], [4, 5, 6]] diff --git a/tensorflow/python/kernel_tests/conv_ops_3d_test.py b/tensorflow/python/kernel_tests/conv_ops_3d_test.py index 5a7fa64d2bfbde..dabc61f8306bbe 100644 --- a/tensorflow/python/kernel_tests/conv_ops_3d_test.py +++ b/tensorflow/python/kernel_tests/conv_ops_3d_test.py @@ -24,6 +24,7 @@ from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors_impl from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.ops import gradient_checker @@ -460,6 +461,16 @@ def testKernelSizeMatchesInputSize(self): padding="VALID", expected=[1.5625, 1.875]) + def testZeroSizedFilterThrowsIllegalArgument(self): + tensor_in_sizes = [1, 1, 1, 1, 1] + x1 = self._CreateNumpyTensor(tensor_in_sizes) + filter_in = np.ones((1, 1, 0, 1, 1), dtype=np.float32) + with self.assertRaisesRegex( + errors_impl.InvalidArgumentError, "filter must not have zero elements" + "|has a non-positive dimension"): + self.evaluate( + nn_ops.conv3d(x1, filter_in, strides=[1, 1, 1, 1, 1], padding="SAME")) + def 
_ConstructAndTestGradientForConfig( self, batch, input_shape, filter_shape, in_depth, out_depth, stride, padding, test_input, data_format, use_gpu): diff --git a/tensorflow/python/kernel_tests/conv_ops_test.py b/tensorflow/python/kernel_tests/conv_ops_test.py index 424cdcb90fbc50..d753e74589db47 100644 --- a/tensorflow/python/kernel_tests/conv_ops_test.py +++ b/tensorflow/python/kernel_tests/conv_ops_test.py @@ -2531,139 +2531,83 @@ def testShapeFunctionEdgeCases(self): strides=[1, 1, 1, 1], padding=[0, 0, 0, 0]) - @test_util.deprecated_graph_mode_only def testOpEdgeCases(self): - with self.cached_session() as sess: - # Illegal strides. - with self.assertRaisesRegex(errors_impl.UnimplementedError, - "strides in the batch and depth"): - input_placeholder = array_ops.placeholder(dtypes.float32) - input_val = np.ones([10, 10]) - filter_placeholder = array_ops.placeholder(dtypes.float32) - filter_val = np.ones([10, 10]) - sess.run( - nn_ops.conv2d( - input_placeholder, - filter_placeholder, - strides=[2, 1, 1, 1], - padding="SAME"), - feed_dict={ - input_placeholder: input_val, - filter_placeholder: filter_val - }) - with self.assertRaisesRegex(errors_impl.UnimplementedError, - "strides in the batch and depth"): - input_placeholder = array_ops.placeholder(dtypes.float32) - filter_placeholder = array_ops.placeholder(dtypes.float32) - input_val = np.ones([10, 10]) - filter_val = np.ones([10, 10]) - sess.run( - nn_ops.conv2d( - input_placeholder, - filter_placeholder, - strides=[1, 1, 1, 2], - padding="SAME"), - feed_dict={ - input_placeholder: input_val, - filter_placeholder: filter_val - }) - - # Filter larger than input. - with self.assertRaisesRegex(ValueError, "Negative dimension size"): - input_placeholder = array_ops.placeholder( - dtypes.float32, shape=[32, 20, 20, 3]) - input_val = np.ones([32, 20, 20, 3]) - filter_placeholder = array_ops.placeholder( - dtypes.float32, shape=[20, 21, 3, 2]) - filter_val = np.ones([20, 21, 3, 2]) - - sess.run( - nn_ops.conv2d( - input_placeholder, - filter_placeholder, - strides=[1, 1, 1, 1], - padding="VALID"), - feed_dict={ - input_placeholder: input_val, - filter_placeholder: filter_val - }) - with self.assertRaisesRegex(ValueError, "Negative dimension size"): - input_placeholder = array_ops.placeholder( - dtypes.float32, shape=[32, 20, 20, 3]) - input_val = np.ones([32, 20, 20, 3]) - filter_placeholder = array_ops.placeholder( - dtypes.float32, shape=[21, 20, 3, 2]) - filter_val = np.ones([21, 20, 3, 2]) - sess.run( - nn_ops.conv2d( - input_placeholder, - filter_placeholder, - strides=[1, 1, 1, 1], - padding="VALID"), - feed_dict={ - input_placeholder: input_val, - filter_placeholder: filter_val - }) - - # Filter larger than input + padding. - with self.assertRaisesRegex(ValueError, "Negative dimension size"): - input_placeholder = array_ops.placeholder( - dtypes.float32, shape=[32, 20, 20, 3]) - input_val = np.ones([32, 20, 20, 3]) - filter_placeholder = array_ops.placeholder( - dtypes.float32, shape=[24, 25, 3, 2]) - filter_val = np.ones([24, 25, 3, 2]) - sess.run( - nn_ops.conv2d( - input_placeholder, - filter_placeholder, - strides=[1, 1, 1, 1], - padding=[[0, 0], [2, 2], [2, 2], [0, 0]]), - feed_dict={ - input_placeholder: input_val, - filter_placeholder: filter_val - }) - - # Negative padding during backprop. 
- with self.assertRaisesRegex( - errors_impl.InvalidArgumentError, - "All elements of explicit_paddings must be nonnegative"): - filter_placeholder = array_ops.placeholder( - dtypes.float32, shape=[18, 18, 3, 2]) - filter_val = np.ones([18, 18, 3, 2]) - out_backprop = array_ops.placeholder( - dtypes.float32, shape=[32, 3, 2, 2]) - out_backprop_val = np.ones([32, 3, 2, 2]) - sess.run( - nn_ops.conv2d_backprop_input([32, 20, 20, 3], - filter_placeholder, - out_backprop, - strides=[1, 1, 1, 1], - padding=[[0, 0], [-1, 0], [0, 0], - [0, 0]]), - feed_dict={ - filter_placeholder: filter_val, - out_backprop: out_backprop_val - }) - with self.assertRaisesRegex( - errors_impl.InvalidArgumentError, - "All elements of explicit_paddings must be nonnegative"): - input_placeholder = array_ops.placeholder( - dtypes.float32, shape=[32, 20, 20, 3]) - input_val = np.ones([32, 20, 20, 3]) - out_backprop = array_ops.placeholder( - dtypes.float32, shape=[32, 3, 2, 2]) - out_backprop_val = np.ones([32, 3, 2, 2]) - sess.run( - nn_ops.conv2d_backprop_filter( - input_placeholder, [18, 18, 3, 2], - out_backprop, - strides=[1, 1, 1, 1], - padding=[[0, 0], [-1, 0], [0, 0], [0, 0]]), - feed_dict={ - input_placeholder: input_val, - out_backprop: out_backprop_val - }) + # Illegal strides. + with self.assertRaisesRegex((ValueError, errors_impl.UnimplementedError), + "strides in the batch and depth"): + input_val = np.ones([2, 4, 10, 10]) + filter_val = np.ones([2, 4, 10, 10]) + self.evaluate( + nn_ops.conv2d( + input_val, filter_val, strides=[2, 1, 1, 1], padding="SAME")) + with self.assertRaisesRegex((ValueError, errors_impl.UnimplementedError), + "strides in the batch and depth"): + input_val = np.ones([2, 4, 10, 10]) + filter_val = np.ones([2, 4, 10, 10]) + self.evaluate( + nn_ops.conv2d( + input_val, filter_val, strides=[1, 1, 1, 2], padding="SAME")) + + # TODO(b/195689143): Will enable when fixed for V2 behavior + # # Filter larger than input. + # with self.assertRaisesRegex(ValueError, "Negative dimension size"): + # input_val = np.ones([32, 20, 20, 3]) + # filter_val = np.ones([20, 21, 3, 2]) + # self.evaluate( + # nn_ops.conv2d( + # input_val, filter_val, strides=[1, 1, 1, 1], padding="VALID")) + # with self.assertRaisesRegex(ValueError, "Negative dimension size"): + # input_val = np.ones([32, 20, 20, 3]) + # filter_val = np.ones([21, 20, 3, 2]) + # self.evaluate( + # nn_ops.conv2d( + # input_val, filter_val, strides=[1, 1, 1, 1], padding="VALID")) + # + # # Filter larger than input + padding. + # with self.assertRaisesRegex(ValueError, "Negative dimension size"): + # input_val = np.ones([32, 20, 20, 3]) + # filter_val = np.ones([24, 25, 3, 2]) + # self.evaluate( + # nn_ops.conv2d( + # input_val, + # filter_val, + # strides=[1, 1, 1, 1], + # padding=[[0, 0], [2, 2], [2, 2], [0, 0]])) + + # Filter dimensions must be greater than 0. + with self.assertRaisesRegex( + errors_impl.InvalidArgumentError, "filter must not have zero elements" + "|has a non-positive dimension"): + input_val = np.ones([1, 1, 1, 1]) + filter_val = np.ones([1, 0, 1, 1]) + self.evaluate( + nn_ops.conv2d( + input_val, filter_val, strides=[1, 1, 1, 1], padding="SAME")) + + # Negative padding during backprop. 
+ with self.assertRaisesRegex( + errors_impl.InvalidArgumentError, + "All elements of explicit_paddings must be nonnegative"): + filter_val = np.ones([18, 18, 3, 2]) + out_backprop_val = np.ones([32, 3, 2, 2]) + self.evaluate( + nn_ops.conv2d_backprop_input([32, 20, 20, 3], + filter_val, + out_backprop_val, + strides=[1, 1, 1, 1], + padding=[[0, 0], [-1, 0], [0, 0], [0, + 0]])) + with self.assertRaisesRegex( + errors_impl.InvalidArgumentError, + "All elements of explicit_paddings must be nonnegative"): + input_val = np.ones([32, 20, 20, 3]) + out_backprop_val = np.ones([32, 3, 2, 2]) + self.evaluate( + nn_ops.conv2d_backprop_filter( + input_val, [18, 18, 3, 2], + out_backprop_val, + strides=[1, 1, 1, 1], + padding=[[0, 0], [-1, 0], [0, 0], [0, 0]])) class DepthwiseConv2DTest(test.TestCase): @@ -2673,10 +2617,10 @@ def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding, """Verifies the output values of the convolution function. Args: - tensor_in_sizes: Input tensor dimensions in - [batch, input_rows, input_cols, input_depth]. - filter_in_sizes: Filter tensor dimensions in - [filter_rows, filter_cols, input_depth, depth_multiplier]. + tensor_in_sizes: Input tensor dimensions in [batch, input_rows, + input_cols, input_depth]. + filter_in_sizes: Filter tensor dimensions in [filter_rows, filter_cols, + input_depth, depth_multiplier]. stride: Stride. padding: Padding type. expected: An array containing the expected operation outputs. diff --git a/tensorflow/python/kernel_tests/distributions/BUILD b/tensorflow/python/kernel_tests/distributions/BUILD index d4f29d4837fd02..3c0fb03e441ac0 100644 --- a/tensorflow/python/kernel_tests/distributions/BUILD +++ b/tensorflow/python/kernel_tests/distributions/BUILD @@ -61,6 +61,7 @@ cuda_py_test( size = "small", srcs = ["beta_test.py"], tags = [ + "no_oss", "notsan", # b/173653918 ], xla_tags = [ diff --git a/tensorflow/python/kernel_tests/fractional_avg_pool_op_test.py b/tensorflow/python/kernel_tests/fractional_avg_pool_op_test.py index 0d5928aefacf5a..54be2f4844f12c 100644 --- a/tensorflow/python/kernel_tests/fractional_avg_pool_op_test.py +++ b/tensorflow/python/kernel_tests/fractional_avg_pool_op_test.py @@ -24,6 +24,7 @@ from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_nn_ops @@ -310,6 +311,32 @@ def testDifferentInputTensorShape(self): input_b, row_seq, col_seq, overlapping) self.assertSequenceEqual(expected.shape, actual.shape) + def testNegativeSeqValuesForGradOp(self): + with self.assertRaisesRegex( + errors.InvalidArgumentError, + r"Row sequence tensor values must not be negative.*"): + y = nn_ops.gen_nn_ops.fractional_avg_pool_grad( + orig_input_tensor_shape=[2, 2, 2, 2], + out_backprop=[[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11, + 12]]]], + row_pooling_sequence=[-10, 1, 2, 3], + col_pooling_sequence=[1, 2, 3, 4], + overlapping=True) + + self.evaluate(y) + with self.assertRaisesRegex( + errors.InvalidArgumentError, + r"Column sequence tensor values must not be negative.*"): + z = nn_ops.gen_nn_ops.fractional_avg_pool_grad( + orig_input_tensor_shape=[2, 2, 2, 2], + out_backprop=[[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11, + 12]]]], + row_pooling_sequence=[10, 1, 2, 3], + col_pooling_sequence=[1, 2, -3, 4], + overlapping=True) + + self.evaluate(z) + class 
FractionalAvgPoolGradTest(test.TestCase): """Tests for FractionalAvgPoolGrad. diff --git a/tensorflow/python/kernel_tests/fractional_max_pool_op_test.py b/tensorflow/python/kernel_tests/fractional_max_pool_op_test.py index 2b1e30a8bbd606..3e19a9a4a27277 100644 --- a/tensorflow/python/kernel_tests/fractional_max_pool_op_test.py +++ b/tensorflow/python/kernel_tests/fractional_max_pool_op_test.py @@ -307,6 +307,22 @@ def testDifferentInputTensorShape(self): input_b, row_seq, col_seq, overlapping) self.assertSequenceEqual(expected.shape, actual.shape) + def testDeterminismExceptionThrowing(self): + tensor_shape = (5, 20, 20, 3) + rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500 + with test_util.deterministic_ops(): + with self.assertRaisesRegex( + ValueError, "requires a non-zero seed to be passed in when " + "determinism is enabled"): + nn_ops.fractional_max_pool_v2(rand_mat, [1, 1.5, 1.5, 1]) + nn_ops.fractional_max_pool_v2(rand_mat, [1, 1.5, 1.5, 1], seed=1) + + with self.assertRaisesRegex(ValueError, + 'requires "seed" and "seed2" to be non-zero'): + nn_ops.fractional_max_pool(rand_mat, [1, 1.5, 1.5, 1]) + nn_ops.fractional_max_pool( + rand_mat, [1, 1.5, 1.5, 1], seed=1, seed2=1, deterministic=True) + class FractionalMaxPoolGradTest(test.TestCase): """Tests for FractionalMaxPoolGrad. diff --git a/tensorflow/python/kernel_tests/init_ops_test.py b/tensorflow/python/kernel_tests/init_ops_test.py index 898d6f3e9e3371..d13721146b5ca9 100644 --- a/tensorflow/python/kernel_tests/init_ops_test.py +++ b/tensorflow/python/kernel_tests/init_ops_test.py @@ -23,6 +23,7 @@ from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors_impl from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.framework import test_util @@ -542,6 +543,19 @@ def testMixedDType(self): constant_op.constant(4, dtype=dtypes.int32), dtype=dtypes.int64) self.assertAllEqual(self.evaluate(tf_ans), np.array([0, 1, 2, 3])) + def testLargeStarts(self): + # Test case for GitHub issue 46899. + with self.session(): + with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)): + v = math_ops.range(start=-1e+38, limit=1) + self.evaluate(v) + + def testLargeLimits(self): + # Test case for GitHub issue 46913. + with self.session(): + with self.assertRaises(errors_impl.ResourceExhaustedError): + v = math_ops.range(0, 9223372036854775807) + self.evaluate(v) # TODO(vrv): move to sequence_ops_test? class LinSpaceTest(test.TestCase): diff --git a/tensorflow/python/kernel_tests/logging_ops_test.py b/tensorflow/python/kernel_tests/logging_ops_test.py index b3c30f07d149b7..cb1645d407d8fc 100644 --- a/tensorflow/python/kernel_tests/logging_ops_test.py +++ b/tensorflow/python/kernel_tests/logging_ops_test.py @@ -274,7 +274,7 @@ def testPrintOneTensorStdout(self): self.assertIn((expected + "\n"), printed.contents()) def testPrintTensorsToFile(self): - tmpfile_name = tempfile.mktemp(".printv2_test") + fd, tmpfile_name = tempfile.mkstemp(".printv2_test") tensor_0 = math_ops.range(0, 10) print_op_0 = logging_ops.print_v2(tensor_0, output_stream="file://"+tmpfile_name) @@ -284,14 +284,14 @@ def testPrintTensorsToFile(self): output_stream="file://"+tmpfile_name) self.evaluate(print_op_1) try: - f = open(tmpfile_name, "r") + f = os.fdopen(fd, "r") line_0 = f.readline() expected_0 = "[0 1 2 ... 
7 8 9]" self.assertTrue(expected_0 in line_0) line_1 = f.readline() expected_1 = "[11 12 13 ... 17 18 19]" self.assertTrue(expected_1 in line_1) - f.close() + os.close(fd) os.remove(tmpfile_name) except IOError as e: self.fail(e) diff --git a/tensorflow/python/kernel_tests/map_stage_op_test.py b/tensorflow/python/kernel_tests/map_stage_op_test.py index 516fc37517ca57..8600ad1f8d726b 100644 --- a/tensorflow/python/kernel_tests/map_stage_op_test.py +++ b/tensorflow/python/kernel_tests/map_stage_op_test.py @@ -12,12 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +import numpy as np -from tensorflow.python.framework import errors +from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops @@ -32,7 +31,7 @@ class MapStageTest(test.TestCase): @test_util.run_deprecated_v1 def testSimple(self): - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.float32) pi = array_ops.placeholder(dtypes.int64) @@ -44,9 +43,9 @@ def testSimple(self): k, y = stager.get(gi) y = math_ops.reduce_max(math_ops.matmul(y, y)) - G.finalize() + g.finalize() - with self.session(graph=G) as sess: + with self.session(graph=g) as sess: sess.run(stage, feed_dict={x: -1, pi: 0}) for i in range(10): _, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i}) @@ -54,7 +53,7 @@ def testSimple(self): @test_util.run_deprecated_v1 def testMultiple(self): - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.float32) pi = array_ops.placeholder(dtypes.int64) @@ -66,9 +65,9 @@ def testMultiple(self): k, (z, y) = stager.get(gi) y = math_ops.reduce_max(z * math_ops.matmul(y, y)) - G.finalize() + g.finalize() - with self.session(graph=G) as sess: + with self.session(graph=g) as sess: sess.run(stage, feed_dict={x: -1, pi: 0}) for i in range(10): _, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i}) @@ -77,26 +76,25 @@ def testMultiple(self): @test_util.run_deprecated_v1 def testDictionary(self): - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.float32) pi = array_ops.placeholder(dtypes.int64) gi = array_ops.placeholder(dtypes.int64) v = 2. 
* (array_ops.zeros([128, 128]) + x) with ops.device(test.gpu_device_name()): - stager = data_flow_ops.MapStagingArea( - [dtypes.float32, dtypes.float32], - shapes=[[], [128, 128]], - names=['x', 'v']) + stager = data_flow_ops.MapStagingArea([dtypes.float32, dtypes.float32], + shapes=[[], [128, 128]], + names=['x', 'v']) stage = stager.put(pi, {'x': x, 'v': v}) key, ret = stager.get(gi) z = ret['x'] y = ret['v'] y = math_ops.reduce_max(z * math_ops.matmul(y, y)) - G.finalize() + g.finalize() - with self.session(graph=G) as sess: + with self.session(graph=g) as sess: sess.run(stage, feed_dict={x: -1, pi: 0}) for i in range(10): _, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i}) @@ -106,7 +104,7 @@ def testDictionary(self): def testColocation(self): gpu_dev = test.gpu_device_name() - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.float32) v = 2. * (array_ops.zeros([128, 128]) + x) @@ -123,58 +121,56 @@ def testColocation(self): self.assertEqual(y.device, '/device:CPU:0') self.assertEqual(z[0].device, '/device:CPU:0') - G.finalize() + g.finalize() @test_util.run_deprecated_v1 def testPeek(self): - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.int32, name='x') pi = array_ops.placeholder(dtypes.int64) gi = array_ops.placeholder(dtypes.int64) p = array_ops.placeholder(dtypes.int32, name='p') with ops.device(test.gpu_device_name()): - stager = data_flow_ops.MapStagingArea( - [ - dtypes.int32, - ], shapes=[[]]) + stager = data_flow_ops.MapStagingArea([ + dtypes.int32, + ], shapes=[[]]) stage = stager.put(pi, [x], [0]) peek = stager.peek(gi) size = stager.size() - G.finalize() + g.finalize() n = 10 - with self.session(graph=G) as sess: + with self.session(graph=g) as sess: for i in range(n): sess.run(stage, feed_dict={x: i, pi: i}) for i in range(n): - self.assertTrue(sess.run(peek, feed_dict={gi: i})[0] == i) + self.assertEqual(sess.run(peek, feed_dict={gi: i})[0], i) - self.assertTrue(sess.run(size) == 10) + self.assertEqual(sess.run(size), 10) @test_util.run_deprecated_v1 def testSizeAndClear(self): - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.float32, name='x') pi = array_ops.placeholder(dtypes.int64) gi = array_ops.placeholder(dtypes.int64) v = 2. 
* (array_ops.zeros([128, 128]) + x) with ops.device(test.gpu_device_name()): - stager = data_flow_ops.MapStagingArea( - [dtypes.float32, dtypes.float32], - shapes=[[], [128, 128]], - names=['x', 'v']) + stager = data_flow_ops.MapStagingArea([dtypes.float32, dtypes.float32], + shapes=[[], [128, 128]], + names=['x', 'v']) stage = stager.put(pi, {'x': x, 'v': v}) size = stager.size() clear = stager.clear() - G.finalize() + g.finalize() - with self.session(graph=G) as sess: + with self.session(graph=g) as sess: sess.run(stage, feed_dict={x: -1, pi: 3}) self.assertEqual(sess.run(size), 1) sess.run(stage, feed_dict={x: -1, pi: 1}) @@ -186,22 +182,23 @@ def testSizeAndClear(self): def testCapacity(self): capacity = 3 - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.int32, name='x') pi = array_ops.placeholder(dtypes.int64, name='pi') gi = array_ops.placeholder(dtypes.int64, name='gi') with ops.device(test.gpu_device_name()): - stager = data_flow_ops.MapStagingArea( - [ - dtypes.int32, - ], capacity=capacity, shapes=[[]]) + stager = data_flow_ops.MapStagingArea([ + dtypes.int32, + ], + capacity=capacity, + shapes=[[]]) stage = stager.put(pi, [x], [0]) get = stager.get() size = stager.size() - G.finalize() + g.finalize() from six.moves import queue as Queue import threading @@ -209,7 +206,7 @@ def testCapacity(self): queue = Queue.Queue() n = 8 - with self.session(graph=G) as sess: + with self.session(graph=g) as sess: # Stage data in a separate thread which will block # when it hits the staging area's capacity and thus # not fill the queue with n tokens @@ -238,13 +235,13 @@ def thread_run(): capacity)) # Should have capacity elements in the staging area - self.assertTrue(sess.run(size) == capacity) + self.assertEqual(sess.run(size), capacity) # Clear the staging area completely for i in range(n): sess.run(get) - self.assertTrue(sess.run(size) == 0) + self.assertEqual(sess.run(size), 0) @test_util.run_deprecated_v1 def testMemoryLimit(self): @@ -252,28 +249,28 @@ def testMemoryLimit(self): chunk = 200 * 1024 # 256K capacity = memory_limit // chunk - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.uint8, name='x') pi = array_ops.placeholder(dtypes.int64, name='pi') gi = array_ops.placeholder(dtypes.int64, name='gi') with ops.device(test.gpu_device_name()): - stager = data_flow_ops.MapStagingArea( - [dtypes.uint8], memory_limit=memory_limit, shapes=[[]]) + stager = data_flow_ops.MapStagingArea([dtypes.uint8], + memory_limit=memory_limit, + shapes=[[]]) stage = stager.put(pi, [x], [0]) get = stager.get() size = stager.size() - G.finalize() + g.finalize() from six.moves import queue as Queue import threading - import numpy as np queue = Queue.Queue() n = 8 - with self.session(graph=G) as sess: + with self.session(graph=g) as sess: # Stage data in a separate thread which will block # when it hits the staging area's capacity and thus # not fill the queue with n tokens @@ -303,56 +300,57 @@ def thread_run(): capacity)) # Should have capacity elements in the staging area - self.assertTrue(sess.run(size) == capacity) + self.assertEqual(sess.run(size), capacity) # Clear the staging area completely for i in range(n): sess.run(get) - self.assertTrue(sess.run(size) == 0) + self.assertEqual(sess.run(size), 0) @test_util.run_deprecated_v1 def testOrdering(self): import six import random - with ops.Graph().as_default() as G: + with 
ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.int32, name='x') pi = array_ops.placeholder(dtypes.int64, name='pi') gi = array_ops.placeholder(dtypes.int64, name='gi') with ops.device(test.gpu_device_name()): - stager = data_flow_ops.MapStagingArea( - [ - dtypes.int32, - ], shapes=[[]], ordered=True) + stager = data_flow_ops.MapStagingArea([ + dtypes.int32, + ], + shapes=[[]], + ordered=True) stage = stager.put(pi, [x], [0]) get = stager.get() size = stager.size() - G.finalize() + g.finalize() n = 10 - with self.session(graph=G) as sess: + with self.session(graph=g) as sess: # Keys n-1..0 keys = list(reversed(six.moves.range(n))) for i in keys: sess.run(stage, feed_dict={pi: i, x: i}) - self.assertTrue(sess.run(size) == n) + self.assertEqual(sess.run(size), n) # Check that key, values come out in ascending order for i, k in enumerate(reversed(keys)): get_key, values = sess.run(get) self.assertTrue(i == k == get_key == values) - self.assertTrue(sess.run(size) == 0) + self.assertEqual(sess.run(size), 0) @test_util.run_deprecated_v1 def testPartialDictInsert(self): - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.float32) f = array_ops.placeholder(dtypes.float32) @@ -370,41 +368,39 @@ def testPartialDictInsert(self): size = stager.size() isize = stager.incomplete_size() - G.finalize() + g.finalize() - with self.session(graph=G) as sess: + with self.session(graph=g) as sess: # 0 complete and incomplete entries - self.assertTrue(sess.run([size, isize]) == [0, 0]) + self.assertEqual(sess.run([size, isize]), [0, 0]) # Stage key 0, x and f tuple entries sess.run(stage_xf, feed_dict={pi: 0, x: 1, f: 2}) - self.assertTrue(sess.run([size, isize]) == [0, 1]) + self.assertEqual(sess.run([size, isize]), [0, 1]) # Stage key 1, x and f tuple entries sess.run(stage_xf, feed_dict={pi: 1, x: 1, f: 2}) - self.assertTrue(sess.run([size, isize]) == [0, 2]) + self.assertEqual(sess.run([size, isize]), [0, 2]) # Now complete key 0 with tuple entry v sess.run(stage_v, feed_dict={pi: 0, v: 1}) # 1 complete and 1 incomplete entry - self.assertTrue(sess.run([size, isize]) == [1, 1]) + self.assertEqual(sess.run([size, isize]), [1, 1]) # We can now obtain tuple associated with key 0 - self.assertTrue( - sess.run([key, ret], feed_dict={ - gi: 0 - }) == [0, { + self.assertEqual( + sess.run([key, ret], feed_dict={gi: 0}), + [0, { 'x': 1, 'f': 2, 'v': 1 }]) # 0 complete and 1 incomplete entry - self.assertTrue(sess.run([size, isize]) == [0, 1]) + self.assertEqual(sess.run([size, isize]), [0, 1]) # Now complete key 1 with tuple entry v sess.run(stage_v, feed_dict={pi: 1, v: 3}) # We can now obtain tuple associated with key 1 - self.assertTrue( - sess.run([key, ret], feed_dict={ - gi: 1 - }) == [1, { + self.assertEqual( + sess.run([key, ret], feed_dict={gi: 1}), + [1, { 'x': 1, 'f': 2, 'v': 3 @@ -412,7 +408,7 @@ def testPartialDictInsert(self): @test_util.run_deprecated_v1 def testPartialIndexInsert(self): - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.float32) f = array_ops.placeholder(dtypes.float32) @@ -428,35 +424,35 @@ def testPartialIndexInsert(self): size = stager.size() isize = stager.incomplete_size() - G.finalize() + g.finalize() - with self.session(graph=G) as sess: + with self.session(graph=g) as sess: # 0 complete and incomplete entries - self.assertTrue(sess.run([size, isize]) == [0, 0]) + 
self.assertEqual(sess.run([size, isize]), [0, 0]) # Stage key 0, x and f tuple entries sess.run(stage_xf, feed_dict={pi: 0, x: 1, f: 2}) - self.assertTrue(sess.run([size, isize]) == [0, 1]) + self.assertEqual(sess.run([size, isize]), [0, 1]) # Stage key 1, x and f tuple entries sess.run(stage_xf, feed_dict={pi: 1, x: 1, f: 2}) - self.assertTrue(sess.run([size, isize]) == [0, 2]) + self.assertEqual(sess.run([size, isize]), [0, 2]) # Now complete key 0 with tuple entry v sess.run(stage_v, feed_dict={pi: 0, v: 1}) # 1 complete and 1 incomplete entry - self.assertTrue(sess.run([size, isize]) == [1, 1]) + self.assertEqual(sess.run([size, isize]), [1, 1]) # We can now obtain tuple associated with key 0 - self.assertTrue(sess.run([key, ret], feed_dict={gi: 0}) == [0, [1, 1, 2]]) + self.assertEqual(sess.run([key, ret], feed_dict={gi: 0}), [0, [1, 1, 2]]) # 0 complete and 1 incomplete entry - self.assertTrue(sess.run([size, isize]) == [0, 1]) + self.assertEqual(sess.run([size, isize]), [0, 1]) # Now complete key 1 with tuple entry v sess.run(stage_v, feed_dict={pi: 1, v: 3}) # We can now obtain tuple associated with key 1 - self.assertTrue(sess.run([key, ret], feed_dict={gi: 1}) == [1, [1, 3, 2]]) + self.assertEqual(sess.run([key, ret], feed_dict={gi: 1}), [1, [1, 3, 2]]) @test_util.run_deprecated_v1 def testPartialDictGetsAndPeeks(self): - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.float32) f = array_ops.placeholder(dtypes.float32) @@ -480,40 +476,38 @@ def testPartialDictGetsAndPeeks(self): size = stager.size() isize = stager.incomplete_size() - G.finalize() + g.finalize() - with self.session(graph=G) as sess: + with self.session(graph=g) as sess: # 0 complete and incomplete entries - self.assertTrue(sess.run([size, isize]) == [0, 0]) + self.assertEqual(sess.run([size, isize]), [0, 0]) # Stage key 0, x and f tuple entries sess.run(stage_xf, feed_dict={pi: 0, x: 1, f: 2}) - self.assertTrue(sess.run([size, isize]) == [0, 1]) + self.assertEqual(sess.run([size, isize]), [0, 1]) # Stage key 1, x and f tuple entries sess.run(stage_xf, feed_dict={pi: 1, x: 1, f: 2}) - self.assertTrue(sess.run([size, isize]) == [0, 2]) + self.assertEqual(sess.run([size, isize]), [0, 2]) # Now complete key 0 with tuple entry v sess.run(stage_v, feed_dict={pi: 0, v: 1}) # 1 complete and 1 incomplete entry - self.assertTrue(sess.run([size, isize]) == [1, 1]) + self.assertEqual(sess.run([size, isize]), [1, 1]) # We can now peek at 'x' and 'f' values associated with key 0 - self.assertTrue(sess.run(peek_xf, feed_dict={pei: 0}) == {'x': 1, 'f': 2}) + self.assertEqual(sess.run(peek_xf, feed_dict={pei: 0}), {'x': 1, 'f': 2}) # Peek at 'v' value associated with key 0 - self.assertTrue(sess.run(peek_v, feed_dict={pei: 0}) == {'v': 1}) + self.assertEqual(sess.run(peek_v, feed_dict={pei: 0}), {'v': 1}) # 1 complete and 1 incomplete entry - self.assertTrue(sess.run([size, isize]) == [1, 1]) + self.assertEqual(sess.run([size, isize]), [1, 1]) # We can now obtain 'x' and 'f' values associated with key 0 - self.assertTrue( - sess.run([key_xf, get_xf], feed_dict={ - gi: 0 - }) == [0, { + self.assertEqual( + sess.run([key_xf, get_xf], feed_dict={gi: 0}), [0, { 'x': 1, 'f': 2 }]) # Still have 1 complete and 1 incomplete entry - self.assertTrue(sess.run([size, isize]) == [1, 1]) + self.assertEqual(sess.run([size, isize]), [1, 1]) # We can no longer get 'x' and 'f' from key 0 with self.assertRaises(errors.InvalidArgumentError) as cm: @@ -521,40 +515,36 
@@ def testPartialDictGetsAndPeeks(self): exc_str = ("Tensor at index '0' for key '0' " 'has already been removed.') - self.assertTrue(exc_str in cm.exception.message) + self.assertIn(exc_str, cm.exception.message) # Obtain 'v' value associated with key 0 - self.assertTrue( - sess.run([key_v, get_v], feed_dict={ - gi: 0 - }) == [0, { + self.assertEqual( + sess.run([key_v, get_v], feed_dict={gi: 0}), [0, { 'v': 1 }]) # 0 complete and 1 incomplete entry - self.assertTrue(sess.run([size, isize]) == [0, 1]) + self.assertEqual(sess.run([size, isize]), [0, 1]) # Now complete key 1 with tuple entry v sess.run(stage_v, feed_dict={pi: 1, v: 1}) # 1 complete and 1 incomplete entry - self.assertTrue(sess.run([size, isize]) == [1, 0]) + self.assertEqual(sess.run([size, isize]), [1, 0]) # Pop without key to obtain 'x' and 'f' values associated with key 1 - self.assertTrue(sess.run([pop_key_xf, pop_xf]) == [1, {'x': 1, 'f': 2}]) + self.assertEqual(sess.run([pop_key_xf, pop_xf]), [1, {'x': 1, 'f': 2}]) # still 1 complete and 1 incomplete entry - self.assertTrue(sess.run([size, isize]) == [1, 0]) + self.assertEqual(sess.run([size, isize]), [1, 0]) # We can now obtain 'x' and 'f' values associated with key 1 - self.assertTrue( - sess.run([pop_key_v, pop_v], feed_dict={ - pi: 1 - }) == [1, { + self.assertEqual( + sess.run([pop_key_v, pop_v], feed_dict={pi: 1}), [1, { 'v': 1 }]) # Nothing is left - self.assertTrue(sess.run([size, isize]) == [0, 0]) + self.assertEqual(sess.run([size, isize]), [0, 0]) @test_util.run_deprecated_v1 def testPartialIndexGets(self): - with ops.Graph().as_default() as G: + with ops.Graph().as_default() as g: with ops.device('/cpu:0'): x = array_ops.placeholder(dtypes.float32) f = array_ops.placeholder(dtypes.float32) @@ -572,28 +562,72 @@ def testPartialIndexGets(self): size = stager.size() isize = stager.incomplete_size() - G.finalize() + g.finalize() - with self.session(graph=G) as sess: + with self.session(graph=g) as sess: # Stage complete tuple sess.run(stage_xvf, feed_dict={pi: 0, x: 1, f: 2, v: 3}) - self.assertTrue(sess.run([size, isize]) == [1, 0]) + self.assertEqual(sess.run([size, isize]), [1, 0]) # Partial get using indices - self.assertTrue( - sess.run([key_xf, get_xf], feed_dict={ - gi: 0 - }) == [0, [1, 2]]) + self.assertEqual( + sess.run([key_xf, get_xf], feed_dict={gi: 0}), [0, [1, 2]]) # Still some of key 0 left - self.assertTrue(sess.run([size, isize]) == [1, 0]) + self.assertEqual(sess.run([size, isize]), [1, 0]) # Partial get of remaining index - self.assertTrue(sess.run([key_v, get_v], feed_dict={gi: 0}) == [0, [3]]) + self.assertEqual(sess.run([key_v, get_v], feed_dict={gi: 0}), [0, [3]]) # All gone - self.assertTrue(sess.run([size, isize]) == [0, 0]) + self.assertEqual(sess.run([size, isize]), [0, 0]) + + @test_util.run_deprecated_v1 + def testNonScalarKeyOrderedMap(self): + with ops.Graph().as_default() as g: + x = array_ops.placeholder(dtypes.float32) + v = 2. 
* (array_ops.zeros([128, 128]) + x) + t = data_flow_ops.gen_data_flow_ops.ordered_map_stage( + key=constant_op.constant(value=[1], shape=(1, 3), dtype=dtypes.int64), + indices=np.array([[6]]), + values=[x, v], + dtypes=[dtypes.int64], + capacity=0, + memory_limit=0, + container='container1', + shared_name='', + name=None) + + g.finalize() + + with self.session(graph=g) as sess: + with self.assertRaisesRegex(errors.InvalidArgumentError, + 'key must be an int64 scalar'): + sess.run(t, feed_dict={x: 1}) + + @test_util.run_deprecated_v1 + def testNonScalarKeyUnorderedMap(self): + with ops.Graph().as_default() as g: + x = array_ops.placeholder(dtypes.float32) + v = 2. * (array_ops.zeros([128, 128]) + x) + t = data_flow_ops.gen_data_flow_ops.map_stage( + key=constant_op.constant(value=[1], shape=(1, 3), dtype=dtypes.int64), + indices=np.array([[6]]), + values=[x, v], + dtypes=[dtypes.int64], + capacity=0, + memory_limit=0, + container='container1', + shared_name='', + name=None) + + g.finalize() + + with self.session(graph=g) as sess: + with self.assertRaisesRegex(errors.InvalidArgumentError, + 'key must be an int64 scalar'): + sess.run(t, feed_dict={x: 1}) if __name__ == '__main__': diff --git a/tensorflow/python/kernel_tests/matrix_solve_op_test.py b/tensorflow/python/kernel_tests/matrix_solve_op_test.py index 0d149de2acb5e5..1739b2272be810 100644 --- a/tensorflow/python/kernel_tests/matrix_solve_op_test.py +++ b/tensorflow/python/kernel_tests/matrix_solve_op_test.py @@ -112,6 +112,12 @@ def testWrongDimensions(self): with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)): self.evaluate(linalg_ops.matrix_solve(matrix, rhs)) + # The matrix and right-hand side should have the same batch dimensions + matrix = np.random.normal(size=(2, 6, 2, 2)) + rhs = np.random.normal(size=(2, 3, 2, 2)) + with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)): + self.evaluate(linalg_ops.matrix_solve(matrix, rhs)) + def testNotInvertible(self): # The input should be invertible. with self.assertRaisesOpError("Input matrix is not invertible."): diff --git a/tensorflow/python/kernel_tests/pooling_ops_3d_test.py b/tensorflow/python/kernel_tests/pooling_ops_3d_test.py index 051f7e1168a72b..83d26e27d9849f 100644 --- a/tensorflow/python/kernel_tests/pooling_ops_3d_test.py +++ b/tensorflow/python/kernel_tests/pooling_ops_3d_test.py @@ -21,6 +21,7 @@ import numpy as np from tensorflow.python.framework import constant_op +from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.ops import gradient_checker from tensorflow.python.ops import gradients_impl @@ -505,6 +506,19 @@ def testAvgPoolGradSamePadding3_1_3d(self): strides=(1, 1, 1), padding="SAME") + def testMaxPool3DZeroPoolSize(self): + # Test case for GitHub issue 51936. + for f in [nn_ops.max_pool3d, nn_ops.avg_pool3d]: + with self.session(): + with self.assertRaises((errors.InvalidArgumentError, ValueError)): + input_sizes = [3, 4, 10, 11, 12] + + input_data = 1. 
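+ # The scalar input_data is broadcast by constant_op.constant to the full + # [3, 4, 10, 11, 12] shape; the zero in ksize should then be rejected up + # front, before any pooling arithmetic (e.g. dividing by the window size) + # can run.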
+ input_tensor = constant_op.constant( + input_data, shape=input_sizes, name="input") + pool_3d = f(input_tensor, ksize=[2, 2, 0], strides=1, padding="VALID") + self.evaluate(pool_3d) + if __name__ == "__main__": test.main() diff --git a/tensorflow/python/kernel_tests/segment_reduction_ops_test.py b/tensorflow/python/kernel_tests/segment_reduction_ops_test.py index d4ff43b8341917..b414e0daa9725c 100644 --- a/tensorflow/python/kernel_tests/segment_reduction_ops_test.py +++ b/tensorflow/python/kernel_tests/segment_reduction_ops_test.py @@ -267,6 +267,20 @@ def testDataInvalid(self): data=np.uint16(10), segment_ids=np.array([]).astype("int64")) self.evaluate(s) + def testInvalidIds(self): + # Test case for GitHub issue 46888. + for op in [ + math_ops.segment_max, + math_ops.segment_min, + math_ops.segment_mean, + math_ops.segment_sum, + math_ops.segment_prod, + ]: + with self.cached_session(use_gpu=False): + with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)): + s = op(data=np.ones((1, 10, 1)), segment_ids=[1676240524292489355]) + self.evaluate(s) + class UnsortedSegmentTest(SegmentReductionHelper): @@ -743,6 +757,19 @@ def testSegmentsInvalid7(self): with self.assertRaisesOpError("segment ids must be >= 0"): self.evaluate(s) + @test_util.run_deprecated_v1 + def testSegmentsInvalid8(self): + tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32) + ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean] + segment_indices = [2**62 - 1] + tf_indices = [2**62 - 1] + with self.session(use_gpu=False): + for tf_op in ops_list: + s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices) + with self.assertRaisesOpError( + "Encountered overflow when multiplying"): + self.evaluate(s) + def testSegmentWithNumSegmentsValid(self): # Baseline for the test*WithNumSegmentsInvalid* methods below. tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32) diff --git a/tensorflow/python/kernel_tests/shape_ops_test.py b/tensorflow/python/kernel_tests/shape_ops_test.py index c5f6d02da64efc..3380d47c88accd 100644 --- a/tensorflow/python/kernel_tests/shape_ops_test.py +++ b/tensorflow/python/kernel_tests/shape_ops_test.py @@ -723,6 +723,17 @@ def testShapeFunctionEdgeCases(self): inp, array_ops.placeholder( dtypes.int32, shape=[3])) + def testLargeTensor(self): + # Test case for GitHub issue 46911. + if test_util.is_xla_enabled(): + # The following test fails with XLA enabled.
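+ # Requesting roughly 1e24 elements also overflows a signed int64 element + # count, which is presumably why the kernel rejects the request instead of + # attempting the allocation.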
+ return + with self.assertRaises(errors_impl.InvalidArgumentError): + with self.cached_session(): + tiled = array_ops.tile( + np.ones((1, 1, 1)), [100000000, 100000000, 100000000]) + self.evaluate(tiled) + if __name__ == "__main__": test.main() diff --git a/tensorflow/python/kernel_tests/sparse_ops_test.py b/tensorflow/python/kernel_tests/sparse_ops_test.py index bef02fb6dbb48d..bfaf7f6f03dc8f 100644 --- a/tensorflow/python/kernel_tests/sparse_ops_test.py +++ b/tensorflow/python/kernel_tests/sparse_ops_test.py @@ -786,6 +786,39 @@ def disabledtestSparseReduceSumOrMaxShape(self): self._testSparseReduceShape(sp_t, [-1], 2, keep_dims, do_sum) self._testSparseReduceShape(sp_t, [1, -2], 2, keep_dims, do_sum) + def testIntegerOverflow(self): + with self.cached_session(use_gpu=False): + with self.assertRaises(errors.InvalidArgumentError): + res = sparse_ops.gen_sparse_ops.sparse_reduce_max( + input_indices=[[1, 2], [3, 4]], + input_shape=[2**32, 2**31], + input_values=[1, 3], + reduction_axes=[0], + keep_dims=False, + name=None) + + self.evaluate(res) + with self.assertRaises(errors.InvalidArgumentError): + res = sparse_ops.gen_sparse_ops.sparse_reduce_max_sparse( + input_indices=[[1, 2], [3, 4]], + input_shape=[2**32, 2**31], + input_values=[1, 3], + reduction_axes=[0], + keep_dims=False, + name=None) + + self.evaluate(res) + with self.assertRaises(errors.InvalidArgumentError): + res = sparse_ops.gen_sparse_ops.sparse_reduce_sum( + input_indices=[[1, 2], [3, 4]], + input_shape=[2**32, 2**31], + input_values=[1, 3], + reduction_axes=[0], + keep_dims=False, + name=None) + + self.evaluate(res) + class SparseMathOpsTest(test_util.TensorFlowTestCase): @@ -951,6 +984,25 @@ def testGradient(self): y_tf.values, (nnz,)) self.assertLess(err, 1e-4) + def testIntegerOverflow(self): + with self.cached_session(use_gpu=False): + with self.assertRaises(errors.InvalidArgumentError): + res = sparse_ops.gen_sparse_ops.sparse_softmax( + sp_indices=[[1, 1]], + sp_values=[2.0], + sp_shape=[2**32, 2**31], + name=None) + + self.evaluate(res) + + def testReshapeNegativeShape(self): + with self.cached_session(use_gpu=False): + with self.assertRaises(errors.InvalidArgumentError): + res = sparse_ops.gen_sparse_ops.sparse_softmax( + sp_indices=[[1, 1]], sp_values=[2.0], sp_shape=[-1, 1], name=None) + + self.evaluate(res) + class SparseMinimumMaximumTest(test_util.TensorFlowTestCase): diff --git a/tensorflow/python/kernel_tests/sparse_reshape_op_test.py b/tensorflow/python/kernel_tests/sparse_reshape_op_test.py index ab98c9a3deb718..146549859a7bea 100644 --- a/tensorflow/python/kernel_tests/sparse_reshape_op_test.py +++ b/tensorflow/python/kernel_tests/sparse_reshape_op_test.py @@ -103,6 +103,28 @@ def testSameShape(self): self.assertAllEqual(output_val.values, input_val.values) self.assertAllEqual(output_val.dense_shape, input_val.dense_shape) + def testReshapeIntegeroverflow(self): + with self.session(): + with self.assertRaises(errors.InvalidArgumentError): + sp_output = sparse_ops.gen_sparse_ops.sparse_reshape( + input_indices=[[0, 0]], + input_shape=[2**32, 2**31], + new_shape=[1, 1], + name=None) + + self.evaluate(sp_output) + + def testReshapeNegativeShape(self): + with self.session(): + with self.assertRaises(errors.InvalidArgumentError): + sp_output = sparse_ops.gen_sparse_ops.sparse_reshape( + input_indices=[[0, 0]], + input_shape=[1, -1], + new_shape=[1, 1], + name=None) + + self.evaluate(sp_output) + @test_util.run_deprecated_v1 def testFeedSameShape(self): with self.session() as sess: diff --git 
a/tensorflow/python/kernel_tests/sparse_serialization_ops_test.py b/tensorflow/python/kernel_tests/sparse_serialization_ops_test.py index 5a48eb825dbfa8..434287679181bb 100644 --- a/tensorflow/python/kernel_tests/sparse_serialization_ops_test.py +++ b/tensorflow/python/kernel_tests/sparse_serialization_ops_test.py @@ -20,10 +20,12 @@ import numpy as np +from tensorflow.python.eager import def_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops +from tensorflow.python.ops import gen_resource_variable_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.platform import test @@ -464,6 +466,18 @@ def testDeserializeManyFailsInvalidProto(self): self._testDeserializeFailsInvalidProtoHelper( sparse_ops.serialize_sparse, sparse_ops.deserialize_many_sparse) + def testDeserializeInvalidVariant(self): + mu = gen_resource_variable_ops.mutex_v2() + mu_lock = gen_resource_variable_ops.mutex_lock(mutex=mu) + + @def_function.function + def f(): + return sparse_ops.deserialize_sparse( + serialized_sparse=mu_lock, dtype=dtypes.int32) + + with self.assertRaisesRegex(ValueError, r"Shape must be at least rank 1"): + f() + if __name__ == "__main__": test.main() diff --git a/tensorflow/python/kernel_tests/sparse_slice_op_test.py b/tensorflow/python/kernel_tests/sparse_slice_op_test.py index a363f80c2fbf00..3c70f108898b17 100644 --- a/tensorflow/python/kernel_tests/sparse_slice_op_test.py +++ b/tensorflow/python/kernel_tests/sparse_slice_op_test.py @@ -20,6 +20,7 @@ import numpy as np +from tensorflow.python.framework import errors from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.ops import gradient_checker @@ -261,6 +262,27 @@ def testGradients(self): [sp_input.values], [(nnz_in,)], sp_output.values, (nnz_out,)) self.assertLess(err, 1e-3) + def testNegativeSize(self): + with self.session(use_gpu=False): + with self.assertRaises(errors.InvalidArgumentError): + res = sparse_ops.gen_sparse_ops.sparse_slice( + indices=[[0, 0]], + values=[0], + shape=[1, 1], + start=[10, 10], + size=[-100, 100]) + self.evaluate(res) + + def testLargeSize(self): + with self.session(use_gpu=False): + # Confirm potential integer overflow due to size is handled by op. + res = sparse_ops.gen_sparse_ops.sparse_slice( + indices=[[0, 0]], + values=[0], + shape=[1, 1], + start=[2**62, -1], + size=[2**62, 2**62]) + self.evaluate(res) if __name__ == '__main__': test.main() diff --git a/tensorflow/python/kernel_tests/sparse_split_op_test.py b/tensorflow/python/kernel_tests/sparse_split_op_test.py index 31ef1129f1319c..b5cc3f02d9d4c7 100644 --- a/tensorflow/python/kernel_tests/sparse_split_op_test.py +++ b/tensorflow/python/kernel_tests/sparse_split_op_test.py @@ -257,6 +257,15 @@ def testArgumentErrors(self): with self.assertRaisesRegex(ValueError, 'axis is required'): sparse_ops.sparse_split(num_split=2, sp_input=1) + def testInvalidArgumentError(self): + # Test case for GitHub issue 53660. 
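+ # `axis` is documented as a scalar; a vector here used to reach the kernel + # unchecked, so the op should fail fast with InvalidArgumentError instead.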
+ axis = [1, 2] + with self.assertRaisesRegex(errors.InvalidArgumentError, + r'axis should be a scalar'): + self.evaluate( + sparse_ops.sparse_split( + sp_input=self._SparseTensor_4x6(), num_split=3, axis=axis)) + if __name__ == '__main__': test.main() diff --git a/tensorflow/python/kernel_tests/split_op_test.py b/tensorflow/python/kernel_tests/split_op_test.py index 58674abd144df1..23f8f3878ca0cc 100644 --- a/tensorflow/python/kernel_tests/split_op_test.py +++ b/tensorflow/python/kernel_tests/split_op_test.py @@ -387,6 +387,24 @@ def testNonexistentDimTensor(self): "must have exactly one element"): sess.run(y, {x: np.array([], dtype=np.int32), splits: [4, 11, 15]}) + @test_util.run_in_graph_and_eager_modes + def testNegativeSizes(self): + x = constant_op.constant([1, 2, 3], dtypes.float32) + # A size of -1 is legal and means the size is inferred from the other splits. + with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError), + "Split size at index 1 must be >= 0. Got: -2"): + splits = [-1, -2] + self.evaluate(array_ops.split(x, splits, axis=0)) + + @test_util.run_in_graph_and_eager_modes + def testBadSplitSizes(self): + x = constant_op.constant([1, 2], dtypes.float32) + with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError), + "Determined shape must either match input" + "|can't split axis"): + splits = [1, 2] + self.evaluate(array_ops.split(x, splits, axis=0)) + if __name__ == "__main__": test.main() diff --git a/tensorflow/python/kernel_tests/tridiagonal_matmul_op_test.py b/tensorflow/python/kernel_tests/tridiagonal_matmul_op_test.py index 38544000902359..3a4be581676fca 100644 --- a/tensorflow/python/kernel_tests/tridiagonal_matmul_op_test.py +++ b/tensorflow/python/kernel_tests/tridiagonal_matmul_op_test.py @@ -23,12 +23,15 @@ import numpy as np from tensorflow.python.client import session +from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors_impl from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gradient_checker_v2 +from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import variables from tensorflow.python.ops.linalg import linalg_impl @@ -179,6 +182,37 @@ def testGradientComplexWithBatches(self): rhs = self._randomComplexArray((b, m, n)) self._gradientTest(diags, rhs, dtype=dtypes.complex128) + def _testErrorWithShapesEager(self, exception_regex, superdiag_shape, + maindiag_shape, subdiag_shape, rhs_shape): + with context.eager_mode(): + superdiag = array_ops.ones(superdiag_shape) + maindiag = array_ops.ones(maindiag_shape) + subdiag = array_ops.ones(subdiag_shape) + rhs = array_ops.ones(rhs_shape) + with self.assertRaisesRegex(errors_impl.InvalidArgumentError, + exception_regex): + linalg_ops.tridiagonal_mat_mul(superdiag, maindiag, subdiag, rhs) + + def testInvalidShapesEagerGpu(self): + if not test.is_gpu_available(): + self.skipTest('Test requires GPU') + self._testErrorWithShapesEager('Input must have rank >= 2, but got ', + [2], [2], [2], [2]) + self._testErrorWithShapesEager( + 'superdiag must have same rank as rhs, but got 3 and 2', + [2, 1, 2], [2, 1], [2, 1], [2, 2]) + self._testErrorWithShapesEager( + 'maindiag must have same outer dimensions as rhs, but for index 0, got ' + '3 and 2', + [2, 1, 2], [3, 1, 2], [2, 1, 2],
[2, 2, 2]) + self._testErrorWithShapesEager( + "subdiag's second-to-last dimension must be 1, but got 3", + [2, 1, 2], [2, 1, 2], [2, 3, 2], [2, 2, 2]) + self._testErrorWithShapesEager( + "subdiag's last dimension size must be rhs's second-to-last dimension " + "size, but got 3 and 2", + [2, 1, 2], [2, 1, 2], [2, 1, 3], [2, 2, 2]) + # Benchmark class TridiagonalMatMulBenchmark(test.Benchmark): sizes = [(100000, 1, 1), (1000000, 1, 1), (10000000, 1, 1), (100000, 10, 1), diff --git a/tensorflow/python/kernel_tests/xent_op_test.py b/tensorflow/python/kernel_tests/xent_op_test.py index b1adbd37e3e862..65be235cf77465 100644 --- a/tensorflow/python/kernel_tests/xent_op_test.py +++ b/tensorflow/python/kernel_tests/xent_op_test.py @@ -195,6 +195,13 @@ def testShapeMismatch(self): gen_nn_ops.softmax_cross_entropy_with_logits( [[0., 1.], [2., 3.]], [[0., 1., 0.], [1., 0., 0.]]) + tf_f = constant_op.constant(np.array([[1.]]).astype(np.float32)) + tf_l = constant_op.constant(np.array([[1.], [1.]]).astype(np.float32)) + tf_loss, tf_gradient = gen_nn_ops.softmax_cross_entropy_with_logits( + tf_f, tf_l) + self.assertAllClose([0, 0], tf_loss) + self.assertAllCloseAccordingToType([[0], [0]], tf_gradient) + @test_util.run_deprecated_v1 def testNotMatrix(self): with self.cached_session(): diff --git a/tensorflow/python/ops/bincount_ops_test.py b/tensorflow/python/ops/bincount_ops_test.py index 039f3452db85db..305791beca6c98 100644 --- a/tensorflow/python/ops/bincount_ops_test.py +++ b/tensorflow/python/ops/bincount_ops_test.py @@ -835,6 +835,25 @@ def test_ragged_input_different_shape_fails(self): self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1)) +class RawOpsHeapOobTest(test.TestCase, parameterized.TestCase): + + @test_util.run_v1_only("Test security error") + def testSparseCountSparseOutputBadIndicesShapeTooSmall(self): + indices = [1] + values = [[1]] + weights = [] + dense_shape = [10] + with self.assertRaisesRegex(ValueError, + "Shape must be rank 2 but is rank 1 for"): + self.evaluate( + gen_count_ops.SparseCountSparseOutput( + indices=indices, + values=values, + dense_shape=dense_shape, + weights=weights, + binary_output=True)) + + @test_util.run_all_in_graph_and_eager_modes @test_util.disable_tfrt class RawOpsTest(test.TestCase, parameterized.TestCase): diff --git a/tensorflow/python/ops/image_ops_test.py b/tensorflow/python/ops/image_ops_test.py index 82af4ce06f0ec3..1909b6eb1b84fb 100644 --- a/tensorflow/python/ops/image_ops_test.py +++ b/tensorflow/python/ops/image_ops_test.py @@ -2254,6 +2254,21 @@ def testNameScope(self): y = image_ops.pad_to_bounding_box(image, 0, 0, 55, 66) self.assertTrue(y.op.name.startswith("pad_to_bounding_box")) + def testInvalidInput(self): + # Test case for GitHub issue 46890. + if test_util.is_xla_enabled(): + # TODO(b/200850176): test fails with XLA. 
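+ # The ~5.2e9 target dimensions are vastly larger than the 1x1x1 input and + # can overflow the internal padding-size arithmetic, so the op is expected + # to raise rather than crash.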
+ return + with self.session(): + with self.assertRaises(errors_impl.InvalidArgumentError): + v = image_ops.pad_to_bounding_box( + image=np.ones((1, 1, 1)), + target_height=5191549470, + target_width=5191549470, + offset_height=1, + offset_width=1) + self.evaluate(v) + class SelectDistortedCropBoxTest(test_util.TensorFlowTestCase): @@ -3161,6 +3176,14 @@ def testPreserveAspectRatioSquare(self): self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3]) + def testLargeDim(self): + with self.session(): + with self.assertRaises(errors.InvalidArgumentError): + x = np.ones((5, 1, 1, 2)) + v = image_ops.resize_images_v2(x, [1610637938, 1610637938], + image_ops.ResizeMethod.BILINEAR) + _ = self.evaluate(v) + class ResizeImagesTest(test_util.TensorFlowTestCase, parameterized.TestCase): @@ -6026,6 +6049,16 @@ def testImageCropAndResize(self): crop_size=[1, 1]) self.evaluate(op) + def DISABLED_testImageCropAndResizeWithInvalidInput(self): + with self.session(): + with self.assertRaises((errors.InvalidArgumentError, ValueError)): + op = image_ops_impl.crop_and_resize_v2( + image=np.ones((1, 1, 1, 1)), + boxes=np.ones((11, 4)), + box_indices=np.ones((11)), + crop_size=[2065374891, 1145309325]) + self.evaluate(op) + @parameterized.named_parameters( ("_jpeg", "JPEG", "jpeg_merge_test1.jpg"), ("_png", "PNG", "lena_rgba.png"), diff --git a/tensorflow/python/ops/nn_fused_batchnorm_test.py b/tensorflow/python/ops/nn_fused_batchnorm_test.py index 0421829bff338b..1f4663d495a757 100644 --- a/tensorflow/python/ops/nn_fused_batchnorm_test.py +++ b/tensorflow/python/ops/nn_fused_batchnorm_test.py @@ -20,10 +20,13 @@ import numpy as np +from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors_impl from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops +from tensorflow.python.ops import gen_nn_ops from tensorflow.python.ops import gradient_checker from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import math_ops @@ -669,6 +672,126 @@ def testBatchNormGradGradConfig6(self): } self._testBatchNormGradGrad(config) + def testEagerShapeErrors(self): + with context.eager_mode(): + x = array_ops.ones((2, 2, 2, 2)) + scale = array_ops.ones((3,)) + offset = array_ops.ones((2,)) + with self.assertRaisesRegex( + errors_impl.InvalidArgumentError, + 'scale must have the same number of elements'): + nn_impl.fused_batch_norm(x, scale, offset) + + x = array_ops.ones((2, 2, 2, 2)) + scale = array_ops.ones((2,)) + offset = array_ops.ones((3,)) + with self.assertRaisesRegex( + errors_impl.InvalidArgumentError, + 'offset must have the same number of elements'): + nn_impl.fused_batch_norm(x, scale, offset) + + x = array_ops.ones((2, 2, 2, 2)) + scale = array_ops.ones((2,)) + offset = array_ops.ones((2,)) + mean = array_ops.ones((0,)) + variance = array_ops.ones((2,)) + with self.assertRaisesRegex( + errors_impl.InvalidArgumentError, + 'When is_training=false, mean must have the same number of elements'): + nn_impl.fused_batch_norm( + x, scale, offset, mean=mean, variance=variance, is_training=False) + + x = array_ops.ones((2, 2, 2, 2)) + scale = array_ops.ones((2,)) + offset = array_ops.ones((2,)) + mean = array_ops.ones((2,)) + variance = array_ops.ones((0,)) + with self.assertRaisesRegex( + errors_impl.InvalidArgumentError, + 'When is_training=false, variance must have the same number of ' + 'elements'): + 
nn_impl.fused_batch_norm( + x, scale, offset, mean=mean, variance=variance, is_training=False) + + x = array_ops.ones((2, 2, 2, 2)) + scale = array_ops.ones((2,)) + offset = array_ops.ones((2,)) + mean = array_ops.ones((0,)) + variance = array_ops.ones((2,)) + with self.assertRaisesRegex( + errors_impl.InvalidArgumentError, + 'When exponential_avg_factor != 1, mean must have the same number of ' + 'elements'): + nn_impl.fused_batch_norm( + x, + scale, + offset, + mean=mean, + variance=variance, + exponential_avg_factor=0.5) + + x = array_ops.ones((2, 2, 2, 2)) + scale = array_ops.ones((2,)) + offset = array_ops.ones((2,)) + mean = array_ops.ones((2,)) + variance = array_ops.ones((0,)) + with self.assertRaisesRegex( + errors_impl.InvalidArgumentError, + 'When exponential_avg_factor != 1, variance must have the same ' + 'number of elements'): + nn_impl.fused_batch_norm( + x, + scale, + offset, + mean=mean, + variance=variance, + exponential_avg_factor=0.5) + + def testEagerShapeGradErrors(self): + with context.eager_mode(): + y_backprop = array_ops.ones((2, 2, 2, 3)) + x = array_ops.ones((2, 2, 2, 2)) + scale = array_ops.ones((2,)) + reserve_space_1 = array_ops.ones((2,)) + reserve_space_2 = array_ops.ones((2,)) + with self.assertRaisesRegex(errors_impl.InvalidArgumentError, + 'x and y_backprop must have same shape,'): + gen_nn_ops.fused_batch_norm_grad_v2(y_backprop, x, scale, + reserve_space_1, reserve_space_2) + + y_backprop = array_ops.ones((2, 2, 2, 2)) + x = array_ops.ones((2, 2, 2, 2)) + scale = array_ops.ones((3,)) + reserve_space_1 = array_ops.ones((2,)) + reserve_space_2 = array_ops.ones((2,)) + with self.assertRaisesRegex( + errors_impl.InvalidArgumentError, + 'scale must have the same number of elements'): + gen_nn_ops.fused_batch_norm_grad_v2(y_backprop, x, scale, + reserve_space_1, reserve_space_2) + + y_backprop = array_ops.ones((2, 2, 2, 2)) + x = array_ops.ones((2, 2, 2, 2)) + scale = array_ops.ones((2,)) + reserve_space_1 = array_ops.ones((3,)) + reserve_space_2 = array_ops.ones((2,)) + with self.assertRaisesRegex( + errors_impl.InvalidArgumentError, + 'reserve_space_1 must have the same number of elements'): + gen_nn_ops.fused_batch_norm_grad_v2(y_backprop, x, scale, + reserve_space_1, reserve_space_2) + + y_backprop = array_ops.ones((2, 2, 2, 2)) + x = array_ops.ones((2, 2, 2, 2)) + scale = array_ops.ones((2,)) + reserve_space_1 = array_ops.ones((2,)) + reserve_space_2 = array_ops.ones((3,)) + with self.assertRaisesRegex( + errors_impl.InvalidArgumentError, + 'reserve_space_2 must have the same number of elements'): + gen_nn_ops.fused_batch_norm_grad_v2(y_backprop, x, scale, + reserve_space_1, reserve_space_2) + if __name__ == '__main__': test.main() diff --git a/tensorflow/python/ops/parallel_for/BUILD b/tensorflow/python/ops/parallel_for/BUILD index 75acc2a9712a40..76565faf917152 100644 --- a/tensorflow/python/ops/parallel_for/BUILD +++ b/tensorflow/python/ops/parallel_for/BUILD @@ -113,6 +113,7 @@ cuda_py_test( srcs = ["control_flow_ops_test.py"], shard_count = 16, tags = [ + "no_oss", "no_rocm", ], deps = [ diff --git a/tensorflow/python/ops/ragged/ragged_cross_op_test.py b/tensorflow/python/ops/ragged/ragged_cross_op_test.py index 07e5964ba83497..bfe7aa31a65325 100644 --- a/tensorflow/python/ops/ragged/ragged_cross_op_test.py +++ b/tensorflow/python/ops/ragged/ragged_cross_op_test.py @@ -22,11 +22,14 @@ import numpy as np +from tensorflow.python.eager import def_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors 
from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor +from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import test_util +from tensorflow.python.ops import gen_ragged_array_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.ops.ragged import ragged_array_ops from tensorflow.python.ops.ragged import ragged_factory_ops @@ -362,6 +365,16 @@ def testRaggedCrossLargeBatch(self): dense_const([[2], [3]])], exception=(ValueError, errors.InvalidArgumentError), message='inputs must all have the same batch dimension size'), + dict( + testcase_name='3DDenseTensor', + inputs=[dense_const([[[1]]])], + exception=(ValueError, errors.InvalidArgumentError), + message='tf.ragged.cross only supports inputs with rank=2'), + dict( + testcase_name='0DDenseTensor', + inputs=[dense_const(1)], + exception=(ValueError, errors.InvalidArgumentError), + message='tf.ragged.cross only supports inputs with rank=2'), ]) def testStaticError(self, inputs, exception=ValueError, message=None): with self.assertRaisesRegex(exception, message): @@ -372,17 +385,36 @@ def testStaticError(self, inputs, exception=ValueError, message=None): testcase_name='3DRaggedTensor', inputs=[ragged_const([[[1]]], ragged_rank=1)], message='tf.ragged.cross only supports inputs with rank=2'), + dict( + testcase_name='0DDenseTensor', + inputs=[dense_const(1)], + signature=[[tensor_spec.TensorSpec(None, dtypes.int32)]], + exception=(ValueError, errors.InvalidArgumentError), + message='tf.ragged.cross only supports inputs with rank=2'), + dict( + testcase_name='1DDenseTensor', + inputs=[dense_const([1])], + signature=[[tensor_spec.TensorSpec(None, dtypes.int32)]], + exception=(ValueError, errors.InvalidArgumentError), + message='tf.ragged.cross only supports inputs with rank=2'), dict( testcase_name='3DDenseTensor', inputs=[dense_const([[[1]]])], + signature=[[tensor_spec.TensorSpec(None, dtypes.int32)]], + exception=(ValueError, errors.InvalidArgumentError), message='tf.ragged.cross only supports inputs with rank=2'), ]) def testRuntimeError(self, inputs, exception=errors.InvalidArgumentError, - message=None): + message=None, + signature=None): + @def_function.function(input_signature=signature) + def fn(x): + return ragged_array_ops.cross(x) + with self.assertRaisesRegex(exception, message): - self.evaluate(ragged_array_ops.cross(inputs)) + self.evaluate(fn(inputs)) def _ragged_to_sparse(self, t): if ragged_tensor.is_ragged(t): @@ -392,6 +424,42 @@ def _ragged_to_sparse(self, t): else: return ops.convert_to_tensor(t) + def testSparseValuesAndIndicesMustMatch(self): + with self.assertRaisesRegex( + (ValueError, errors.InvalidArgumentError), + 'sparse indices and values must have the same length'): + self.evaluate(gen_ragged_array_ops.RaggedCross( + ragged_values=[], + ragged_row_splits=[], + sparse_indices=[[5]], + sparse_values=[], + sparse_shape=[5], + dense_inputs=[['a']], + input_order='RD', + hashed_output=False, + num_buckets=5, + hash_key=2, + out_values_type=dtypes.string, + out_row_splits_type=dtypes.int64)) + + def testRaggedValuesAndSplitsMustMatch(self): + with self.assertRaisesRegex( + (ValueError, errors.InvalidArgumentError), + 'ragged values and splits must have the same length'): + self.evaluate(gen_ragged_array_ops.RaggedCross( + ragged_values=[['a']], + ragged_row_splits=[], + sparse_indices=[], + sparse_values=[], + sparse_shape=[], + dense_inputs=[['a']], + input_order='RD', + hashed_output=False, + num_buckets=5, + hash_key=2, + 
out_values_type=dtypes.string, + out_row_splits_type=dtypes.int64)) + if __name__ == '__main__': googletest.main() diff --git a/tensorflow/python/ops/ragged/ragged_map_fn_op_test.py b/tensorflow/python/ops/ragged/ragged_map_fn_op_test.py index bead4923a0a4cf..ace724ac8711d2 100644 --- a/tensorflow/python/ops/ragged/ragged_map_fn_op_test.py +++ b/tensorflow/python/ops/ragged/ragged_map_fn_op_test.py @@ -21,9 +21,11 @@ import numpy as np from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops +from tensorflow.python.ops import map_fn as map_fn_lib from tensorflow.python.ops import math_ops as mo from tensorflow.python.ops import string_ops from tensorflow.python.ops.ragged import ragged_factory_ops @@ -309,6 +311,27 @@ def testMapOnSparseTensor(self): ) self.assertAllEqual(id_t2, [[0, 5], [0, 4]]) + def testRaggedMapWithIncorrectFnOutputSignature(self): + x = ragged_factory_ops.constant([[1, 2, 3, 4], [1]]) + with self.assertRaisesRegex(errors.InvalidArgumentError, + 'All flat_values must have compatible shapes'): + y = map_fn_lib.map_fn(lambda r: map_fn_lib.map_fn(lambda y: r, r), x) + self.evaluate(y) + + def testNestedRaggedMapWithFnOutputSignature(self): + ragged1d = ragged_tensor.RaggedTensorSpec([None], dtypes.int32) + ragged2d = ragged_tensor.RaggedTensorSpec([None, None], dtypes.int32) + + x = ragged_factory_ops.constant([[1, 2, 3, 4], [1]]) + # pylint: disable=g-long-lambda + y = map_fn_lib.map_fn( + lambda r: map_fn_lib.map_fn( + lambda y: r, r, fn_output_signature=ragged1d), + x, + fn_output_signature=ragged2d) + expected = [[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]], [[1]]] + self.assertAllEqual(y, expected) + if __name__ == '__main__': googletest.main() diff --git a/tensorflow/python/ops/raw_ops_test.py b/tensorflow/python/ops/raw_ops_test.py index 6706ef194b221a..8891edba481fee 100644 --- a/tensorflow/python/ops/raw_ops_test.py +++ b/tensorflow/python/ops/raw_ops_test.py @@ -32,7 +32,6 @@ @test_util.run_all_in_graph_and_eager_modes -@test_util.disable_tfrt class RawOpsTest(test.TestCase, parameterized.TestCase): def testSimple(self): @@ -67,8 +66,9 @@ def testDefaults(self): @parameterized.parameters([[0, 8]], [[-1, 6]]) def testStringNGramsBadDataSplits(self, splits): data = ["aa", "bb", "cc", "dd", "ee", "ff"] - with self.assertRaisesRegex(errors.InvalidArgumentError, - "Invalid split value"): + with self.assertRaisesRegex( + errors.InvalidArgumentError, + r"Invalid split value|First split value must be 0"): self.evaluate( gen_string_ops.string_n_grams( data=data, @@ -80,6 +80,37 @@ def testStringNGramsBadDataSplits(self, splits): pad_width=0, preserve_short_sequences=False)) + def testStringSplit(self): + data = ["123456"] + data_splits = [0, 1] + separator = "a" * 15 + ngram_widths = [] + pad_width = -5 + left_pad = right_pad = "" + with self.assertRaisesRegex(errors.InvalidArgumentError, + "Pad width should be >= 0"): + self.evaluate(gen_string_ops.string_n_grams( + data=data, + data_splits=data_splits, + separator=separator, + ngram_widths=ngram_widths, + left_pad=left_pad, + right_pad=right_pad, + pad_width=pad_width, + preserve_short_sequences=True)) + with self.assertRaisesRegex(errors.InvalidArgumentError, + "Pad width could lead to integer overflow"): + self.evaluate( + gen_string_ops.string_n_grams( + data=["000.0", "000.0"], + data_splits=[0, 2], + separator="", + 
ngram_widths=[2**30, 2**30], + left_pad=" ", + right_pad=" ", + pad_width=-2**30, + preserve_short_sequences=False)) + def testGetSessionHandle(self): if context.executing_eagerly(): with self.assertRaisesRegex( diff --git a/tensorflow/python/saved_model/load_test.py b/tensorflow/python/saved_model/load_test.py index 66245452f172b8..c593a377fbd1d1 100644 --- a/tensorflow/python/saved_model/load_test.py +++ b/tensorflow/python/saved_model/load_test.py @@ -204,8 +204,8 @@ def test_control_outputs(self, cycles): imported_graph.control_outputs) def _make_asset(self, contents): - filename = tempfile.mktemp(prefix=self.get_temp_dir()) - with open(filename, "w") as f: + fd, filename = tempfile.mkstemp(prefix=self.get_temp_dir()) + with os.fdopen(fd, "w") as f: f.write(contents) return filename diff --git a/tensorflow/python/saved_model/load_v1_in_v2.py b/tensorflow/python/saved_model/load_v1_in_v2.py index 8d0160bc3a606b..6f950c425b9eb4 100644 --- a/tensorflow/python/saved_model/load_v1_in_v2.py +++ b/tensorflow/python/saved_model/load_v1_in_v2.py @@ -138,7 +138,7 @@ def _extract_signatures(self, wrapped, meta_graph_def): for signature_key, signature_def in meta_graph_def.signature_def.items(): if signature_def.inputs: input_items = sorted( - signature_def.inputs.items(), key=lambda item: item[1].name) + signature_def.inputs.items(), key=lambda item: item[0]) original_input_names, input_specs = zip(*input_items) else: original_input_names = [] diff --git a/tensorflow/python/saved_model/load_v1_in_v2_test.py b/tensorflow/python/saved_model/load_v1_in_v2_test.py index b854e588f71c15..22e212cffa4bd8 100644 --- a/tensorflow/python/saved_model/load_v1_in_v2_test.py +++ b/tensorflow/python/saved_model/load_v1_in_v2_test.py @@ -696,6 +696,33 @@ def test_v1_input_ordered(self): self.assertEqual(imported.signatures["serving_default"].inputs[1].name, "input2:0") + def test_resave_signature(self): + # Tests that signatures saved using TF1 can be resaved with TF2. + # See b/211666001 for context. + export_graph = ops.Graph() + with export_graph.as_default(): + a = array_ops.placeholder( + shape=[None, 1], dtype=dtypes.float32, name="input_2") + b = array_ops.placeholder( + shape=[None, 2], dtype=dtypes.float32, name="input_1") + c = array_ops.identity(a) + with session_lib.Session() as session: + path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid())) + simple_save.simple_save( + session, + path, + inputs={"a": a, "b": b}, + outputs={"c": c}) + imported = load.load(path) + path2 = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid())) + save.save(imported, path2, imported.signatures) + + imported2 = load.load(path2) + self.assertEqual( + imported2.signatures["serving_default"]( + a=constant_op.constant([5.]), + b=constant_op.constant([1., 3.]))["c"].numpy(), 5.) 
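# A minimal sketch (illustrative, not part of the patch): the _extract_signatures change above
# now sorts signature inputs by signature key (item[0]) instead of by tensor name (item[1]), so
# placeholder names assigned at graph-build time no longer determine argument order. With a
# hypothetical signature_def.inputs stand-in of {"b": "input_1:0", "a": "input_2:0"}, sorting by
# item[1] gives [('b', 'input_1:0'), ('a', 'input_2:0')], while sorting by item[0] gives
# [('a', 'input_2:0'), ('b', 'input_1:0')] -- the deterministic order that test_resave_signature
# above depends on.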
+ if __name__ == "__main__": test.main() diff --git a/tensorflow/python/summary/writer/writer_test.py b/tensorflow/python/summary/writer/writer_test.py index 19138b1372dea6..9fcac4952f6a25 100644 --- a/tensorflow/python/summary/writer/writer_test.py +++ b/tensorflow/python/summary/writer/writer_test.py @@ -34,6 +34,7 @@ from tensorflow.python.client import session from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors_impl from tensorflow.python.framework import meta_graph from tensorflow.python.framework import ops from tensorflow.python.framework import test_util @@ -685,6 +686,16 @@ def testSharing_withExplicitSummaryFileWriters(self): # No more files self.assertRaises(StopIteration, lambda: next(event_paths)) + def testSummaryFileWritersInvalidInput(self): + # Test case for GitHub issue 46909 + logdir = self.get_temp_dir() + with session.Session() as sess: + with self.assertRaises(errors_impl.InvalidArgumentError): + writer = summary_ops_v2.create_file_writer( + logdir=logdir, flush_millis=[1, 2]) + sess.run(writer.init()) + sess.run(writer.flush()) + class FileWriterCacheTest(test.TestCase): """FileWriterCache tests.""" diff --git a/tensorflow/python/tools/saved_model_cli.py b/tensorflow/python/tools/saved_model_cli.py index bbb03f7cc7fdd1..8565983a0f271b 100644 --- a/tensorflow/python/tools/saved_model_cli.py +++ b/tensorflow/python/tools/saved_model_cli.py @@ -24,6 +24,7 @@ from __future__ import print_function import argparse +import ast import os import re import sys @@ -518,7 +519,7 @@ def preprocess_inputs_arg_string(inputs_str): return input_dict -def preprocess_input_exprs_arg_string(input_exprs_str): +def preprocess_input_exprs_arg_string(input_exprs_str, safe=True): """Parses input arg into dictionary that maps input key to python expression. Parses input string in the format of 'input_key=<python expression>' into a @@ -526,8 +527,10 @@ def preprocess_input_exprs_arg_string(input_exprs_str): Args: input_exprs_str: A string that specifies python expression for input keys. - Each input is separated by semicolon. For each input key: + Each input is separated by semicolon. For each input key: 'input_key=<python expression>' + safe: Whether to evaluate the python expression as literals or allow + arbitrary calls (e.g. numpy usage). Returns: A dictionary that maps input keys to their values. @@ -542,8 +545,15 @@ raise RuntimeError('--input_exprs "%s" format is incorrect. 
Please follow' '"<input_key>=<python expression>"' % input_exprs_str) input_key, expr = input_raw.split('=', 1) - # ast.literal_eval does not work with numpy expressions - input_dict[input_key] = eval(expr) # pylint: disable=eval-used + if safe: + try: + input_dict[input_key] = ast.literal_eval(expr) + except: + raise RuntimeError( + f'Expression "{expr}" is not a valid python literal.') + else: + # ast.literal_eval does not work with numpy expressions + input_dict[input_key] = eval(expr) # pylint: disable=eval-used return input_dict @@ -656,7 +666,7 @@ def load_inputs_from_input_arg_string(inputs_str, input_exprs_str, tensor_key_feed_dict = {} inputs = preprocess_inputs_arg_string(inputs_str) - input_exprs = preprocess_input_exprs_arg_string(input_exprs_str) + input_exprs = preprocess_input_exprs_arg_string(input_exprs_str, safe=False) input_examples = preprocess_input_examples_arg_string(input_examples_str) for input_tensor_key, (filename, variable_name) in inputs.items(): @@ -920,8 +930,10 @@ def add_run_subparser(subparsers): parser_run.add_argument('--inputs', type=str, default='', help=msg) msg = ('Specifying inputs by python expressions, in the format of' ' "<input_key>=\'<python expression>\'", separated by \';\'. ' 'numpy module is available as \'np\'. Please note that the expression ' 'will be evaluated as-is, and is susceptible to code injection. ' 'When this is set, the value will override duplicate input keys from ' '--inputs option.') parser_run.add_argument('--input_exprs', type=str, default='', help=msg) msg = ( 'Specifying tf.Example inputs as list of dictionaries. For example: ' diff --git a/tensorflow/python/tools/saved_model_cli_test.py b/tensorflow/python/tools/saved_model_cli_test.py index 2580cbd73ca0a9..df43ba0bbb65a0 100644 --- a/tensorflow/python/tools/saved_model_cli_test.py +++ b/tensorflow/python/tools/saved_model_cli_test.py @@ -380,7 +380,7 @@ def testInputPreProcessFormats(self): input_expr_str = 'input3=np.zeros([2,2]);input4=[4,5]' input_dict = saved_model_cli.preprocess_inputs_arg_string(input_str) input_expr_dict = saved_model_cli.preprocess_input_exprs_arg_string( - input_expr_str) + input_expr_str, safe=False) self.assertTrue(input_dict['input1'] == ('/path/file.txt', 'ab3')) self.assertTrue(input_dict['input2'] == ('file2', None)) print(input_expr_dict['input3']) @@ -416,6 +416,11 @@ def testInputPreProcessExamplesWithStrAndBytes(self): } """, feature) + def testInputPreprocessExampleWithCodeInjection(self): + input_examples_str = 'inputs=os.system("echo hacked")' + with self.assertRaisesRegex(RuntimeError, 'not a valid python literal.'): + saved_model_cli.preprocess_input_examples_arg_string(input_examples_str) + def testInputPreProcessFileNames(self): input_str = (r'inputx=C:\Program Files\data.npz[v:0];' r'input:0=c:\PROGRA~1\data.npy') @@ -432,8 +437,8 @@ def testInputPreProcessErrorBadFormat(self): with self.assertRaises(RuntimeError): saved_model_cli.preprocess_inputs_arg_string(input_str) input_str = 'inputx:np.zeros((5))' - with self.assertRaises(RuntimeError): - saved_model_cli.preprocess_input_exprs_arg_string(input_str) + with self.assertRaisesRegex(RuntimeError, 'format is incorrect'): + saved_model_cli.preprocess_input_exprs_arg_string(input_str, safe=False) def testInputParserNPY(self): x0 = np.array([[1], [2]]) diff --git a/tensorflow/python/tpu/tpu_test.py b/tensorflow/python/tpu/tpu_test.py index 8d10d9404d8bde..0858741a4ba491 100644 --- a/tensorflow/python/tpu/tpu_test.py +++ 
b/tensorflow/python/tpu/tpu_test.py @@ -36,6 +36,7 @@ from tensorflow.python.tpu import tpu from tensorflow.python.tpu import tpu_feed from tensorflow.python.tpu import training_loop +from tensorflow.python.tpu.ops import tpu_ops class TPUContextTest(test.TestCase): @@ -169,6 +170,51 @@ def test_prune_unconnected_ops(self): graph.get_operation_by_name("import/y").get_attr( tpu._TPU_REPLICATE_ATTR) + +class TPUOpsTest(test.TestCase): + + def test_all_to_all_zero_split_count(self): + with self.assertRaisesRegex( + ValueError, "split_count 0 must at least be one"): + tpu_ops.all_to_all( + x=[0.0, 0.1652, 0.6543], + group_assignment=[1, -1], + concat_dimension=0, + split_dimension=0, + split_count=0) + + def test_all_to_all_group_assignment_wrong_shape(self): + with self.assertRaisesRegex( + ValueError, "group_assignment must have rank 2"): + tpu_ops.all_to_all( + x=[0.0, 0.1652, 0.6543], + group_assignment=[1, -1], + concat_dimension=0, + split_dimension=0, + split_count=2) + + def test_all_to_all_split_count_not_equal_to_group_assignment_shape(self): + with self.assertRaisesRegex( + ValueError, "split_count 1 must equal the size of the second dimension " + "of group_assignment 2"): + tpu_ops.all_to_all( + x=[0.0, 0.1652, 0.6543], + group_assignment=[[0, 1], [2, 3]], + concat_dimension=0, + split_dimension=0, + split_count=1) + + def test_all_to_all_split_count_not_divide_input_shape(self): + with self.assertRaisesRegex( + ValueError, "input dimension 3 not divisible by split_count 2"): + tpu_ops.all_to_all( + x=[[0.0], [0.1652], [0.6543]], + group_assignment=[[0, 1], [2, 3]], + concat_dimension=1, + split_dimension=0, + split_count=2) + + def do_einsum(): a = array_ops.placeholder(dtype=dtypes.float32, name="a", shape=[2, 3, 4]) b = array_ops.placeholder(dtype=dtypes.float32, name="b", shape=[2, 4, 5]) diff --git a/tensorflow/stream_executor/lib/BUILD b/tensorflow/stream_executor/lib/BUILD index d0f57112471860..4cf31bad7079d2 100644 --- a/tensorflow/stream_executor/lib/BUILD +++ b/tensorflow/stream_executor/lib/BUILD @@ -1,6 +1,6 @@ load("//tensorflow:tensorflow.bzl", "filegroup") load("//tensorflow/core/platform:rules_cc.bzl", "cc_library") -load("//tensorflow:tensorflow.bzl", "if_windows", "tf_cc_test") +load("//tensorflow:tensorflow.bzl", "if_windows") load("//tensorflow/stream_executor:build_defs.bzl", "stream_executor_friends") package( @@ -36,21 +36,10 @@ cc_library( deps = [ "//tensorflow/core:lib", "//tensorflow/core:protos_all_cc", + "//tensorflow/core/platform:statusor", "//tensorflow/stream_executor/platform", "@com_google_absl//absl/strings", "@com_google_absl//absl/strings:str_format", "@com_google_absl//absl/types:span", ], ) - -tf_cc_test( - name = "statusor_test", - size = "small", - srcs = ["statusor_test.cc"], - deps = [ - ":lib", - "//tensorflow/core:lib", - "//tensorflow/core:test", - "//tensorflow/core:test_main", - ], -) diff --git a/tensorflow/stream_executor/lib/statusor.h b/tensorflow/stream_executor/lib/statusor.h index 2243fb1b34aa7d..0d574ce6984456 100644 --- a/tensorflow/stream_executor/lib/statusor.h +++ b/tensorflow/stream_executor/lib/statusor.h @@ -1,4 +1,4 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,384 +13,18 @@ See the License for the specific language governing permissions and limitations under the License. 
==============================================================================*/ -// StatusOr<T> is the union of a Status object and a T object. StatusOr models -// the concept of an object that is either a value, or an error Status -// explaining why such a value is not present. To this end, StatusOr does not -// allow its Status value to be Status::OK. -// -// The primary use-case for StatusOr<T> is as the return value of a -// function which may fail. -// -// Example client usage for a StatusOr<T>, where T is not a pointer: -// -// StatusOr<float> result = DoBigCalculationThatCouldFail(); -// if (result.ok()) { -// float answer = result.ValueOrDie(); -// printf("Big calculation yielded: %f", answer); -// } else { -// LOG(ERROR) << result.status(); -// } -// -// Example client usage for a StatusOr<T*>: -// -// StatusOr<Foo*> result = FooFactory::MakeNewFoo(arg); -// if (result.ok()) { -// std::unique_ptr<Foo> foo(result.ValueOrDie()); -// foo->DoSomethingCool(); -// } else { -// LOG(ERROR) << result.status(); -// } -// -// Example client usage for a StatusOr<std::unique_ptr<T>>: -// -// StatusOr<std::unique_ptr<Foo>> result = FooFactory::MakeNewFoo(arg); -// if (result.ok()) { -// std::unique_ptr<Foo> foo = std::move(result.ValueOrDie()); -// foo->DoSomethingCool(); -// } else { -// LOG(ERROR) << result.status(); -// } -// -// Example factory implementation returning StatusOr<T*>: -// -// StatusOr<Foo*> FooFactory::MakeNewFoo(int arg) { -// if (arg <= 0) { -// return tensorflow::InvalidArgument("Arg must be positive"); -// } else { -// return new Foo(arg); -// } -// } -// -// Note that the assignment operators require that destroying the currently -// stored value cannot invalidate the argument; in other words, the argument -// cannot be an alias for the current value, or anything owned by the current -// value. #ifndef TENSORFLOW_STREAM_EXECUTOR_LIB_STATUSOR_H_ #define TENSORFLOW_STREAM_EXECUTOR_LIB_STATUSOR_H_ -#include "tensorflow/core/platform/macros.h" +#include "tensorflow/core/platform/statusor.h" #include "tensorflow/stream_executor/lib/status.h" -#include "tensorflow/stream_executor/lib/statusor_internals.h" namespace stream_executor { namespace port { -#if defined(__clang__) -// Only clang supports warn_unused_result as a type annotation. -template <typename T> -class TF_MUST_USE_RESULT StatusOr; -#endif - -template <typename T> -class StatusOr : private internal_statusor::StatusOrData<T>, - private internal_statusor::TraitsBase< - std::is_copy_constructible<T>::value, - std::is_move_constructible<T>::value> { - template <typename U> - friend class StatusOr; - - typedef internal_statusor::StatusOrData<T> Base; - - public: - typedef T element_type; // DEPRECATED: use `value_type`. - typedef T value_type; - - // Constructs a new StatusOr with Status::UNKNOWN status. This is marked - // 'explicit' to try to catch cases like 'return {};', where people think - // StatusOr<std::vector<int>> will be initialized with an empty vector, - // instead of a Status::UNKNOWN status. - explicit StatusOr(); - - // StatusOr<T> will be copy constructible/assignable if T is copy - // constructible. - StatusOr(const StatusOr&) = default; - StatusOr& operator=(const StatusOr&) = default; - - // StatusOr<T> will be move constructible/assignable if T is move - // constructible. - StatusOr(StatusOr&&) = default; - StatusOr& operator=(StatusOr&&) = default; - - // Conversion copy/move constructor, T must be convertible from U. - template <typename U, typename std::enable_if<std::is_convertible<U, T>::value>::type* = nullptr> - StatusOr(const StatusOr<U>& other); - template <typename U, typename std::enable_if<std::is_convertible<U, T>::value>::type* = nullptr> - StatusOr(StatusOr<U>&& other); - - // Conversion copy/move assignment operator, T must be convertible from U.
- template <typename U, typename std::enable_if<std::is_convertible<U, T>::value>::type* = nullptr> - StatusOr& operator=(const StatusOr<U>& other); - template <typename U, typename std::enable_if<std::is_convertible<U, T>::value>::type* = nullptr> - StatusOr& operator=(StatusOr<U>&& other); - - // Constructs a new StatusOr with the given value. After calling this - // constructor, calls to ValueOrDie() will succeed, and calls to status() will - // return OK. - // - // NOTE: Not explicit - we want to use StatusOr<T> as a return type - // so it is convenient and sensible to be able to do 'return T()' - // when the return type is StatusOr<T>. - // - // REQUIRES: T is copy constructible. - StatusOr(const T& value); - - // Constructs a new StatusOr with the given non-ok status. After calling - // this constructor, calls to ValueOrDie() will CHECK-fail. - // - // NOTE: Not explicit - we want to use StatusOr<T> as a return - // value, so it is convenient and sensible to be able to do 'return - // Status()' when the return type is StatusOr<T>. - // - // REQUIRES: !status.ok(). This requirement is DCHECKed. - // In optimized builds, passing Status::OK() here will have the effect - // of passing tensorflow::error::INTERNAL as a fallback. - StatusOr(const Status& status); - StatusOr& operator=(const Status& status); - - // TODO(b/62186997): Add operator=(T) overloads. - - // Similar to the `const T&` overload. - // - // REQUIRES: T is move constructible. - StatusOr(T&& value); - - // RValue versions of the operations declared above. - StatusOr(Status&& status); - StatusOr& operator=(Status&& status); - - // Returns this->status().ok() - bool ok() const { return this->status_.ok(); } - - // Returns a reference to our status. If this contains a T, then - // returns Status::OK(). - const Status& status() const &; - Status status() &&; - - // Returns a reference to our current value, or CHECK-fails if !this->ok(). - // - // Note: for value types that are cheap to copy, prefer simple code: - // - // T value = statusor.ValueOrDie(); - // - // Otherwise, if the value type is expensive to copy, but can be left - // in the StatusOr, simply assign to a reference: - // - // T& value = statusor.ValueOrDie(); // or `const T&` - // - // Otherwise, if the value type supports an efficient move, it can be - // used as follows: - // - // T value = std::move(statusor).ValueOrDie(); - // - // The std::move on statusor instead of on the whole expression enables - // warnings about possible uses of the statusor object after the move. - // C++ style guide waiver for ref-qualified overloads granted in cl/143176389 - // See go/ref-qualifiers for more details on such overloads. - const T& ValueOrDie() const &; - T& ValueOrDie() &; - const T&& ValueOrDie() const &&; - T&& ValueOrDie() &&; - - // Returns a reference to the current value. - // - // REQUIRES: this->ok() == true, otherwise the behavior is undefined. - // - // Use this->ok() or `operator bool()` to verify that there is a current - // value. Alternatively, see ValueOrDie() for a similar API that guarantees - // CHECK-failing if there is no current value. - const T& operator*() const&; - T& operator*() &; - const T&& operator*() const&&; - T&& operator*() &&; - - // Returns a pointer to the current value. - // - // REQUIRES: this->ok() == true, otherwise the behavior is undefined. - // - // Use this->ok() or `operator bool()` to verify that there is a current - // value. - const T* operator->() const; - T* operator->(); - - T ConsumeValueOrDie() { return std::move(ValueOrDie()); } - - // Ignores any errors.
This method does nothing except potentially suppress - // complaints from any tools that are checking that errors are not dropped on - // the floor. - void IgnoreError() const; -}; - -//////////////////////////////////////////////////////////////////////////////// -// Implementation details for StatusOr<T> - -template <typename T> -StatusOr<T>::StatusOr() : Base(Status(tensorflow::error::UNKNOWN, "")) {} - -template <typename T> -StatusOr<T>::StatusOr(const T& value) : Base(value) {} - -template <typename T> -StatusOr<T>::StatusOr(const Status& status) : Base(status) {} - -template <typename T> -StatusOr<T>& StatusOr<T>::operator=(const Status& status) { - this->Assign(status); - return *this; -} - -template <typename T> -StatusOr<T>::StatusOr(T&& value) : Base(std::move(value)) {} - -template <typename T> -StatusOr<T>::StatusOr(Status&& status) : Base(std::move(status)) {} - -template <typename T> -StatusOr<T>& StatusOr<T>::operator=(Status&& status) { - this->Assign(std::move(status)); - return *this; -} - -template <typename T> -template <typename U, typename std::enable_if<std::is_convertible<U, T>::value>::type*> -inline StatusOr<T>::StatusOr(const StatusOr<U>& other) - : Base(static_cast<const typename StatusOr<U>::Base&>(other)) {} - -template <typename T> -template <typename U, typename std::enable_if<std::is_convertible<U, T>::value>::type*> -inline StatusOr<T>& StatusOr<T>::operator=(const StatusOr<U>& other) { - if (other.ok()) - this->Assign(other.ValueOrDie()); - else - this->Assign(other.status()); - return *this; -} - -template <typename T> -template <typename U, typename std::enable_if<std::is_convertible<U, T>::value>::type*> -inline StatusOr<T>::StatusOr(StatusOr<U>&& other) - : Base(static_cast<typename StatusOr<U>::Base&&>(other)) {} - -template <typename T> -template <typename U, typename std::enable_if<std::is_convertible<U, T>::value>::type*> -inline StatusOr<T>& StatusOr<T>::operator=(StatusOr<U>&& other) { - if (other.ok()) { - this->Assign(std::move(other).ValueOrDie()); - } else { - this->Assign(std::move(other).status()); - } - return *this; -} - -template <typename T> -const Status& StatusOr<T>::status() const & { - return this->status_; -} -template <typename T> -Status StatusOr<T>::status() && { - // Note that we copy instead of moving the status here so that - // ~StatusOrData() can call ok() without invoking UB. - return ok() ? 
Status::OK() : this->status_; -} - -template <typename T> -const T& StatusOr<T>::ValueOrDie() const & { - this->EnsureOk(); - return this->data_; -} - -template <typename T> -T& StatusOr<T>::ValueOrDie() & { - this->EnsureOk(); - return this->data_; -} - -template <typename T> -const T&& StatusOr<T>::ValueOrDie() const && { - this->EnsureOk(); - return std::move(this->data_); -} - -template <typename T> -T&& StatusOr<T>::ValueOrDie() && { - this->EnsureOk(); - return std::move(this->data_); -} - -template <typename T> -const T* StatusOr<T>::operator->() const { - this->EnsureOk(); - return &this->data_; -} - -template <typename T> -T* StatusOr<T>::operator->() { - this->EnsureOk(); - return &this->data_; -} - -template <typename T> -const T& StatusOr<T>::operator*() const& { - this->EnsureOk(); - return this->data_; -} - -template <typename T> -T& StatusOr<T>::operator*() & { - this->EnsureOk(); - return this->data_; -} - -template <typename T> -const T&& StatusOr<T>::operator*() const&& { - this->EnsureOk(); - return std::move(this->data_); -} - -template <typename T> -T&& StatusOr<T>::operator*() && { - this->EnsureOk(); - return std::move(this->data_); -} - -template <typename T> -void StatusOr<T>::IgnoreError() const { - // no-op -} +using tensorflow::StatusOr; } // namespace port - -#define TF_ASSERT_OK_AND_ASSIGN(lhs, rexpr) \ - TF_ASSERT_OK_AND_ASSIGN_IMPL( \ - TF_STATUS_MACROS_CONCAT_NAME(_status_or_value, __COUNTER__), lhs, \ - rexpr); - -#define TF_ASSERT_OK_AND_ASSIGN_IMPL(statusor, lhs, rexpr) \ - auto statusor = (rexpr); \ - ASSERT_TRUE(statusor.status().ok()) << statusor.status(); \ - lhs = std::move(statusor.ValueOrDie()) - -#define TF_STATUS_MACROS_CONCAT_NAME(x, y) TF_STATUS_MACROS_CONCAT_IMPL(x, y) -#define TF_STATUS_MACROS_CONCAT_IMPL(x, y) x##y - -#define TF_ASSIGN_OR_RETURN(lhs, rexpr) \ - TF_ASSIGN_OR_RETURN_IMPL( \ - TF_STATUS_MACROS_CONCAT_NAME(_status_or_value, __COUNTER__), lhs, rexpr) - -#define TF_ASSIGN_OR_RETURN_IMPL(statusor, lhs, rexpr) \ - auto statusor = (rexpr); \ - if (TF_PREDICT_FALSE(!statusor.ok())) { \ - return statusor.status(); \ - } \ - lhs = std::move(statusor.ValueOrDie()) - } // namespace stream_executor #endif // TENSORFLOW_STREAM_EXECUTOR_LIB_STATUSOR_H_ diff --git a/tensorflow/tensorflow.bzl b/tensorflow/tensorflow.bzl index a9da708bb534c3..37559bf1f8d0a8 100644 --- a/tensorflow/tensorflow.bzl +++ b/tensorflow/tensorflow.bzl @@ -48,7 +48,7 @@ load("@bazel_skylib//rules:common_settings.bzl", "BuildSettingInfo") # not contain rc or alpha, only numbers.
# Also update tensorflow/core/public/version.h # and tensorflow/tools/pip_package/setup.py -VERSION = "2.5.0" +VERSION = "2.5.3" VERSION_MAJOR = VERSION.split(".")[0] two_gpu_tags = ["requires-gpu-nvidia:2", "notap", "manual", "no_pip"] diff --git a/tensorflow/tools/ci_build/builds/pip_new.sh b/tensorflow/tools/ci_build/builds/pip_new.sh index a4887adcb4e1f8..601d9332735dc0 100755 --- a/tensorflow/tools/ci_build/builds/pip_new.sh +++ b/tensorflow/tools/ci_build/builds/pip_new.sh @@ -255,6 +255,9 @@ PIP_TEST_ROOT=${TF_PIP_TEST_ROOT:-$DEFAULT_PIP_TEST_ROOT} BUILD_BOTH_GPU_PACKAGES=${TF_BUILD_BOTH_GPU_PACKAGES:-$DEFAULT_BUILD_BOTH_GPU_PACKAGES} BUILD_BOTH_CPU_PACKAGES=${TF_BUILD_BOTH_CPU_PACKAGES:-$DEFAULT_BUILD_BOTH_CPU_PACKAGES} +# Override breaking change in setuptools v60 (https://github.com/pypa/setuptools/pull/2896) +export SETUPTOOLS_USE_DISTUTILS=stdlib + # Local variables PIP_WHL_DIR="${KOKORO_ARTIFACTS_DIR}/tensorflow/${PIP_TEST_ROOT}/whl" mkdir -p "${PIP_WHL_DIR}" diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py index 354657a7bfcf7a..cc8287b7e6cf85 100644 --- a/tensorflow/tools/pip_package/setup.py +++ b/tensorflow/tools/pip_package/setup.py @@ -50,7 +50,7 @@ # result for pip. # Also update tensorflow/tensorflow.bzl and # tensorflow/core/public/version.h -_VERSION = '2.5.0' +_VERSION = '2.5.3' # We use the same setup.py for all tensorflow_* packages and for the nightly @@ -101,7 +101,7 @@ # They are updated during the release process # When updating these, please also update the nightly versions below 'tensorboard ~= 2.5', - 'tensorflow-estimator >= 2.5.0rc0 , < 2.6.0', + 'tensorflow-estimator >= 2.5.0 , < 2.6.0', # TODO(scottzhu): OSS keras hasn't been formally released yet. # Use keras-nightly at the moment. 
'keras-nightly ~= 2.5.0.dev', @@ -343,6 +343,7 @@ def find_files(pattern, root): 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3 :: Only', 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Mathematics', diff --git a/tensorflow/workspace2.bzl b/tensorflow/workspace2.bzl index efc50709b8fba2..5560251f00176f 100644 --- a/tensorflow/workspace2.bzl +++ b/tensorflow/workspace2.bzl @@ -622,12 +622,12 @@ def _tf_repositories(): tf_http_archive( name = "curl", build_file = "//third_party:curl.BUILD", - sha256 = "3b4378156ba09e224008e81dcce854b7ce4d182b1f9cfb97fe5ed9e9c18c6bd3", - strip_prefix = "curl-7.76.0", + sha256 = "ed936c0b02c06d42cf84b39dd12bb14b62d77c7c4e875ade022280df5dcc81d7", + strip_prefix = "curl-7.78.0", system_build_file = "//third_party/systemlibs:curl.BUILD", urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/curl.haxx.se/download/curl-7.76.0.tar.gz", - "https://curl.haxx.se/download/curl-7.76.0.tar.gz", + "https://storage.googleapis.com/mirror.tensorflow.org/curl.haxx.se/download/curl-7.78.0.tar.gz", + "https://curl.haxx.se/download/curl-7.78.0.tar.gz", ], ) diff --git a/third_party/curl.BUILD b/third_party/curl.BUILD index b071f87208d405..f47ae019da3fd3 100644 --- a/third_party/curl.BUILD +++ b/third_party/curl.BUILD @@ -40,6 +40,8 @@ cc_library( "lib/asyn-ares.c", "lib/asyn.h", "lib/base64.c", + "lib/bufref.c", + "lib/bufref.h", "lib/c-hyper.c", "lib/c-hyper.h", "lib/config-amigaos.h", diff --git a/third_party/icu/data/BUILD.bazel b/third_party/icu/data/BUILD.bazel index 80ea92ce9b47d4..ded85987f911f6 100644 --- a/third_party/icu/data/BUILD.bazel +++ b/third_party/icu/data/BUILD.bazel @@ -19,9 +19,28 @@ exports_files(["LICENSE"]) # $ ICU_DATA_FILTER_FILE=filters.json ./runConfigureICU Linux # $ make clean && make # $ cd data/out/tmp -# $ genccode icudt64l.dat -# $ echo 'U_CAPI const void * U_EXPORT2 uprv_getICUData_conversion() { return icudt64l_dat.bytes; }' >> icudt64l_dat.c -# This creates icudt64l_dat.c, which you can move, rename, gzip, then split. +# $ genccode icudt70l.dat # Note: this number must match version, and below too! +# $ echo 'U_CAPI const void * U_EXPORT2 uprv_getICUData_conversion() { return icudt70l_dat.bytes; }' >> icudt70l_dat.c +# +# This creates icudt70l_dat.c, which you can move, rename, gzip, then split, +# for example (but you can change to other numbers): +# $ cp icudt70l_dat.c icu_conversion_data.c +# $ gzip icu_conversion_data.c +# # Note: make sure you don't forget the last . below! +# $ split -a 3 -b 100000 icu_conversion_data.c.gz icu_conversion_data.c.gz. +# +# Then, copy the generated files to this directory, removing existing ones. +# +# The current files have been generated by this filter (in filters.json): +# { +# "localeFilter": { +# "filterType": "language", +# "includelist": [ +# "en" +# ] +# } +# } +# Please make sure to keep this updated if you change the data files. 
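# A quick local sanity check (illustrative, not part of the build rules): the split pieces
# globbed below reassemble into the generated C file with standard tools, e.g.
# $ cat icu_conversion_data.c.gz.* | gunzip > icu_conversion_data.c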
filegroup( name = "conversion_files", srcs = glob(["icu_conversion_data.c.gz.*"]), diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aa b/third_party/icu/data/icu_conversion_data.c.gz.aa deleted file mode 100644 index 543b6615708830..00000000000000 Binary files a/third_party/icu/data/icu_conversion_data.c.gz.aa and /dev/null differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aaa b/third_party/icu/data/icu_conversion_data.c.gz.aaa new file mode 100644 index 00000000000000..b11bc8e1c2b268 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aaa differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aab b/third_party/icu/data/icu_conversion_data.c.gz.aab new file mode 100644 index 00000000000000..87460f63f97cb7 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aab differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aac b/third_party/icu/data/icu_conversion_data.c.gz.aac new file mode 100644 index 00000000000000..57ca5485de4bde Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aac differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aad b/third_party/icu/data/icu_conversion_data.c.gz.aad new file mode 100644 index 00000000000000..a182512aab6a60 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aad differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aae b/third_party/icu/data/icu_conversion_data.c.gz.aae new file mode 100644 index 00000000000000..4527fa522cec12 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aae differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aaf b/third_party/icu/data/icu_conversion_data.c.gz.aaf new file mode 100644 index 00000000000000..e1dc807b347f85 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aaf differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aag b/third_party/icu/data/icu_conversion_data.c.gz.aag new file mode 100644 index 00000000000000..ed6946008feec8 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aag differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aah b/third_party/icu/data/icu_conversion_data.c.gz.aah new file mode 100644 index 00000000000000..1a474bca1fe728 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aah differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aai b/third_party/icu/data/icu_conversion_data.c.gz.aai new file mode 100644 index 00000000000000..4a78d2f18c6f8b Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aai differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aaj b/third_party/icu/data/icu_conversion_data.c.gz.aaj new file mode 100644 index 00000000000000..5b40d555fdf22e Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aaj differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aak b/third_party/icu/data/icu_conversion_data.c.gz.aak new file mode 100644 index 00000000000000..e43a5cb2b7b7a2 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aak differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aal b/third_party/icu/data/icu_conversion_data.c.gz.aal new file mode 100644 index 00000000000000..8856e1e2cb49da Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aal differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aam 
b/third_party/icu/data/icu_conversion_data.c.gz.aam new file mode 100644 index 00000000000000..5d0d5e3fae793f Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aam differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aan b/third_party/icu/data/icu_conversion_data.c.gz.aan new file mode 100644 index 00000000000000..9cbff7140acca4 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aan differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aao b/third_party/icu/data/icu_conversion_data.c.gz.aao new file mode 100644 index 00000000000000..b3e8eab98d0e86 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aao differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aap b/third_party/icu/data/icu_conversion_data.c.gz.aap new file mode 100644 index 00000000000000..a3ec92a470fd8c Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aap differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aaq b/third_party/icu/data/icu_conversion_data.c.gz.aaq new file mode 100644 index 00000000000000..cdcdc42024f386 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aaq differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aar b/third_party/icu/data/icu_conversion_data.c.gz.aar new file mode 100644 index 00000000000000..b3d4a2b8396f8c Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aar differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aas b/third_party/icu/data/icu_conversion_data.c.gz.aas new file mode 100644 index 00000000000000..30dd37ff26925e Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aas differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aat b/third_party/icu/data/icu_conversion_data.c.gz.aat new file mode 100644 index 00000000000000..f3e8330204b4aa Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aat differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aau b/third_party/icu/data/icu_conversion_data.c.gz.aau new file mode 100644 index 00000000000000..bd503d27300027 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aau differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aav b/third_party/icu/data/icu_conversion_data.c.gz.aav new file mode 100644 index 00000000000000..7be56870f45656 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aav differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aaw b/third_party/icu/data/icu_conversion_data.c.gz.aaw new file mode 100644 index 00000000000000..40057bbc81905a Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aaw differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aax b/third_party/icu/data/icu_conversion_data.c.gz.aax new file mode 100644 index 00000000000000..e3ec8117d5aa65 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aax differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aay b/third_party/icu/data/icu_conversion_data.c.gz.aay new file mode 100644 index 00000000000000..b0c0b5a171b9a0 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aay differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aaz b/third_party/icu/data/icu_conversion_data.c.gz.aaz new file mode 100644 index 00000000000000..8cdd177cfc5308 Binary files /dev/null and 
b/third_party/icu/data/icu_conversion_data.c.gz.aaz differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.ab b/third_party/icu/data/icu_conversion_data.c.gz.ab deleted file mode 100644 index d8cd5108e62fb0..00000000000000 Binary files a/third_party/icu/data/icu_conversion_data.c.gz.ab and /dev/null differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aba b/third_party/icu/data/icu_conversion_data.c.gz.aba new file mode 100644 index 00000000000000..6a892bd60db59d Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aba differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abb b/third_party/icu/data/icu_conversion_data.c.gz.abb new file mode 100644 index 00000000000000..ce05de8084bf6a Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abb differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abc b/third_party/icu/data/icu_conversion_data.c.gz.abc new file mode 100644 index 00000000000000..e42ebce1ded76f Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abc differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abd b/third_party/icu/data/icu_conversion_data.c.gz.abd new file mode 100644 index 00000000000000..04be858c2e71fa Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abd differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abe b/third_party/icu/data/icu_conversion_data.c.gz.abe new file mode 100644 index 00000000000000..f27572bf716a88 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abe differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abf b/third_party/icu/data/icu_conversion_data.c.gz.abf new file mode 100644 index 00000000000000..b1cd4256152abd Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abf differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abg b/third_party/icu/data/icu_conversion_data.c.gz.abg new file mode 100644 index 00000000000000..f071eb404cef13 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abg differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abh b/third_party/icu/data/icu_conversion_data.c.gz.abh new file mode 100644 index 00000000000000..fcbe80a605b523 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abh differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abi b/third_party/icu/data/icu_conversion_data.c.gz.abi new file mode 100644 index 00000000000000..07b5626d49f7a4 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abi differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abj b/third_party/icu/data/icu_conversion_data.c.gz.abj new file mode 100644 index 00000000000000..17db0aebcaa848 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abj differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abk b/third_party/icu/data/icu_conversion_data.c.gz.abk new file mode 100644 index 00000000000000..1df6d71755c019 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abk differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abl b/third_party/icu/data/icu_conversion_data.c.gz.abl new file mode 100644 index 00000000000000..19065efa8bc25b Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abl differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abm b/third_party/icu/data/icu_conversion_data.c.gz.abm new file 
mode 100644 index 00000000000000..97fbe53304eff2 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abm differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abn b/third_party/icu/data/icu_conversion_data.c.gz.abn new file mode 100644 index 00000000000000..8b47b3c94def78 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abn differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abo b/third_party/icu/data/icu_conversion_data.c.gz.abo new file mode 100644 index 00000000000000..9985a2de553270 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abo differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abp b/third_party/icu/data/icu_conversion_data.c.gz.abp new file mode 100644 index 00000000000000..ae0a812b9db095 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abp differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abq b/third_party/icu/data/icu_conversion_data.c.gz.abq new file mode 100644 index 00000000000000..8b071f0e6a858e Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abq differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abr b/third_party/icu/data/icu_conversion_data.c.gz.abr new file mode 100644 index 00000000000000..f00c95e9246f74 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abr differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abs b/third_party/icu/data/icu_conversion_data.c.gz.abs new file mode 100644 index 00000000000000..c0571dc9adf4fc Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abs differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abt b/third_party/icu/data/icu_conversion_data.c.gz.abt new file mode 100644 index 00000000000000..f6c75209c83128 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abt differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abu b/third_party/icu/data/icu_conversion_data.c.gz.abu new file mode 100644 index 00000000000000..7c049c5550077b Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abu differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abv b/third_party/icu/data/icu_conversion_data.c.gz.abv new file mode 100644 index 00000000000000..a533067e76125a Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abv differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abw b/third_party/icu/data/icu_conversion_data.c.gz.abw new file mode 100644 index 00000000000000..8ad6abb99516e5 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abw differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abx b/third_party/icu/data/icu_conversion_data.c.gz.abx new file mode 100644 index 00000000000000..54e0515a944a09 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abx differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aby b/third_party/icu/data/icu_conversion_data.c.gz.aby new file mode 100644 index 00000000000000..6be26e2dda1f5f Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aby differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.abz b/third_party/icu/data/icu_conversion_data.c.gz.abz new file mode 100644 index 00000000000000..817dd47d5b973d Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.abz differ diff --git 
a/third_party/icu/data/icu_conversion_data.c.gz.ac b/third_party/icu/data/icu_conversion_data.c.gz.ac deleted file mode 100644 index bde21d16f57c16..00000000000000 Binary files a/third_party/icu/data/icu_conversion_data.c.gz.ac and /dev/null differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aca b/third_party/icu/data/icu_conversion_data.c.gz.aca new file mode 100644 index 00000000000000..1fac65927fd443 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aca differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acb b/third_party/icu/data/icu_conversion_data.c.gz.acb new file mode 100644 index 00000000000000..f3e6da1f7d0450 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acb differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acc b/third_party/icu/data/icu_conversion_data.c.gz.acc new file mode 100644 index 00000000000000..1fb0cc49281c37 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acc differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acd b/third_party/icu/data/icu_conversion_data.c.gz.acd new file mode 100644 index 00000000000000..60bfeba83255d6 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acd differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.ace b/third_party/icu/data/icu_conversion_data.c.gz.ace new file mode 100644 index 00000000000000..7b60fe5a3ac8d9 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.ace differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acf b/third_party/icu/data/icu_conversion_data.c.gz.acf new file mode 100644 index 00000000000000..dd8ebff2963c99 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acf differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acg b/third_party/icu/data/icu_conversion_data.c.gz.acg new file mode 100644 index 00000000000000..c5015757d328e7 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acg differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.ach b/third_party/icu/data/icu_conversion_data.c.gz.ach new file mode 100644 index 00000000000000..10c50c1d96a574 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.ach differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aci b/third_party/icu/data/icu_conversion_data.c.gz.aci new file mode 100644 index 00000000000000..75be388aee0c11 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aci differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acj b/third_party/icu/data/icu_conversion_data.c.gz.acj new file mode 100644 index 00000000000000..f55b68e633f400 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acj differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.ack b/third_party/icu/data/icu_conversion_data.c.gz.ack new file mode 100644 index 00000000000000..121d97423eb7ea Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.ack differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acl b/third_party/icu/data/icu_conversion_data.c.gz.acl new file mode 100644 index 00000000000000..eafb3b60b47383 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acl differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acm b/third_party/icu/data/icu_conversion_data.c.gz.acm new file mode 100644 index 00000000000000..f7a3b5617bc8c5 Binary files /dev/null 
and b/third_party/icu/data/icu_conversion_data.c.gz.acm differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acn b/third_party/icu/data/icu_conversion_data.c.gz.acn new file mode 100644 index 00000000000000..eff17429e724fd Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acn differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.aco b/third_party/icu/data/icu_conversion_data.c.gz.aco new file mode 100644 index 00000000000000..8388dc5c141374 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.aco differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acp b/third_party/icu/data/icu_conversion_data.c.gz.acp new file mode 100644 index 00000000000000..1e9a4bc18ed96a Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acp differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acq b/third_party/icu/data/icu_conversion_data.c.gz.acq new file mode 100644 index 00000000000000..51a5737930a6a7 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acq differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acr b/third_party/icu/data/icu_conversion_data.c.gz.acr new file mode 100644 index 00000000000000..96e27c26624b34 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acr differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acs b/third_party/icu/data/icu_conversion_data.c.gz.acs new file mode 100644 index 00000000000000..30b0970756d7e3 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acs differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.act b/third_party/icu/data/icu_conversion_data.c.gz.act new file mode 100644 index 00000000000000..21b9688e5e774e Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.act differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acu b/third_party/icu/data/icu_conversion_data.c.gz.acu new file mode 100644 index 00000000000000..cea7d355d07ab2 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acu differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acv b/third_party/icu/data/icu_conversion_data.c.gz.acv new file mode 100644 index 00000000000000..8ddf19818ced08 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acv differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acw b/third_party/icu/data/icu_conversion_data.c.gz.acw new file mode 100644 index 00000000000000..c9c2bceaaf1930 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acw differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acx b/third_party/icu/data/icu_conversion_data.c.gz.acx new file mode 100644 index 00000000000000..0ca1d9aaf65aa3 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acx differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acy b/third_party/icu/data/icu_conversion_data.c.gz.acy new file mode 100644 index 00000000000000..fbc2459b6a10ab Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acy differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.acz b/third_party/icu/data/icu_conversion_data.c.gz.acz new file mode 100644 index 00000000000000..862436c9459487 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.acz differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.ad b/third_party/icu/data/icu_conversion_data.c.gz.ad deleted 
file mode 100644 index f476988a0b24fb..00000000000000 Binary files a/third_party/icu/data/icu_conversion_data.c.gz.ad and /dev/null differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.ada b/third_party/icu/data/icu_conversion_data.c.gz.ada new file mode 100644 index 00000000000000..6034e047321250 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.ada differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.adb b/third_party/icu/data/icu_conversion_data.c.gz.adb new file mode 100644 index 00000000000000..07b519b21c089a Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.adb differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.adc b/third_party/icu/data/icu_conversion_data.c.gz.adc new file mode 100644 index 00000000000000..12d52c54e02a9e Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.adc differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.add b/third_party/icu/data/icu_conversion_data.c.gz.add new file mode 100644 index 00000000000000..e9995953c924b3 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.add differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.ade b/third_party/icu/data/icu_conversion_data.c.gz.ade new file mode 100644 index 00000000000000..292d09cfd1d457 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.ade differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.adf b/third_party/icu/data/icu_conversion_data.c.gz.adf new file mode 100644 index 00000000000000..dc2c28d019b7b3 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.adf differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.adg b/third_party/icu/data/icu_conversion_data.c.gz.adg new file mode 100644 index 00000000000000..c152c80b1d1ac0 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.adg differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.adh b/third_party/icu/data/icu_conversion_data.c.gz.adh new file mode 100644 index 00000000000000..9fcb83e56560b1 Binary files /dev/null and b/third_party/icu/data/icu_conversion_data.c.gz.adh differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.ae b/third_party/icu/data/icu_conversion_data.c.gz.ae deleted file mode 100644 index 3388b38c1a2b7a..00000000000000 Binary files a/third_party/icu/data/icu_conversion_data.c.gz.ae and /dev/null differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.af b/third_party/icu/data/icu_conversion_data.c.gz.af deleted file mode 100644 index 344e3925f39d60..00000000000000 Binary files a/third_party/icu/data/icu_conversion_data.c.gz.af and /dev/null differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.ag b/third_party/icu/data/icu_conversion_data.c.gz.ag deleted file mode 100644 index 249ffddde77176..00000000000000 Binary files a/third_party/icu/data/icu_conversion_data.c.gz.ag and /dev/null differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.ah b/third_party/icu/data/icu_conversion_data.c.gz.ah deleted file mode 100644 index 8893be204197a0..00000000000000 Binary files a/third_party/icu/data/icu_conversion_data.c.gz.ah and /dev/null differ diff --git a/third_party/icu/data/icu_conversion_data.c.gz.ai b/third_party/icu/data/icu_conversion_data.c.gz.ai deleted file mode 100644 index e6251e3a11c5b2..00000000000000 Binary files a/third_party/icu/data/icu_conversion_data.c.gz.ai and /dev/null differ diff --git 
a/third_party/icu/data/icu_conversion_data.c.gz.aj b/third_party/icu/data/icu_conversion_data.c.gz.aj deleted file mode 100644 index 3e1dc684c93176..00000000000000 Binary files a/third_party/icu/data/icu_conversion_data.c.gz.aj and /dev/null differ diff --git a/third_party/icu/udata.patch b/third_party/icu/udata.patch index 0b65e4ed3886f1..f31a604c15908a 100644 --- a/third_party/icu/udata.patch +++ b/third_party/icu/udata.patch @@ -41,7 +41,8 @@ diff -ru a/icu4c/source/common/udata.cpp b/icu4c/source/common/udata.cpp } - */ + - #if U_PLATFORM_HAS_WINUWP_API == 0 // Windows UWP Platform does not support dll icu data at this time + #if !defined(ICU_DATA_DIR_WINDOWS) + // When using the Windows system data, we expect only a single data file. setCommonICUDataPointer(&U_ICUDATA_ENTRY_POINT, FALSE, pErrorCode); { diff -ru a/icu4c/source/common/unicode/uconfig.h b/icu4c/source/common/unicode/uconfig.h diff --git a/third_party/icu/workspace.bzl b/third_party/icu/workspace.bzl index e4ed9669e0c7ee..c2ebd557f77a50 100644 --- a/third_party/icu/workspace.bzl +++ b/third_party/icu/workspace.bzl @@ -2,14 +2,16 @@ load("//third_party:repo.bzl", "tf_http_archive") +# NOTE: If you upgrade this, generate the data files by following the +# instructions in third_party/icu/data/BUILD def repo(): tf_http_archive( name = "icu", - strip_prefix = "icu-release-64-2", - sha256 = "dfc62618aa4bd3ca14a3df548cd65fe393155edd213e49c39f3a30ccd618fc27", + strip_prefix = "icu-release-69-1", + sha256 = "3144e17a612dda145aa0e4acb3caa27a5dae4e26edced64bc351c43d5004af53", urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/github.com/unicode-org/icu/archive/release-64-2.zip", - "https://github.com/unicode-org/icu/archive/release-64-2.zip", + "https://storage.googleapis.com/mirror.tensorflow.org/github.com/unicode-org/icu/archive/release-69-1.zip", + "https://github.com/unicode-org/icu/archive/release-69-1.zip", ], build_file = "//third_party/icu:BUILD.bazel", system_build_file = "//third_party/icu:BUILD.system",