diff --git a/.zenodo.json b/.zenodo.json
new file mode 100644
index 00000000000000..7161180c51ae3e
--- /dev/null
+++ b/.zenodo.json
@@ -0,0 +1,13 @@
+{
+ "description": "TensorFlow is an end-to-end open source platform for machine learning. It has a comprehensive, flexible ecosystem of tools, libraries, and community resources that lets researchers push the state-of-the-art in ML and developers easily build and deploy ML-powered applications.",
+ "license": "Apache-2.0",
+ "title": "TensorFlow",
+ "upload_type": "software",
+ "creators": [
+ {
+ "name": "TensorFlow Developers"
+ }
+ ],
+ "access_right": "open",
+ "notes": "Specific TensorFlow versions can be found in the \"Versions\" list on the right side of this page.
See the full list of authors on GitHub."
+}
diff --git a/RELEASE.md b/RELEASE.md
index 87631f9de50d6b..e1ff7153cd8626 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -1,3 +1,193 @@
+# Release 2.2.3
+
+This release introduces several vulnerability fixes:
+
+ * Fixes a heap buffer overflow in `RaggedBinCount` ([CVE-2021-29512](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29512))
+ * Fixes a heap out of bounds write in `RaggedBinCount` ([CVE-2021-29514](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29514))
+ * Fixes a type confusion during tensor casts which leads to dereferencing null pointers ([CVE-2021-29513](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29513))
+ * Fixes a reference binding to null pointer in `MatrixDiag*` ops ([CVE-2021-29515](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29515))
+ * Fixes a null pointer dereference via invalid Ragged Tensors ([CVE-2021-29516](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29516))
+ * Fixes a division by zero in `Conv3D` ([CVE-2021-29517](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29517))
+ * Fixes vulnerabilities where session operations in eager mode lead to null pointer dereferences ([CVE-2021-29518](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29518))
+ * Fixes a `CHECK`-fail in `SparseCross` caused by type confusion ([CVE-2021-29519](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29519))
+ * Fixes a segfault in `SparseCountSparseOutput` ([CVE-2021-29521](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29521))
+ * Fixes a heap buffer overflow in `Conv3DBackprop*` ([CVE-2021-29520](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29520))
+ * Fixes a division by 0 in `Conv3DBackprop*` ([CVE-2021-29522](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29522))
+ * Fixes a `CHECK`-fail in `AddManySparseToTensorsMap` ([CVE-2021-29523](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29523))
+ * Fixes a division by 0 in `Conv2DBackpropFilter` ([CVE-2021-29524](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29524))
+ * Fixes a division by 0 in `Conv2DBackpropInput` ([CVE-2021-29525](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29525))
+ * Fixes a division by 0 in `Conv2D` ([CVE-2021-29526](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29526))
+ * Fixes a division by 0 in `QuantizedConv2D` ([CVE-2021-29527](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29527))
+ * Fixes a division by 0 in `QuantizedMul` ([CVE-2021-29528](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29528))
+ * Fixes vulnerabilities caused by invalid validation in `SparseMatrixSparseCholesky` ([CVE-2021-29530](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29530))
+ * Fixes a heap buffer overflow caused by rounding ([CVE-2021-29529](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29529))
+ * Fixes a `CHECK`-fail in `tf.raw_ops.EncodePng` ([CVE-2021-29531](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29531))
+ * Fixes a heap out of bounds read in `RaggedCross` ([CVE-2021-29532](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29532))
+ * Fixes a `CHECK`-fail in `DrawBoundingBoxes` ([CVE-2021-29533](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29533))
+ * Fixes a heap buffer overflow in `QuantizedMul` ([CVE-2021-29535](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29535))
+ * Fixes a `CHECK`-fail in `SparseConcat` ([CVE-2021-29534](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29534))
+ * Fixes a heap buffer overflow in `QuantizedResizeBilinear` ([CVE-2021-29537](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29537))
+ * Fixes a heap buffer overflow in `QuantizedReshape` ([CVE-2021-29536](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29536))
+ * Fixes a division by zero in `Conv2DBackpropFilter` ([CVE-2021-29538](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29538))
+ * Fixes a heap buffer overflow in `Conv2DBackpropFilter` ([CVE-2021-29540](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29540))
+ * Fixes a heap buffer overflow in `StringNGrams` ([CVE-2021-29542](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29542))
+ * Fixes a null pointer dereference in `StringNGrams` ([CVE-2021-29541](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29541))
+ * Fixes a `CHECK`-fail in `QuantizeAndDequantizeV4Grad` ([CVE-2021-29544](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29544))
+ * Fixes a `CHECK`-fail in `CTCGreedyDecoder` ([CVE-2021-29543](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29543))
+ * Fixes a heap buffer overflow in `SparseTensorToCSRSparseMatrix` ([CVE-2021-29545](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29545))
+ * Fixes a division by 0 in `QuantizedBiasAdd` ([CVE-2021-29546](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29546))
+ * Fixes a heap out of bounds in `QuantizedBatchNormWithGlobalNormalization` ([CVE-2021-29547](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29547))
+ * Fixes a division by 0 in `QuantizedBatchNormWithGlobalNormalization` ([CVE-2021-29548](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29548))
+ * Fixes a division by 0 in `QuantizedAdd` ([CVE-2021-29549](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29549))
+ * Fixes a division by 0 in `FractionalAvgPool` ([CVE-2021-29550](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29550))
+ * Fixes an OOB read in `MatrixTriangularSolve` ([CVE-2021-29551](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29551))
+ * Fixes a heap OOB in `QuantizeAndDequantizeV3` ([CVE-2021-29553](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29553))
+ * Fixes a `CHECK`-failure in `UnsortedSegmentJoin` ([CVE-2021-29552](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29552))
+ * Fixes a division by 0 in `DenseCountSparseOutput` ([CVE-2021-29554](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29554))
+ * Fixes a division by 0 in `FusedBatchNorm` ([CVE-2021-29555](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29555))
+ * Fixes a division by 0 in `SparseMatMul` ([CVE-2021-29557](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29557))
+ * Fixes a division by 0 in `Reverse` ([CVE-2021-29556](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29556))
+ * Fixes a heap buffer overflow in `SparseSplit` ([CVE-2021-29558](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29558))
+ * Fixes a heap OOB access in unicode ops ([CVE-2021-29559](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29559))
+ * Fixes a heap buffer overflow in `RaggedTensorToTensor` ([CVE-2021-29560](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29560))
+ * Fixes a `CHECK`-fail in `LoadAndRemapMatrix` ([CVE-2021-29561](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29561))
+ * Fixes a `CHECK`-fail in `tf.raw_ops.IRFFT` ([CVE-2021-29562](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29562))
+ * Fixes a `CHECK`-fail in `tf.raw_ops.RFFT` ([CVE-2021-29563](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29563))
+ * Fixes a null pointer dereference in `EditDistance` ([CVE-2021-29564](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29564))
+ * Fixes a null pointer dereference in `SparseFillEmptyRows` ([CVE-2021-29565](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29565))
+ * Fixes a heap OOB access in `Dilation2DBackpropInput` ([CVE-2021-29566](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29566))
+ * Fixes a reference binding to null in `ParameterizedTruncatedNormal` ([CVE-2021-29568](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29568))
+ * Fixes a set of vulnerabilities caused by lack of validation in `SparseDenseCwiseMul` ([CVE-2021-29567](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29567))
+ * Fixes a heap out of bounds read in `MaxPoolGradWithArgmax` ([CVE-2021-29570](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29570))
+ * Fixes a heap out of bounds read in `RequantizationRange` ([CVE-2021-29569](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29569))
+ * Fixes a memory corruption in `DrawBoundingBoxesV2` ([CVE-2021-29571](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29571))
+ * Fixes a reference binding to nullptr in `SdcaOptimizer` ([CVE-2021-29572](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29572))
+ * Fixes an overflow and a denial of service in `tf.raw_ops.ReverseSequence` ([CVE-2021-29575](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29575))
+ * Fixes a division by 0 in `MaxPoolGradWithArgmax` ([CVE-2021-29573](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29573))
+ * Fixes an undefined behavior in `MaxPool3DGradGrad` ([CVE-2021-29574](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29574))
+ * Fixes a heap buffer overflow in `MaxPool3DGradGrad` ([CVE-2021-29576](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29576))
+ * Fixes a heap buffer overflow in `AvgPool3DGrad` ([CVE-2021-29577](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29577))
+ * Fixes an undefined behavior and a `CHECK`-fail in `FractionalMaxPoolGrad` ([CVE-2021-29580](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29580))
+ * Fixes a heap buffer overflow in `FractionalAvgPoolGrad` ([CVE-2021-29578](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29578))
+ * Fixes a heap buffer overflow in `MaxPoolGrad` ([CVE-2021-29579](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29579))
+ * Fixes a segfault in `CTCBeamSearchDecoder` ([CVE-2021-29581](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29581))
+ * Fixes a heap OOB read in `tf.raw_ops.Dequantize` ([CVE-2021-29582](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29582))
+ * Fixes a `CHECK`-fail due to integer overflow ([CVE-2021-29584](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29584))
+ * Fixes a heap buffer overflow and undefined behavior in `FusedBatchNorm` ([CVE-2021-29583](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29583))
+ * Fixes a division by zero in padding computation in TFLite ([CVE-2021-29585](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29585))
+ * Fixes a division by zero in optimized pooling implementations in TFLite ([CVE-2021-29586](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29586))
+ * Fixes a division by zero in TFLite's implementation of `SpaceToDepth` ([CVE-2021-29587](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29587))
+ * Fixes a division by zero in TFLite's implementation of `GatherNd` ([CVE-2021-29589](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29589))
+ * Fixes a division by zero in TFLite's implementation of `TransposeConv` ([CVE-2021-29588](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29588))
+ * Fixes a heap OOB read in TFLite's implementation of `Minimum` or `Maximum` ([CVE-2021-29590](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29590))
+ * Fixes a null pointer dereference in TFLite's `Reshape` operator ([CVE-2021-29592](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29592))
+ * Fixes a stack overflow due to looping TFLite subgraph ([CVE-2021-29591](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29591))
+ * Fixes a division by zero in TFLite's implementation of `DepthToSpace` ([CVE-2021-29595](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29595))
+ * Fixes a division by zero in TFLite's convolution code ([CVE-2021-29594](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29594))
+ * Fixes a division by zero in TFLite's implementation of `EmbeddingLookup` ([CVE-2021-29596](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29596))
+ * Fixes a division by zero in TFLite's implementation of `BatchToSpaceNd` ([CVE-2021-29593](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29593))
+ * Fixes a division by zero in TFLite's implementation of `SpaceToBatchNd` ([CVE-2021-29597](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29597))
+ * Fixes a division by zero in TFLite's implementation of `SVDF` ([CVE-2021-29598](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29598))
+ * Fixes a division by zero in TFLite's implementation of `Split` ([CVE-2021-29599](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29599))
+ * Fixes a division by zero in TFLite's implementation of `OneHot` ([CVE-2021-29600](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29600))
+ * Fixes a division by zero in TFLite's implementation of `DepthwiseConv` ([CVE-2021-29602](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29602))
+ * Fixes a division by zero in TFLite's implementation of hashtable lookup ([CVE-2021-29604](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29604))
+ * Fixes an integer overflow in TFLite concatenation ([CVE-2021-29601](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29601))
+ * Fixes an integer overflow in TFLite memory allocation ([CVE-2021-29605](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29605))
+ * Fixes a heap OOB write in TFLite ([CVE-2021-29603](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29603))
+ * Fixes a heap OOB read in TFLite ([CVE-2021-29606](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29606))
+ * Fixes a heap OOB and null pointer dereference in `RaggedTensorToTensor` ([CVE-2021-29608](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29608))
+ * Fixes vulnerabilities caused by incomplete validation in `SparseAdd` ([CVE-2021-29609](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29609))
+ * Fixes vulnerabilities caused by incomplete validation in `SparseSparseMinimum` ([CVE-2021-29607](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29607))
+ * Fixes vulnerabilities caused by incomplete validation in `SparseReshape` ([CVE-2021-29611](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29611))
+ * Fixes vulnerabilities caused by invalid validation in `QuantizeAndDequantizeV2` ([CVE-2021-29610](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29610))
+ * Fixes a heap buffer overflow in `BandedTriangularSolve` ([CVE-2021-29612](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29612))
+ * Fixes vulnerabilities caused by incomplete validation in `tf.raw_ops.CTCLoss` ([CVE-2021-29613](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29613))
+ * Fixes an interpreter crash from vulnerabilities in `tf.io.decode_raw` ([CVE-2021-29614](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29614))
+ * Fixes a stack overflow in `ParseAttrValue` with nested tensors ([CVE-2021-29615](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29615))
+ * Fixes a null dereference in Grappler's `TrySimplify` ([CVE-2021-29616](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29616))
+ * Fixes a crash in `tf.transpose` with complex inputs ([CVE-2021-29618](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29618))
+ * Fixes a crash in `tf.strings.substr` due to `CHECK`-fail ([CVE-2021-29617](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29617))
+ * Fixes a segfault in `tf.raw_ops.SparseCountSparseOutput` ([CVE-2021-29619](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29619))
+ * Fixes a segfault in `tf.raw_ops.ImmutableConst` ([CVE-2021-29539](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29539))
+ * Updates `curl` to `7.76.0` to handle [CVE-2020-8169](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8169), [CVE-2020-8177](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8177), [CVE-2020-8231](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8231), [CVE-2020-8284](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8284), [CVE-2020-8285](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8285) and [CVE-2020-8286](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8286).
+
+# Release 2.2.2
+
+## Bug Fixes and Other Changes
+* Fixes an access to uninitialized memory in Eigen code
+ ([CVE-2020-26266](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26266))
+* Fixes a security vulnerability caused by lack of validation in
+ `tf.raw_ops.DataFormatVecPermute` and `tf.raw_ops.DataFormatDimMap`
+ ([CVE-2020-26267](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26267))
+* Fixes a vulnerability caused by attempting to write to immutable memory region in
+ `tf.raw_ops.ImmutableConst`
+ ([CVE-2020-26268](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26268))
+* Fixes a `CHECK`-fail in LSTM with zero-length input
+ ([CVE-2020-26270](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26270))
+* Fixes a security vulnerability caused by accessing heap data outside of bounds
+ when loading a specially crafted `SavedModel`
+ ([CVE-2020-26271](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26271))
+* Prevents memory leaks in loading `SavedModel`s that import functions
+* Updates `libjpeg-turbo` to `2.0.5` to handle
+ [CVE-2020-13790](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-13790).
+* Updates `junit` to `4.13.1` to handle
+ [CVE-2020-15250](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15250).
+* Updates `PCRE` to `8.44` to handle
+ [CVE-2019-20838](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-20838)
+ and
+ [CVE-2020-14155](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-14155).
+* Updates `sqlite3` to `3.44.0` to keep in sync with master branch.
+
+# Release 2.2.1
+
+## Bug Fixes and Other Changes
+* Fixes an undefined behavior causing a segfault in `tf.raw_ops.Switch`
+ ([CVE-2020-15190](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15190))
+* Fixes three vulnerabilities in conversion to DLPack format
+ ([CVE-2020-15191](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15191),
+ [CVE-2020-15192](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15192),
+ [CVE-2020-15193](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15193))
+* Fixes two vulnerabilities in `SparseFillEmptyRowsGrad`
+ ([CVE-2020-15194](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15194),
+ [CVE-2020-15195](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15195))
+* Fixes an integer truncation vulnerability in code using the work sharder API
+ ([CVE-2020-15202](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15202))
+* Fixes a format string vulnerability in `tf.strings.as_string`
+ ([CVE-2020-15203](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15203))
+* Fixes segfault raised by calling session-only ops in eager mode
+ ([CVE-2020-15204](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15204))
+* Fixes data leak and potential ASLR violation from `tf.raw_ops.StringNGrams`
+ ([CVE-2020-15205](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15205))
+* Fixes segfaults caused by incomplete `SavedModel` validation
+ ([CVE-2020-15206](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15206))
+* Fixes a data corruption due to a bug in negative indexing support in TFLite
+ ([CVE-2020-15207](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15207))
+* Fixes a data corruption due to dimension mismatch in TFLite
+ ([CVE-2020-15208](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15208))
+* Fixes several vulnerabilities in TFLite saved model format
+ ([CVE-2020-15209](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15209),
+ [CVE-2020-15210](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15210),
+ [CVE-2020-15211](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15211))
+* Fixes several vulnerabilities in TFLite implementation of segment sum
+ ([CVE-2020-15212](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15212),
+ [CVE-2020-15213](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15213),
+ [CVE-2020-15214](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15214))
+* Updates `sqlite3` to `3.33.00` to handle
+ [CVE-2020-9327](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-9327),
+ [CVE-2020-11655](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-11655),
+ [CVE-2020-11656](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-11656),
+ [CVE-2020-13434](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-13434),
+ [CVE-2020-13435](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-13435),
+ [CVE-2020-13630](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-13630),
+ [CVE-2020-13631](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-13631),
+ [CVE-2020-13871](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-13871),
+ and
+ [CVE-2020-15358](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15358).
+* Fixes deprecated usage of `collections` API
+* Removes `scipy` dependency from `setup.py` since TensorFlow does not need it
+ to install the pip package
+
# Release 2.2.0
TensorFlow 2.2 discontinues support for Python 2, [previously announced](https://groups.google.com/a/tensorflow.org/d/msg/announce/gVwS5RC8mds/dCt1ka2XAAAJ) as following [Python 2's EOL on January 1, 2020](https://www.python.org/dev/peps/pep-0373/#update).
diff --git a/tensorflow/c/eager/dlpack.cc b/tensorflow/c/eager/dlpack.cc
index f6acf442e6dc94..4a71139627b256 100644
--- a/tensorflow/c/eager/dlpack.cc
+++ b/tensorflow/c/eager/dlpack.cc
@@ -250,21 +250,36 @@ void TFE_CallDLManagedTensorDeleter(void* dlm_ptr) {
}
void* TFE_HandleToDLPack(TFE_TensorHandle* h, TF_Status* status) {
+ auto tf_dlm_context = GetDlContext(h, status);
+ if (!status->status.ok()) {
+ return nullptr;
+ }
+
+ auto* tf_dlm_data = TFE_TensorHandleDevicePointer(h, status);
+ if (!status->status.ok()) {
+ return nullptr;
+ }
+
const Tensor* tensor = GetTensorFromHandle(h, status);
TF_DataType data_type = static_cast<TF_DataType>(tensor->dtype());
- TensorReference tensor_ref(*tensor); // This will call buf_->Ref()
+ auto tf_dlm_type = GetDlDataType(data_type, status);
+ if (!status->status.ok()) {
+ return nullptr;
+ }
+
+ TensorReference tensor_ref(*tensor); // This will call buf_->Ref()
auto* tf_dlm_tensor_ctx = new TfDlManagedTensorCtx(tensor_ref);
tf_dlm_tensor_ctx->reference = tensor_ref;
DLManagedTensor* dlm_tensor = &tf_dlm_tensor_ctx->tensor;
dlm_tensor->manager_ctx = tf_dlm_tensor_ctx;
dlm_tensor->deleter = &DLManagedTensorDeleter;
- dlm_tensor->dl_tensor.ctx = GetDlContext(h, status);
+ dlm_tensor->dl_tensor.ctx = tf_dlm_context;
int ndim = tensor->dims();
dlm_tensor->dl_tensor.ndim = ndim;
- dlm_tensor->dl_tensor.data = TFE_TensorHandleDevicePointer(h, status);
- dlm_tensor->dl_tensor.dtype = GetDlDataType(data_type, status);
+ dlm_tensor->dl_tensor.data = tf_dlm_data;
+ dlm_tensor->dl_tensor.dtype = tf_dlm_type;
std::vector<int64_t>* shape_arr = &tf_dlm_tensor_ctx->shape;
std::vector<int64_t>* stride_arr = &tf_dlm_tensor_ctx->strides;
@@ -277,13 +292,14 @@ void* TFE_HandleToDLPack(TFE_TensorHandle* h, TF_Status* status) {
(*stride_arr)[i] = (*shape_arr)[i + 1] * (*stride_arr)[i + 1];
}
- dlm_tensor->dl_tensor.shape = &(*shape_arr)[0];
+ dlm_tensor->dl_tensor.shape = shape_arr->data();
// There are two ways to represent compact row-major data
// 1) nullptr indicates tensor is compact and row-majored.
// 2) fill in the strides array as the real case for compact row-major data.
// Here we choose option 2, since some frameworks didn't handle the strides
// argument properly.
- dlm_tensor->dl_tensor.strides = &(*stride_arr)[0];
+ dlm_tensor->dl_tensor.strides = stride_arr->data();
+
dlm_tensor->dl_tensor.byte_offset =
0; // TF doesn't handle the strides and byte_offsets here
return static_cast<void*>(dlm_tensor);
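
The reordering above makes `TFE_HandleToDLPack` bail out with `nullptr` as soon as any intermediate step sets a non-OK status, instead of packing unchecked values into the `DLManagedTensor`. A minimal caller-side sketch of consuming the result through the C API; the helper name and the handle setup are illustrative, not part of this patch:

    // Sketch only: assumes `handle` is a valid TFE_TensorHandle* obtained elsewhere.
    #include "tensorflow/c/eager/c_api.h"
    #include "tensorflow/c/eager/dlpack.h"

    void* ExportToDLPackOrNull(TFE_TensorHandle* handle) {
      TF_Status* status = TF_NewStatus();
      void* dlm = tensorflow::TFE_HandleToDLPack(handle, status);
      // With this patch a failed conversion reports through `status` and
      // returns nullptr instead of a partially initialized DLManagedTensor.
      const bool ok = (TF_GetCode(status) == TF_OK) && dlm != nullptr;
      TF_DeleteStatus(status);
      return ok ? dlm : nullptr;  // release later via TFE_CallDLManagedTensorDeleter
    }
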
diff --git a/tensorflow/cc/saved_model/loader.cc b/tensorflow/cc/saved_model/loader.cc
index 6c967dcf464872..9f0a8cd87074a8 100644
--- a/tensorflow/cc/saved_model/loader.cc
+++ b/tensorflow/cc/saved_model/loader.cc
@@ -20,6 +20,7 @@ limitations under the License.
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/cc/saved_model/reader.h"
#include "tensorflow/core/framework/attr_value.pb.h"
+#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/io/path.h"
@@ -72,26 +73,41 @@ uint64 GetLatencyMicroseconds(const uint64 start_microseconds) {
// Ensure that constant tensors loaded from the saved model have valid shape.
// Also ensure that constant nodes have a value assigned to them.
// TODO(b/154763635): this is temporary and will be replaced with a better audit
+static Status ValidateNode(const NodeDef& node) {
+ const auto node_iterator = node.attr().find("value");
+ if (node_iterator != node.attr().end()) {
+ AttrValue node_value = node_iterator->second;
+ if (node_value.has_tensor()) {
+ const PartialTensorShape node_shape(node_value.tensor().tensor_shape());
+ if (node_shape.num_elements() < 0) {
+ return errors::FailedPrecondition(
+ "Saved model contains node \"", node.name(), "\" (op \"", node.op(),
+ "\") which initializes from a tensor with ",
+ node_shape.num_elements(), " elements");
+ }
+ }
+ } else if (node.op() == "Const") {
+ return errors::FailedPrecondition(
+ "Saved model contains node \"", node.name(),
+ "\" which is a constant tensor but no value has been provided");
+ }
+ return Status::OK();
+}
+
static Status ValidateSavedTensors(const GraphDef& graph_def) {
for (const auto& node : graph_def.node()) {
- const auto node_iterator = node.attr().find("value");
- if (node_iterator != node.attr().end()) {
- AttrValue node_value = node_iterator->second;
- if (node_value.has_tensor()) {
- const PartialTensorShape node_shape(node_value.tensor().tensor_shape());
- if (node_shape.num_elements() < 0) {
- return errors::FailedPrecondition(
- "Saved model contains node \"", node.name(), "\" (op \"",
- node.op(), "\") which initializes from a tensor with ",
- node_shape.num_elements(), " elements");
- }
+ TF_RETURN_IF_ERROR(ValidateNode(node));
+ }
+
+ if (graph_def.has_library()) {
+ const FunctionDefLibrary& library = graph_def.library();
+ for (const auto& function : library.function()) {
+ for (const auto& node : function.node_def()) {
+ TF_RETURN_IF_ERROR(ValidateNode(node));
}
- } else if (node.op() == "Const") {
- return errors::FailedPrecondition(
- "Saved model contains node \"", node.name(),
- "\" which is a constant tensor but no value has been provided");
}
}
+
return Status::OK();
}
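
Factoring the check into `ValidateNode` lets the loader apply the same constant-tensor validation to nodes inside the `FunctionDefLibrary`, not only the top-level graph. The property it keys on is that a shape with an unknown dimension reports a negative element count; a small sketch of that condition follows (the `WouldBeRejected` helper and the proto values are illustrative only):

    // Sketch: a crafted constant whose shape proto carries a -1 dimension
    // yields num_elements() < 0, which ValidateNode now rejects.
    #include "tensorflow/core/framework/partial_tensor_shape.h"
    #include "tensorflow/core/framework/tensor_shape.pb.h"

    bool WouldBeRejected() {
      tensorflow::TensorShapeProto proto;
      proto.add_dim()->set_size(-1);   // unknown/invalid dimension
      proto.add_dim()->set_size(8);
      tensorflow::PartialTensorShape shape(proto);
      return shape.num_elements() < 0;  // true -> FailedPrecondition in the loader
    }
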
diff --git a/tensorflow/core/common_runtime/eager/kernel_and_device.cc b/tensorflow/core/common_runtime/eager/kernel_and_device.cc
index 7760c2c968f57e..2a2b649ef8cf75 100644
--- a/tensorflow/core/common_runtime/eager/kernel_and_device.cc
+++ b/tensorflow/core/common_runtime/eager/kernel_and_device.cc
@@ -304,7 +304,12 @@ Status KernelAndDeviceOp::Run(
if (outputs != nullptr) {
outputs->clear();
for (int i = 0; i < context.num_outputs(); ++i) {
- outputs->push_back(Tensor(*context.mutable_output(i)));
+ const auto* output_tensor = context.mutable_output(i);
+ if (output_tensor != nullptr) {
+ outputs->push_back(Tensor(*output_tensor));
+ } else {
+ outputs->push_back(Tensor());
+ }
}
}
return Status::OK();
diff --git a/tensorflow/core/framework/attr_value_util.cc b/tensorflow/core/framework/attr_value_util.cc
index fb51da9dee26ee..6f6e97788e5a68 100644
--- a/tensorflow/core/framework/attr_value_util.cc
+++ b/tensorflow/core/framework/attr_value_util.cc
@@ -38,6 +38,9 @@ namespace {
// Do not construct large tensors to compute their hash or compare for equality.
constexpr int kMaxAttrValueTensorByteSize = 32 * 1024 * 1024; // 32mb
+// Limit nesting of tensors to 100 deep to prevent memory overflow.
+constexpr int kMaxTensorNestDepth = 100;
+
// Return the size of the tensor represented by this TensorProto. If shape is
// not fully defined return -1.
int64 TensorByteSize(const TensorProto& t) {
@@ -224,6 +227,54 @@ string SummarizeFunc(const NameAttrList& func) {
return strings::StrCat(func.name(), "[", absl::StrJoin(entries, ", "), "]");
}
+bool ParseAttrValueHelper_TensorNestsUnderLimit(int limit, string to_parse) {
+ int nests = 0;
+ int maxed_out = to_parse.length();
+ int open_curly = to_parse.find('{');
+ int open_bracket = to_parse.find('<');
+ int close_curly = to_parse.find('}');
+ int close_bracket = to_parse.find('>');
+ if (open_curly == -1) {
+ open_curly = maxed_out;
+ }
+ if (open_bracket == -1) {
+ open_bracket = maxed_out;
+ }
+ int min = std::min(open_curly, open_bracket);
+ do {
+ if (open_curly == maxed_out && open_bracket == maxed_out) {
+ return true;
+ }
+ if (min == open_curly) {
+ nests += 1;
+ open_curly = to_parse.find('{', open_curly + 1);
+ if (open_curly == -1) {
+ open_curly = maxed_out;
+ }
+ } else if (min == open_bracket) {
+ nests += 1;
+ open_bracket = to_parse.find('<', open_bracket + 1);
+ if (open_bracket == -1) {
+ open_bracket = maxed_out;
+ }
+ } else if (min == close_curly) {
+ nests -= 1;
+ close_curly = to_parse.find('}', close_curly + 1);
+ if (close_curly == -1) {
+ close_curly = maxed_out;
+ }
+ } else if (min == close_bracket) {
+ nests -= 1;
+ close_bracket = to_parse.find('>', close_bracket + 1);
+ if (close_bracket == -1) {
+ close_bracket = maxed_out;
+ }
+ }
+ min = std::min({open_curly, open_bracket, close_curly, close_bracket});
+ } while (nests < 100);
+ return false;
+}
+
} // namespace
string SummarizeAttrValue(const AttrValue& attr_value) {
@@ -448,7 +499,12 @@ bool ParseAttrValue(StringPiece type, StringPiece text, AttrValue* out) {
} else {
to_parse = strings::StrCat(field_name, ": ", text);
}
-
+ if (field_name == "tensor") {
+ if (!ParseAttrValueHelper_TensorNestsUnderLimit(kMaxTensorNestDepth,
+ to_parse)) {
+ return false;
+ }
+ }
return ProtoParseFromString(to_parse, out);
}
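
`ParseAttrValueHelper_TensorNestsUnderLimit` scans the textual attribute before it reaches the proto parser, counting how deeply `{`/`<` groups nest and refusing anything past the limit. A self-contained sketch of the same idea, independent of the TensorFlow helpers (the `NestsUnderLimit` name is illustrative):

    // Sketch of the nesting-depth guard: reject input whose brace/bracket
    // nesting exceeds a fixed limit before handing it to a recursive parser.
    #include <string>

    bool NestsUnderLimit(const std::string& text, int limit) {
      int depth = 0;
      for (char c : text) {
        if (c == '{' || c == '<') {
          if (++depth >= limit) return false;   // too deep: fail fast
        } else if (c == '}' || c == '>') {
          --depth;
        }
      }
      return true;
    }

    // e.g. an attr text with hundreds of nested "tensor { ... { ... } }"
    // groups returns false, so ParseAttrValue fails instead of overflowing.
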
diff --git a/tensorflow/core/graph/graph_constructor.cc b/tensorflow/core/graph/graph_constructor.cc
index 39bb0514c34f4d..ee8f6a093fd6e6 100644
--- a/tensorflow/core/graph/graph_constructor.cc
+++ b/tensorflow/core/graph/graph_constructor.cc
@@ -1436,6 +1436,17 @@ void GraphConstructor::Undo() {
Status GraphConstructor::MakeEdge(Node* src, int output_index, Node* dst,
int input_index) {
+ if (output_index >= src->num_outputs()) {
+ return errors::InvalidArgument(
+ "Output ", output_index, " of node ", src->name(),
+ " does not exist. Node only has ", src->num_outputs(), " outputs.");
+ }
+ if (input_index >= dst->num_inputs()) {
+ return errors::InvalidArgument(
+ "Input ", input_index, " of node ", dst->name(),
+ " does not exist. Node only has ", dst->num_inputs(), " inputs.");
+ }
+
DataType src_out = src->output_type(output_index);
DataType dst_in = dst->input_type(input_index);
if (!TypesCompatible(dst_in, src_out)) {
diff --git a/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc b/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc
index 3281f97457f58b..38e6eba13edde6 100644
--- a/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc
+++ b/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc
@@ -1994,6 +1994,12 @@ class ReorderCastLikeAndValuePreserving : public ArithmeticOptimizerStage {
Status TrySimplify(NodeDef* consumer, string* simplified_node_name) override {
NodeDef* producer;
+
+ if (consumer->input_size() < 1) {
+ return errors::FailedPrecondition("Node ", simplified_node_name,
+ " lacks inputs");
+ }
+
TF_RETURN_IF_ERROR(GetInputNode(consumer->input(0), &producer));
const bool producer_is_cast = IsCastLike(*producer);
const bool can_optimize =
@@ -2393,6 +2399,11 @@ class ReplaceMulWithSquare : public ArithmeticOptimizerStage {
~ReplaceMulWithSquare() override = default;
bool IsSupported(const NodeDef* node) const override {
+ if (!node || node->input_size() < 2) {
+ // Invalid node
+ return false;
+ }
+
return IsAnyMul(*node) && node->input(0) == node->input(1);
}
diff --git a/tensorflow/core/grappler/optimizers/dependency_optimizer.cc b/tensorflow/core/grappler/optimizers/dependency_optimizer.cc
index fed600361370cf..7acad9e679b687 100644
--- a/tensorflow/core/grappler/optimizers/dependency_optimizer.cc
+++ b/tensorflow/core/grappler/optimizers/dependency_optimizer.cc
@@ -68,6 +68,12 @@ bool DependencyOptimizer::SafeToRemoveIdentity(const NodeDef& node) const {
// The output values of this node may be needed.
return false;
}
+
+ if (node.input_size() < 1) {
+ // Node lacks input, is invalid
+ return false;
+ }
+
const NodeDef* input = node_map_->GetNode(NodeName(node.input(0)));
CHECK(input != nullptr) << "node = " << node.name()
<< " input = " << node.input(0);
diff --git a/tensorflow/core/kernels/BUILD b/tensorflow/core/kernels/BUILD
index 89f50c5d173436..c812b0f4983b36 100644
--- a/tensorflow/core/kernels/BUILD
+++ b/tensorflow/core/kernels/BUILD
@@ -5897,6 +5897,24 @@ tf_kernel_library(
deps = STRING_DEPS,
)
+tf_cc_test(
+ name = "as_string_op_test",
+ size = "small",
+ srcs = ["as_string_op_test.cc"],
+ deps = [
+ ":as_string_op",
+ ":ops_testutil",
+ ":ops_util",
+ "//tensorflow/core:core_cpu",
+ "//tensorflow/core:framework",
+ "//tensorflow/core:lib",
+ "//tensorflow/core:protos_all_cc",
+ "//tensorflow/core:test",
+ "//tensorflow/core:test_main",
+ "//tensorflow/core:testlib",
+ ],
+)
+
tf_kernel_library(
name = "unicode_ops",
prefix = "unicode_ops",
diff --git a/tensorflow/core/kernels/as_string_op.cc b/tensorflow/core/kernels/as_string_op.cc
index 8341909fbc8409..b9af976a654d99 100644
--- a/tensorflow/core/kernels/as_string_op.cc
+++ b/tensorflow/core/kernels/as_string_op.cc
@@ -65,9 +65,26 @@ class AsStringOp : public OpKernel {
OP_REQUIRES(ctx, !(scientific && shortest),
errors::InvalidArgument(
"Cannot select both scientific and shortest notation"));
+
format_ = "%";
+ if (!fill_string.empty()) {
+ switch (fill_string[0]) {
+ case ' ':
+ case '+':
+ case '-':
+ case '0':
+ case '#':
+ strings::Appendf(&format_, "%s", fill_string.c_str());
+ break;
+ default:
+ bool fill_not_supported = true;
+ OP_REQUIRES(ctx, !fill_not_supported,
+ errors::InvalidArgument("Fill argument not supported: \"",
+ fill_string, "\""));
+ }
+ }
if (width > -1) {
- strings::Appendf(&format_, "%s%d", fill_string.c_str(), width);
+ strings::Appendf(&format_, "%d", width);
}
if (precision > -1) {
strings::Appendf(&format_, ".%d", precision);
diff --git a/tensorflow/core/kernels/as_string_op_test.cc b/tensorflow/core/kernels/as_string_op_test.cc
new file mode 100644
index 00000000000000..dff78e25e72025
--- /dev/null
+++ b/tensorflow/core/kernels/as_string_op_test.cc
@@ -0,0 +1,245 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/framework/fake_input.h"
+#include "tensorflow/core/framework/node_def_builder.h"
+#include "tensorflow/core/framework/tensor.h"
+#include "tensorflow/core/framework/tensor_testutil.h"
+#include "tensorflow/core/framework/types.h"
+#include "tensorflow/core/kernels/ops_testutil.h"
+#include "tensorflow/core/kernels/ops_util.h"
+#include "tensorflow/core/lib/core/status_test_util.h"
+
+namespace tensorflow {
+namespace {
+
+class AsStringGraphTest : public OpsTestBase {
+ protected:
+ Status Init(DataType input_type, const string& fill = "", int width = -1,
+ int precision = -1, bool scientific = false,
+ bool shortest = false) {
+ TF_CHECK_OK(NodeDefBuilder("op", "AsString")
+ .Input(FakeInput(input_type))
+ .Attr("fill", fill)
+ .Attr("precision", precision)
+ .Attr("scientific", scientific)
+ .Attr("shortest", shortest)
+ .Attr("width", width)
+ .Finalize(node_def()));
+ return InitOp();
+ }
+};
+
+TEST_F(AsStringGraphTest, Int8) {
+ TF_ASSERT_OK(Init(DT_INT8));
+
+ AddInputFromArray<int8>(TensorShape({3}), {-42, 0, 42});
+ TF_ASSERT_OK(RunOpKernel());
+ Tensor expected(allocator(), DT_STRING, TensorShape({3}));
+ test::FillValues<tstring>(&expected, {"-42", "0", "42"});
+ test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
+}
+
+TEST_F(AsStringGraphTest, Int64) {
+ TF_ASSERT_OK(Init(DT_INT64));
+
+ AddInputFromArray<int64>(TensorShape({3}), {-42, 0, 42});
+ TF_ASSERT_OK(RunOpKernel());
+ Tensor expected(allocator(), DT_STRING, TensorShape({3}));
+ test::FillValues<tstring>(&expected, {"-42", "0", "42"});
+ test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
+}
+
+TEST_F(AsStringGraphTest, FloatDefault) {
+ TF_ASSERT_OK(Init(DT_FLOAT));
+
+ AddInputFromArray<float>(TensorShape({4}), {-42, 0, 3.14159, 42});
+ TF_ASSERT_OK(RunOpKernel());
+ Tensor expected(allocator(), DT_STRING, TensorShape({4}));
+ test::FillValues<tstring>(
+ &expected, {"-42.000000", "0.000000", "3.141590", "42.000000"});
+ test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
+}
+
+TEST_F(AsStringGraphTest, FloatScientific) {
+ TF_ASSERT_OK(Init(DT_FLOAT, /*fill=*/"", /*width=*/-1, /*precision=*/-1,
+ /*scientific=*/true));
+
+ AddInputFromArray<float>(TensorShape({4}), {-42, 0, 3.14159, 42});
+ TF_ASSERT_OK(RunOpKernel());
+ Tensor expected(allocator(), DT_STRING, TensorShape({4}));
+ test::FillValues<tstring>(&expected, {"-4.200000e+01", "0.000000e+00",
+ "3.141590e+00", "4.200000e+01"});
+ test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
+}
+
+TEST_F(AsStringGraphTest, FloatShortest) {
+ TF_ASSERT_OK(Init(DT_FLOAT, /*fill=*/"", /*width=*/-1, /*precision=*/-1,
+ /*scientific=*/false, /*shortest=*/true));
+
+ AddInputFromArray<float>(TensorShape({4}), {-42, 0, 3.14159, 42});
+ TF_ASSERT_OK(RunOpKernel());
+ Tensor expected(allocator(), DT_STRING, TensorShape({4}));
+ test::FillValues<tstring>(&expected, {"-42", "0", "3.14159", "42"});
+ test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
+}
+
+TEST_F(AsStringGraphTest, FloatPrecisionOnly) {
+ TF_ASSERT_OK(Init(DT_FLOAT, /*fill=*/"", /*width=*/-1, /*precision=*/2));
+
+ AddInputFromArray<float>(TensorShape({4}), {-42, 0, 3.14159, 42});
+ TF_ASSERT_OK(RunOpKernel());
+ Tensor expected(allocator(), DT_STRING, TensorShape({4}));
+ test::FillValues<tstring>(&expected, {"-42.00", "0.00", "3.14", "42.00"});
+ test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
+}
+
+TEST_F(AsStringGraphTest, FloatWidthOnly) {
+ TF_ASSERT_OK(Init(DT_FLOAT, /*fill=*/"", /*width=*/5));
+
+ AddInputFromArray<float>(TensorShape({4}), {-42, 0, 3.14159, 42});
+ TF_ASSERT_OK(RunOpKernel());
+ Tensor expected(allocator(), DT_STRING, TensorShape({4}));
+ test::FillValues<tstring>(
+ &expected, {"-42.000000", "0.000000", "3.141590", "42.000000"});
+ test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
+}
+
+TEST_F(AsStringGraphTest, Float_5_2_Format) {
+ TF_ASSERT_OK(Init(DT_FLOAT, /*fill=*/"", /*width=*/5, /*precision=*/2));
+
+ AddInputFromArray<float>(TensorShape({4}), {-42, 0, 3.14159, 42});
+ TF_ASSERT_OK(RunOpKernel());
+ Tensor expected(allocator(), DT_STRING, TensorShape({4}));
+ test::FillValues<tstring>(&expected, {"-42.00", " 0.00", " 3.14", "42.00"});
+ test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
+}
+
+TEST_F(AsStringGraphTest, Complex) {
+ TF_ASSERT_OK(Init(DT_COMPLEX64, /*fill=*/"", /*width=*/5, /*precision=*/2));
+
+ AddInputFromArray<complex64>(TensorShape({3}), {{-4, 2}, {0}, {3.14159, -1}});
+ TF_ASSERT_OK(RunOpKernel());
+ Tensor expected(allocator(), DT_STRING, TensorShape({3}));
+ test::FillValues<tstring>(
+ &expected, {"(-4.00, 2.00)", "( 0.00, 0.00)", "( 3.14,-1.00)"});
+ test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
+}
+
+TEST_F(AsStringGraphTest, Bool) {
+ TF_ASSERT_OK(Init(DT_BOOL));
+
+ AddInputFromArray<bool>(TensorShape({2}), {true, false});
+ TF_ASSERT_OK(RunOpKernel());
+ Tensor expected(allocator(), DT_STRING, TensorShape({2}));
+ test::FillValues<tstring>(&expected, {"true", "false"});
+ test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
+}
+
+TEST_F(AsStringGraphTest, String) {
+ Status s = Init(DT_STRING);
+ ASSERT_EQ(error::INVALID_ARGUMENT, s.code());
+ ASSERT_TRUE(absl::StrContains(
+ s.error_message(),
+ "Value for attr 'T' of string is not in the list of allowed values"));
+}
+
+TEST_F(AsStringGraphTest, OnlyOneOfScientificAndShortest) {
+ Status s = Init(DT_FLOAT, /*fill=*/"", /*width=*/-1, /*precision=*/-1,
+ /*scientific=*/true, /*shortest=*/true);
+ ASSERT_EQ(error::INVALID_ARGUMENT, s.code());
+ ASSERT_TRUE(
+ absl::StrContains(s.error_message(),
+ "Cannot select both scientific and shortest notation"));
+}
+
+TEST_F(AsStringGraphTest, NoShortestForNonFloat) {
+ Status s = Init(DT_INT32, /*fill=*/"", /*width=*/-1, /*precision=*/-1,
+ /*scientific=*/false, /*shortest=*/true);
+ ASSERT_EQ(error::INVALID_ARGUMENT, s.code());
+ ASSERT_TRUE(absl::StrContains(
+ s.error_message(),
+ "scientific and shortest format not supported for datatype"));
+}
+
+TEST_F(AsStringGraphTest, NoScientificForNonFloat) {
+ Status s = Init(DT_INT32, /*fill=*/"", /*width=*/-1, /*precision=*/-1,
+ /*scientific=*/true);
+ ASSERT_EQ(error::INVALID_ARGUMENT, s.code());
+ ASSERT_TRUE(absl::StrContains(
+ s.error_message(),
+ "scientific and shortest format not supported for datatype"));
+}
+
+TEST_F(AsStringGraphTest, NoPrecisionForNonFloat) {
+ Status s = Init(DT_INT32, /*fill=*/"", /*width=*/-1, /*precision=*/5);
+ ASSERT_EQ(error::INVALID_ARGUMENT, s.code());
+ ASSERT_TRUE(absl::StrContains(s.error_message(),
+ "precision not supported for datatype"));
+}
+
+TEST_F(AsStringGraphTest, LongFill) {
+ Status s = Init(DT_INT32, /*fill=*/"asdf");
+ ASSERT_EQ(error::INVALID_ARGUMENT, s.code());
+ ASSERT_TRUE(absl::StrContains(s.error_message(),
+ "Fill string must be one or fewer characters"));
+}
+
+TEST_F(AsStringGraphTest, FillWithZero) {
+ TF_ASSERT_OK(Init(DT_INT64, /*fill=*/"0", /*width=*/4));
+
+ AddInputFromArray<int64>(TensorShape({3}), {-42, 0, 42});
+ TF_ASSERT_OK(RunOpKernel());
+ Tensor expected(allocator(), DT_STRING, TensorShape({3}));
+ test::FillValues<tstring>(&expected, {"-042", "0000", "0042"});
+ test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
+}
+
+TEST_F(AsStringGraphTest, FillWithSpace) {
+ TF_ASSERT_OK(Init(DT_INT64, /*fill=*/" ", /*width=*/4));
+
+ AddInputFromArray<int64>(TensorShape({3}), {-42, 0, 42});
+ TF_ASSERT_OK(RunOpKernel());
+ Tensor expected(allocator(), DT_STRING, TensorShape({3}));
+ test::FillValues<tstring>(&expected, {" -42", " 0", " 42"});
+ test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
+}
+
+TEST_F(AsStringGraphTest, FillWithChar1) {
+ TF_ASSERT_OK(Init(DT_INT64, /*fill=*/"-", /*width=*/4));
+
+ AddInputFromArray<int64>(TensorShape({3}), {-42, 0, 42});
+ TF_ASSERT_OK(RunOpKernel());
+ Tensor expected(allocator(), DT_STRING, TensorShape({3}));
+ test::FillValues<tstring>(&expected, {"-42 ", "0 ", "42 "});
+ test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
+}
+
+TEST_F(AsStringGraphTest, FillWithChar3) {
+ Status s = Init(DT_INT32, /*fill=*/"s");
+ ASSERT_EQ(error::INVALID_ARGUMENT, s.code());
+ ASSERT_TRUE(
+ absl::StrContains(s.error_message(), "Fill argument not supported"));
+}
+
+TEST_F(AsStringGraphTest, FillWithChar4) {
+ Status s = Init(DT_INT32, /*fill=*/"n");
+ ASSERT_EQ(error::INVALID_ARGUMENT, s.code());
+ ASSERT_TRUE(
+ absl::StrContains(s.error_message(), "Fill argument not supported"));
+}
+
+} // end namespace
+} // end namespace tensorflow
diff --git a/tensorflow/core/kernels/boosted_trees/prediction_ops.cc b/tensorflow/core/kernels/boosted_trees/prediction_ops.cc
index 19be606f184939..e3a908d1b6b20d 100644
--- a/tensorflow/core/kernels/boosted_trees/prediction_ops.cc
+++ b/tensorflow/core/kernels/boosted_trees/prediction_ops.cc
@@ -121,7 +121,7 @@ class BoostedTreesTrainingPredictOp : public OpKernel {
auto do_work = [&resource, &bucketized_features, &cached_tree_ids,
&cached_node_ids, &output_partial_logits,
&output_node_ids, latest_tree,
- this](int32 start, int32 end) {
+ this](int64 start, int64 end) {
for (int32 i = start; i < end; ++i) {
int32 tree_id = cached_tree_ids(i);
int32 node_id = cached_node_ids(i);
@@ -237,7 +237,7 @@ class BoostedTreesPredictOp : public OpKernel {
const int32 last_tree = resource->num_trees() - 1;
auto do_work = [&resource, &bucketized_features, &output_logits, last_tree,
- this](int32 start, int32 end) {
+ this](int64 start, int64 end) {
for (int32 i = start; i < end; ++i) {
std::vector<float> tree_logits(logits_dimension_, 0.0);
int32 tree_id = 0;
@@ -340,7 +340,7 @@ class BoostedTreesExampleDebugOutputsOp : public OpKernel {
// path. Note: feature_ids has one less value than logits_path because the
// first value of each logit path will be the bias.
auto do_work = [&resource, &bucketized_features, &output_debug_info,
- last_tree](int32 start, int32 end) {
+ last_tree](int64 start, int64 end) {
for (int32 i = start; i < end; ++i) {
// Proto to store debug outputs, per example.
boosted_trees::DebugOutput example_debug_info;
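
The lambda signatures here change from `int32` to `int64` range arguments so that shard boundaries handed out by the work sharder are no longer narrowed, in line with the integer-truncation issue called out for the work sharder API in the 2.2.1 notes above (CVE-2020-15202). A tiny, framework-independent illustration of the narrowing problem:

    // Sketch: an int64 shard endpoint above 2^31-1 wraps when the worker
    // callback takes int32 arguments, producing a bogus (negative) range.
    #include <cstdint>
    #include <iostream>

    int main() {
      const int64_t end = (int64_t{1} << 31) + 5;                 // 2147483653
      auto narrow = [](int32_t s, int32_t e) { return int64_t{e} - s; };
      auto wide   = [](int64_t s, int64_t e) { return e - s; };
      std::cout << narrow(0, static_cast<int32_t>(end)) << "\n";  // -2147483643
      std::cout << wide(0, end) << "\n";                          // 2147483653
      return 0;
    }
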
diff --git a/tensorflow/core/kernels/conv_grad_filter_ops.cc b/tensorflow/core/kernels/conv_grad_filter_ops.cc
index f9bf64f2df3a91..220fc3629df476 100644
--- a/tensorflow/core/kernels/conv_grad_filter_ops.cc
+++ b/tensorflow/core/kernels/conv_grad_filter_ops.cc
@@ -496,6 +496,14 @@ class Conv2DCustomBackpropFilterOp : public OpKernel {
const int filter_total_size = dims.spatial_dims[0].filter_size *
dims.spatial_dims[1].filter_size *
dims.in_depth;
+ OP_REQUIRES(
+ context,
+ filter_total_size * dims.out_depth == filter_backprop->NumElements(),
+ errors::InvalidArgument(
+ "filter_size does not have enough elements, requested ",
+ filter_total_size * dims.out_depth, ", got ",
+ filter_backprop->NumElements()));
+
// The output image size is the spatial size of the output.
const int output_image_size =
dims.spatial_dims[0].output_size * dims.spatial_dims[1].output_size;
@@ -519,6 +527,11 @@ class Conv2DCustomBackpropFilterOp : public OpKernel {
const size_t work_unit_size = size_A + size_B + size_C;
+ OP_REQUIRES(
+ context, work_unit_size != 0,
+ errors::InvalidArgument(
+ "Work size for convolution would be 0, which is not acceptable"));
+
const size_t shard_size =
(target_working_set_size + work_unit_size - 1) / work_unit_size;
diff --git a/tensorflow/core/kernels/conv_grad_input_ops.cc b/tensorflow/core/kernels/conv_grad_input_ops.cc
index be5d821fc32fa6..9fb79fbac68eb0 100644
--- a/tensorflow/core/kernels/conv_grad_input_ops.cc
+++ b/tensorflow/core/kernels/conv_grad_input_ops.cc
@@ -673,6 +673,11 @@ class Conv2DCustomBackpropInputOp : public OpKernel {
dims.batch_size == 1 ||
thread_work_unit_size >= min_thread_work_unit_size;
+ OP_REQUIRES(
+ context, work_unit_size > 0,
+ errors::InvalidArgument("input, filter_sizes and out_backprop tensors "
+ "must all have at least 1 element"));
+
const size_t shard_size =
use_parallel_contraction
? 1
diff --git a/tensorflow/core/kernels/conv_grad_ops_3d.cc b/tensorflow/core/kernels/conv_grad_ops_3d.cc
index bc6c64963addc4..152f33fe472f7e 100644
--- a/tensorflow/core/kernels/conv_grad_ops_3d.cc
+++ b/tensorflow/core/kernels/conv_grad_ops_3d.cc
@@ -238,6 +238,28 @@ class Conv3DBackpropInputOp : public OpKernel {
input_shape = context->input(0).shape();
}
+ OP_REQUIRES(context, input_shape.dims() == 5,
+ errors::InvalidArgument("input tensor must have 5 dimensions"));
+ OP_REQUIRES(
+ context, filter_shape.dims() == 5,
+ errors::InvalidArgument("filter_sizes tensor must have 5 dimensions"));
+ OP_REQUIRES(
+ context, out_backprop_shape.dims() == 5,
+ errors::InvalidArgument("out_backprop tensor must have 5 dimensions"));
+ OP_REQUIRES(
+ context, input_shape.dim_size(4) == filter_shape.dim_size(3),
+ errors::InvalidArgument("input and filter_sizes must have the same "
+ "number of channels. Got ",
+ input_shape.dim_size(4), " for input and ",
+ filter_shape.dim_size(3), " for filter_sizes"));
+ OP_REQUIRES(
+ context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4),
+ errors::InvalidArgument("out_backprop and filter_sizes must have the "
+ "same number of channels. Got ",
+ out_backprop_shape.dim_size(4),
+ " for out_backprop and ",
+ filter_shape.dim_size(4), " for filter_sizes"));
+
ConvBackpropDimensions dims;
OP_REQUIRES_OK(context, ConvBackpropComputeDimensions(
"Conv3DBackpropInputOp", /*num_spatial_dims=*/3,
@@ -345,6 +367,28 @@ class Conv3DCustomBackpropInputOp : public OpKernel {
input_shape = context->input(0).shape();
}
+ OP_REQUIRES(context, input_shape.dims() == 5,
+ errors::InvalidArgument("input tensor must have 5 dimensions"));
+ OP_REQUIRES(
+ context, filter_shape.dims() == 5,
+ errors::InvalidArgument("filter_sizes tensor must have 5 dimensions"));
+ OP_REQUIRES(
+ context, out_backprop_shape.dims() == 5,
+ errors::InvalidArgument("out_backprop tensor must have 5 dimensions"));
+ OP_REQUIRES(
+ context, input_shape.dim_size(4) == filter_shape.dim_size(3),
+ errors::InvalidArgument("input and filter_sizes must have the same "
+ "number of channels. Got ",
+ input_shape.dim_size(4), " for input and ",
+ filter_shape.dim_size(3), " for filter_sizes"));
+ OP_REQUIRES(
+ context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4),
+ errors::InvalidArgument("out_backprop and filter_sizes must have the "
+ "same number of channels. Got ",
+ out_backprop_shape.dim_size(4),
+ " for out_backprop and ",
+ filter_shape.dim_size(4), " for filter_sizes"));
+
ConvBackpropDimensions dims;
OP_REQUIRES_OK(context, ConvBackpropComputeDimensions(
"Conv3DBackpropInputOp", /*num_spatial_dims=*/3,
@@ -415,6 +459,11 @@ class Conv3DCustomBackpropInputOp : public OpKernel {
// contraction compared to sharding and matmuls.
const bool use_parallel_contraction = dims.batch_size == 1;
+ OP_REQUIRES(
+ context, work_unit_size > 0,
+ errors::InvalidArgument("input, filter_sizes and out_backprop tensors "
+ "must all have at least 1 element"));
+
const size_t shard_size =
use_parallel_contraction
? 1
@@ -695,6 +744,28 @@ class Conv3DBackpropFilterOp : public OpKernel {
filter_shape = context->input(1).shape();
}
+ OP_REQUIRES(context, input_shape.dims() == 5,
+ errors::InvalidArgument("input tensor must have 5 dimensions"));
+ OP_REQUIRES(
+ context, filter_shape.dims() == 5,
+ errors::InvalidArgument("filter_sizes tensor must have 5 dimensions"));
+ OP_REQUIRES(
+ context, out_backprop_shape.dims() == 5,
+ errors::InvalidArgument("out_backprop tensor must have 5 dimensions"));
+ OP_REQUIRES(
+ context, input_shape.dim_size(4) == filter_shape.dim_size(3),
+ errors::InvalidArgument("input and filter_sizes must have the same "
+ "number of channels. Got ",
+ input_shape.dim_size(4), " for input and ",
+ filter_shape.dim_size(3), " for filter_sizes"));
+ OP_REQUIRES(
+ context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4),
+ errors::InvalidArgument("out_backprop and filter_sizes must have the "
+ "same number of channels. Got ",
+ out_backprop_shape.dim_size(4),
+ " for out_backprop and ",
+ filter_shape.dim_size(4), " for filter_sizes"));
+
ConvBackpropDimensions dims;
OP_REQUIRES_OK(context,
ConvBackpropComputeDimensions(
@@ -807,6 +878,28 @@ class Conv3DCustomBackpropFilterOp : public OpKernel {
filter_shape = context->input(1).shape();
}
+ OP_REQUIRES(context, input_shape.dims() == 5,
+ errors::InvalidArgument("input tensor must have 5 dimensions"));
+ OP_REQUIRES(
+ context, filter_shape.dims() == 5,
+ errors::InvalidArgument("filter_sizes tensor must have 5 dimensions"));
+ OP_REQUIRES(
+ context, out_backprop_shape.dims() == 5,
+ errors::InvalidArgument("out_backprop tensor must have 5 dimensions"));
+ OP_REQUIRES(
+ context, input_shape.dim_size(4) == filter_shape.dim_size(3),
+ errors::InvalidArgument("input and filter_sizes must have the same "
+ "number of channels. Got ",
+ input_shape.dim_size(4), " for input and ",
+ filter_shape.dim_size(3), " for filter_sizes"));
+ OP_REQUIRES(
+ context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4),
+ errors::InvalidArgument("out_backprop and filter_sizes must have the "
+ "same number of channels. Got ",
+ out_backprop_shape.dim_size(4),
+ " for out_backprop and ",
+ filter_shape.dim_size(4), " for filter_sizes"));
+
ConvBackpropDimensions dims;
OP_REQUIRES_OK(context,
ConvBackpropComputeDimensions(
@@ -879,6 +972,11 @@ class Conv3DCustomBackpropFilterOp : public OpKernel {
const int64 work_unit_size = size_A + size_B + size_C;
+ OP_REQUIRES(
+ context, work_unit_size > 0,
+ errors::InvalidArgument("input, filter_sizes and out_backprop tensors "
+ "must all have at least 1 element"));
+
const size_t shard_size =
(target_working_set_size + work_unit_size - 1) / work_unit_size;
diff --git a/tensorflow/core/kernels/conv_grad_shape_utils.cc b/tensorflow/core/kernels/conv_grad_shape_utils.cc
index 7857257658fcff..ce90e866342311 100644
--- a/tensorflow/core/kernels/conv_grad_shape_utils.cc
+++ b/tensorflow/core/kernels/conv_grad_shape_utils.cc
@@ -126,6 +126,10 @@ Status ConvBackpropComputeDimensionsV2(
// dimensions of the filter Tensor.
VLOG(2) << "input vs filter_in depth " << dims->in_depth << " "
<< filter_shape.dim_size(num_dims - 2);
+ if (filter_shape.dim_size(num_dims - 2) <= 0) {
+ return errors ::InvalidArgument(
+ label, ": filter depth must be strictly greated than zero");
+ }
if (dims->in_depth % filter_shape.dim_size(num_dims - 2)) {
return errors::InvalidArgument(
label, ": input depth must be evenly divisible by filter depth");
diff --git a/tensorflow/core/kernels/conv_ops.cc b/tensorflow/core/kernels/conv_ops.cc
index d265e9d8f8be95..396a9266e08ee3 100644
--- a/tensorflow/core/kernels/conv_ops.cc
+++ b/tensorflow/core/kernels/conv_ops.cc
@@ -424,6 +424,9 @@ Status ComputeConv2DDimension(const Conv2DParameters& params,
errors::InvalidArgument("Patch depth too large"));
const int in_depth = static_cast<int>(in_depth_raw);
const int patch_depth = static_cast<int>(patch_depth_raw);
+ TF_REQUIRES(patch_depth > 0,
+ errors::InvalidArgument(
+ "filter depth must be stricly positive, got ", patch_depth));
TF_REQUIRES(in_depth % patch_depth == 0,
errors::InvalidArgument(
"input depth must be evenly divisible by filter depth: ",
diff --git a/tensorflow/core/kernels/conv_ops_3d.cc b/tensorflow/core/kernels/conv_ops_3d.cc
index e9e11aebf6191e..5d2ad14c33b3ba 100644
--- a/tensorflow/core/kernels/conv_ops_3d.cc
+++ b/tensorflow/core/kernels/conv_ops_3d.cc
@@ -67,6 +67,11 @@ struct LaunchConvOp {
errors::InvalidArgument("CPU implementation of Conv3D "
"currently only supports dilated rates "
"of 1."));
+ OP_REQUIRES(context, filter.dim_size(3) == input.dim_size(input.dims() - 1),
+ errors::InvalidArgument(
+ "Number of channels in filter (", filter.dim_size(3),
+ ") must match last dimension of input (",
+ input.dim_size(input.dims() - 1), ")"));
functor::CuboidConvolution<CPUDevice, T>()(
context->eigen_device<CPUDevice>(), output->tensor<T, 5>(),
input.tensor<T, 5>(), filter.tensor<T, 5>(), strides[2], strides[1],
@@ -140,6 +145,8 @@ class Conv3DOp : public BinaryOp {
const int64 filter_depth = filter.dim_size(3);
const int64 out_depth = filter.dim_size(4);
+ OP_REQUIRES(context, filter_depth != 0,
+ errors::InvalidArgument("filter_depth must be non-zero"));
OP_REQUIRES(context, in_depth % filter_depth == 0,
errors::InvalidArgument(
"Input depth must be evenly divisible by filter depth: ",
diff --git a/tensorflow/core/kernels/ctc_decoder_ops.cc b/tensorflow/core/kernels/ctc_decoder_ops.cc
index 517612eecb6057..60b10107537408 100644
--- a/tensorflow/core/kernels/ctc_decoder_ops.cc
+++ b/tensorflow/core/kernels/ctc_decoder_ops.cc
@@ -70,6 +70,9 @@ class CTCDecodeHelper {
if (inputs_shape.dims() != 3) {
return errors::InvalidArgument("inputs is not a 3-Tensor");
}
+ if (inputs_shape.num_elements() == 0) {
+ return errors::InvalidArgument("inputs must not be empty");
+ }
const int64 max_time = inputs_shape.dim_size(0);
const int64 batch_size = inputs_shape.dim_size(1);
@@ -224,6 +227,8 @@ class CTCGreedyDecoderOp : public OpKernel {
int prev_indices = -1;
for (int t = 0; t < seq_len_t(b); ++t) {
int max_class_indices;
+ OP_REQUIRES(ctx, input_list_t[t].dimension(1) > 0,
+ errors::InvalidArgument("Invalid input dimensions."));
log_prob_t(b, 0) +=
-RowMax(input_list_t[t], b, &max_class_indices);
if (max_class_indices != blank_index &&
diff --git a/tensorflow/core/kernels/ctc_loss_op.cc b/tensorflow/core/kernels/ctc_loss_op.cc
index 6358e82fdda853..ca505e1db93145 100644
--- a/tensorflow/core/kernels/ctc_loss_op.cc
+++ b/tensorflow/core/kernels/ctc_loss_op.cc
@@ -100,11 +100,18 @@ class CTCLossOp : public OpKernel {
errors::InvalidArgument("sequence_length is not a vector"));
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(labels_indices->shape()),
errors::InvalidArgument("labels_indices is not a matrix"));
+ OP_REQUIRES(ctx, labels_indices->dim_size(1) > 1,
+ errors::InvalidArgument(
+ "labels_indices second dimension must be >= 1. Received ",
+ labels_indices->dim_size(1)));
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(labels_values->shape()),
errors::InvalidArgument("labels_values is not a vector"));
const TensorShape& inputs_shape = inputs->shape();
const int64 max_time = inputs_shape.dim_size(0);
+ OP_REQUIRES(ctx, max_time != 0,
+ errors::InvalidArgument(
+ "Max time or first dimension of input cannot be 0."));
const int64 batch_size = inputs_shape.dim_size(1);
const int64 num_classes_raw = inputs_shape.dim_size(2);
OP_REQUIRES(
diff --git a/tensorflow/core/kernels/data_format_ops.cc b/tensorflow/core/kernels/data_format_ops.cc
index 0b4241dbb9312c..109e6e32d5dd91 100644
--- a/tensorflow/core/kernels/data_format_ops.cc
+++ b/tensorflow/core/kernels/data_format_ops.cc
@@ -18,16 +18,52 @@ limitations under the License.
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/data_format_ops.h"
+
+#include