From 00615b43cbf48d30c215eb8b1b6f2f5a09f1f743 Mon Sep 17 00:00:00 2001 From: Lukas Geiger Date: Sat, 11 Jul 2020 00:26:13 +0200 Subject: [PATCH 001/256] Fix deprecated usage of collections ABC --- tensorflow/python/compiler/xla/xla.py | 14 +++++++------- tensorflow/python/data/ops/dataset_ops.py | 4 ++-- tensorflow/python/data/ops/iterator_ops.py | 4 ++-- tensorflow/python/data/util/structure.py | 2 +- tensorflow/python/debug/wrappers/framework.py | 4 ++-- tensorflow/python/distribute/input_lib.py | 6 +++--- tensorflow/python/framework/indexed_slices.py | 3 ++- tensorflow/python/keras/engine/training_utils.py | 9 ++++----- .../preprocessing/preprocessing_test_utils.py | 5 ++--- tensorflow/python/keras/layers/recurrent.py | 6 +----- tensorflow/python/ops/math_ops.py | 9 ++++----- tensorflow/python/ops/nn_ops.py | 3 +-- tensorflow/python/ops/variable_scope.py | 12 ++++++------ tensorflow/python/tools/saved_model_cli.py | 4 ++-- 14 files changed, 39 insertions(+), 46 deletions(-) diff --git a/tensorflow/python/compiler/xla/xla.py b/tensorflow/python/compiler/xla/xla.py index 5b19dc4ec5fc5d..b68640f9b428b5 100644 --- a/tensorflow/python/compiler/xla/xla.py +++ b/tensorflow/python/compiler/xla/xla.py @@ -18,7 +18,6 @@ from __future__ import division from __future__ import print_function -import collections import contextlib from six.moves import xrange # pylint: disable=redefined-builtin @@ -37,6 +36,7 @@ from tensorflow.python.util import compat from tensorflow.python.util import nest from tensorflow.python.util import tf_inspect +from tensorflow.python.util.compat import collections_abc from tensorflow.python.util.tf_export import tf_export _XLA_COMPILE_ATTR = '_xla_compile_id' @@ -329,7 +329,7 @@ def _compile_internal(computation, inputs=None): if inputs is None: inputs = [] - if not isinstance(inputs, collections.Sequence): + if not isinstance(inputs, collections_abc.Sequence): raise TypeError('inputs must be a list') # Flatten inputs. @@ -428,15 +428,15 @@ def is_flat(outputs): """ # If outputs is a list or tuple, check if it has any nested structure. If # there is, then outputs is non-flat. - if isinstance(outputs, collections.Sequence): + if isinstance(outputs, collections_abc.Sequence): for o in outputs: - if (isinstance(o, collections.Sequence) or - isinstance(o, collections.Mapping) or + if (isinstance(o, collections_abc.Sequence) or + isinstance(o, collections_abc.Mapping) or hasattr(o.__class__, '__attrs_attrs__')): return False # If outputs is a dict, it is non-flat. - if isinstance(outputs, collections.Mapping): + if isinstance(outputs, collections_abc.Mapping): return False # If outputs is from the attrs library, it is non-flat. @@ -467,7 +467,7 @@ def _postprocess_flat_outputs(outputs): if outputs is None: outputs = tuple() # If the computation only returned one value, make it a tuple. 
- if not isinstance(outputs, collections.Sequence): + if not isinstance(outputs, collections_abc.Sequence): outputs = (outputs,) # Append `no_op` here so that return value of this function always contains diff --git a/tensorflow/python/data/ops/dataset_ops.py b/tensorflow/python/data/ops/dataset_ops.py index 586b82e9ca658e..d82db49f4aa850 100644 --- a/tensorflow/python/data/ops/dataset_ops.py +++ b/tensorflow/python/data/ops/dataset_ops.py @@ -18,7 +18,6 @@ from __future__ import print_function import abc -import collections import functools import sys import threading @@ -72,6 +71,7 @@ from tensorflow.python.util import function_utils from tensorflow.python.util import lazy_loader from tensorflow.python.util import nest as tf_nest +from tensorflow.python.util.compat import collections_abc from tensorflow.python.util.tf_export import tf_export # Loaded lazily due to a circular dependency (roughly @@ -103,7 +103,7 @@ @tf_export("data.Dataset", v1=[]) @six.add_metaclass(abc.ABCMeta) -class DatasetV2(collections.Iterable, tracking_base.Trackable, +class DatasetV2(collections_abc.Iterable, tracking_base.Trackable, composite_tensor.CompositeTensor): """Represents a potentially large set of elements. diff --git a/tensorflow/python/data/ops/iterator_ops.py b/tensorflow/python/data/ops/iterator_ops.py index 36e26e2938461c..5bb9e0e6598832 100644 --- a/tensorflow/python/data/ops/iterator_ops.py +++ b/tensorflow/python/data/ops/iterator_ops.py @@ -18,7 +18,6 @@ from __future__ import print_function import abc -import collections import threading import warnings @@ -41,6 +40,7 @@ from tensorflow.python.training.saver import BaseSaverBuilder from tensorflow.python.training.tracking import base as trackable from tensorflow.python.util import deprecation +from tensorflow.python.util.compat import collections_abc from tensorflow.python.util.tf_export import tf_export @@ -543,7 +543,7 @@ def __del__(self): @tf_export("data.Iterator", v1=[]) @six.add_metaclass(abc.ABCMeta) -class IteratorBase(collections.Iterator, trackable.Trackable, +class IteratorBase(collections_abc.Iterator, trackable.Trackable, composite_tensor.CompositeTensor): """Represents an iterator of a `tf.data.Dataset`. diff --git a/tensorflow/python/data/util/structure.py b/tensorflow/python/data/util/structure.py index 87825005069bdf..30e393c82def13 100644 --- a/tensorflow/python/data/util/structure.py +++ b/tensorflow/python/data/util/structure.py @@ -440,7 +440,7 @@ def type_spec_from_value(element, use_fallback=True): if isinstance(element, tuple): if hasattr(element, "_fields") and isinstance( - element._fields, collections.Sequence) and all( + element._fields, collections_abc.Sequence) and all( isinstance(f, six.string_types) for f in element._fields): if isinstance(element, wrapt.ObjectProxy): element_type = type(element.__wrapped__) diff --git a/tensorflow/python/debug/wrappers/framework.py b/tensorflow/python/debug/wrappers/framework.py index 9b107fe9a2b0ca..bfbcf3eae02beb 100644 --- a/tensorflow/python/debug/wrappers/framework.py +++ b/tensorflow/python/debug/wrappers/framework.py @@ -99,7 +99,6 @@ from __future__ import print_function import abc -import collections import re import threading @@ -113,6 +112,7 @@ from tensorflow.python.platform import tf_logging from tensorflow.python.training import monitored_session from tensorflow.python.util import nest +from tensorflow.python.util.compat import collections_abc # Helper function. 
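For context on the substitution made throughout this patch: the abstract base classes (`Sequence`, `Mapping`, `Iterable`, `Iterator`) live in `collections.abc` on Python 3; importing them through the top-level `collections` module was deprecated in Python 3.3 and stops working in Python 3.10, hence the switch to the `collections_abc` alias from `tensorflow.python.util.compat`. Below is a minimal standalone sketch of the `is_flat` check from `xla.py` above (simplified: the attrs case is omitted), using the standard-library module that the compat alias resolves to on Python 3:

    from collections import abc as collections_abc  # stdlib equivalent of the TF compat alias on Python 3

    def is_flat(outputs):
        """Simplified version of the xla.py check: True when `outputs` has no nesting."""
        if isinstance(outputs, collections_abc.Mapping):
            return False
        if isinstance(outputs, collections_abc.Sequence):
            return not any(
                isinstance(o, (collections_abc.Sequence, collections_abc.Mapping))
                for o in outputs)
        return True

    print(is_flat([1, 2, 3]))      # True: a flat sequence
    print(is_flat({"logits": 1}))  # False: mappings count as nested
    print(is_flat([[1, 2], 3]))    # False: a sequence inside a sequence

The same `isinstance` checks carry over unchanged to the other files touched by this patch; only the module the ABCs are looked up on differs.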
@@ -445,7 +445,7 @@ def is_empty(x): """Check whether a possibly nested structure is empty.""" if not nest.is_nested(x): return False - if isinstance(x, collections.Mapping): + if isinstance(x, collections_abc.Mapping): return is_empty(list(x.values())) for item in x: if not is_empty(item): diff --git a/tensorflow/python/distribute/input_lib.py b/tensorflow/python/distribute/input_lib.py index 74268999de0789..ff41f172c9bf95 100644 --- a/tensorflow/python/distribute/input_lib.py +++ b/tensorflow/python/distribute/input_lib.py @@ -18,7 +18,6 @@ from __future__ import division from __future__ import print_function -import collections import functools import sys @@ -53,6 +52,7 @@ from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.types import distribute as distribute_types from tensorflow.python.util import nest +from tensorflow.python.util.compat import collections_abc from tensorflow.python.util.deprecation import deprecated from tensorflow.python.util.tf_export import tf_export from tensorflow.tools.docs import doc_controls @@ -143,7 +143,7 @@ def get_distributed_datasets_from_function(dataset_fn, @tf_export("distribute.DistributedIterator", v1=[]) -class DistributedIteratorInterface(collections.Iterator, +class DistributedIteratorInterface(collections_abc.Iterator, distribute_types.Iterator): """An iterator over `tf.distribute.DistributedDataset`. @@ -272,7 +272,7 @@ def get_next_as_optional(self): @tf_export("distribute.DistributedDataset", v1=[]) -class DistributedDatasetInterface(collections.Iterable, +class DistributedDatasetInterface(collections_abc.Iterable, distribute_types.Iterable): # pylint: disable=line-too-long """Represents a dataset distributed among devices and machines. diff --git a/tensorflow/python/framework/indexed_slices.py b/tensorflow/python/framework/indexed_slices.py index 6ddf9410fd7385..45f6e254b0ef31 100644 --- a/tensorflow/python/framework/indexed_slices.py +++ b/tensorflow/python/framework/indexed_slices.py @@ -32,6 +32,7 @@ from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import type_spec from tensorflow.python.types import internal +from tensorflow.python.util.compat import collections_abc from tensorflow.python.util.lazy_loader import LazyLoader from tensorflow.python.util.tf_export import tf_export @@ -344,7 +345,7 @@ def internal_convert_n_to_tensor_or_indexed_slices(values, RuntimeError: If a registered conversion function returns an invalid value. 
""" - if not isinstance(values, collections.Iterable): + if not isinstance(values, collections_abc.Iterable): raise TypeError("values must be iterable.") ret = [] for i, value in enumerate(values): diff --git a/tensorflow/python/keras/engine/training_utils.py b/tensorflow/python/keras/engine/training_utils.py index 0d7637cb98cb56..1dd64f5e5b8d23 100644 --- a/tensorflow/python/keras/engine/training_utils.py +++ b/tensorflow/python/keras/engine/training_utils.py @@ -19,7 +19,6 @@ import abc import atexit -import collections from collections import OrderedDict import functools import multiprocessing.pool @@ -617,7 +616,7 @@ def standardize_sample_or_class_weights(x_weight, output_names, weight_type): 'You should provide one `' + weight_type + '`' 'array per model output.') return x_weight - if isinstance(x_weight, collections.Mapping): + if isinstance(x_weight, collections_abc.Mapping): generic_utils.check_for_unexpected_keys(weight_type, x_weight, output_names) x_weights = [] for name in output_names: @@ -864,7 +863,7 @@ def collect_per_output_metric_info(metrics, [metrics_module.clone_metric(m) for m in metrics]) else: nested_metrics = [metrics] - elif isinstance(metrics, collections.Mapping): + elif isinstance(metrics, collections_abc.Mapping): generic_utils.check_for_unexpected_keys('metrics', metrics, output_names) nested_metrics = [] for name in output_names: @@ -1443,7 +1442,7 @@ def prepare_sample_weight_modes(training_endpoints, sample_weight_mode): ValueError: In case of invalid `sample_weight_mode` input. """ - if isinstance(sample_weight_mode, collections.Mapping): + if isinstance(sample_weight_mode, collections_abc.Mapping): generic_utils.check_for_unexpected_keys( 'sample_weight_mode', sample_weight_mode, [e.output_name for e in training_endpoints]) @@ -1536,7 +1535,7 @@ def prepare_loss_weights(training_endpoints, loss_weights=None): if loss_weights is None: for e in training_endpoints: e.loss_weight = 1. 
- elif isinstance(loss_weights, collections.Mapping): + elif isinstance(loss_weights, collections_abc.Mapping): generic_utils.check_for_unexpected_keys( 'loss_weights', loss_weights, [e.output_name for e in training_endpoints]) diff --git a/tensorflow/python/keras/layers/preprocessing/preprocessing_test_utils.py b/tensorflow/python/keras/layers/preprocessing/preprocessing_test_utils.py index 006cab1fb1197a..91545b8ee28eef 100644 --- a/tensorflow/python/keras/layers/preprocessing/preprocessing_test_utils.py +++ b/tensorflow/python/keras/layers/preprocessing/preprocessing_test_utils.py @@ -18,11 +18,10 @@ from __future__ import division from __future__ import print_function -import collections - import numpy as np from tensorflow.python.platform import test +from tensorflow.python.util.compat import collections_abc class PreprocessingLayerTest(test.TestCase): @@ -38,7 +37,7 @@ def assertAllCloseOrEqual(self, a, b, msg=None): self.assertEqual(len(a), len(b)) for a_value, b_value in zip(a, b): self.assertAllCloseOrEqual(a_value, b_value, msg=msg) - elif isinstance(a, collections.Mapping): + elif isinstance(a, collections_abc.Mapping): self.assertEqual(len(a), len(b)) for key, a_value in a.items(): b_value = b[key] diff --git a/tensorflow/python/keras/layers/recurrent.py b/tensorflow/python/keras/layers/recurrent.py index 78a4a33a5339db..4eb368774b80bb 100644 --- a/tensorflow/python/keras/layers/recurrent.py +++ b/tensorflow/python/keras/layers/recurrent.py @@ -44,14 +44,10 @@ from tensorflow.python.training.tracking import base as trackable from tensorflow.python.training.tracking import data_structures from tensorflow.python.util import nest +from tensorflow.python.util.compat import collections_abc from tensorflow.python.util.tf_export import keras_export from tensorflow.tools.docs import doc_controls -try: - from collections import abc as collections_abc # pylint: disable=g-import-not-at-top -except ImportError: # For Python 2 - import collections as collections_abc # pylint: disable=g-import-not-at-top - RECURRENT_DROPOUT_WARNING_MSG = ( 'RNN `implementation=2` is not supported when `recurrent_dropout` is set. ' diff --git a/tensorflow/python/ops/math_ops.py b/tensorflow/python/ops/math_ops.py index 79c74a81d80326..df94410f467847 100644 --- a/tensorflow/python/ops/math_ops.py +++ b/tensorflow/python/ops/math_ops.py @@ -70,8 +70,6 @@ from __future__ import division from __future__ import print_function -import collections - import numpy as np import six from six.moves import builtins @@ -100,6 +98,7 @@ from tensorflow.python.util import deprecation from tensorflow.python.util import dispatch from tensorflow.python.util import nest +from tensorflow.python.util.compat import collections_abc from tensorflow.python.util.tf_export import tf_export # Aliases for some automatically-generated names. @@ -3493,7 +3492,7 @@ def add_n(inputs, name=None): ValueError: If `inputs` don't all have same shape and dtype or the shape cannot be inferred. """ - if not inputs or not isinstance(inputs, collections.Iterable): + if not inputs or not isinstance(inputs, collections_abc.Iterable): raise ValueError("inputs must be an iterable of at least one " "Tensor/IndexedSlices with the same dtype and shape") inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs) @@ -3626,9 +3625,9 @@ def sigmoid(x, name=None): Returns: A Tensor with the same type as `x`. 
- + Usage Example: - + >>> x = tf.constant([-128.0, 0.0, 128.0], dtype=tf.float32) >>> tf.sigmoid(x) Date: Sat, 18 Jul 2020 16:29:23 -0700 Subject: [PATCH 002/256] Resolved merge conflict in core/kernels/BUILD --- tensorflow/core/kernels/BUILD | 4 ++ tensorflow/core/kernels/batch_kernels.cc | 1 + tensorflow/core/kernels/batching_util/BUILD | 24 ++++++++ .../batching_util/threadsafe_status.cc | 51 +++++++++++++++++ .../kernels/batching_util/threadsafe_status.h | 57 +++++++++++++++++++ .../batching_util/threadsafe_status_test.cc | 51 +++++++++++++++++ 6 files changed, 188 insertions(+) create mode 100644 tensorflow/core/kernels/batching_util/threadsafe_status.cc create mode 100644 tensorflow/core/kernels/batching_util/threadsafe_status.h create mode 100644 tensorflow/core/kernels/batching_util/threadsafe_status_test.cc diff --git a/tensorflow/core/kernels/BUILD b/tensorflow/core/kernels/BUILD index 7da864a6027811..cbe0276003e69a 100644 --- a/tensorflow/core/kernels/BUILD +++ b/tensorflow/core/kernels/BUILD @@ -672,6 +672,10 @@ cc_library( "//tensorflow/core:protos_all_cc", "//tensorflow/core/kernels/batching_util:periodic_function_dynamic", "//tensorflow/core/kernels/batching_util:shared_batch_scheduler_hdrs", + "//tensorflow/core/kernels/batching_util:threadsafe_status", + "//tensorflow/core/util:incremental_barrier", + "@com_google_absl//absl/container:flat_hash_map", + "@com_google_absl//absl/strings", ], alwayslink = 1, ) diff --git a/tensorflow/core/kernels/batch_kernels.cc b/tensorflow/core/kernels/batch_kernels.cc index 6449a399573e1b..269b4d412cc8e3 100644 --- a/tensorflow/core/kernels/batch_kernels.cc +++ b/tensorflow/core/kernels/batch_kernels.cc @@ -23,6 +23,7 @@ limitations under the License. #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/batching_util/periodic_function.h" #include "tensorflow/core/kernels/batching_util/shared_batch_scheduler.h" +#include "tensorflow/core/kernels/batching_util/threadsafe_status.h" #include "tensorflow/core/kernels/concat_lib.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/kernels/split_lib.h" diff --git a/tensorflow/core/kernels/batching_util/BUILD b/tensorflow/core/kernels/batching_util/BUILD index 803eb2e9048494..a23857cc8ce221 100644 --- a/tensorflow/core/kernels/batching_util/BUILD +++ b/tensorflow/core/kernels/batching_util/BUILD @@ -52,6 +52,18 @@ cc_library( ], ) +cc_library( + name = "threadsafe_status", + srcs = ["threadsafe_status.cc"], + hdrs = ["threadsafe_status.h"], + deps = [ + "//tensorflow/core:lib", + "@com_google_absl//absl/base:core_headers", + "@com_google_absl//absl/status", + "@com_google_absl//absl/synchronization", + ], +) + tf_cc_test( name = "batch_scheduler_test", srcs = ["batch_scheduler_test.cc"], @@ -186,6 +198,18 @@ tf_cc_test( ], ) +tf_cc_test( + name = "threadsafe_status_test", + srcs = ["threadsafe_status_test.cc"], + deps = [ + ":threadsafe_status", + "//tensorflow/core:lib", + "//tensorflow/core:protos_all_cc", + "//tensorflow/core:test", + "//tensorflow/core:test_main", + ], +) + cc_library( name = "fake_clock_env", testonly = 1, diff --git a/tensorflow/core/kernels/batching_util/threadsafe_status.cc b/tensorflow/core/kernels/batching_util/threadsafe_status.cc new file mode 100644 index 00000000000000..fa5cda7161b4e0 --- /dev/null +++ b/tensorflow/core/kernels/batching_util/threadsafe_status.cc @@ -0,0 +1,51 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/core/kernels/batching_util/threadsafe_status.h" + +#include "absl/base/thread_annotations.h" +#include "absl/status/status.h" +#include "absl/synchronization/mutex.h" +#include "tensorflow/core/platform/mutex.h" + +namespace tensorflow { +const Status& ThreadSafeStatus::status() const& { + tf_shared_lock lock(mutex_); + return status_; +} + +Status ThreadSafeStatus::status() && { + tf_shared_lock lock(mutex_); + return std::move(status_); +} + +void ThreadSafeStatus::Update(const Status& new_status) { + if (new_status.ok()) { + return; + } + + mutex_lock lock(mutex_); + status_.Update(new_status); +} + +void ThreadSafeStatus::Update(Status&& new_status) { + if (new_status.ok()) { + return; + } + + mutex_lock lock(mutex_); + status_.Update(std::forward(new_status)); +} +} // namespace tensorflow diff --git a/tensorflow/core/kernels/batching_util/threadsafe_status.h b/tensorflow/core/kernels/batching_util/threadsafe_status.h new file mode 100644 index 00000000000000..c14a8a907147bd --- /dev/null +++ b/tensorflow/core/kernels/batching_util/threadsafe_status.h @@ -0,0 +1,57 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_THREADSAFE_STATUS_H_ +#define TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_THREADSAFE_STATUS_H_ + +#include "tensorflow/core/platform/mutex.h" +#include "tensorflow/core/platform/status.h" +#include "tensorflow/core/platform/thread_annotations.h" + +namespace tensorflow { +// Wrapper class to allow both lock-free construction and concurrent updates on +// a 'status'. +// +// Example Usage: +// std::thread threads[2]; +// ThreadSafeStatus thread_safe_status; +// threads[0] = std::thread([&]() { +// status.Update(errors::Internal("internal error")); +// }); +// threads[1] = std::thread([&]() { +// status.Update(errors::InvalidArgument("invalid argument")); +// }); +// threads[0].Join(); +// threads[1].Join(); +// +// NOTE: +// When updated in a multi-threading setup, only the first error is retained. +class ThreadSafeStatus { + public: + const Status& status() const& TF_LOCKS_EXCLUDED(mutex_); + Status status() && TF_LOCKS_EXCLUDED(mutex_); + + // Retains the first error status: replaces the current status with + // `new_status` if `new_status` is not OK and the previous status is OK. 
+ void Update(const Status& new_status) TF_LOCKS_EXCLUDED(mutex_); + void Update(Status&& new_status) TF_LOCKS_EXCLUDED(mutex_); + + private: + mutable mutex mutex_; + Status status_ TF_GUARDED_BY(mutex_); +}; +} // namespace tensorflow + +#endif // TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_THREADSAFE_STATUS_H_ diff --git a/tensorflow/core/kernels/batching_util/threadsafe_status_test.cc b/tensorflow/core/kernels/batching_util/threadsafe_status_test.cc new file mode 100644 index 00000000000000..e0c5d03c8a451c --- /dev/null +++ b/tensorflow/core/kernels/batching_util/threadsafe_status_test.cc @@ -0,0 +1,51 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/core/kernels/batching_util/threadsafe_status.h" + +#include "tensorflow/core/lib/core/status_test_util.h" +#include "tensorflow/core/platform/errors.h" +#include "tensorflow/core/platform/test.h" +#include "tensorflow/core/protobuf/error_codes.pb.h" + +namespace tensorflow { +namespace { + +TEST(ThreadSafeStatus, DefaultOk) { + ThreadSafeStatus status; + TF_EXPECT_OK(status.status()); +} + +TEST(ThreadSafeStatus, Update) { + ThreadSafeStatus status; + TF_EXPECT_OK(status.status()); + + status.Update(errors::FailedPrecondition("original error")); + EXPECT_EQ(status.status().code(), error::FAILED_PRECONDITION); + + status.Update(Status::OK()); + EXPECT_EQ(status.status().code(), error::FAILED_PRECONDITION); + + status.Update(errors::Internal("new error")); + EXPECT_EQ(status.status().code(), error::FAILED_PRECONDITION); +} + +TEST(ThreadSafeStatus, Move) { + ThreadSafeStatus status; + TF_EXPECT_OK(std::move(status).status()); +} + +} // namespace +} // namespace tensorflow From 6ee77b0b8e882dc9c419d89701269f41f438a379 Mon Sep 17 00:00:00 2001 From: Mingming Liu Date: Sat, 18 Jul 2020 16:29:23 -0700 Subject: [PATCH 003/256] Move helper class ThreadSafeStatus into a separate file with unit test. 
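As a companion to the `ThreadSafeStatus` header and unit test above, here is a hedged usage sketch adapted from the class comment (the comment's variable name is made consistent and `std::thread`'s lower-case `join()` is used); the wrapper function is illustrative only, not part of the patch:

    #include <thread>

    #include "tensorflow/core/kernels/batching_util/threadsafe_status.h"
    #include "tensorflow/core/platform/errors.h"

    namespace tensorflow {

    void ThreadSafeStatusExample() {  // illustrative helper, not in the patch
      ThreadSafeStatus thread_safe_status;
      std::thread t0([&]() {
        thread_safe_status.Update(errors::Internal("internal error"));
      });
      std::thread t1([&]() {
        thread_safe_status.Update(errors::InvalidArgument("invalid argument"));
      });
      t0.join();
      t1.join();
      // Only the first non-OK Update() is retained; the later one is ignored.
      const Status& result = thread_safe_status.status();
      (void)result;
    }

    }  // namespace tensorflow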
PiperOrigin-RevId: 321974213 Change-Id: I78ceb91618c40da799097aa4d2048be0bf182c16 --- tensorflow/core/kernels/batching_util/BUILD | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tensorflow/core/kernels/batching_util/BUILD b/tensorflow/core/kernels/batching_util/BUILD index a23857cc8ce221..ec246323679f95 100644 --- a/tensorflow/core/kernels/batching_util/BUILD +++ b/tensorflow/core/kernels/batching_util/BUILD @@ -64,6 +64,18 @@ cc_library( ], ) +cc_library( + name = "threadsafe_status", + srcs = ["threadsafe_status.cc"], + hdrs = ["threadsafe_status.h"], + deps = [ + "//tensorflow/core:lib", + "@com_google_absl//absl/base:core_headers", + "@com_google_absl//absl/status", + "@com_google_absl//absl/synchronization", + ], +) + tf_cc_test( name = "batch_scheduler_test", srcs = ["batch_scheduler_test.cc"], From fd3b3ca6f559fed0ee0133844e8fad62dacf1e40 Mon Sep 17 00:00:00 2001 From: Goldie Gadde Date: Tue, 28 Jul 2020 13:34:56 -0700 Subject: [PATCH 004/256] Update the release notes to fix some typos and missed changes. --- RELEASE.md | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/RELEASE.md b/RELEASE.md index 7fe48af4adb73b..136e71cd5de000 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -28,7 +28,7 @@ * Deprecated overrides of `DatasetBase::MakeIterator` and `MakeIteratorFromInputElement` are removed. * The signature of `tensorflow::data::IteratorBase::SaveInternal` and `tensorflow::data::IteratorBase::SaveInput` has been extended with `SerializationContext` argument to enable overriding the default policy for the handling external state during iterator checkpointing. This is not a backwards compatible change and all subclasses of `IteratorBase` *need to be updated* accordingly. * `tf.keras` - * Add a new `BackupAndRestore` callback for handling distributed training failures & restarts. Please take a look at this [tutorial](https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras) for details on how to use the callback. + * Add a new `BackupAndRestore` callback for handling distributed training failures & restarts. Please take a look at this [tutorial](https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras) for details on how to use the callback. * `tf.image.extract_glimpse` has been updated to correctly process the case where `centered=False` and `normalized=False`. This is a breaking change as the output is different from (incorrect) previous versions. Note this @@ -38,6 +38,10 @@ exsiting C++ kernel `ExtractGlimpse` does not change either, so saved models using `tf.raw_ops.ExtractGlimpse` will not be impacted. +## Known Caveats + * `tf.lite` + * Keras-based LSTM models must be converted with an explicit batch size in the input layer. + ## Bug Fixes and Other Changes ### TF Core: @@ -74,9 +78,10 @@ * `@tf.function` from SavedModel no longer ignores args after a `RaggedTensor` when selecting the concrete function to run. * Fix save model issue for ops with a list of functions. * Add `tf.saved_model.LoadOptions` with [`experimental_io_device`](https://www.tensorflow.org/versions/r2.3/api_docs/python/tf/saved_model/LoadOptions?hl=en) as arg with default value `None` to choose the I/O device for loading models and weights. - * Update `tf.saved_model.SaveOptions` with [`experimental_io_device`](https://www.tensorflow.org/versions/r2.3/api_docs/python/tf/saved_model/SaveOptions?hl=en) as arg with default value `None` to choose the I/O device for saving models and weights. 
+ * Update `tf.saved_model.SaveOptions` with [`experimental_io_device`](https://www.tensorflow.org/versions/r2.3/api_docs/python/tf/saved_model/SaveOptions?hl=en) as arg with default value `None` to choose the I/O device for saving models and weights. + * Mutable tables now restore checkpointed values when loaded from SavedModel. * GPU - * No longer includes PTX kernels for GPU except for sm_70 to reduce binary size. On systems with NVIDIA® Ampere GPUs (CUDA architecture 8.0) or newer, kernels are JIT-compiled from PTX and TensorFlow can take over 30 minutes to start up. This overhead can be limited to the first start up by increasing the default JIT cache size with: `export CUDA_CACHE_MAXSIZE=2147483648`.: + * TF 2.3 includes PTX kernels only for [compute capability](https://developer.nvidia.com/cuda-gpus) 7.0 to reduce the TF pip binary size. Earlier releases included PTX for a variety of older compute capabilities. * Others * Retain parent namescope for ops added inside `tf.while_loop`/`tf.cond`/`tf.switch_case`. * Update `tf.vectorized_map` to support vectorizing `tf.while_loop` and TensorList operations. @@ -197,10 +202,6 @@ This release contains contributions from many people at Google, as well as: 902449@58880@bigcat_chen@ASIC, Abdul Baseer Khan, Abhineet Choudhary, Abolfazl Shahbazi, Adam Hillier, ag.ramesh, Agoniii, Ajay P, Alex Hoffman, Alexander Bayandin, Alexander Grund, Alexandre Abadie, Alexey Rogachevskiy, amoitra, Andrew Stevens, Angus-Luo, Anshuman Tripathy, Anush Elangovan, Artem Mavrin, Ashutosh Hathidara, autoih, Ayushman Kumar, ayushmankumar7, Bairen Yi, Bas Aarts, Bastian Eichenberger, Ben Barsdell, bhack, Bharat Raghunathan, Biagio Montaruli, Bigcat-Himax, blueyi, Bryan Cutler, Byambaa, Carlos Hernandez-Vaquero, Chen Lei, Chris Knorowski, Christian Clauss, chuanqiw, CuiYifeng, Daniel Situnayake, Daria Zhuravleva, Dayananda-V, Deven Desai, Devi Sandeep Endluri, Dmitry Zakharov, Dominic Jack, Duncan Riach, Edgar Liberis, Ehsan Toosi, ekuznetsov139, Elena Zhelezina, Eugene Kuznetsov, Eugene Mikhantiev, Evgenii Zheltonozhskii, Fabio Di Domenico, Fausto Morales, Fei Sun, feihugis, Felix E. Klee, flyingcat, Frederic Bastien, Fredrik Knutsson, frreiss, fsx950223, ganler, Gaurav Singh, Georgios Pinitas, Gian Marco Iodice, Giorgio Arena, Giuseppe Rossini, Gregory Keith, Guozhong Zhuang, gurushantj, Hahn Anselm, Harald Husum, Harjyot Bagga, Hristo Vrigazov, Ilya Persky, Ir1d, Itamar Turner-Trauring, jacco, Jake Tae, Janosh Riebesell, Jason Zaman, jayanth, Jeff Daily, Jens Elofsson, Jinzhe Zeng, JLZ, Jonas Skog, Jonathan Dekhtiar, Josh Meyer, Joshua Chia, Judd, justkw, Kaixi Hou, Kam D Kasravi, Kamil Rakoczy, Karol Gugala, Kayou, Kazuaki Ishizaki, Keith Smiley, Khaled Besrour, Kilaru Yasaswi Sri Chandra Gandhi, Kim, Young Soo, Kristian Hartikainen, Kwabena W. 
Agyeman, Leslie-Fang, Leslie-Fang-Intel, Li, Guizi, Lukas Geiger, Lutz Roeder, M\U00E5Ns Nilsson, Mahmoud Abuzaina, Manish, Marcel Koester, Marcin Sielski, marload, Martin Jul, Matt Conley, mdfaijul, Meng, Peng, Meteorix, Michael Käufl, Michael137, Milan Straka, Mitchell Vitez, Ml-0, Mokke Meguru, Mshr-H, nammbash, Nathan Luehr, naumkin, Neeraj Bhadani, ngc92, Nick Morgan, nihui, Niranjan Hasabnis, Niranjan Yadla, Nishidha Panpaliya, Oceania2018, oclyke, Ouyang Jin, OverLordGoldDragon, Owen Lyke, Patrick Hemmer, Paul Andrey, Peng Sun, periannath, Phil Pearl, Prashant Dandriyal, Prashant Kumar, Rahul Huilgol, Rajan Singh, Rajeshwar Reddy T, rangjiaheng, Rishit Dagli, Rohan Reddy, rpalakkal, rposts, Ruan Kunliang, Rushabh Vasani, Ryohei Ikegami, Semun Lee, Seo-Inyoung, Sergey Mironov, Sharada Shiddibhavi, ShengYang1, Shraiysh Vaishay, Shunya Ueta, shwetaoj, Siyavash Najafzade, Srinivasan Narayanamoorthy, Stephan Uphoff, storypku, sunchenggen, sunway513, Sven-Hendrik Haase, Swapnil Parekh, Tamas Bela Feher, Teng Lu, tigertang, tomas, Tomohiro Ubukata, tongxuan.ltx, Tony Tonev, Tzu-Wei Huang, Téo Bouvard, Uday Bondhugula, Vaibhav Jade, Vijay Tadikamalla, Vikram Dattu, Vincent Abriou, Vishnuvardhan Janapati, Vo Van Nghia, VoVAllen, Will Battel, William D. Irons, wyzhao, Xiaoming (Jason) Cui, Xiaoquan Kong, Xinan Jiang, xutianming, Yair Ehrenwald, Yasir Modak, Yasuhiro Matsumoto, Yixing Fu, Yong Tang, Yuan Tang, zhaozheng09, Zilin Zhu, zilinzhu, 张志豪 -## Bug Fixes and Other Changes - -* Mutable tables now restore checkpointed values when loaded from SavedModel. - # Release 2.1.1 ## Bug Fixes and Other Changes From 65341f73d110bf173325768947343e1bb8f699fc Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 29 Jul 2020 10:48:39 -0700 Subject: [PATCH 005/256] Remove scipy dependency. See #40884, #35709, #40789. 
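Since the change below drops scipy from the pip package's required-packages list, downstream code that relied on TensorFlow installing scipy transitively now needs to declare it itself. A hypothetical guard (names and message are illustrative; 1.4.1 is the pin being removed, originally added because older scipy releases could segfault with pybind11):

    try:
        import scipy
    except ImportError as exc:
        raise ImportError(
            "scipy is no longer installed together with tensorflow; add "
            "'scipy >= 1.4.1' to your own requirements (older releases could "
            "segfault due to pybind11)."
        ) from exc

    print("scipy", scipy.__version__, "is available")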
--- tensorflow/tools/pip_package/setup.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py index 42233f80c1c9f2..60c8f35a903f82 100644 --- a/tensorflow/tools/pip_package/setup.py +++ b/tensorflow/tools/pip_package/setup.py @@ -69,8 +69,6 @@ 'wrapt >= 1.11.1', 'wheel >= 0.26', 'six >= 1.12.0', - # scipy < 1.4.1 causes segfaults due to pybind11 - 'scipy == 1.4.1', ] if sys.byteorder == 'little': From 24c2db13942177729ae22d2e86e4f46b1a5d8f63 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 17 Sep 2020 11:11:27 -0700 Subject: [PATCH 006/256] Pin Estimator nightly to latest version used during release --- tensorflow/tools/ci_build/release/common.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tensorflow/tools/ci_build/release/common.sh b/tensorflow/tools/ci_build/release/common.sh index b533564e7a1f42..cf556ce291d0ac 100644 --- a/tensorflow/tools/ci_build/release/common.sh +++ b/tensorflow/tools/ci_build/release/common.sh @@ -144,7 +144,7 @@ function install_pip_deps { ${SUDO_CMD} ${PIP_CMD} install scikit-learn ${SUDO_CMD} ${PIP_CMD} install --upgrade tb-nightly ${PIP_CMD} install --user --upgrade attrs - ${PIP_CMD} install --user --upgrade tf-estimator-nightly + ${PIP_CMD} install --user --upgrade tf-estimator-nightly==2.4.0.dev2020072601 ${PIP_CMD} install --user --upgrade "future>=0.17.1" ${PIP_CMD} install --user --upgrade wrapt # LINT.ThenChange(:ubuntu_16_pip_installations) @@ -178,7 +178,7 @@ function install_ubuntu_16_pip_deps { "${PIP_CMD}" install scikit-learn --user "${PIP_CMD}" install PyYAML==3.13 --user # b/156523241 - "${PIP_CMD}" install --force-reinstall --user --upgrade tf-estimator-nightly + "${PIP_CMD}" install --force-reinstall --user --upgrade tf-estimator-nightly==2.4.0.dev2020072601 "${PIP_CMD}" install --user --upgrade tb-nightly "${PIP_CMD}" install --user --upgrade wrapt # LINT.ThenChange(:ubuntu_pip_installations) @@ -222,7 +222,7 @@ function install_macos_pip_deps { ${SUDO_CMD} ${PIP_CMD} install --upgrade tb-nightly ${PIP_CMD} install --user --upgrade attrs # b/156523241 - ${PIP_CMD} install --force-reinstall --user --upgrade tf-estimator-nightly + ${PIP_CMD} install --force-reinstall --user --upgrade tf-estimator-nightly==2.4.0.dev2020072601 ${PIP_CMD} install --user --upgrade wrapt ${PIP_CMD} install --user --upgrade "future>=0.17.1" } From c418ff6ae918f685ec49648a5886f81e3d097484 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 17 Sep 2020 11:13:38 -0700 Subject: [PATCH 007/256] Update common_win.bat --- tensorflow/tools/ci_build/release/common_win.bat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/tools/ci_build/release/common_win.bat b/tensorflow/tools/ci_build/release/common_win.bat index fa577fcfc33f7b..ec6f326f44e448 100644 --- a/tensorflow/tools/ci_build/release/common_win.bat +++ b/tensorflow/tools/ci_build/release/common_win.bat @@ -28,7 +28,7 @@ SET PATH=%PATH%;C:\%PYTHON_DIRECTORY% %PIP_EXE% install setuptools --upgrade %PIP_EXE% install future>=0.17.1 --no-deps -%PIP_EXE% install --ignore-installed --force-reinstall --upgrade tf-estimator-nightly --no-deps +%PIP_EXE% install --ignore-installed --force-reinstall --upgrade tf-estimator-nightly==2.4.0.dev2020072601 --no-deps %PIP_EXE% install tb-nightly --no-deps %PIP_EXE% install numpy==1.16.0 --upgrade --no-deps %PIP_EXE% install opt_einsum --upgrade From a1f4941a5c042687e71fb9437df40874a093c909 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 
17 Sep 2020 12:56:08 -0700 Subject: [PATCH 008/256] Add rel/ pool --- .../ci_build/rel/macos/cpu_libtensorflow.sh | 27 ++++++++ .../ci_build/rel/macos/cpu_py35_nonpip.sh | 51 ++++++++++++++ .../tools/ci_build/rel/macos/cpu_py35_pip.sh | 51 ++++++++++++++ .../ci_build/rel/macos/cpu_py36_nonpip.sh | 51 ++++++++++++++ .../tools/ci_build/rel/macos/cpu_py36_pip.sh | 51 ++++++++++++++ .../ci_build/rel/macos/cpu_py37_nonpip.sh | 51 ++++++++++++++ .../tools/ci_build/rel/macos/cpu_py37_pip.sh | 51 ++++++++++++++ .../ci_build/rel/macos/cpu_py38_nonpip.sh | 51 ++++++++++++++ .../tools/ci_build/rel/macos/cpu_py38_pip.sh | 51 ++++++++++++++ .../ci_build/rel/ubuntu/cpu_libtensorflow.sh | 40 +++++++++++ .../ci_build/rel/ubuntu/cpu_py35_nonpip.sh | 48 +++++++++++++ .../tools/ci_build/rel/ubuntu/cpu_py35_pip.sh | 52 ++++++++++++++ .../ci_build/rel/ubuntu/cpu_py36_nonpip.sh | 48 +++++++++++++ .../tools/ci_build/rel/ubuntu/cpu_py36_pip.sh | 52 ++++++++++++++ .../ci_build/rel/ubuntu/cpu_py37_nonpip.sh | 48 +++++++++++++ .../tools/ci_build/rel/ubuntu/cpu_py37_pip.sh | 52 ++++++++++++++ .../ci_build/rel/ubuntu/cpu_py38_nonpip.sh | 48 +++++++++++++ .../tools/ci_build/rel/ubuntu/cpu_py38_pip.sh | 52 ++++++++++++++ .../ci_build/rel/ubuntu/gpu_libtensorflow.sh | 40 +++++++++++ .../ci_build/rel/ubuntu/gpu_pip_on_cpu.sh | 61 ++++++++++++++++ .../ci_build/rel/ubuntu/gpu_py35_nonpip.sh | 60 ++++++++++++++++ .../tools/ci_build/rel/ubuntu/gpu_py35_pip.sh | 69 +++++++++++++++++++ .../ci_build/rel/ubuntu/gpu_py36_nonpip.sh | 60 ++++++++++++++++ .../tools/ci_build/rel/ubuntu/gpu_py36_pip.sh | 69 +++++++++++++++++++ .../ci_build/rel/ubuntu/gpu_py37_nonpip.sh | 60 ++++++++++++++++ .../tools/ci_build/rel/ubuntu/gpu_py37_pip.sh | 69 +++++++++++++++++++ .../ci_build/rel/ubuntu/gpu_py38_nonpip.sh | 60 ++++++++++++++++ .../tools/ci_build/rel/ubuntu/gpu_py38_pip.sh | 69 +++++++++++++++++++ .../tools/ci_build/rel/ubuntu/sanity.sh | 36 ++++++++++ .../rel/windows/cpu_libtensorflow.bat | 20 ++++++ .../tools/ci_build/rel/windows/cpu_py35.bat | 20 ++++++ .../tools/ci_build/rel/windows/cpu_py36.bat | 20 ++++++ .../tools/ci_build/rel/windows/cpu_py37.bat | 20 ++++++ .../tools/ci_build/rel/windows/cpu_py38.bat | 21 ++++++ .../rel/windows/gpu_libtensorflow.bat | 20 ++++++ .../ci_build/rel/windows/gpu_pip_on_cpu.bat | 21 ++++++ .../tools/ci_build/rel/windows/gpu_py35.bat | 23 +++++++ .../tools/ci_build/rel/windows/gpu_py36.bat | 23 +++++++ .../tools/ci_build/rel/windows/gpu_py37.bat | 23 +++++++ .../tools/ci_build/rel/windows/gpu_py38.bat | 23 +++++++ 40 files changed, 1762 insertions(+) create mode 100644 tensorflow/tools/ci_build/rel/macos/cpu_libtensorflow.sh create mode 100644 tensorflow/tools/ci_build/rel/macos/cpu_py35_nonpip.sh create mode 100644 tensorflow/tools/ci_build/rel/macos/cpu_py35_pip.sh create mode 100644 tensorflow/tools/ci_build/rel/macos/cpu_py36_nonpip.sh create mode 100644 tensorflow/tools/ci_build/rel/macos/cpu_py36_pip.sh create mode 100644 tensorflow/tools/ci_build/rel/macos/cpu_py37_nonpip.sh create mode 100644 tensorflow/tools/ci_build/rel/macos/cpu_py37_pip.sh create mode 100644 tensorflow/tools/ci_build/rel/macos/cpu_py38_nonpip.sh create mode 100644 tensorflow/tools/ci_build/rel/macos/cpu_py38_pip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/cpu_libtensorflow.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/cpu_py35_nonpip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/cpu_py35_pip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/cpu_py36_nonpip.sh create mode 100644 
tensorflow/tools/ci_build/rel/ubuntu/cpu_py36_pip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/cpu_py37_nonpip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/cpu_py37_pip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/cpu_py38_nonpip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/cpu_py38_pip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/gpu_libtensorflow.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/gpu_pip_on_cpu.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/gpu_py35_nonpip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/gpu_py35_pip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/gpu_py36_nonpip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/gpu_py36_pip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/gpu_py37_nonpip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/gpu_py37_pip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/gpu_py38_nonpip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/gpu_py38_pip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/sanity.sh create mode 100644 tensorflow/tools/ci_build/rel/windows/cpu_libtensorflow.bat create mode 100644 tensorflow/tools/ci_build/rel/windows/cpu_py35.bat create mode 100644 tensorflow/tools/ci_build/rel/windows/cpu_py36.bat create mode 100644 tensorflow/tools/ci_build/rel/windows/cpu_py37.bat create mode 100644 tensorflow/tools/ci_build/rel/windows/cpu_py38.bat create mode 100644 tensorflow/tools/ci_build/rel/windows/gpu_libtensorflow.bat create mode 100644 tensorflow/tools/ci_build/rel/windows/gpu_pip_on_cpu.bat create mode 100644 tensorflow/tools/ci_build/rel/windows/gpu_py35.bat create mode 100644 tensorflow/tools/ci_build/rel/windows/gpu_py36.bat create mode 100644 tensorflow/tools/ci_build/rel/windows/gpu_py37.bat create mode 100644 tensorflow/tools/ci_build/rel/windows/gpu_py38.bat diff --git a/tensorflow/tools/ci_build/rel/macos/cpu_libtensorflow.sh b/tensorflow/tools/ci_build/rel/macos/cpu_libtensorflow.sh new file mode 100644 index 00000000000000..348778b5f158ee --- /dev/null +++ b/tensorflow/tools/ci_build/rel/macos/cpu_libtensorflow.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +echo "chmod go+w lib_package/*" >> tensorflow/tools/ci_build/linux/libtensorflow.sh +echo "bazel clean --expunge" >> tensorflow/tools/ci_build/linux/libtensorflow.sh + +# Install latest bazel +source tensorflow/tools/ci_build/release/common.sh +install_bazelisk + +# Pick a version of xcode +export DEVELOPER_DIR=/Applications/Xcode_10.3.app/Contents/Developer +sudo xcode-select -s "${DEVELOPER_DIR}" + +tensorflow/tools/ci_build/osx/libtensorflow_cpu.sh diff --git a/tensorflow/tools/ci_build/rel/macos/cpu_py35_nonpip.sh b/tensorflow/tools/ci_build/rel/macos/cpu_py35_nonpip.sh new file mode 100644 index 00000000000000..06fabd7b1c7c83 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/macos/cpu_py35_nonpip.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh +install_bazelisk + +# Pick a more recent version of xcode +export DEVELOPER_DIR=/Applications/Xcode_10.3.app/Contents/Developer +sudo xcode-select -s "${DEVELOPER_DIR}" +python3.5 -m virtualenv tf_build_env --system-site-packages +source tf_build_env/bin/activate + +# Install macos pip dependencies +install_macos_pip_deps sudo pip3.5 + +# Run configure. +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export TF2_BEHAVIOR=1 +export PYTHON_BIN_PATH=$(which python3.5) +yes "" | "$PYTHON_BIN_PATH" configure.py + +tag_filters="-no_oss,-oss_serial,-nomac,-no_mac,-no_oss_py35,-v1only,-gpu,-tpu,-benchmark-test" + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Run tests +set +e +bazel test --test_output=errors --config=opt \ + --action_env=TF2_BEHAVIOR="${TF2_BEHAVIOR}" \ + --build_tag_filters="${tag_filters}" \ + --test_tag_filters="${tag_filters}" -- \ + ${DEFAULT_BAZEL_TARGETS} \ + -//tensorflow/lite/... +test_xml_summary_exit diff --git a/tensorflow/tools/ci_build/rel/macos/cpu_py35_pip.sh b/tensorflow/tools/ci_build/rel/macos/cpu_py35_pip.sh new file mode 100644 index 00000000000000..3f31033b2ac478 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/macos/cpu_py35_pip.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh +install_bazelisk + +# Pick a more recent version of xcode +export DEVELOPER_DIR=/Applications/Xcode_10.3.app/Contents/Developer +sudo xcode-select -s "${DEVELOPER_DIR}" + +# Install macos pip dependencies +install_macos_pip_deps sudo pip3.5 + +# Export required variables for running pip_new.sh +export OS_TYPE="MACOS" +export CONTAINER_TYPE="CPU" +export TF_PYTHON_VERSION='python3.5' +export TF_BUILD_BOTH_CPU_PACKAGES=1 + +# Run configure. +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION}) +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Export optional variables for running pip.sh +export TF_BUILD_FLAGS="--config=opt --config=v2" +export TF_TEST_FLAGS="--define=no_tensorflow_py_deps=true --test_lang_filters=py --test_output=errors --verbose_failures=true --keep_going --test_env=TF2_BEHAVIOR=1" +export TF_TEST_TARGETS="//tensorflow/python/..." +export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean" +export TF_TEST_FILTER_TAGS='-nomac,-no_mac,-no_oss,-oss_serial,-no_oss_py35,-gpu,-tpu,-benchmark-test' +#export IS_NIGHTLY=0 # Not nightly; uncomment if building from tf repo. +export TF_PROJECT_NAME="tensorflow" +export TF_PIP_TEST_ROOT="pip_test" + +./tensorflow/tools/ci_build/builds/pip_new.sh diff --git a/tensorflow/tools/ci_build/rel/macos/cpu_py36_nonpip.sh b/tensorflow/tools/ci_build/rel/macos/cpu_py36_nonpip.sh new file mode 100644 index 00000000000000..51cc3da62d6b93 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/macos/cpu_py36_nonpip.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh +install_bazelisk + +# Pick a more recent version of xcode +export DEVELOPER_DIR=/Applications/Xcode_10.3.app/Contents/Developer +sudo xcode-select -s "${DEVELOPER_DIR}" +python3.6 -m virtualenv tf_build_env --system-site-packages +source tf_build_env/bin/activate + +# Install macos pip dependencies +install_macos_pip_deps sudo pip3.6 + +# Run configure. +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export TF2_BEHAVIOR=1 +export PYTHON_BIN_PATH=$(which python3.6) +yes "" | "$PYTHON_BIN_PATH" configure.py + +tag_filters="-no_oss,-oss_serial,-nomac,-no_mac,-no_oss_py36,-v1only,-gpu,-tpu,-benchmark-test" + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Run tests +set +e +bazel test --test_output=errors --config=opt \ + --action_env=TF2_BEHAVIOR="${TF2_BEHAVIOR}" \ + --build_tag_filters="${tag_filters}" \ + --test_tag_filters="${tag_filters}" -- \ + ${DEFAULT_BAZEL_TARGETS} \ + -//tensorflow/lite/... 
+test_xml_summary_exit diff --git a/tensorflow/tools/ci_build/rel/macos/cpu_py36_pip.sh b/tensorflow/tools/ci_build/rel/macos/cpu_py36_pip.sh new file mode 100644 index 00000000000000..26ee4ea8edbd0d --- /dev/null +++ b/tensorflow/tools/ci_build/rel/macos/cpu_py36_pip.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh +install_bazelisk + +# Pick a more recent version of xcode +export DEVELOPER_DIR=/Applications/Xcode_10.3.app/Contents/Developer +sudo xcode-select -s "${DEVELOPER_DIR}" + +# Install macos pip dependencies +install_macos_pip_deps sudo pip3.6 + +# Export required variables for running pip_new.sh +export OS_TYPE="MACOS" +export CONTAINER_TYPE="CPU" +export TF_PYTHON_VERSION='python3.6' +export TF_BUILD_BOTH_CPU_PACKAGES=1 + +# Run configure. +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION}) +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Export optional variables for running pip.sh +export TF_BUILD_FLAGS="--config=opt --config=v2" +export TF_TEST_FLAGS="--define=no_tensorflow_py_deps=true --test_lang_filters=py --test_output=errors --verbose_failures=true --keep_going --test_env=TF2_BEHAVIOR=1" +export TF_TEST_TARGETS="//tensorflow/python/..." +export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean" +export TF_TEST_FILTER_TAGS='-nomac,-no_mac,-no_oss,-oss_serial,-no_oss_py35,-v1only,-gpu,-tpu,-benchmark-test' +#export IS_NIGHTLY=0 # Not nightly; uncomment if building from tf repo. +export TF_PROJECT_NAME="tensorflow" +export TF_PIP_TEST_ROOT="pip_test" + +./tensorflow/tools/ci_build/builds/pip_new.sh diff --git a/tensorflow/tools/ci_build/rel/macos/cpu_py37_nonpip.sh b/tensorflow/tools/ci_build/rel/macos/cpu_py37_nonpip.sh new file mode 100644 index 00000000000000..e0f2968b45a121 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/macos/cpu_py37_nonpip.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh +install_bazelisk + +# Pick a more recent version of xcode +export DEVELOPER_DIR=/Applications/Xcode_10.3.app/Contents/Developer +sudo xcode-select -s "${DEVELOPER_DIR}" +python -m virtualenv tf_build_env --system-site-packages +source tf_build_env/bin/activate + +# Install macos pip dependencies +install_macos_pip_deps sudo pip3.7 + +# Run configure. +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export TF2_BEHAVIOR=1 +export PYTHON_BIN_PATH=$(which python3.7) +yes "" | "$PYTHON_BIN_PATH" configure.py + +tag_filters="-no_oss,-oss_serial,-nomac,-no_mac$(maybe_skip_v1),-gpu,-tpu,-benchmark-test" + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Run tests +set +e +bazel test --test_output=errors --config=opt \ + --action_env=TF2_BEHAVIOR="${TF2_BEHAVIOR}" \ + --build_tag_filters="${tag_filters}" \ + --test_tag_filters="${tag_filters}" -- \ + ${DEFAULT_BAZEL_TARGETS} \ + -//tensorflow/lite/... +test_xml_summary_exit diff --git a/tensorflow/tools/ci_build/rel/macos/cpu_py37_pip.sh b/tensorflow/tools/ci_build/rel/macos/cpu_py37_pip.sh new file mode 100644 index 00000000000000..ed577db961a41e --- /dev/null +++ b/tensorflow/tools/ci_build/rel/macos/cpu_py37_pip.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh +install_bazelisk + +# Pick a more recent version of xcode +export DEVELOPER_DIR=/Applications/Xcode_10.3.app/Contents/Developer +sudo xcode-select -s "${DEVELOPER_DIR}" + +# Install macos pip dependencies +install_macos_pip_deps sudo pip3.7 + +# Export required variables for running pip_new.sh +export OS_TYPE="MACOS" +export CONTAINER_TYPE="CPU" +export TF_PYTHON_VERSION='python3.7' +export TF_BUILD_BOTH_CPU_PACKAGES=1 + +# Run configure. +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION}) +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Export optional variables for running pip.sh +export TF_BUILD_FLAGS="--config=opt --config=v2" +export TF_TEST_FLAGS="--define=no_tensorflow_py_deps=true --test_lang_filters=py --test_output=errors --verbose_failures=true --keep_going --test_env=TF2_BEHAVIOR=1" +export TF_TEST_TARGETS="//tensorflow/python/..." +export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean" +export TF_TEST_FILTER_TAGS='-nomac,-no_mac,-no_oss,-oss_serial,-no_oss_py37,-v1only,-gpu,-tpu,-benchmark-test' +#export IS_NIGHTLY=0 # Not nightly; uncomment if building from tf repo. 
+export TF_PROJECT_NAME="tensorflow" +export TF_PIP_TEST_ROOT="pip_test" + +./tensorflow/tools/ci_build/builds/pip_new.sh diff --git a/tensorflow/tools/ci_build/rel/macos/cpu_py38_nonpip.sh b/tensorflow/tools/ci_build/rel/macos/cpu_py38_nonpip.sh new file mode 100644 index 00000000000000..22475f35491828 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/macos/cpu_py38_nonpip.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh +install_bazelisk + +# Pick a more recent version of xcode +export DEVELOPER_DIR=/Applications/Xcode_10.3.app/Contents/Developer +sudo xcode-select -s "${DEVELOPER_DIR}" +python -m virtualenv tf_build_env --system-site-packages +source tf_build_env/bin/activate + +# Install macos pip dependencies +install_macos_pip_deps sudo pip3.8 + +# Run configure. +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export TF2_BEHAVIOR=1 +export PYTHON_BIN_PATH=$(which python3.8) +yes "" | "$PYTHON_BIN_PATH" configure.py + +tag_filters="-no_oss,-oss_serial,-nomac,-no_mac$(maybe_skip_v1),-gpu,-tpu,-benchmark-test" + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Run tests +set +e +bazel test --test_output=errors --config=opt \ + --action_env=TF2_BEHAVIOR="${TF2_BEHAVIOR}" \ + --build_tag_filters="${tag_filters}" \ + --test_tag_filters="${tag_filters}" -- \ + ${DEFAULT_BAZEL_TARGETS} \ + -//tensorflow/lite/... +test_xml_summary_exit diff --git a/tensorflow/tools/ci_build/rel/macos/cpu_py38_pip.sh b/tensorflow/tools/ci_build/rel/macos/cpu_py38_pip.sh new file mode 100644 index 00000000000000..f8eda5a7520034 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/macos/cpu_py38_pip.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh +install_bazelisk + +# Pick a more recent version of xcode +export DEVELOPER_DIR=/Applications/Xcode_10.3.app/Contents/Developer +sudo xcode-select -s "${DEVELOPER_DIR}" + +# Install macos pip dependencies +install_macos_pip_deps sudo pip3.8 + +# Export required variables for running pip_new.sh +export OS_TYPE="MACOS" +export CONTAINER_TYPE="CPU" +export TF_PYTHON_VERSION='python3.8' +export TF_BUILD_BOTH_CPU_PACKAGES=1 + +# Run configure. +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION}) +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Export optional variables for running pip.sh +export TF_BUILD_FLAGS="--config=opt --config=v2" +export TF_TEST_FLAGS="--define=no_tensorflow_py_deps=true --test_lang_filters=py --test_output=errors --verbose_failures=true --keep_going --test_env=TF2_BEHAVIOR=1" +export TF_TEST_TARGETS="//tensorflow/python/..." +export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean" +export TF_TEST_FILTER_TAGS='-nomac,-no_mac,-no_oss,-oss_serial,-no_oss_py38,-v1only,-gpu,-tpu,-benchmark-test' +#export IS_NIGHTLY=0 # Not nightly; uncomment if building from tf repo. +export TF_PROJECT_NAME="tensorflow" +export TF_PIP_TEST_ROOT="pip_test" + +./tensorflow/tools/ci_build/builds/pip_new.sh diff --git a/tensorflow/tools/ci_build/rel/ubuntu/cpu_libtensorflow.sh b/tensorflow/tools/ci_build/rel/ubuntu/cpu_libtensorflow.sh new file mode 100644 index 00000000000000..a0e3a7f4594102 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/cpu_libtensorflow.sh @@ -0,0 +1,40 @@ +#!/bin/bash +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e + +# Source the external common scripts. +source tensorflow/tools/ci_build/release/common.sh + + +# Install latest bazel +install_bazelisk +which bazel + +# Install realpath +sudo apt-get install realpath + +# Update the version string to nightly +if [ -n "${IS_NIGHTLY_BUILD}" ]; then + ./tensorflow/tools/ci_build/update_version.py --nightly +fi + +./tensorflow/tools/ci_build/linux/libtensorflow.sh + +# Copy the nightly version update script +if [ -n "${IS_NIGHTLY_BUILD}" ]; then + cp tensorflow/tools/ci_build/builds/libtensorflow_nightly_symlink.sh lib_package +fi + diff --git a/tensorflow/tools/ci_build/rel/ubuntu/cpu_py35_nonpip.sh b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py35_nonpip.sh new file mode 100644 index 00000000000000..5339671cce3e4d --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py35_nonpip.sh @@ -0,0 +1,48 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.5 +# Update bazel +install_bazelisk + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which python3.5) +export TF2_BEHAVIOR=1 +yes "" | "$PYTHON_BIN_PATH" configure.py +tag_filters="-no_oss,-oss_serial,-gpu,-tpu,-benchmark-test,-no_oss_py35,-v1only" + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Run tests +set +e +bazel test --test_output=errors --config=opt --test_lang_filters=py \ + --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain \ + --linkopt=-lrt \ + --action_env=TF2_BEHAVIOR="${TF2_BEHAVIOR}" \ + --build_tag_filters="${tag_filters}" \ + --test_tag_filters="${tag_filters}" -- \ + ${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... +test_xml_summary_exit diff --git a/tensorflow/tools/ci_build/rel/ubuntu/cpu_py35_pip.sh b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py35_pip.sh new file mode 100644 index 00000000000000..5d0cbacb0b7060 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py35_pip.sh @@ -0,0 +1,52 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.5 +# Update bazel +install_bazelisk + +# Export required variables for running pip.sh +export OS_TYPE="UBUNTU" +export CONTAINER_TYPE="CPU" +export TF_PYTHON_VERSION='python3.5' + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION}) +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Get the default test targets for bazel. 
+source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Export optional variables for running pip.sh +export TF_BUILD_FLAGS="--config=opt --config=v2 --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain" +export TF_TEST_FLAGS="--define=no_tensorflow_py_deps=true --test_lang_filters=py --test_output=errors --verbose_failures=true --keep_going --test_env=TF2_BEHAVIOR=1" +export TF_TEST_TARGETS="${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... " +export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean" +export TF_TEST_FILTER_TAGS='-no_oss,-oss_serial,-no_oss_py35,-v1only' +#export IS_NIGHTLY=0 # Not nightly; uncomment if building from tf repo. +export TF_PROJECT_NAME="tensorflow_cpu" +export TF_PIP_TEST_ROOT="pip_test" + +./tensorflow/tools/ci_build/builds/pip_new.sh diff --git a/tensorflow/tools/ci_build/rel/ubuntu/cpu_py36_nonpip.sh b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py36_nonpip.sh new file mode 100644 index 00000000000000..c2790420afcd3f --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py36_nonpip.sh @@ -0,0 +1,48 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.6 +# Update bazel +install_bazelisk + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which python3.6) +export TF2_BEHAVIOR=1 +yes "" | "$PYTHON_BIN_PATH" configure.py +tag_filters="-no_oss,-oss_serial,-gpu,-tpu,-benchmark-test,-no_oss_py36,-v1only" + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Run tests +set +e +bazel test --test_output=errors --config=opt --test_lang_filters=py \ + --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain \ + --linkopt=-lrt \ + --action_env=TF2_BEHAVIOR="${TF2_BEHAVIOR}" \ + --build_tag_filters="${tag_filters}" \ + --test_tag_filters="${tag_filters}" -- \ + ${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... +test_xml_summary_exit diff --git a/tensorflow/tools/ci_build/rel/ubuntu/cpu_py36_pip.sh b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py36_pip.sh new file mode 100644 index 00000000000000..25c4de88cdd7fb --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py36_pip.sh @@ -0,0 +1,52 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.6 +# Update bazel +install_bazelisk + +# Export required variables for running pip.sh +export OS_TYPE="UBUNTU" +export CONTAINER_TYPE="CPU" +export TF_PYTHON_VERSION='python3.6' + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION}) +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Export optional variables for running pip.sh +export TF_BUILD_FLAGS="--config=opt --config=v2 --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain" +export TF_TEST_FLAGS="--define=no_tensorflow_py_deps=true --test_lang_filters=py --test_output=errors --verbose_failures=true --keep_going --test_env=TF2_BEHAVIOR=1" +export TF_TEST_TARGETS="${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... " +export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean" +export TF_TEST_FILTER_TAGS='-no_oss,-oss_serial,-no_oss_py36,-v1only' +#export IS_NIGHTLY=0 # Not nightly; uncomment if building from tf repo. +export TF_PROJECT_NAME="tensorflow_cpu" +export TF_PIP_TEST_ROOT="pip_test" + +./tensorflow/tools/ci_build/builds/pip_new.sh diff --git a/tensorflow/tools/ci_build/rel/ubuntu/cpu_py37_nonpip.sh b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py37_nonpip.sh new file mode 100644 index 00000000000000..f6415a7c9ad17d --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py37_nonpip.sh @@ -0,0 +1,48 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.7 +# Update bazel +install_bazelisk + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which python3.7) +export TF2_BEHAVIOR=1 +yes "" | "$PYTHON_BIN_PATH" configure.py +tag_filters="-no_oss,-oss_serial,-gpu,-tpu,-benchmark-test,-no_oss_py37,-v1only" + +# Get the default test targets for bazel. 
+source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Run tests +set +e +bazel test --test_output=errors --config=opt --test_lang_filters=py \ + --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain \ + --linkopt=-lrt \ + --action_env=TF2_BEHAVIOR="${TF2_BEHAVIOR}" \ + --build_tag_filters="${tag_filters}" \ + --test_tag_filters="${tag_filters}" -- \ + ${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... +test_xml_summary_exit diff --git a/tensorflow/tools/ci_build/rel/ubuntu/cpu_py37_pip.sh b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py37_pip.sh new file mode 100644 index 00000000000000..940cef32ef868f --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py37_pip.sh @@ -0,0 +1,52 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.7 +# Update bazel +install_bazelisk + +# Export required variables for running pip.sh +export OS_TYPE="UBUNTU" +export CONTAINER_TYPE="CPU" +export TF_PYTHON_VERSION='python3.7' + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION}) +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Export optional variables for running pip.sh +export TF_BUILD_FLAGS="--config=opt --config=v2 --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain" +export TF_TEST_FLAGS="--define=no_tensorflow_py_deps=true --test_lang_filters=py --test_output=errors --verbose_failures=true --keep_going --test_env=TF2_BEHAVIOR=1" +export TF_TEST_TARGETS="${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... " +export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean" +export TF_TEST_FILTER_TAGS='-no_oss,-oss_serial,-no_oss_py37,-v1only' +#export IS_NIGHTLY=0 # Not nightly; uncomment if building from tf repo. +export TF_PROJECT_NAME="tensorflow_cpu" +export TF_PIP_TEST_ROOT="pip_test" + +./tensorflow/tools/ci_build/builds/pip_new.sh diff --git a/tensorflow/tools/ci_build/rel/ubuntu/cpu_py38_nonpip.sh b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py38_nonpip.sh new file mode 100644 index 00000000000000..ff7a9f3baef4e6 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py38_nonpip.sh @@ -0,0 +1,48 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.8 +# Update bazel +install_bazelisk + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which python3.8) +export TF2_BEHAVIOR=1 +yes "" | "$PYTHON_BIN_PATH" configure.py +tag_filters="-no_oss,-oss_serial,-gpu,-tpu,-benchmark-test,-no_oss_py38,-v1only" + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Run tests +set +e +bazel test --test_output=errors --config=opt --test_lang_filters=py \ + --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain \ + --linkopt=-lrt \ + --action_env=TF2_BEHAVIOR="${TF2_BEHAVIOR}" \ + --build_tag_filters="${tag_filters}" \ + --test_tag_filters="${tag_filters}" -- \ + ${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... +test_xml_summary_exit diff --git a/tensorflow/tools/ci_build/rel/ubuntu/cpu_py38_pip.sh b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py38_pip.sh new file mode 100644 index 00000000000000..a27d1f863d620d --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py38_pip.sh @@ -0,0 +1,52 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.8 +# Update bazel +install_bazelisk + +# Export required variables for running pip.sh +export OS_TYPE="UBUNTU" +export CONTAINER_TYPE="CPU" +export TF_PYTHON_VERSION='python3.8' + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION}) +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Get the default test targets for bazel. 
+source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Export optional variables for running pip.sh +export TF_BUILD_FLAGS="--config=opt --config=v2 --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain" +export TF_TEST_FLAGS="--define=no_tensorflow_py_deps=true --test_lang_filters=py --test_output=errors --verbose_failures=true --keep_going --test_env=TF2_BEHAVIOR=1" +export TF_TEST_TARGETS="${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... " +export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean" +export TF_TEST_FILTER_TAGS='-no_oss,-oss_serial,-no_oss_py38,-v1only' +#export IS_NIGHTLY=0 # Not nightly; uncomment if building from tf repo. +export TF_PROJECT_NAME="tensorflow_cpu" +export TF_PIP_TEST_ROOT="pip_test" + +./tensorflow/tools/ci_build/builds/pip_new.sh diff --git a/tensorflow/tools/ci_build/rel/ubuntu/gpu_libtensorflow.sh b/tensorflow/tools/ci_build/rel/ubuntu/gpu_libtensorflow.sh new file mode 100644 index 00000000000000..d294311d1ff2db --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/gpu_libtensorflow.sh @@ -0,0 +1,40 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e + +# Source the external common scripts. +source tensorflow/tools/ci_build/release/common.sh + + +# Install latest bazel +install_bazelisk +which bazel + +# Install realpath +sudo apt-get install realpath + +export TF_NEED_CUDA=1 + +# Update the version string to nightly +if [ -n "${IS_NIGHTLY_BUILD}" ]; then + ./tensorflow/tools/ci_build/update_version.py --nightly +fi + +./tensorflow/tools/ci_build/linux/libtensorflow.sh + +# Copy the nightly version update script +if [ -n "${IS_NIGHTLY_BUILD}" ]; then + cp tensorflow/tools/ci_build/builds/libtensorflow_nightly_symlink.sh lib_package +fi diff --git a/tensorflow/tools/ci_build/rel/ubuntu/gpu_pip_on_cpu.sh b/tensorflow/tools/ci_build/rel/ubuntu/gpu_pip_on_cpu.sh new file mode 100644 index 00000000000000..6e67bf207300c6 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/gpu_pip_on_cpu.sh @@ -0,0 +1,61 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.6 +# Update Bazel to the desired version +install_bazelisk + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=1 +export TF_CUDA_VERSION=10 +export TF_CUDNN_VERSION=7 +export TF_NEED_TENSORRT=1 +export TENSORRT_INSTALL_PATH=/usr/local/tensorrt +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which python3.6) +export LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:$TENSORRT_INSTALL_PATH/lib" +export TF_CUDA_COMPUTE_CAPABILITIES=sm_35,sm_37,sm_52,sm_60,sm_61,compute_70 + +yes "" | "$PYTHON_BIN_PATH" configure.py + +######################## +## Build GPU pip package +######################## +bazel build --config=opt \ + --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain \ + tensorflow/tools/pip_package:build_pip_package + +# Set TF nightly flag so we get the proper version of estimator +if [[ "$IS_NIGHTLY" == 1 ]]; then + NIGHTLY_FLAG="--nightly_flag" +fi + +PIP_WHL_DIR=whl +mkdir -p ${PIP_WHL_DIR} +PIP_WHL_DIR=$(readlink -f ${PIP_WHL_DIR}) # Get absolute path +bazel-bin/tensorflow/tools/pip_package/build_pip_package "${PIP_WHL_DIR}" "${NIGHTLY_FLAG}" +WHL_PATH=$(ls "${PIP_WHL_DIR}"/*.whl) + +cp "${WHL_PATH}" "$(pwd)"/. +chmod +x tensorflow/tools/ci_build/builds/docker_cpu_pip.sh +docker run -e "BAZEL_VERSION=${BAZEL_VERSION}" -e "CI_BUILD_USER=$(id -u -n)" -e "CI_BUILD_UID=$(id -u)" -e "CI_BUILD_GROUP=$(id -g -n)" -e "CI_BUILD_GID=$(id -g)" -e "CI_BUILD_HOME=/bazel_pip" -v "$(pwd)":/bazel_pip tensorflow/tensorflow:devel "./bazel_pip/tensorflow/tools/ci_build/builds/with_the_same_user" "./bazel_pip/tensorflow/tools/ci_build/builds/docker_cpu_pip.sh" diff --git a/tensorflow/tools/ci_build/rel/ubuntu/gpu_py35_nonpip.sh b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py35_nonpip.sh new file mode 100644 index 00000000000000..d9a10c9551d86d --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py35_nonpip.sh @@ -0,0 +1,60 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.5 +# Update bazel +install_bazelisk + +# Run configure. 
+export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=1 +export TF_CUDA_VERSION=10 +export TF_CUDNN_VERSION=7 +export TF_NEED_TENSORRT=1 +export TENSORRT_INSTALL_PATH=/usr/local/tensorrt +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which python3.5) +export TF2_BEHAVIOR=1 +export PROJECT_NAME="tensorflow_gpu" +export LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:$TENSORRT_INSTALL_PATH/lib" +export TF_CUDA_COMPUTE_CAPABILITIES=sm_35,sm_37,sm_52,sm_60,sm_61,compute_70 + +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +tag_filters="gpu,requires-gpu,-no_gpu,-no_oss,-oss_serial,-no_oss_py35" + +set +e +bazel test --config=cuda --config=opt \ + --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain \ + --linkopt=-lrt \ + --action_env=TF2_BEHAVIOR="${TF2_BEHAVIOR}" \ + --test_lang_filters=py \ + --test_tag_filters=${tag_filters} \ + --build_tag_filters=${tag_filters} \ + --test_timeout="300,450,1200,3600" --local_test_jobs=4 \ + --test_output=errors --verbose_failures=true --keep_going \ + --run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute \ + -- ${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... +test_xml_summary_exit diff --git a/tensorflow/tools/ci_build/rel/ubuntu/gpu_py35_pip.sh b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py35_pip.sh new file mode 100644 index 00000000000000..abf5c1db4b47b3 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py35_pip.sh @@ -0,0 +1,69 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.5 +# Update bazel +install_bazelisk + +# Export required variables for running pip.sh +export OS_TYPE="UBUNTU" +export CONTAINER_TYPE="GPU" +export TF_PYTHON_VERSION='python3.5' + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=1 +export TF_CUDA_VERSION=10 +export TF_CUDNN_VERSION=7 +export TF_NEED_TENSORRT=1 +export TENSORRT_INSTALL_PATH=/usr/local/tensorrt +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION}) +export PROJECT_NAME="tensorflow_gpu" +export LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:$TENSORRT_INSTALL_PATH/lib" +export TF_CUDA_COMPUTE_CAPABILITIES=sm_35,sm_37,sm_52,sm_60,sm_61,compute_70 + +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Get the default test targets for bazel. 
+source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Export optional variables for running pip.sh +export TF_TEST_FILTER_TAGS='gpu,requires-gpu,-no_gpu,-no_oss,-oss_serial,-no_oss_py35' +export TF_BUILD_FLAGS="--config=opt --config=v2 --config=cuda --distinct_host_configuration=false \ +--action_env=TF_CUDA_VERSION --action_env=TF_CUDNN_VERSION --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain " +export TF_TEST_FLAGS="--test_tag_filters=${TF_TEST_FILTER_TAGS} --build_tag_filters=${TF_TEST_FILTER_TAGS} \ +--distinct_host_configuration=false \ +--action_env=TF_CUDA_VERSION --action_env=TF_CUDNN_VERSION --test_env=TF2_BEHAVIOR=1 \ +--config=cuda --test_output=errors --local_test_jobs=4 --test_lang_filters=py \ +--verbose_failures=true --keep_going --define=no_tensorflow_py_deps=true \ +--run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute " +export TF_TEST_TARGETS="${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... " +export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean" +#export IS_NIGHTLY=0 # Not nightly; uncomment if building from tf repo. +export TF_PROJECT_NAME=${PROJECT_NAME} +export TF_PIP_TEST_ROOT="pip_test" + +# To build both tensorflow and tensorflow-gpu pip packages +export TF_BUILD_BOTH_GPU_PACKAGES=1 + +./tensorflow/tools/ci_build/builds/pip_new.sh diff --git a/tensorflow/tools/ci_build/rel/ubuntu/gpu_py36_nonpip.sh b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py36_nonpip.sh new file mode 100644 index 00000000000000..547bb0a1fbaa3a --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py36_nonpip.sh @@ -0,0 +1,60 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.6 +# Update bazel +install_bazelisk + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=1 +export TF_CUDA_VERSION=10 +export TF_CUDNN_VERSION=7 +export TF_NEED_TENSORRT=1 +export TENSORRT_INSTALL_PATH=/usr/local/tensorrt +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which python3.6) +export TF2_BEHAVIOR=1 +export PROJECT_NAME="tensorflow_gpu" +export LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:$TENSORRT_INSTALL_PATH/lib" +export TF_CUDA_COMPUTE_CAPABILITIES=sm_35,sm_37,sm_52,sm_60,sm_61,compute_70 + +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Get the default test targets for bazel. 
+source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +tag_filters="gpu,requires-gpu,-no_gpu,-no_oss,-oss_serial,-no_oss_py36" + +set +e +bazel test --config=cuda --config=opt \ + --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain \ + --linkopt=-lrt \ + --action_env=TF2_BEHAVIOR="${TF2_BEHAVIOR}" \ + --test_lang_filters=py \ + --test_tag_filters=${tag_filters} \ + --build_tag_filters=${tag_filters} \ + --test_timeout="300,450,1200,3600" --local_test_jobs=4 \ + --test_output=errors --verbose_failures=true --keep_going \ + --run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute \ + -- ${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... +test_xml_summary_exit diff --git a/tensorflow/tools/ci_build/rel/ubuntu/gpu_py36_pip.sh b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py36_pip.sh new file mode 100644 index 00000000000000..17b52d9ce6b635 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py36_pip.sh @@ -0,0 +1,69 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.6 +# Update bazel +install_bazelisk + +# Export required variables for running pip.sh +export OS_TYPE="UBUNTU" +export CONTAINER_TYPE="GPU" +export TF_PYTHON_VERSION='python3.6' + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=1 +export TF_CUDA_VERSION=10 +export TF_CUDNN_VERSION=7 +export TF_NEED_TENSORRT=1 +export TENSORRT_INSTALL_PATH=/usr/local/tensorrt +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION}) +export PROJECT_NAME="tensorflow_gpu" +export LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:$TENSORRT_INSTALL_PATH/lib" +export TF_CUDA_COMPUTE_CAPABILITIES=sm_35,sm_37,sm_52,sm_60,sm_61,compute_70 + +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Get the default test targets for bazel. 
+source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Export optional variables for running pip.sh +export TF_TEST_FILTER_TAGS='gpu,requires-gpu,-no_gpu,-no_oss,-oss_serial,-no_oss_py36' +export TF_BUILD_FLAGS="--config=opt --config=v2 --config=cuda --distinct_host_configuration=false \ +--action_env=TF_CUDA_VERSION --action_env=TF_CUDNN_VERSION --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain " +export TF_TEST_FLAGS="--test_tag_filters=${TF_TEST_FILTER_TAGS} --build_tag_filters=${TF_TEST_FILTER_TAGS} \ +--distinct_host_configuration=false \ +--action_env=TF_CUDA_VERSION --action_env=TF_CUDNN_VERSION --test_env=TF2_BEHAVIOR=1 \ +--config=cuda --test_output=errors --local_test_jobs=4 --test_lang_filters=py \ +--verbose_failures=true --keep_going --define=no_tensorflow_py_deps=true \ +--run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute " +export TF_TEST_TARGETS="${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... " +export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean" +#export IS_NIGHTLY=0 # Not nightly; uncomment if building from tf repo. +export TF_PROJECT_NAME=${PROJECT_NAME} +export TF_PIP_TEST_ROOT="pip_test" + +# To build both tensorflow and tensorflow-gpu pip packages +export TF_BUILD_BOTH_GPU_PACKAGES=1 + +./tensorflow/tools/ci_build/builds/pip_new.sh diff --git a/tensorflow/tools/ci_build/rel/ubuntu/gpu_py37_nonpip.sh b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py37_nonpip.sh new file mode 100644 index 00000000000000..54a72459fa1dbb --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py37_nonpip.sh @@ -0,0 +1,60 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.7 +# Update bazel +install_bazelisk + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=1 +export TF_CUDA_VERSION=10 +export TF_CUDNN_VERSION=7 +export TF_NEED_TENSORRT=1 +export TENSORRT_INSTALL_PATH=/usr/local/tensorrt +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which python3.7) +export TF2_BEHAVIOR=1 +export PROJECT_NAME="tensorflow_gpu" +export LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:$TENSORRT_INSTALL_PATH/lib" +export TF_CUDA_COMPUTE_CAPABILITIES=sm_35,sm_37,sm_52,sm_60,sm_61,compute_70 + +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Get the default test targets for bazel. 
+source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +tag_filters="gpu,requires-gpu,-no_gpu,-no_oss,-oss_serial,-no_oss_py37" + +set +e +bazel test --config=cuda --config=opt \ + --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain \ + --linkopt=-lrt \ + --action_env=TF2_BEHAVIOR="${TF2_BEHAVIOR}" \ + --test_lang_filters=py \ + --build_tag_filters=${tag_filters} \ + --test_tag_filters=${tag_filters} \ + --test_timeout="300,450,1200,3600" --local_test_jobs=4 \ + --test_output=errors --verbose_failures=true --keep_going \ + --run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute \ + -- ${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... +test_xml_summary_exit diff --git a/tensorflow/tools/ci_build/rel/ubuntu/gpu_py37_pip.sh b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py37_pip.sh new file mode 100644 index 00000000000000..2b17849b73793f --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py37_pip.sh @@ -0,0 +1,69 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.7 +# Update bazel +install_bazelisk + +# Export required variables for running pip.sh +export OS_TYPE="UBUNTU" +export CONTAINER_TYPE="GPU" +export TF_PYTHON_VERSION='python3.7' + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=1 +export TF_CUDA_VERSION=10 +export TF_CUDNN_VERSION=7 +export TF_NEED_TENSORRT=1 +export TENSORRT_INSTALL_PATH=/usr/local/tensorrt +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION}) +export PROJECT_NAME="tensorflow_gpu" +export LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:$TENSORRT_INSTALL_PATH/lib" +export TF_CUDA_COMPUTE_CAPABILITIES=sm_35,sm_37,sm_52,sm_60,sm_61,compute_70 + +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Get the default test targets for bazel. 
+source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Export optional variables for running pip.sh +export TF_TEST_FILTER_TAGS='gpu,requires-gpu,-no_gpu,-no_oss,-oss_serial,-no_oss_py37' +export TF_BUILD_FLAGS="--config=opt --config=v2 --config=cuda --distinct_host_configuration=false \ +--action_env=TF_CUDA_VERSION --action_env=TF_CUDNN_VERSION --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain " +export TF_TEST_FLAGS="--test_tag_filters=${TF_TEST_FILTER_TAGS} --build_tag_filters=${TF_TEST_FILTER_TAGS} \ +--distinct_host_configuration=false \ +--action_env=TF_CUDA_VERSION --action_env=TF_CUDNN_VERSION --test_env=TF2_BEHAVIOR=1 \ +--config=cuda --test_output=errors --local_test_jobs=4 --test_lang_filters=py \ +--verbose_failures=true --keep_going --define=no_tensorflow_py_deps=true \ +--run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute " +export TF_TEST_TARGETS="${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... " +export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean" +#export IS_NIGHTLY=0 # Not nightly; uncomment if building from tf repo. +export TF_PROJECT_NAME=${PROJECT_NAME} +export TF_PIP_TEST_ROOT="pip_test" + +# To build both tensorflow and tensorflow-gpu pip packages +export TF_BUILD_BOTH_GPU_PACKAGES=1 + +./tensorflow/tools/ci_build/builds/pip_new.sh diff --git a/tensorflow/tools/ci_build/rel/ubuntu/gpu_py38_nonpip.sh b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py38_nonpip.sh new file mode 100644 index 00000000000000..ab88f4712f0238 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py38_nonpip.sh @@ -0,0 +1,60 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.8 +# Update bazel +update_bazel_linux + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=1 +export TF_CUDA_VERSION=10 +export TF_CUDNN_VERSION=7 +export TF_NEED_TENSORRT=1 +export TENSORRT_INSTALL_PATH=/usr/local/tensorrt +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which python3.8) +export TF2_BEHAVIOR=1 +export PROJECT_NAME="tensorflow_gpu" +export LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:$TENSORRT_INSTALL_PATH/lib" +export TF_CUDA_COMPUTE_CAPABILITIES=sm_35,sm_37,sm_52,sm_60,sm_61,compute_70 + +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Get the default test targets for bazel. 
+source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh
+
+tag_filters="gpu,requires-gpu,-no_gpu,-no_oss,-oss_serial,-no_oss_py38"
+
+set +e
+bazel test --config=cuda --config=opt \
+  --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain \
+  --linkopt=-lrt \
+  --action_env=TF2_BEHAVIOR="${TF2_BEHAVIOR}" \
+  --test_lang_filters=py \
+  --build_tag_filters=${tag_filters} \
+  --test_tag_filters=${tag_filters} \
+  --test_timeout="300,450,1200,3600" --local_test_jobs=4 \
+  --test_output=errors --verbose_failures=true --keep_going \
+  --run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute \
+  -- ${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/...
+test_xml_summary_exit
diff --git a/tensorflow/tools/ci_build/rel/ubuntu/gpu_py38_pip.sh b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py38_pip.sh
new file mode 100644
index 00000000000000..1ba8c078021302
--- /dev/null
+++ b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py38_pip.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+set -e
+set -x
+
+source tensorflow/tools/ci_build/release/common.sh
+
+install_ubuntu_16_pip_deps pip3.8
+# Update bazel
+update_bazel_linux
+
+# Export required variables for running pip.sh
+export OS_TYPE="UBUNTU"
+export CONTAINER_TYPE="GPU"
+export TF_PYTHON_VERSION='python3.8'
+
+# Run configure.
+export TF_NEED_GCP=1
+export TF_NEED_HDFS=1
+export TF_NEED_S3=1
+export TF_NEED_CUDA=1
+export TF_CUDA_VERSION=10
+export TF_CUDNN_VERSION=7
+export TF_NEED_TENSORRT=1
+export TENSORRT_INSTALL_PATH=/usr/local/tensorrt
+export CC_OPT_FLAGS='-mavx'
+export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION})
+export PROJECT_NAME="tensorflow_gpu"
+export LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:$TENSORRT_INSTALL_PATH/lib"
+export TF_CUDA_COMPUTE_CAPABILITIES=sm_35,sm_37,sm_52,sm_60,sm_61,compute_70
+
+yes "" | "$PYTHON_BIN_PATH" configure.py
+
+# Get the default test targets for bazel.
+source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Export optional variables for running pip.sh +export TF_TEST_FILTER_TAGS='gpu,requires-gpu,-no_gpu,-no_oss,-oss_serial,-no_oss_py38' +export TF_BUILD_FLAGS="--config=opt --config=v2 --config=cuda --distinct_host_configuration=false \ +--action_env=TF_CUDA_VERSION --action_env=TF_CUDNN_VERSION --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain " +export TF_TEST_FLAGS="--test_tag_filters=${TF_TEST_FILTER_TAGS} --build_tag_filters=${TF_TEST_FILTER_TAGS} \ +--distinct_host_configuration=false \ +--action_env=TF_CUDA_VERSION --action_env=TF_CUDNN_VERSION --test_env=TF2_BEHAVIOR=1 \ +--config=cuda --test_output=errors --local_test_jobs=4 --test_lang_filters=py \ +--verbose_failures=true --keep_going --define=no_tensorflow_py_deps=true \ +--run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute " +export TF_TEST_TARGETS="${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... " +export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean" +#export IS_NIGHTLY=0 # Not nightly; uncomment if building from tf repo. +export TF_PROJECT_NAME=${PROJECT_NAME} +export TF_PIP_TEST_ROOT="pip_test" + +# To build both tensorflow and tensorflow-gpu pip packages +export TF_BUILD_BOTH_GPU_PACKAGES=1 + +./tensorflow/tools/ci_build/builds/pip_new.sh diff --git a/tensorflow/tools/ci_build/rel/ubuntu/sanity.sh b/tensorflow/tools/ci_build/rel/ubuntu/sanity.sh new file mode 100644 index 00000000000000..4fc600de867e50 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/sanity.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e + +# Install latest bazel +source tensorflow/tools/ci_build/release/common.sh +install_bazelisk +which bazel + +# We need py3 lint +sudo pip3 install pep8 + +# TODO(gunan): figure out why we get stuck with later versions of pylint. +# Install pylint. +sudo python3 -m pip install setuptools --upgrade +sudo python2 -m pip install pylint==1.6.4 +sudo python3 -m pip install pylint==1.6.4 + +# TODO(yifeif): print pylint version for debug. remove later. +python3 -m pylint --version + +# Run tensorflow sanity checks. +tensorflow/tools/ci_build/ci_sanity.sh diff --git a/tensorflow/tools/ci_build/rel/windows/cpu_libtensorflow.bat b/tensorflow/tools/ci_build/rel/windows/cpu_libtensorflow.bat new file mode 100644 index 00000000000000..67941234b155c0 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/windows/cpu_libtensorflow.bat @@ -0,0 +1,20 @@ +:: Copyright 2019 The TensorFlow Authors. All Rights Reserved. +:: +:: Licensed under the Apache License, Version 2.0 (the "License"); +:: you may not use this file except in compliance with the License. 
+:: You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License. +:: ============================================================================= + +CALL tensorflow\tools\ci_build\release\common_win.bat + +call tensorflow\tools\ci_build\windows\cpu\bazel\run_libtensorflow.bat || exit /b 1 + +copy lib_package %TF_ARTIFACTS_DIR%\lib_package diff --git a/tensorflow/tools/ci_build/rel/windows/cpu_py35.bat b/tensorflow/tools/ci_build/rel/windows/cpu_py35.bat new file mode 100644 index 00000000000000..02b12c7650aa35 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/windows/cpu_py35.bat @@ -0,0 +1,20 @@ +:: Copyright 2019 The TensorFlow Authors. All Rights Reserved. +:: +:: Licensed under the Apache License, Version 2.0 (the "License"); +:: you may not use this file except in compliance with the License. +:: You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License. +:: ============================================================================= + +SET PYTHON_DIRECTORY=Python35 + +CALL tensorflow\tools\ci_build\release\common_win.bat + +call tensorflow\tools\ci_build\windows\cpu\pip\run.bat --release_build --extra_build_flags "--config=v2 --define=no_tensorflow_py_deps=true" --extra_test_flags "--test_env=TF2_BEHAVIOR=1" --project_name "tensorflow_cpu" diff --git a/tensorflow/tools/ci_build/rel/windows/cpu_py36.bat b/tensorflow/tools/ci_build/rel/windows/cpu_py36.bat new file mode 100644 index 00000000000000..e44e6ca6e18c04 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/windows/cpu_py36.bat @@ -0,0 +1,20 @@ +:: Copyright 2019 The TensorFlow Authors. All Rights Reserved. +:: +:: Licensed under the Apache License, Version 2.0 (the "License"); +:: you may not use this file except in compliance with the License. +:: You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License. 
+:: ============================================================================= + +SET PYTHON_DIRECTORY=Python36 + +CALL tensorflow\tools\ci_build\release\common_win.bat + +call tensorflow\tools\ci_build\windows\cpu\pip\run.bat --release_build --extra_build_flags "--config=v2 --define=no_tensorflow_py_deps=true" --extra_test_flags "--test_env=TF2_BEHAVIOR=1" --project_name "tensorflow_cpu" diff --git a/tensorflow/tools/ci_build/rel/windows/cpu_py37.bat b/tensorflow/tools/ci_build/rel/windows/cpu_py37.bat new file mode 100644 index 00000000000000..c65167a5dc6378 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/windows/cpu_py37.bat @@ -0,0 +1,20 @@ +:: Copyright 2019 The TensorFlow Authors. All Rights Reserved. +:: +:: Licensed under the Apache License, Version 2.0 (the "License"); +:: you may not use this file except in compliance with the License. +:: You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License. +:: ============================================================================= + +SET PYTHON_DIRECTORY=Python37 + +CALL tensorflow\tools\ci_build\release\common_win.bat + +call tensorflow\tools\ci_build\windows\cpu\pip\run.bat --release_build --extra_build_flags "--config=v2 --define=no_tensorflow_py_deps=true" --extra_test_flags "--test_env=TF2_BEHAVIOR=1" --project_name "tensorflow_cpu" diff --git a/tensorflow/tools/ci_build/rel/windows/cpu_py38.bat b/tensorflow/tools/ci_build/rel/windows/cpu_py38.bat new file mode 100644 index 00000000000000..06599fc0d8ca67 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/windows/cpu_py38.bat @@ -0,0 +1,21 @@ +:: Copyright 2019 The TensorFlow Authors. All Rights Reserved. +:: +:: Licensed under the Apache License, Version 2.0 (the "License"); +:: you may not use this file except in compliance with the License. +:: You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License. +:: ============================================================================= + +SET PYTHON_DIRECTORY=Python38 + +CALL tensorflow\tools\ci_build\release\common_win.bat + +call tensorflow\tools\ci_build\windows\cpu\pip\run.bat --release_build --extra_build_flags "--config=v2 --define=no_tensorflow_py_deps=true" --extra_test_flags "--test_env=TF2_BEHAVIOR=1" --project_name "tensorflow_cpu" + diff --git a/tensorflow/tools/ci_build/rel/windows/gpu_libtensorflow.bat b/tensorflow/tools/ci_build/rel/windows/gpu_libtensorflow.bat new file mode 100644 index 00000000000000..8ab78bef3ca0af --- /dev/null +++ b/tensorflow/tools/ci_build/rel/windows/gpu_libtensorflow.bat @@ -0,0 +1,20 @@ +:: Copyright 2019 The TensorFlow Authors. All Rights Reserved. +:: +:: Licensed under the Apache License, Version 2.0 (the "License"); +:: you may not use this file except in compliance with the License. 
+:: You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License. +:: ============================================================================= + +CALL tensorflow\tools\ci_build\release\common_win.bat + +call tensorflow\tools\ci_build\windows\gpu\bazel\run_libtensorflow.bat || exit /b + +copy lib_package %TF_ARTIFACTS_DIR%\lib_package diff --git a/tensorflow/tools/ci_build/rel/windows/gpu_pip_on_cpu.bat b/tensorflow/tools/ci_build/rel/windows/gpu_pip_on_cpu.bat new file mode 100644 index 00000000000000..213de532069244 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/windows/gpu_pip_on_cpu.bat @@ -0,0 +1,21 @@ +:: Copyright 2019 The TensorFlow Authors. All Rights Reserved. +:: +:: Licensed under the Apache License, Version 2.0 (the "License"); +:: you may not use this file except in compliance with the License. +:: You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License. +:: ============================================================================= + +SET PYTHON_DIRECTORY=Python36 + +CALL tensorflow\tools\ci_build\release\common_win.bat + +call tensorflow\tools\ci_build\windows\integration\gpu_pip_on_cpu\run.bat + diff --git a/tensorflow/tools/ci_build/rel/windows/gpu_py35.bat b/tensorflow/tools/ci_build/rel/windows/gpu_py35.bat new file mode 100644 index 00000000000000..cba62225bee4fe --- /dev/null +++ b/tensorflow/tools/ci_build/rel/windows/gpu_py35.bat @@ -0,0 +1,23 @@ +:: Copyright 2019 The TensorFlow Authors. All Rights Reserved. +:: +:: Licensed under the Apache License, Version 2.0 (the "License"); +:: you may not use this file except in compliance with the License. +:: You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License. +:: ============================================================================= + +SET PYTHON_DIRECTORY=Python35 + +CALL tensorflow\tools\ci_build\release\common_win.bat + +call tensorflow\tools\ci_build\windows\gpu\pip\run.bat --release_build --extra_build_flags "--config=v2" --extra_test_flags "--test_env=TF2_BEHAVIOR=1" --project_name "tensorflow" + +for %%a in ("%~dp0\.") do set "PARENT_DIR=%%~nxa" +bash -l tensorflow\tools\ci_build\release\windows\%PARENT_DIR%\release_pip_rename.sh diff --git a/tensorflow/tools/ci_build/rel/windows/gpu_py36.bat b/tensorflow/tools/ci_build/rel/windows/gpu_py36.bat new file mode 100644 index 00000000000000..ede8bd35f52f24 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/windows/gpu_py36.bat @@ -0,0 +1,23 @@ +:: Copyright 2019 The TensorFlow Authors. 
All Rights Reserved. +:: +:: Licensed under the Apache License, Version 2.0 (the "License"); +:: you may not use this file except in compliance with the License. +:: You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License. +:: ============================================================================= + +SET PYTHON_DIRECTORY=Python36 + +CALL tensorflow\tools\ci_build\release\common_win.bat + +call tensorflow\tools\ci_build\windows\gpu\pip\run.bat --release_build --extra_build_flags "--config=v2" --extra_test_flags "--test_env=TF2_BEHAVIOR=1" --project_name "tensorflow" + +for %%a in ("%~dp0\.") do set "PARENT_DIR=%%~nxa" +bash -l tensorflow\tools\ci_build\release\windows\%PARENT_DIR%\release_pip_rename.sh \ No newline at end of file diff --git a/tensorflow/tools/ci_build/rel/windows/gpu_py37.bat b/tensorflow/tools/ci_build/rel/windows/gpu_py37.bat new file mode 100644 index 00000000000000..7509270fc43796 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/windows/gpu_py37.bat @@ -0,0 +1,23 @@ +:: Copyright 2019 The TensorFlow Authors. All Rights Reserved. +:: +:: Licensed under the Apache License, Version 2.0 (the "License"); +:: you may not use this file except in compliance with the License. +:: You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License. +:: ============================================================================= + +SET PYTHON_DIRECTORY=Python37 + +CALL tensorflow\tools\ci_build\release\common_win.bat + +call tensorflow\tools\ci_build\windows\gpu\pip\run.bat --release_build --extra_build_flags "--config=v2" --extra_test_flags "--test_env=TF2_BEHAVIOR=1" --project_name "tensorflow" + +for %%a in ("%~dp0\.") do set "PARENT_DIR=%%~nxa" +bash -l tensorflow\tools\ci_build\release\windows\%PARENT_DIR%\release_pip_rename.sh \ No newline at end of file diff --git a/tensorflow/tools/ci_build/rel/windows/gpu_py38.bat b/tensorflow/tools/ci_build/rel/windows/gpu_py38.bat new file mode 100644 index 00000000000000..fc1c600fa5e355 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/windows/gpu_py38.bat @@ -0,0 +1,23 @@ +:: Copyright 2019 The TensorFlow Authors. All Rights Reserved. +:: +:: Licensed under the Apache License, Version 2.0 (the "License"); +:: you may not use this file except in compliance with the License. +:: You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License. 
+:: ============================================================================= + +SET PYTHON_DIRECTORY=Python38 + +CALL tensorflow\tools\ci_build\release\common_win.bat + +call tensorflow\tools\ci_build\windows\gpu\pip\run.bat --release_build --extra_build_flags "--config=v2" --extra_test_flags "--test_env=TF2_BEHAVIOR=1" --project_name "tensorflow" + +for %%a in ("%~dp0\.") do set "PARENT_DIR=%%~nxa" +bash -l tensorflow\tools\ci_build\release\windows\%PARENT_DIR%\release_pip_rename.sh From bbf2a5d4b700097fbf286332f22bb9a0c8d79374 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 17 Sep 2020 16:58:44 -0700 Subject: [PATCH 009/256] Disable a flaky test on mac py38 --- tensorflow/python/kernel_tests/BUILD | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/python/kernel_tests/BUILD b/tensorflow/python/kernel_tests/BUILD index f93bf5cd1ae9b8..d37b928ad79211 100644 --- a/tensorflow/python/kernel_tests/BUILD +++ b/tensorflow/python/kernel_tests/BUILD @@ -727,6 +727,7 @@ cuda_py_test( name = "matrix_solve_ls_op_test", size = "medium", srcs = ["matrix_solve_ls_op_test.py"], + tags = ["no_mac"], deps = [ "//tensorflow/python:array_ops", "//tensorflow/python:client_testlib", From 114ff8c84f2352999a7f4477d355e392f78b8f3c Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 17 Sep 2020 19:28:59 -0700 Subject: [PATCH 010/256] Fix rename of gpu pips for single pip package on Windows GPU --- tensorflow/tools/ci_build/rel/windows/gpu_py35.bat | 3 +-- tensorflow/tools/ci_build/rel/windows/gpu_py36.bat | 3 +-- tensorflow/tools/ci_build/rel/windows/gpu_py37.bat | 3 +-- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/tensorflow/tools/ci_build/rel/windows/gpu_py35.bat b/tensorflow/tools/ci_build/rel/windows/gpu_py35.bat index cba62225bee4fe..8a21961fdef3db 100644 --- a/tensorflow/tools/ci_build/rel/windows/gpu_py35.bat +++ b/tensorflow/tools/ci_build/rel/windows/gpu_py35.bat @@ -19,5 +19,4 @@ CALL tensorflow\tools\ci_build\release\common_win.bat call tensorflow\tools\ci_build\windows\gpu\pip\run.bat --release_build --extra_build_flags "--config=v2" --extra_test_flags "--test_env=TF2_BEHAVIOR=1" --project_name "tensorflow" -for %%a in ("%~dp0\.") do set "PARENT_DIR=%%~nxa" -bash -l tensorflow\tools\ci_build\release\windows\%PARENT_DIR%\release_pip_rename.sh +bash -l tensorflow\tools\ci_build\release\windows\gpu_py35_full\release_pip_rename.sh diff --git a/tensorflow/tools/ci_build/rel/windows/gpu_py36.bat b/tensorflow/tools/ci_build/rel/windows/gpu_py36.bat index ede8bd35f52f24..7c4a395f62dd11 100644 --- a/tensorflow/tools/ci_build/rel/windows/gpu_py36.bat +++ b/tensorflow/tools/ci_build/rel/windows/gpu_py36.bat @@ -19,5 +19,4 @@ CALL tensorflow\tools\ci_build\release\common_win.bat call tensorflow\tools\ci_build\windows\gpu\pip\run.bat --release_build --extra_build_flags "--config=v2" --extra_test_flags "--test_env=TF2_BEHAVIOR=1" --project_name "tensorflow" -for %%a in ("%~dp0\.") do set "PARENT_DIR=%%~nxa" -bash -l tensorflow\tools\ci_build\release\windows\%PARENT_DIR%\release_pip_rename.sh \ No newline at end of file +bash -l tensorflow\tools\ci_build\release\windows\gpu_py36_full\release_pip_rename.sh diff --git a/tensorflow/tools/ci_build/rel/windows/gpu_py37.bat b/tensorflow/tools/ci_build/rel/windows/gpu_py37.bat index 7509270fc43796..97eb1168d1ce0d 100644 --- a/tensorflow/tools/ci_build/rel/windows/gpu_py37.bat +++ b/tensorflow/tools/ci_build/rel/windows/gpu_py37.bat @@ -19,5 +19,4 @@ CALL tensorflow\tools\ci_build\release\common_win.bat call 
tensorflow\tools\ci_build\windows\gpu\pip\run.bat --release_build --extra_build_flags "--config=v2" --extra_test_flags "--test_env=TF2_BEHAVIOR=1" --project_name "tensorflow" -for %%a in ("%~dp0\.") do set "PARENT_DIR=%%~nxa" -bash -l tensorflow\tools\ci_build\release\windows\%PARENT_DIR%\release_pip_rename.sh \ No newline at end of file +bash -l tensorflow\tools\ci_build\release\windows\gpu_py37_full\release_pip_rename.sh From 202086f36ea03c92c6c569376fe350a045ba328a Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 17 Sep 2020 19:39:24 -0700 Subject: [PATCH 011/256] Also fix single pip Windows GPU renaming on python 3.8 --- tensorflow/tools/ci_build/rel/windows/gpu_py38.bat | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tensorflow/tools/ci_build/rel/windows/gpu_py38.bat b/tensorflow/tools/ci_build/rel/windows/gpu_py38.bat index fc1c600fa5e355..f980d311a5be5c 100644 --- a/tensorflow/tools/ci_build/rel/windows/gpu_py38.bat +++ b/tensorflow/tools/ci_build/rel/windows/gpu_py38.bat @@ -19,5 +19,4 @@ CALL tensorflow\tools\ci_build\release\common_win.bat call tensorflow\tools\ci_build\windows\gpu\pip\run.bat --release_build --extra_build_flags "--config=v2" --extra_test_flags "--test_env=TF2_BEHAVIOR=1" --project_name "tensorflow" -for %%a in ("%~dp0\.") do set "PARENT_DIR=%%~nxa" -bash -l tensorflow\tools\ci_build\release\windows\%PARENT_DIR%\release_pip_rename.sh +bash -l tensorflow\tools\ci_build\release\windows\gpu_py38_full\release_pip_rename.sh From 7dadd9a7c8bcb42d6e8829f86ce23c7d3ee264dc Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 07:27:59 -0700 Subject: [PATCH 012/256] Revert "2.3.0-rc2 cherry-pick request: Cherry pick library threadsafestatus " --- tensorflow/core/kernels/BUILD | 4 -- tensorflow/core/kernels/batch_kernels.cc | 1 - tensorflow/core/kernels/batching_util/BUILD | 36 ------------ .../batching_util/threadsafe_status.cc | 51 ----------------- .../kernels/batching_util/threadsafe_status.h | 57 ------------------- .../batching_util/threadsafe_status_test.cc | 51 ----------------- 6 files changed, 200 deletions(-) delete mode 100644 tensorflow/core/kernels/batching_util/threadsafe_status.cc delete mode 100644 tensorflow/core/kernels/batching_util/threadsafe_status.h delete mode 100644 tensorflow/core/kernels/batching_util/threadsafe_status_test.cc diff --git a/tensorflow/core/kernels/BUILD b/tensorflow/core/kernels/BUILD index cbe0276003e69a..7da864a6027811 100644 --- a/tensorflow/core/kernels/BUILD +++ b/tensorflow/core/kernels/BUILD @@ -672,10 +672,6 @@ cc_library( "//tensorflow/core:protos_all_cc", "//tensorflow/core/kernels/batching_util:periodic_function_dynamic", "//tensorflow/core/kernels/batching_util:shared_batch_scheduler_hdrs", - "//tensorflow/core/kernels/batching_util:threadsafe_status", - "//tensorflow/core/util:incremental_barrier", - "@com_google_absl//absl/container:flat_hash_map", - "@com_google_absl//absl/strings", ], alwayslink = 1, ) diff --git a/tensorflow/core/kernels/batch_kernels.cc b/tensorflow/core/kernels/batch_kernels.cc index 269b4d412cc8e3..6449a399573e1b 100644 --- a/tensorflow/core/kernels/batch_kernels.cc +++ b/tensorflow/core/kernels/batch_kernels.cc @@ -23,7 +23,6 @@ limitations under the License. 
#include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/batching_util/periodic_function.h" #include "tensorflow/core/kernels/batching_util/shared_batch_scheduler.h" -#include "tensorflow/core/kernels/batching_util/threadsafe_status.h" #include "tensorflow/core/kernels/concat_lib.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/kernels/split_lib.h" diff --git a/tensorflow/core/kernels/batching_util/BUILD b/tensorflow/core/kernels/batching_util/BUILD index ec246323679f95..803eb2e9048494 100644 --- a/tensorflow/core/kernels/batching_util/BUILD +++ b/tensorflow/core/kernels/batching_util/BUILD @@ -52,30 +52,6 @@ cc_library( ], ) -cc_library( - name = "threadsafe_status", - srcs = ["threadsafe_status.cc"], - hdrs = ["threadsafe_status.h"], - deps = [ - "//tensorflow/core:lib", - "@com_google_absl//absl/base:core_headers", - "@com_google_absl//absl/status", - "@com_google_absl//absl/synchronization", - ], -) - -cc_library( - name = "threadsafe_status", - srcs = ["threadsafe_status.cc"], - hdrs = ["threadsafe_status.h"], - deps = [ - "//tensorflow/core:lib", - "@com_google_absl//absl/base:core_headers", - "@com_google_absl//absl/status", - "@com_google_absl//absl/synchronization", - ], -) - tf_cc_test( name = "batch_scheduler_test", srcs = ["batch_scheduler_test.cc"], @@ -210,18 +186,6 @@ tf_cc_test( ], ) -tf_cc_test( - name = "threadsafe_status_test", - srcs = ["threadsafe_status_test.cc"], - deps = [ - ":threadsafe_status", - "//tensorflow/core:lib", - "//tensorflow/core:protos_all_cc", - "//tensorflow/core:test", - "//tensorflow/core:test_main", - ], -) - cc_library( name = "fake_clock_env", testonly = 1, diff --git a/tensorflow/core/kernels/batching_util/threadsafe_status.cc b/tensorflow/core/kernels/batching_util/threadsafe_status.cc deleted file mode 100644 index fa5cda7161b4e0..00000000000000 --- a/tensorflow/core/kernels/batching_util/threadsafe_status.cc +++ /dev/null @@ -1,51 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow/core/kernels/batching_util/threadsafe_status.h" - -#include "absl/base/thread_annotations.h" -#include "absl/status/status.h" -#include "absl/synchronization/mutex.h" -#include "tensorflow/core/platform/mutex.h" - -namespace tensorflow { -const Status& ThreadSafeStatus::status() const& { - tf_shared_lock lock(mutex_); - return status_; -} - -Status ThreadSafeStatus::status() && { - tf_shared_lock lock(mutex_); - return std::move(status_); -} - -void ThreadSafeStatus::Update(const Status& new_status) { - if (new_status.ok()) { - return; - } - - mutex_lock lock(mutex_); - status_.Update(new_status); -} - -void ThreadSafeStatus::Update(Status&& new_status) { - if (new_status.ok()) { - return; - } - - mutex_lock lock(mutex_); - status_.Update(std::forward(new_status)); -} -} // namespace tensorflow diff --git a/tensorflow/core/kernels/batching_util/threadsafe_status.h b/tensorflow/core/kernels/batching_util/threadsafe_status.h deleted file mode 100644 index c14a8a907147bd..00000000000000 --- a/tensorflow/core/kernels/batching_util/threadsafe_status.h +++ /dev/null @@ -1,57 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_THREADSAFE_STATUS_H_ -#define TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_THREADSAFE_STATUS_H_ - -#include "tensorflow/core/platform/mutex.h" -#include "tensorflow/core/platform/status.h" -#include "tensorflow/core/platform/thread_annotations.h" - -namespace tensorflow { -// Wrapper class to allow both lock-free construction and concurrent updates on -// a 'status'. -// -// Example Usage: -// std::thread threads[2]; -// ThreadSafeStatus thread_safe_status; -// threads[0] = std::thread([&]() { -// status.Update(errors::Internal("internal error")); -// }); -// threads[1] = std::thread([&]() { -// status.Update(errors::InvalidArgument("invalid argument")); -// }); -// threads[0].Join(); -// threads[1].Join(); -// -// NOTE: -// When updated in a multi-threading setup, only the first error is retained. -class ThreadSafeStatus { - public: - const Status& status() const& TF_LOCKS_EXCLUDED(mutex_); - Status status() && TF_LOCKS_EXCLUDED(mutex_); - - // Retains the first error status: replaces the current status with - // `new_status` if `new_status` is not OK and the previous status is OK. 
- void Update(const Status& new_status) TF_LOCKS_EXCLUDED(mutex_); - void Update(Status&& new_status) TF_LOCKS_EXCLUDED(mutex_); - - private: - mutable mutex mutex_; - Status status_ TF_GUARDED_BY(mutex_); -}; -} // namespace tensorflow - -#endif // TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_THREADSAFE_STATUS_H_ diff --git a/tensorflow/core/kernels/batching_util/threadsafe_status_test.cc b/tensorflow/core/kernels/batching_util/threadsafe_status_test.cc deleted file mode 100644 index e0c5d03c8a451c..00000000000000 --- a/tensorflow/core/kernels/batching_util/threadsafe_status_test.cc +++ /dev/null @@ -1,51 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/core/kernels/batching_util/threadsafe_status.h" - -#include "tensorflow/core/lib/core/status_test_util.h" -#include "tensorflow/core/platform/errors.h" -#include "tensorflow/core/platform/test.h" -#include "tensorflow/core/protobuf/error_codes.pb.h" - -namespace tensorflow { -namespace { - -TEST(ThreadSafeStatus, DefaultOk) { - ThreadSafeStatus status; - TF_EXPECT_OK(status.status()); -} - -TEST(ThreadSafeStatus, Update) { - ThreadSafeStatus status; - TF_EXPECT_OK(status.status()); - - status.Update(errors::FailedPrecondition("original error")); - EXPECT_EQ(status.status().code(), error::FAILED_PRECONDITION); - - status.Update(Status::OK()); - EXPECT_EQ(status.status().code(), error::FAILED_PRECONDITION); - - status.Update(errors::Internal("new error")); - EXPECT_EQ(status.status().code(), error::FAILED_PRECONDITION); -} - -TEST(ThreadSafeStatus, Move) { - ThreadSafeStatus status; - TF_EXPECT_OK(std::move(status).status()); -} - -} // namespace -} // namespace tensorflow From 454d1f4af1e3aadd597cd837238ebab8788e9079 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 10:56:45 -0700 Subject: [PATCH 013/256] Bump sqlite to 3.33.0 This should handle CVE-2020-15358. 
PiperOrigin-RevId: 332484006 Change-Id: Id2e7c4e877fcfaa53184fd21139a00f3234a5e3d --- tensorflow/workspace.bzl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl index 24446d846cfdf9..c9615431456ac2 100755 --- a/tensorflow/workspace.bzl +++ b/tensorflow/workspace.bzl @@ -409,12 +409,12 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""): tf_http_archive( name = "org_sqlite", build_file = clean_dep("//third_party:sqlite.BUILD"), - sha256 = "e9cec01d4519e2d49b3810615237325263fe1feaceae390ee12b4a29bd73dbe2", - strip_prefix = "sqlite-amalgamation-3320300", + sha256 = "b34f4c0c0eefad9a7e515c030c18702e477f4ef7d8ade6142bdab8011b487ac6", + strip_prefix = "sqlite-amalgamation-3330000", system_build_file = clean_dep("//third_party/systemlibs:sqlite.BUILD"), urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/www.sqlite.org/2020/sqlite-amalgamation-3320300.zip", - "https://www.sqlite.org/2020/sqlite-amalgamation-3320300.zip", + "https://storage.googleapis.com/mirror.tensorflow.org/www.sqlite.org/2020/sqlite-amalgamation-3330000.zip", + "https://www.sqlite.org/2020/sqlite-amalgamation-3330000.zip", ], ) From 2369d14f9de3d900b5e065504246ce5daaa439bc Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 28 Jul 2020 16:30:26 -0700 Subject: [PATCH 014/256] [tflite] Don't check for buffers on every subgraph. Buffers in the model are allocated globally, hence it makes sense to check for their presence only once (O(1)) instead of on every subgraph (O(n)). PiperOrigin-RevId: 323677724 Change-Id: I2da0c381093006828cc4c80f03dec8a917782861 --- tensorflow/lite/interpreter_builder.cc | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/tensorflow/lite/interpreter_builder.cc b/tensorflow/lite/interpreter_builder.cc index 4b491d41881304..3c457523ca6b0f 100644 --- a/tensorflow/lite/interpreter_builder.cc +++ b/tensorflow/lite/interpreter_builder.cc @@ -609,7 +609,12 @@ TfLiteStatus InterpreterBuilder::operator()( auto* buffers = model_->buffers(); if (subgraphs->size() == 0) { - error_reporter_->Report("No subgraph in the model.\n"); + TF_LITE_REPORT_ERROR(error_reporter_, "No subgraph in the model.\n"); + return cleanup_and_error(); + } + + if (!buffers) { + TF_LITE_REPORT_ERROR(error_reporter_, "No buffers in the model.\n"); return cleanup_and_error(); } @@ -630,10 +635,10 @@ TfLiteStatus InterpreterBuilder::operator()( (*interpreter)->subgraph(subgraph_index); auto operators = subgraph->operators(); auto tensors = subgraph->tensors(); - if (!operators || !tensors || !buffers) { - error_reporter_->Report( - "Did not get operators, tensors, or buffers in subgraph %d.\n", - subgraph_index); + if (!operators || !tensors) { + TF_LITE_REPORT_ERROR(error_reporter_, + "Did not get operators or tensors in subgraph %d.\n", + subgraph_index); return cleanup_and_error(); } if (modified_subgraph->AddTensors(tensors->size()) != kTfLiteOk) { From 00c7ed7ce81c2126ebc17dfe7073b5c0efd5ec0a Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 13:04:23 -0700 Subject: [PATCH 015/256] [tflite] Validate segment ids for segment_sum. Segment identifiers in segment_sum should be in a 1-D tensor of same size as the first dimension of the input. The values of the tensor should be integers from {0, 1, 2, ... k-1}, where k is the first dimension of the input. The segment identifiers must not contain jumps and must be increasing. 
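As an illustration only (a hedged Python sketch using tf.math.segment_sum, which follows the same segmentation semantics; the patch itself changes the TFLite kernel shown below):

import tensorflow as tf

data = tf.constant([[1, 2], [3, 4], [5, 6]], dtype=tf.int32)

# Valid segment ids: length 3 (== first dimension of data), starting at 0
# and never increasing by more than 1 between consecutive entries.
print(tf.math.segment_sum(data, segment_ids=[0, 0, 1]))  # [[4 6] [5 6]]

# Ids such as [0, 3, 1] (unsorted), [0, 3, 5] (jumps), [-1, 0, 1] (negative)
# or [0, 1] (wrong cardinality) violate the constraints; the patched TFLite
# kernel now returns an error for them instead of computing an ill-defined
# output shape.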
See https://www.tensorflow.org/api_docs/python/tf/math#Segmentation as the source for these constraints. PiperOrigin-RevId: 332510942 Change-Id: I898beaba00642c918bcd4b4d4ce893ebb190d869 --- tensorflow/lite/kernels/segment_sum.cc | 19 ++++++++++-- tensorflow/lite/kernels/segment_sum_test.cc | 32 +++++++++++++++++++++ 2 files changed, 48 insertions(+), 3 deletions(-) diff --git a/tensorflow/lite/kernels/segment_sum.cc b/tensorflow/lite/kernels/segment_sum.cc index 8185359321e629..4b762184a50647 100644 --- a/tensorflow/lite/kernels/segment_sum.cc +++ b/tensorflow/lite/kernels/segment_sum.cc @@ -34,11 +34,24 @@ TfLiteStatus ResizeOutputTensor(TfLiteContext* context, const TfLiteTensor* data, const TfLiteTensor* segment_ids, TfLiteTensor* output) { - int max_index = -1; + // Segment ids should be of same cardinality as first input dimension and they + // should be increasing by at most 1, from 0 (e.g., [0, 0, 1, 2, 3] is valid) const int segment_id_size = segment_ids->dims->data[0]; - if (segment_id_size > 0) { - max_index = segment_ids->data.i32[segment_id_size - 1]; + TF_LITE_ENSURE_EQ(context, segment_id_size, data->dims->data[0]); + int previous_segment_id = -1; + for (int i = 0; i < segment_id_size; i++) { + const int current_segment_id = GetTensorData(segment_ids)[i]; + if (i == 0) { + TF_LITE_ENSURE_EQ(context, current_segment_id, 0); + } else { + int delta = current_segment_id - previous_segment_id; + TF_LITE_ENSURE(context, delta == 0 || delta == 1); + } + previous_segment_id = current_segment_id; } + + const int max_index = previous_segment_id; + const int data_rank = NumDimensions(data); TfLiteIntArray* output_shape = TfLiteIntArrayCreate(NumDimensions(data)); output_shape->data[0] = max_index + 1; diff --git a/tensorflow/lite/kernels/segment_sum_test.cc b/tensorflow/lite/kernels/segment_sum_test.cc index ec531ffd92de10..286742c09330c4 100644 --- a/tensorflow/lite/kernels/segment_sum_test.cc +++ b/tensorflow/lite/kernels/segment_sum_test.cc @@ -110,5 +110,37 @@ TEST(SegmentSumOpModelTest, Float32Test_ThreeDimensions) { EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 2, 1})); } +TEST(SegmentSumOpModelTest, TestFailIfSegmentsAreNotSorted) { + SegmentSumOpModel model({TensorType_INT32, {3, 2}}, + {TensorType_INT32, {3}}); + model.PopulateTensor(model.data(), {1, 2, 3, 4, 5, 6}); + model.PopulateTensor(model.segment_ids(), {0, 3, 1}); + ASSERT_EQ(model.InvokeUnchecked(), kTfLiteError); +} + +TEST(SegmentSumOpModelTest, TestFailIfSegmentsAreNotConsecutive) { + SegmentSumOpModel model({TensorType_INT32, {3, 2}}, + {TensorType_INT32, {3}}); + model.PopulateTensor(model.data(), {1, 2, 3, 4, 5, 6}); + model.PopulateTensor(model.segment_ids(), {0, 3, 5}); + ASSERT_EQ(model.InvokeUnchecked(), kTfLiteError); +} + +TEST(SegmentSumOpModelTest, TestFailIfSegmentsAreNegative) { + SegmentSumOpModel model({TensorType_INT32, {3, 2}}, + {TensorType_INT32, {3}}); + model.PopulateTensor(model.data(), {1, 2, 3, 4, 5, 6}); + model.PopulateTensor(model.segment_ids(), {-1, 0, 1}); + ASSERT_EQ(model.InvokeUnchecked(), kTfLiteError); +} + +TEST(SegmentSumOpModelTest, TestFailIfSegmentsAreNotTheRightCardinality) { + SegmentSumOpModel model({TensorType_INT32, {3, 2}}, + {TensorType_INT32, {2}}); + model.PopulateTensor(model.data(), {1, 2, 3, 4, 5, 6}); + model.PopulateTensor(model.segment_ids(), {0, 1}); + ASSERT_EQ(model.InvokeUnchecked(), kTfLiteError); +} + } // namespace } // namespace tflite From 42ed6ac86856956da65b5957a26fab130ff9471c Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 
Sep 2020 13:10:41 -0700 Subject: [PATCH 016/256] [tflite] Test for `kTfLiteOptionalTensor` in `GetInput`. `GetInput`, `GetVariableInput` and `GetOutput` all fail to check for the case where `node->inputs->data[index]` is the special `kTfLiteOptionalTensor` value (-1) which then causes `context->tensors[node->inputs->data[index]]` to read from invalid memory location. This fix makes `GetInput` and related return `nullptr` in those cases, asking the caller to check for `nullptr`. This is better than having `GetOptionalInputTensor` and `GetOptionalOutputTensor` (does not exist but could be added) as using the patched `GetInput` in error would be caught by a sanitizer test in the default optimized build (due to the `-fsanitize=null` option). PiperOrigin-RevId: 332512190 Change-Id: Iabca54da2f2de02b6ece3c38b54f76d4277d689e --- tensorflow/lite/kernels/kernel_util.h | 32 ++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/tensorflow/lite/kernels/kernel_util.h b/tensorflow/lite/kernels/kernel_util.h index 6bd6bb1c7ed114..8fae5740e50209 100644 --- a/tensorflow/lite/kernels/kernel_util.h +++ b/tensorflow/lite/kernels/kernel_util.h @@ -30,27 +30,49 @@ inline int SizeOfDimension(const TfLiteTensor* t, int dim) { } inline const TfLiteTensor* GetInput(const TfLiteContext* context, const TfLiteNode* node, int index) { - return &context->tensors[node->inputs->data[index]]; + const int tensor_index = node->inputs->data[index]; + if (tensor_index < 0) { + return nullptr; + } + return &context->tensors[tensor_index]; } // Note: You must check if result is not null: // TfLiteTensor* my_tensor = GetVariableInput(context, node, kMyTensorIdx); // TF_LITE_ENSURE(context, my_tensor != nullptr); inline TfLiteTensor* GetVariableInput(TfLiteContext* context, const TfLiteNode* node, int index) { - TfLiteTensor* tensor = &context->tensors[node->inputs->data[index]]; + const int tensor_index = node->inputs->data[index]; + if (tensor_index < 0) { + return nullptr; + } + TfLiteTensor* tensor = &context->tensors[tensor_index]; +>>>>>>> d8f8236c29 ([tflite] Test for `kTfLiteOptionalTensor` in `GetInput`.) return (tensor->is_variable) ? tensor : nullptr; } inline TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node, int index) { - return &context->tensors[node->outputs->data[index]]; + const int tensor_index = node->outputs->data[index]; + if (tensor_index < 0) { + return nullptr; + } + return &context->tensors[tensor_index]; } inline TfLiteTensor* GetTemporary(TfLiteContext* context, const TfLiteNode* node, int index) { - return &context->tensors[node->temporaries->data[index]]; + const int tensor_index = node->temporaries->data[index]; + if (tensor_index < 0) { + return nullptr; + } + return &context->tensors[tensor_index]; } + inline const TfLiteTensor* GetIntermediates(TfLiteContext* context, const TfLiteNode* node, int index) { - return &context->tensors[node->intermediates->data[index]]; + const int tensor_index = node->intermediates->data[index]; + if (tensor_index < 0) { + return nullptr; + } + return &context->tensors[tensor_index]; } inline int NumInputs(const TfLiteNode* node) { return node->inputs->size; } inline int NumOutputs(const TfLiteNode* node) { return node->outputs->size; } From 7e283f97d8c784d3eae5062d9de25d0f432ad239 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 18:40:41 -0700 Subject: [PATCH 017/256] [tflite] Make `GetOptionalInputTensor` the same as `GetInput`. 
With the previous change, there is no more need for two separate APIs. We would deprecate `GetOptionalInputTensor` in the future. PiperOrigin-RevId: 332513386 Change-Id: Id7110271c25ebd6126ad8c82a493e37e0e0756b3 --- tensorflow/lite/kernels/kernel_util.h | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/tensorflow/lite/kernels/kernel_util.h b/tensorflow/lite/kernels/kernel_util.h index 8fae5740e50209..aae28537edfe21 100644 --- a/tensorflow/lite/kernels/kernel_util.h +++ b/tensorflow/lite/kernels/kernel_util.h @@ -95,12 +95,7 @@ inline int64_t NumElements(const TfLiteTensor* t) { inline const TfLiteTensor* GetOptionalInputTensor(TfLiteContext* context, const TfLiteNode* node, int index) { - const bool use_tensor = index < node->inputs->size && - node->inputs->data[index] != kTfLiteOptionalTensor; - if (use_tensor) { - return &context->tensors[node->inputs->data[index]]; - } - return nullptr; + return GetInput(context, node, index); } // Determines whether tensor is constant. From 094329d0dcb8290bed2b1ee420934971f422c86d Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 19:22:10 -0700 Subject: [PATCH 018/256] [tflite] Ensure inputs and outputs don't overlap. If a model uses the same tensor for both an input and an output then this can result in data loss and memory corruption. This should not happen. PiperOrigin-RevId: 332522916 Change-Id: If0905b142415a9dfceaf2d181872f2a8fb88f48a --- tensorflow/lite/core/subgraph.cc | 37 ++++++++++++++++++++++++++++++++ tensorflow/lite/core/subgraph.h | 9 ++++++++ 2 files changed, 46 insertions(+) diff --git a/tensorflow/lite/core/subgraph.cc b/tensorflow/lite/core/subgraph.cc index 0f11af5148859a..5ad8b51d433647 100644 --- a/tensorflow/lite/core/subgraph.cc +++ b/tensorflow/lite/core/subgraph.cc @@ -567,6 +567,33 @@ TfLiteStatus Subgraph::CheckTensorIndices(const char* label, const int* indices, return kTfLiteOk; } +// We have two arrays and we need to check that elements from one array don't +// show up in the other. We could sort both arrays and then iterate with two +// pointers from start to finish always increasing the smaller one but since +// these arrays are usually short (<25 elements for inputs, usually <3 for +// outputs), this might be slower than the naive approach (if arrays have size n +// and m, with n >> m ~ O(1), first approach is O(nlogn) whereas the other is +// O(n)). Plus, sorting the input and output arrays might not be something we +// want as it destroys ordering of elements. +// +// If it turns out that this is an issue, we can switch to the other algorithm. +TfLiteStatus Subgraph::CheckInputAndOutputForOverlap(const int* input_indices, + int num_inputs, + const int* output_indices, + int num_outputs) { + for (int i = 0; i < num_inputs; i++) { + for (int j = 0; j < num_outputs; j++) { + if (input_indices[i] == output_indices[j]) { + ReportError("Tensor %d is both input %d and output %d\n", + input_indices[i], i, j); + consistent_ = false; + return kTfLiteError; + } + } + } + return kTfLiteOk; +} + namespace { // Multiply two sizes and return true if overflow occurred; // This is based off tensorflow/overflow.h but is simpler as we already @@ -688,6 +715,16 @@ TfLiteStatus Subgraph::AddNodeWithParameters( &context_, CheckTensorIndices("node outputs", outputs.data(), outputs.size())); + // For builtin ops, inputs and outputs must not overlap. Custom ops must do + // this check by themselves if they don't support overlapping tensors. 
This + // distinction is to allow custom ops to just forward a tensor, reusing it as + // both input and output. + if (builtin_data != nullptr) { + TF_LITE_ENSURE_OK(&context_, CheckInputAndOutputForOverlap( + inputs.data(), inputs.size(), + outputs.data(), outputs.size())); + } + int new_node_index = nodes_and_registration_.size(); if (node_index) *node_index = new_node_index; nodes_and_registration_.resize(nodes_and_registration_.size() + 1); diff --git a/tensorflow/lite/core/subgraph.h b/tensorflow/lite/core/subgraph.h index bee13c9073e48a..979c709614c90a 100644 --- a/tensorflow/lite/core/subgraph.h +++ b/tensorflow/lite/core/subgraph.h @@ -433,6 +433,15 @@ class Subgraph { TfLiteStatus CheckTensorIndices(const char* label, const int* indices, int length); + // Check that the input indices and the output indices don't overlap. + // This is needed because same tensor must not be used both as input and + // output for an operator. + // NOTE: this changes consistent_ to be false if indices are out of bounds. + TfLiteStatus CheckInputAndOutputForOverlap(const int* input_indices, + int num_inputs, + const int* output_indices, + int num_outputs); + // Compute the number of bytes required to represent a tensor with dimensions // specified by the array dims (of length dims_size). Returns the status code // and bytes. From 1a506aef22640364ca629ebefa14605a33d2efae Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sat, 19 Sep 2020 18:00:09 -0700 Subject: [PATCH 019/256] [tflite] Ensure input tensors don't have `nullptr` buffers. A crafted TFLite model can force a node to have as input a tensor backed by a `nullptr` buffer. That is, by carefully changing the buffer index in the flatbuffer serialization, we can force the TFLite interpreter to consider a read-only tensor to be a read-write one and assume that there is an operator that has this tensor as output, writing to it and allocating memory before the tensor is used as input. If this does not happen, we get memory corruption. PiperOrigin-RevId: 332524692 Change-Id: I57ef175152a29020af9ab041dc959e5631dce40f --- tensorflow/lite/core/subgraph.cc | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tensorflow/lite/core/subgraph.cc b/tensorflow/lite/core/subgraph.cc index 5ad8b51d433647..0646adebf8ce4b 100644 --- a/tensorflow/lite/core/subgraph.cc +++ b/tensorflow/lite/core/subgraph.cc @@ -18,6 +18,7 @@ limitations under the License. #include #include "tensorflow/lite/arena_planner.h" +#include "third_party/tensorflow/lite/builtin_ops.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/context_util.h" #include "tensorflow/lite/core/api/tensor_utils.h" @@ -971,6 +972,19 @@ TfLiteStatus Subgraph::Invoke() { tensor->data_is_stale) { TF_LITE_ENSURE_STATUS(EnsureTensorDataIsReadable(tensor_index)); } + if (tensor->data.raw == nullptr && tensor->bytes > 0) { + if (registration.builtin_code == kTfLiteBuiltinReshape && i == 1) { + // In general, having a tensor here with no buffer will be an error. + // However, for the reshape operator, the second input tensor is only + // used for the shape, not for the data. Thus, null buffer is ok. + continue; + } else { + // In all other cases, we need to return an error as otherwise we will + // trigger a null pointer dereference (likely). 
+ ReportError("Input tensor %d lacks data", tensor_index); + return kTfLiteError; + } + } } if (check_cancelled_func_ != nullptr && From cd671a90fc4af7cd9793be82a014d971c8869aea Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 14:19:26 -0700 Subject: [PATCH 020/256] [tflite] Ensure `MatchingDim` does not allow buffer overflow. We check in `MatchingDim` that both arguments have the same dimensionality, however that is a `DCHECK` only enabled if building in debug mode. Hence, it could be possible to cause buffer overflows by passing in a tensor with larger dimensions as the second argument. To fix, we now make `MatchingDim` return the minimum of the two sizes. A much better fix would be to return a status object but that requires refactoring a large part of the codebase for minor benefits. PiperOrigin-RevId: 332526127 Change-Id: If627d0d2c80a685217b6e0d1e64b0872dbf1c5e4 --- tensorflow/lite/kernels/internal/types.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/lite/kernels/internal/types.h b/tensorflow/lite/kernels/internal/types.h index 2a34f6608a33f1..adbd34b01467ab 100644 --- a/tensorflow/lite/kernels/internal/types.h +++ b/tensorflow/lite/kernels/internal/types.h @@ -432,7 +432,7 @@ int MatchingArraySize(const ArrayType1& array1, int index1, inline int MatchingDim(const RuntimeShape& shape1, int index1, const RuntimeShape& shape2, int index2) { TFLITE_DCHECK_EQ(shape1.Dims(index1), shape2.Dims(index2)); - return shape1.Dims(index1); + return std::min(shape1.Dims(index1), shape2.Dims(index2)); } template From b3925917c42952cd66eb1fa9fa24d7738fac5b57 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 14:43:00 -0700 Subject: [PATCH 021/256] [tflite] Ensure `ResolveAxis` properly handles negative inputs. In Python, a list `l` of length `n` allows indexing with negative indices, `l[i]`. The only constraint is that `n + i` becomes positive. Code in `ResolveAxis` assumes the constraints and only checks it using a `DCHECK`. But the macro is a no-op in non-debug builds and that can result in reading from negative offsets (buffer underflows). PiperOrigin-RevId: 332530683 Change-Id: I464e073fee618054ae3719a3679739007bb3f3bc --- tensorflow/lite/kernels/internal/reference/reduce.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/lite/kernels/internal/reference/reduce.h b/tensorflow/lite/kernels/internal/reference/reduce.h index fbad266e843b8b..8291141618fbf2 100644 --- a/tensorflow/lite/kernels/internal/reference/reduce.h +++ b/tensorflow/lite/kernels/internal/reference/reduce.h @@ -70,6 +70,9 @@ inline bool ResolveAxis(const int num_dims, const int* axis, // eg: For num_dims=3, [0, 1, 2] is the same as [-3, -2, -1] */ int current = axis[idx] < 0 ? (axis[idx] + num_dims) : axis[idx]; TFLITE_DCHECK(current >= 0 && current < num_dims); + if (current < 0 || current >= num_dims) { + return false; + } bool is_dup = false; for (int j = 0; j < *out_num_axis; ++j) { if (out_axis[j] == current) { From 9a64529bd4b8a01ff05480c72dbcc833914f9636 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sat, 19 Sep 2020 18:43:13 -0700 Subject: [PATCH 022/256] Validate `NodeDef`s from `FunctionDefLibrary` of a `GraphDef`. We already validated `NodeDef`s from a `GraphDef` but missed validating those from the `FunctionDefLibrary`. Thus, some maliciously crafted models could evade detection and cause denial of service due to a `CHECK`-fail. 
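A minimal sketch of the kind of model that previously evaded this check (a hypothetical construction using the Python protobuf bindings; the names are illustrative, not taken from this patch):

from tensorflow.core.framework import graph_pb2

graph_def = graph_pb2.GraphDef()

# A Const node with no "value" attr placed inside a library function.
# Top-level nodes like this were already rejected at SavedModel load time,
# but nodes inside the FunctionDefLibrary were not checked until this change.
fn = graph_def.library.function.add()
fn.signature.name = "malicious_fn"
bad = fn.node_def.add()
bad.name = "bad_const"
bad.op = "Const"  # no "value" attr: the extended validation now rejects this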
PiperOrigin-RevId: 332536309 Change-Id: I052efe919ff1fe2f90815e286a1aa4c54c7b94ff --- tensorflow/cc/saved_model/loader.cc | 46 +++++++++++++++++++---------- 1 file changed, 31 insertions(+), 15 deletions(-) diff --git a/tensorflow/cc/saved_model/loader.cc b/tensorflow/cc/saved_model/loader.cc index f9c720a2ba2675..1ecc0ab7a50f22 100644 --- a/tensorflow/cc/saved_model/loader.cc +++ b/tensorflow/cc/saved_model/loader.cc @@ -21,6 +21,7 @@ limitations under the License. #include "tensorflow/cc/saved_model/loader_util.h" #include "tensorflow/cc/saved_model/reader.h" #include "tensorflow/core/framework/attr_value.pb.h" +#include "tensorflow/core/framework/function.proto.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/lib/io/path.h" @@ -72,26 +73,41 @@ uint64 GetLatencyMicroseconds(const uint64 start_microseconds) { // Ensure that constant tensors loaded from the saved model have valid shape. // Also ensure that constant nodes have a value assigned to them. // TODO(b/154763635): this is temporary and will be replaced with a better audit +static Status ValidateNode(const NodeDef& node) { + const auto node_iterator = node.attr().find("value"); + if (node_iterator != node.attr().end()) { + AttrValue node_value = node_iterator->second; + if (node_value.has_tensor()) { + const PartialTensorShape node_shape(node_value.tensor().tensor_shape()); + if (node_shape.num_elements() < 0) { + return errors::FailedPrecondition( + "Saved model contains node \"", node.name(), "\" (op \"", node.op(), + "\") which initializes from a tensor with ", + node_shape.num_elements(), " elements"); + } + } + } else if (node.op() == "Const") { + return errors::FailedPrecondition( + "Saved model contains node \"", node.name(), + "\" which is a constant tensor but no value has been provided"); + } + return Status::OK(); +} + static Status ValidateSavedTensors(const GraphDef& graph_def) { for (const auto& node : graph_def.node()) { - const auto node_iterator = node.attr().find("value"); - if (node_iterator != node.attr().end()) { - AttrValue node_value = node_iterator->second; - if (node_value.has_tensor()) { - const PartialTensorShape node_shape(node_value.tensor().tensor_shape()); - if (node_shape.num_elements() < 0) { - return errors::FailedPrecondition( - "Saved model contains node \"", node.name(), "\" (op \"", - node.op(), "\") which initializes from a tensor with ", - node_shape.num_elements(), " elements"); - } + TF_RETURN_IF_ERROR(ValidateNode(node)); + } + + if (graph_def.has_library()) { + const FunctionDefLibrary& library = graph_def.library(); + for (const auto& function : library.function()) { + for (const auto& node : function.node_def()) { + TF_RETURN_IF_ERROR(ValidateNode(node)); } - } else if (node.op() == "Const") { - return errors::FailedPrecondition( - "Saved model contains node \"", node.name(), - "\" which is a constant tensor but no value has been provided"); } } + return Status::OK(); } From b98674e737217416c35d7a9a0e16757ee540724d Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sat, 19 Sep 2020 18:54:29 -0700 Subject: [PATCH 023/256] Fix bad import --- tensorflow/lite/core/subgraph.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/lite/core/subgraph.cc b/tensorflow/lite/core/subgraph.cc index 0646adebf8ce4b..00a37815d21a4a 100644 --- a/tensorflow/lite/core/subgraph.cc +++ b/tensorflow/lite/core/subgraph.cc @@ -18,7 +18,7 @@ limitations under the License. 
#include #include "tensorflow/lite/arena_planner.h" -#include "third_party/tensorflow/lite/builtin_ops.h" +#include "tensorflow/lite/builtin_ops.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/context_util.h" #include "tensorflow/lite/core/api/tensor_utils.h" From 0fde760d7eebb7bf17e4d6a45b20327b5c11383b Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sat, 19 Sep 2020 18:57:03 -0700 Subject: [PATCH 024/256] Remove merge conflict marker from failed merge resolution --- tensorflow/lite/kernels/kernel_util.h | 1 - 1 file changed, 1 deletion(-) diff --git a/tensorflow/lite/kernels/kernel_util.h b/tensorflow/lite/kernels/kernel_util.h index aae28537edfe21..59b1974c3b93df 100644 --- a/tensorflow/lite/kernels/kernel_util.h +++ b/tensorflow/lite/kernels/kernel_util.h @@ -46,7 +46,6 @@ inline TfLiteTensor* GetVariableInput(TfLiteContext* context, return nullptr; } TfLiteTensor* tensor = &context->tensors[tensor_index]; ->>>>>>> d8f8236c29 ([tflite] Test for `kTfLiteOptionalTensor` in `GetInput`.) return (tensor->is_variable) ? tensor : nullptr; } inline TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node, From 892d5e599f25a9b17befc54fb9be7e893bcd14b8 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 15:52:05 -0700 Subject: [PATCH 025/256] Validate `data_splits` for `tf.StringNGrams`. Without validation, we can cause a heap buffer overflow which results in data leakage and/or segfaults. PiperOrigin-RevId: 332543478 Change-Id: Iee5bda24497a195d09d122355502480830b1b317 --- tensorflow/core/kernels/string_ngrams_op.cc | 13 ++++++++++++ tensorflow/python/ops/raw_ops_test.py | 23 ++++++++++++++++++++- 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/string_ngrams_op.cc b/tensorflow/core/kernels/string_ngrams_op.cc index 97b32c4242ccdc..8aed2b3831a2f4 100644 --- a/tensorflow/core/kernels/string_ngrams_op.cc +++ b/tensorflow/core/kernels/string_ngrams_op.cc @@ -19,6 +19,7 @@ limitations under the License. 
#include "absl/strings/ascii.h" #include "absl/strings/str_cat.h" #include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/platform/errors.h" namespace tensorflow { namespace text { @@ -60,6 +61,18 @@ class StringNGramsOp : public tensorflow::OpKernel { OP_REQUIRES_OK(context, context->input("data_splits", &splits)); const auto& splits_vec = splits->flat(); + // Validate that the splits are valid indices into data + const int input_data_size = data->flat().size(); + const int splits_vec_size = splits_vec.size(); + for (int i = 0; i < splits_vec_size; ++i) { + bool valid_splits = splits_vec(i) >= 0; + valid_splits = valid_splits && (splits_vec(i) <= input_data_size); + OP_REQUIRES( + context, valid_splits, + errors::InvalidArgument("Invalid split value ", splits_vec(i), + ", must be in [0,", input_data_size, "]")); + } + int num_batch_items = splits_vec.size() - 1; tensorflow::Tensor* ngrams_splits; OP_REQUIRES_OK( diff --git a/tensorflow/python/ops/raw_ops_test.py b/tensorflow/python/ops/raw_ops_test.py index fff94f5c25ae8c..ad4f991a6a100e 100644 --- a/tensorflow/python/ops/raw_ops_test.py +++ b/tensorflow/python/ops/raw_ops_test.py @@ -18,16 +18,21 @@ from __future__ import division from __future__ import print_function +from absl.testing import parameterized + from tensorflow.python.eager import context from tensorflow.python.framework import constant_op +from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import gen_math_ops +from tensorflow.python.ops import gen_string_ops from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes -class RawOpsTest(test.TestCase): +@test_util.disable_tfrt +class RawOpsTest(test.TestCase, parameterized.TestCase): def testSimple(self): x = constant_op.constant(1) @@ -58,6 +63,22 @@ def testDefaults(self): gen_math_ops.Any(input=x, axis=0), gen_math_ops.Any(input=x, axis=0, keep_dims=False)) + @parameterized.parameters([[0, 8]], [[-1, 6]]) + def testStringNGramsBadDataSplits(self, splits): + data = ["aa", "bb", "cc", "dd", "ee", "ff"] + with self.assertRaisesRegex(errors.InvalidArgumentError, + "Invalid split value"): + self.evaluate( + gen_string_ops.string_n_grams( + data=data, + data_splits=splits, + separator="", + ngram_widths=[2], + left_pad="", + right_pad="", + pad_width=0, + preserve_short_sequences=False)) + if __name__ == "__main__": ops.enable_eager_execution() From ce90127d7037004a1c818a7055995f075fe97daa Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 16:23:20 -0700 Subject: [PATCH 026/256] Prevent segfault in `GetSessionHandle{,V2}`. In eager mode, session state is null. PiperOrigin-RevId: 332548597 Change-Id: If094812c2e094044220b9ba28f7d7601be042f38 --- tensorflow/core/kernels/session_ops.cc | 8 +++++++- tensorflow/python/ops/raw_ops_test.py | 8 ++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/session_ops.cc b/tensorflow/core/kernels/session_ops.cc index d83a714452f2af..e7e73549bc32f3 100644 --- a/tensorflow/core/kernels/session_ops.cc +++ b/tensorflow/core/kernels/session_ops.cc @@ -16,6 +16,7 @@ limitations under the License. // See docs in ../ops/data_flow_ops.cc. #include + #include #include "tensorflow/core/common_runtime/device.h" @@ -27,6 +28,7 @@ limitations under the License. 
#include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/map_util.h" +#include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/platform/mutex.h" @@ -42,7 +44,11 @@ class GetSessionHandleOp : public OpKernel { void Compute(OpKernelContext* ctx) override { const Tensor& val = ctx->input(0); - int64 id = ctx->session_state()->GetNewId(); + auto session_state = ctx->session_state(); + OP_REQUIRES(ctx, session_state != nullptr, + errors::FailedPrecondition( + "GetSessionHandle called on null session state")); + int64 id = session_state->GetNewId(); TensorStore::TensorAndKey tk{val, id, requested_device()}; OP_REQUIRES_OK(ctx, ctx->tensor_store()->AddTensor(name(), tk)); diff --git a/tensorflow/python/ops/raw_ops_test.py b/tensorflow/python/ops/raw_ops_test.py index ad4f991a6a100e..0dbd7dcb9169e7 100644 --- a/tensorflow/python/ops/raw_ops_test.py +++ b/tensorflow/python/ops/raw_ops_test.py @@ -25,6 +25,7 @@ from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util +from tensorflow.python.ops import gen_data_flow_ops from tensorflow.python.ops import gen_math_ops from tensorflow.python.ops import gen_string_ops from tensorflow.python.platform import test @@ -79,6 +80,13 @@ def testStringNGramsBadDataSplits(self, splits): pad_width=0, preserve_short_sequences=False)) + def testGetSessionHandle(self): + if context.executing_eagerly(): + with self.assertRaisesRegex( + errors.FailedPreconditionError, + "GetSessionHandle called on null session state"): + gen_data_flow_ops.GetSessionHandle(value=[1]) + if __name__ == "__main__": ops.enable_eager_execution() From 0315fa71402e08c0fb77f5d422554b51dfb8251c Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 16:54:17 -0700 Subject: [PATCH 027/256] Prevent format string vulnerability in `tf.strings.as_string`. The `printf` format specifier only allows `#`, `0`, `-`, `+` and space as flag characters. Others are interpreted as width/precision/length modifier or conversion specifiers. If a character does not fit into any of these sets `printf` just displays it. Also add a test suite for `tf.strings.as_string`. Also fix the issue where the flag character was used only if width was specified. 
PiperOrigin-RevId: 332553548 Change-Id: Ie57cf2a7c14d1a36097642794c14329db669bbba --- tensorflow/core/kernels/BUILD | 18 ++ tensorflow/core/kernels/as_string_op.cc | 19 +- tensorflow/core/kernels/as_string_op_test.cc | 245 +++++++++++++++++++ 3 files changed, 281 insertions(+), 1 deletion(-) create mode 100644 tensorflow/core/kernels/as_string_op_test.cc diff --git a/tensorflow/core/kernels/BUILD b/tensorflow/core/kernels/BUILD index 7da864a6027811..14f7d99bf2e71a 100644 --- a/tensorflow/core/kernels/BUILD +++ b/tensorflow/core/kernels/BUILD @@ -6085,6 +6085,24 @@ tf_kernel_library( deps = STRING_DEPS, ) +tf_cc_test( + name = "as_string_op_test", + size = "small", + srcs = ["as_string_op_test.cc"], + deps = [ + ":as_string_op", + ":ops_testutil", + ":ops_util", + "//tensorflow/core:core_cpu", + "//tensorflow/core:framework", + "//tensorflow/core:lib", + "//tensorflow/core:protos_all_cc", + "//tensorflow/core:test", + "//tensorflow/core:test_main", + "//tensorflow/core:testlib", + ], +) + tf_kernel_library( name = "unicode_ops", prefix = "unicode_ops", diff --git a/tensorflow/core/kernels/as_string_op.cc b/tensorflow/core/kernels/as_string_op.cc index 8341909fbc8409..b9af976a654d99 100644 --- a/tensorflow/core/kernels/as_string_op.cc +++ b/tensorflow/core/kernels/as_string_op.cc @@ -65,9 +65,26 @@ class AsStringOp : public OpKernel { OP_REQUIRES(ctx, !(scientific && shortest), errors::InvalidArgument( "Cannot select both scientific and shortest notation")); + format_ = "%"; + if (!fill_string.empty()) { + switch (fill_string[0]) { + case ' ': + case '+': + case '-': + case '0': + case '#': + strings::Appendf(&format_, "%s", fill_string.c_str()); + break; + default: + bool fill_not_supported = true; + OP_REQUIRES(ctx, !fill_not_supported, + errors::InvalidArgument("Fill argument not supported: \"", + fill_string, "\"")); + } + } if (width > -1) { - strings::Appendf(&format_, "%s%d", fill_string.c_str(), width); + strings::Appendf(&format_, "%d", width); } if (precision > -1) { strings::Appendf(&format_, ".%d", precision); diff --git a/tensorflow/core/kernels/as_string_op_test.cc b/tensorflow/core/kernels/as_string_op_test.cc new file mode 100644 index 00000000000000..dff78e25e72025 --- /dev/null +++ b/tensorflow/core/kernels/as_string_op_test.cc @@ -0,0 +1,245 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/core/framework/fake_input.h" +#include "tensorflow/core/framework/node_def_builder.h" +#include "tensorflow/core/framework/tensor.h" +#include "tensorflow/core/framework/tensor_testutil.h" +#include "tensorflow/core/framework/types.h" +#include "tensorflow/core/kernels/ops_testutil.h" +#include "tensorflow/core/kernels/ops_util.h" +#include "tensorflow/core/lib/core/status_test_util.h" + +namespace tensorflow { +namespace { + +class AsStringGraphTest : public OpsTestBase { + protected: + Status Init(DataType input_type, const string& fill = "", int width = -1, + int precision = -1, bool scientific = false, + bool shortest = false) { + TF_CHECK_OK(NodeDefBuilder("op", "AsString") + .Input(FakeInput(input_type)) + .Attr("fill", fill) + .Attr("precision", precision) + .Attr("scientific", scientific) + .Attr("shortest", shortest) + .Attr("width", width) + .Finalize(node_def())); + return InitOp(); + } +}; + +TEST_F(AsStringGraphTest, Int8) { + TF_ASSERT_OK(Init(DT_INT8)); + + AddInputFromArray(TensorShape({3}), {-42, 0, 42}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({3})); + test::FillValues(&expected, {"-42", "0", "42"}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, Int64) { + TF_ASSERT_OK(Init(DT_INT64)); + + AddInputFromArray(TensorShape({3}), {-42, 0, 42}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({3})); + test::FillValues(&expected, {"-42", "0", "42"}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, FloatDefault) { + TF_ASSERT_OK(Init(DT_FLOAT)); + + AddInputFromArray(TensorShape({4}), {-42, 0, 3.14159, 42}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({4})); + test::FillValues( + &expected, {"-42.000000", "0.000000", "3.141590", "42.000000"}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, FloatScientific) { + TF_ASSERT_OK(Init(DT_FLOAT, /*fill=*/"", /*width=*/-1, /*precision=*/-1, + /*scientific=*/true)); + + AddInputFromArray(TensorShape({4}), {-42, 0, 3.14159, 42}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({4})); + test::FillValues(&expected, {"-4.200000e+01", "0.000000e+00", + "3.141590e+00", "4.200000e+01"}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, FloatShortest) { + TF_ASSERT_OK(Init(DT_FLOAT, /*fill=*/"", /*width=*/-1, /*precision=*/-1, + /*scientific=*/false, /*shortest=*/true)); + + AddInputFromArray(TensorShape({4}), {-42, 0, 3.14159, 42}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({4})); + test::FillValues(&expected, {"-42", "0", "3.14159", "42"}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, FloatPrecisionOnly) { + TF_ASSERT_OK(Init(DT_FLOAT, /*fill=*/"", /*width=*/-1, /*precision=*/2)); + + AddInputFromArray(TensorShape({4}), {-42, 0, 3.14159, 42}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({4})); + test::FillValues(&expected, {"-42.00", "0.00", "3.14", "42.00"}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, FloatWidthOnly) { + TF_ASSERT_OK(Init(DT_FLOAT, /*fill=*/"", /*width=*/5)); + + AddInputFromArray(TensorShape({4}), {-42, 0, 3.14159, 42}); + TF_ASSERT_OK(RunOpKernel()); + Tensor 
expected(allocator(), DT_STRING, TensorShape({4})); + test::FillValues( + &expected, {"-42.000000", "0.000000", "3.141590", "42.000000"}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, Float_5_2_Format) { + TF_ASSERT_OK(Init(DT_FLOAT, /*fill=*/"", /*width=*/5, /*precision=*/2)); + + AddInputFromArray(TensorShape({4}), {-42, 0, 3.14159, 42}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({4})); + test::FillValues(&expected, {"-42.00", " 0.00", " 3.14", "42.00"}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, Complex) { + TF_ASSERT_OK(Init(DT_COMPLEX64, /*fill=*/"", /*width=*/5, /*precision=*/2)); + + AddInputFromArray(TensorShape({3}), {{-4, 2}, {0}, {3.14159, -1}}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({3})); + test::FillValues( + &expected, {"(-4.00, 2.00)", "( 0.00, 0.00)", "( 3.14,-1.00)"}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, Bool) { + TF_ASSERT_OK(Init(DT_BOOL)); + + AddInputFromArray(TensorShape({2}), {true, false}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({2})); + test::FillValues(&expected, {"true", "false"}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, String) { + Status s = Init(DT_STRING); + ASSERT_EQ(error::INVALID_ARGUMENT, s.code()); + ASSERT_TRUE(absl::StrContains( + s.error_message(), + "Value for attr 'T' of string is not in the list of allowed values")); +} + +TEST_F(AsStringGraphTest, OnlyOneOfScientificAndShortest) { + Status s = Init(DT_FLOAT, /*fill=*/"", /*width=*/-1, /*precision=*/-1, + /*scientific=*/true, /*shortest=*/true); + ASSERT_EQ(error::INVALID_ARGUMENT, s.code()); + ASSERT_TRUE( + absl::StrContains(s.error_message(), + "Cannot select both scientific and shortest notation")); +} + +TEST_F(AsStringGraphTest, NoShortestForNonFloat) { + Status s = Init(DT_INT32, /*fill=*/"", /*width=*/-1, /*precision=*/-1, + /*scientific=*/false, /*shortest=*/true); + ASSERT_EQ(error::INVALID_ARGUMENT, s.code()); + ASSERT_TRUE(absl::StrContains( + s.error_message(), + "scientific and shortest format not supported for datatype")); +} + +TEST_F(AsStringGraphTest, NoScientificForNonFloat) { + Status s = Init(DT_INT32, /*fill=*/"", /*width=*/-1, /*precision=*/-1, + /*scientific=*/true); + ASSERT_EQ(error::INVALID_ARGUMENT, s.code()); + ASSERT_TRUE(absl::StrContains( + s.error_message(), + "scientific and shortest format not supported for datatype")); +} + +TEST_F(AsStringGraphTest, NoPrecisionForNonFloat) { + Status s = Init(DT_INT32, /*fill=*/"", /*width=*/-1, /*precision=*/5); + ASSERT_EQ(error::INVALID_ARGUMENT, s.code()); + ASSERT_TRUE(absl::StrContains(s.error_message(), + "precision not supported for datatype")); +} + +TEST_F(AsStringGraphTest, LongFill) { + Status s = Init(DT_INT32, /*fill=*/"asdf"); + ASSERT_EQ(error::INVALID_ARGUMENT, s.code()); + ASSERT_TRUE(absl::StrContains(s.error_message(), + "Fill string must be one or fewer characters")); +} + +TEST_F(AsStringGraphTest, FillWithZero) { + TF_ASSERT_OK(Init(DT_INT64, /*fill=*/"0", /*width=*/4)); + + AddInputFromArray(TensorShape({3}), {-42, 0, 42}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({3})); + test::FillValues(&expected, {"-042", "0000", "0042"}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, FillWithSpace) { + TF_ASSERT_OK(Init(DT_INT64, 
/*fill=*/" ", /*width=*/4)); + + AddInputFromArray(TensorShape({3}), {-42, 0, 42}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({3})); + test::FillValues(&expected, {" -42", " 0", " 42"}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, FillWithChar1) { + TF_ASSERT_OK(Init(DT_INT64, /*fill=*/"-", /*width=*/4)); + + AddInputFromArray(TensorShape({3}), {-42, 0, 42}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({3})); + test::FillValues(&expected, {"-42 ", "0 ", "42 "}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, FillWithChar3) { + Status s = Init(DT_INT32, /*fill=*/"s"); + ASSERT_EQ(error::INVALID_ARGUMENT, s.code()); + ASSERT_TRUE( + absl::StrContains(s.error_message(), "Fill argument not supported")); +} + +TEST_F(AsStringGraphTest, FillWithChar4) { + Status s = Init(DT_INT32, /*fill=*/"n"); + ASSERT_EQ(error::INVALID_ARGUMENT, s.code()); + ASSERT_TRUE( + absl::StrContains(s.error_message(), "Fill argument not supported")); +} + +} // end namespace +} // end namespace tensorflow From cb1422c9f24b70ee36dfe5efab8f464761ee7cea Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 17:21:24 -0700 Subject: [PATCH 028/256] Prevent `int64` to `int` truncation in `Shard` API usage. The function argument in `Shard` must be a function of two `int64` arguments. However, we are passing in a function with two `int` arguments. Thus, for large workloads, these arguments get truncated from positive `int64` values to negative `int` ones, resulting in a buffer out of bounds write. PiperOrigin-RevId: 332557334 Change-Id: I236c9a2e7f53580e520571da8ba941a3aa9fa0b5 --- tensorflow/core/kernels/random_op.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/random_op.cc b/tensorflow/core/kernels/random_op.cc index 152ab5f7d1eabd..0f32e759019682 100644 --- a/tensorflow/core/kernels/random_op.cc +++ b/tensorflow/core/kernels/random_op.cc @@ -205,7 +205,7 @@ class RandomGammaOp : public OpKernel { // avoid a couple flops which can be done on a per-alpha basis. auto DoWork = [samples_per_alpha, num_alphas, &rng, samples_flat, - alpha_flat](int start_output, int limit_output) { + alpha_flat](int64 start_output, int64 limit_output) { using Eigen::numext::exp; using Eigen::numext::log; using Eigen::numext::log1p; From 42783a6d9e83fb3aae8e1885ae01ca68332856b5 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 17:49:02 -0700 Subject: [PATCH 029/256] Prevent integer truncation from 64 to 32 bits. The `tensorflow::Shard` functions last argument must be a 2 argument function where both arguments are `int64` (`long long`, 64 bits). However, there are usages where code passes in a function where arguments are `int` or `int32` (32 bits). In these cases, it is possible that the integer truncation would later cause a segfault or other unexpected behavior. 
PiperOrigin-RevId: 332560414 Change-Id: Ief649406babc8d4f60b3e7a9d573cbcc5ce5b767 --- tensorflow/core/kernels/banded_triangular_solve_op.cc | 3 ++- tensorflow/core/kernels/boosted_trees/prediction_ops.cc | 6 +++--- tensorflow/core/kernels/crop_and_resize_op.cc | 4 ++-- tensorflow/core/kernels/nth_element_op.cc | 3 ++- .../core/kernels/parameterized_truncated_normal_op.cc | 8 ++++---- tensorflow/core/kernels/random_binomial_op.cc | 2 +- tensorflow/core/kernels/random_poisson_op.cc | 2 +- tensorflow/core/kernels/stateless_random_ops.cc | 2 +- tensorflow/core/kernels/topk_op.cc | 2 +- 9 files changed, 17 insertions(+), 15 deletions(-) diff --git a/tensorflow/core/kernels/banded_triangular_solve_op.cc b/tensorflow/core/kernels/banded_triangular_solve_op.cc index d01a015502a905..666282e52c8031 100644 --- a/tensorflow/core/kernels/banded_triangular_solve_op.cc +++ b/tensorflow/core/kernels/banded_triangular_solve_op.cc @@ -193,7 +193,8 @@ struct LaunchBatchBandedTriangularSolve { Shard(worker_threads.num_threads, worker_threads.workers, batch_size, cost_per_unit, - [&in_x, &in_y, adjoint, lower, &bcast, out](int start, int limit) { + [&in_x, &in_y, adjoint, lower, &bcast, out](int64 start, + int64 limit) { SequentialBandedTriangularSolveKernel::Run( in_x, in_y, lower, adjoint, bcast, out, start, limit); }); diff --git a/tensorflow/core/kernels/boosted_trees/prediction_ops.cc b/tensorflow/core/kernels/boosted_trees/prediction_ops.cc index 19be606f184939..e3a908d1b6b20d 100644 --- a/tensorflow/core/kernels/boosted_trees/prediction_ops.cc +++ b/tensorflow/core/kernels/boosted_trees/prediction_ops.cc @@ -121,7 +121,7 @@ class BoostedTreesTrainingPredictOp : public OpKernel { auto do_work = [&resource, &bucketized_features, &cached_tree_ids, &cached_node_ids, &output_partial_logits, &output_node_ids, latest_tree, - this](int32 start, int32 end) { + this](int64 start, int64 end) { for (int32 i = start; i < end; ++i) { int32 tree_id = cached_tree_ids(i); int32 node_id = cached_node_ids(i); @@ -237,7 +237,7 @@ class BoostedTreesPredictOp : public OpKernel { const int32 last_tree = resource->num_trees() - 1; auto do_work = [&resource, &bucketized_features, &output_logits, last_tree, - this](int32 start, int32 end) { + this](int64 start, int64 end) { for (int32 i = start; i < end; ++i) { std::vector tree_logits(logits_dimension_, 0.0); int32 tree_id = 0; @@ -340,7 +340,7 @@ class BoostedTreesExampleDebugOutputsOp : public OpKernel { // path. Note: feature_ids has one less value than logits_path because the // first value of each logit path will be the bias. auto do_work = [&resource, &bucketized_features, &output_debug_info, - last_tree](int32 start, int32 end) { + last_tree](int64 start, int64 end) { for (int32 i = start; i < end; ++i) { // Proto to store debug outputs, per example. boosted_trees::DebugOutput example_debug_info; diff --git a/tensorflow/core/kernels/crop_and_resize_op.cc b/tensorflow/core/kernels/crop_and_resize_op.cc index 23058788a4b143..4ecd3bc0a797ac 100644 --- a/tensorflow/core/kernels/crop_and_resize_op.cc +++ b/tensorflow/core/kernels/crop_and_resize_op.cc @@ -223,7 +223,7 @@ struct CropAndResize { const int depth = crops.dimension(3); // Sharding across boxes. 
- auto CropAndResizePerBox = [&](int start_box, int limit_box) { + auto CropAndResizePerBox = [&](int64 start_box, int64 limit_box) { for (int b = start_box; b < limit_box; ++b) { const float y1 = boxes(b, 0); const float x1 = boxes(b, 1); @@ -449,7 +449,7 @@ struct CropAndResizeBackpropImage { grads_image.setZero(); - auto CropAndResizeBackImgPerBox = [&](int start_box, int limit_box) { + auto CropAndResizeBackImgPerBox = [&](int64 start_box, int64 limit_box) { for (int b = start_box; b < limit_box; ++b) { const float y1 = boxes(b, 0); const float x1 = boxes(b, 1); diff --git a/tensorflow/core/kernels/nth_element_op.cc b/tensorflow/core/kernels/nth_element_op.cc index 0e43cc19aae513..bd523f51e27e2d 100644 --- a/tensorflow/core/kernels/nth_element_op.cc +++ b/tensorflow/core/kernels/nth_element_op.cc @@ -95,7 +95,8 @@ struct NthElementFunctor { const int last_dim = input_tensor.dim_size(input_tensor.dims() - 1); // Allocate each row to different shard. - auto SubNthElement = [&, input, output, last_dim, n](int start, int limit) { + auto SubNthElement = [&, input, output, last_dim, n](int64 start, + int64 limit) { // std::nth_element would rearrange the array, so we need a new buffer. std::vector buf(last_dim); diff --git a/tensorflow/core/kernels/parameterized_truncated_normal_op.cc b/tensorflow/core/kernels/parameterized_truncated_normal_op.cc index ba1fd280ce7d75..a63457551ac29b 100644 --- a/tensorflow/core/kernels/parameterized_truncated_normal_op.cc +++ b/tensorflow/core/kernels/parameterized_truncated_normal_op.cc @@ -70,8 +70,8 @@ struct TruncatedNormalFunctor { auto do_work = [samples_per_batch, num_elements, &ctx, &means, &stddevs, &minvals, &maxvals, &gen, &output, - kStdDevsInsideBoundsToUseRandnSampler](int start_batch, - int limit_batch) { + kStdDevsInsideBoundsToUseRandnSampler](int64 start_batch, + int64 limit_batch) { // Capturing "gen" by-value would only make a copy for the _shared_ // lambda. Since we want to let each worker have its own copy, we pass // "gen" by reference and explicitly do a copy assignment here. @@ -333,8 +333,8 @@ struct TruncatedNormalFunctorV2 { auto do_work = [num_batches, samples_per_batch, &ctx, &bcast, &means, &stddevs, &minvals, &maxvals, &gen, &output, - kStdDevsInsideBoundsToUseRandnSampler](int start_output, - int limit_output) { + kStdDevsInsideBoundsToUseRandnSampler](int64 start_output, + int64 limit_output) { // Capturing "gen" by-value would only make a copy for the _shared_ // lambda. Since we want to let each worker have its own copy, we pass // "gen" by reference and explicitly do a copy assignment here. diff --git a/tensorflow/core/kernels/random_binomial_op.cc b/tensorflow/core/kernels/random_binomial_op.cc index 4647457ff6fa8f..4a730fc70f73e8 100644 --- a/tensorflow/core/kernels/random_binomial_op.cc +++ b/tensorflow/core/kernels/random_binomial_op.cc @@ -182,7 +182,7 @@ struct RandomBinomialFunctor { // the sample shape and [H1, ... Hm] for the batch shape of the samples. // We have B1 * ... * Bk samples per batch member we need. auto DoWork = [num_batches, samples_per_batch, &bcast, &counts, &probs, - &gen, &output](int start_output, int limit_output) { + &gen, &output](int64 start_output, int64 limit_output) { // Vectorized intermediate calculations for uniform rejection sampling. // We always generate at most 4 samples. 
Eigen::array z; diff --git a/tensorflow/core/kernels/random_poisson_op.cc b/tensorflow/core/kernels/random_poisson_op.cc index aa9a0bfe214954..dcb7d6b0f0edfa 100644 --- a/tensorflow/core/kernels/random_poisson_op.cc +++ b/tensorflow/core/kernels/random_poisson_op.cc @@ -97,7 +97,7 @@ struct PoissonFunctor { typedef random::UniformDistribution Uniform; auto DoWork = [num_samples, num_rate, &rng, samples_flat, rate_flat]( - int start_output, int limit_output) { + int64 start_output, int64 limit_output) { // Capturing "rng" by value would only make a copy for the _shared_ // lambda. Since we want to let each worker have its own copy, we pass // "rng" by reference and explicitly do a copy assignment. diff --git a/tensorflow/core/kernels/stateless_random_ops.cc b/tensorflow/core/kernels/stateless_random_ops.cc index 6738a34e3fd229..3150f168828a08 100644 --- a/tensorflow/core/kernels/stateless_random_ops.cc +++ b/tensorflow/core/kernels/stateless_random_ops.cc @@ -252,7 +252,7 @@ class StatelessRandomGammaOp : public StatelessRandomOpBase { // avoid a couple flops which can be done on a per-alpha basis. auto DoWork = [samples_per_alpha, num_alphas, &random, samples_flat, - alpha_flat](int start_output, int limit_output) { + alpha_flat](int64 start_output, int64 limit_output) { // Capturing "random" by-value would only make a copy for the _shared_ // lambda. Since we want to let each worker have its own copy, we pass // "random" by reference and explicitly do a copy assignment. diff --git a/tensorflow/core/kernels/topk_op.cc b/tensorflow/core/kernels/topk_op.cc index c555b42f005604..e2659bbf9d5f52 100644 --- a/tensorflow/core/kernels/topk_op.cc +++ b/tensorflow/core/kernels/topk_op.cc @@ -136,7 +136,7 @@ struct TopKFunctor { return Status::OK(); } - auto SortIndices = [&](int start_batch, int limit_batch) { + auto SortIndices = [&](int64 start_batch, int64 limit_batch) { for (int32 b = start_batch; b < limit_batch; ++b) { const T* input_data = &input(b, 0); const auto stable_comp = [input_data](const int32 a, const int32 b) { From b3075637e9583740e903e56ed8eee227f7abbbf0 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 18:15:03 -0700 Subject: [PATCH 030/256] Fix multiple vulnerabilities in `tf.raw_ops.*CountSparseOutput`. Also add tests for these API points, both for the happy paths and for the vulnerable ones. PiperOrigin-RevId: 332563222 Change-Id: Ib3b52116a83a134c2e742a7c66e5e956db8fba05 --- tensorflow/core/kernels/count_ops.cc | 41 +++++++ tensorflow/python/ops/bincount_ops_test.py | 118 +++++++++++++++++++++ 2 files changed, 159 insertions(+) diff --git a/tensorflow/core/kernels/count_ops.cc b/tensorflow/core/kernels/count_ops.cc index 7c85b050039380..087deef0812f00 100644 --- a/tensorflow/core/kernels/count_ops.cc +++ b/tensorflow/core/kernels/count_ops.cc @@ -178,10 +178,30 @@ class SparseCount : public OpKernel { const Tensor& weights = context->input(3); bool use_weights = weights.NumElements() > 0; + OP_REQUIRES(context, TensorShapeUtils::IsMatrix(indices.shape()), + errors::InvalidArgument( + "Input indices must be a 2-dimensional tensor. Got: ", + indices.shape().DebugString())); + + if (use_weights) { + OP_REQUIRES( + context, weights.shape() == values.shape(), + errors::InvalidArgument( + "Weights and values must have the same shape. Weight shape: ", + weights.shape().DebugString(), + "; values shape: ", values.shape().DebugString())); + } + bool is_1d = shape.NumElements() == 1; int num_batches = is_1d ? 
1 : shape.flat()(0); int num_values = values.NumElements(); + OP_REQUIRES(context, num_values == indices.shape().dim_size(0), + errors::InvalidArgument( + "Number of values must match first dimension of indices.", + "Got ", num_values, + " values, indices shape: ", indices.shape().DebugString())); + const auto indices_values = indices.matrix(); const auto values_values = values.flat(); const auto weight_values = weights.flat(); @@ -235,12 +255,33 @@ class RaggedCount : public OpKernel { bool use_weights = weights.NumElements() > 0; bool is_1d = false; + if (use_weights) { + OP_REQUIRES( + context, weights.shape() == values.shape(), + errors::InvalidArgument( + "Weights and values must have the same shape. Weight shape: ", + weights.shape().DebugString(), + "; values shape: ", values.shape().DebugString())); + } + const auto splits_values = splits.flat(); const auto values_values = values.flat(); const auto weight_values = weights.flat(); int num_batches = splits.NumElements() - 1; int num_values = values.NumElements(); + OP_REQUIRES( + context, num_batches > 0, + errors::InvalidArgument( + "Must provide at least 2 elements for the splits argument")); + OP_REQUIRES(context, splits_values(0) == 0, + errors::InvalidArgument("Splits must start with 0, not with ", + splits_values(0))); + OP_REQUIRES(context, splits_values(num_batches) == num_values, + errors::InvalidArgument( + "Splits must end with the number of values, got ", + splits_values(num_batches), " instead of ", num_values)); + auto per_batch_counts = BatchedMap(num_batches); T max_value = 0; int batch_idx = 0; diff --git a/tensorflow/python/ops/bincount_ops_test.py b/tensorflow/python/ops/bincount_ops_test.py index 74fd17cae2bce5..e9906e32f95703 100644 --- a/tensorflow/python/ops/bincount_ops_test.py +++ b/tensorflow/python/ops/bincount_ops_test.py @@ -25,7 +25,9 @@ from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor +from tensorflow.python.framework import test_util from tensorflow.python.ops import bincount_ops +from tensorflow.python.ops import gen_count_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_tensor @@ -834,5 +836,121 @@ def test_ragged_input_different_shape_fails(self): self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1)) +@test_util.run_all_in_graph_and_eager_modes +@test_util.disable_tfrt +class RawOpsTest(test.TestCase, parameterized.TestCase): + + def testSparseCountSparseOutputBadIndicesShape(self): + indices = [[[0], [0]], [[0], [1]], [[1], [0]], [[1], [2]]] + values = [1, 1, 1, 10] + weights = [1, 2, 4, 6] + dense_shape = [2, 3] + with self.assertRaisesRegex(errors.InvalidArgumentError, + "Input indices must be a 2-dimensional tensor"): + self.evaluate( + gen_count_ops.SparseCountSparseOutput( + indices=indices, + values=values, + dense_shape=dense_shape, + weights=weights, + binary_output=False)) + + def testSparseCountSparseOutputBadWeightsShape(self): + indices = [[0, 0], [0, 1], [1, 0], [1, 2]] + values = [1, 1, 1, 10] + weights = [1, 2, 4] + dense_shape = [2, 3] + with self.assertRaisesRegex(errors.InvalidArgumentError, + "Weights and values must have the same shape"): + self.evaluate( + gen_count_ops.SparseCountSparseOutput( + indices=indices, + values=values, + dense_shape=dense_shape, + weights=weights, + binary_output=False)) + + def testSparseCountSparseOutputBadNumberOfValues(self): + 
indices = [[0, 0], [0, 1], [1, 0]] + values = [1, 1, 1, 10] + weights = [1, 2, 4, 6] + dense_shape = [2, 3] + with self.assertRaisesRegex( + errors.InvalidArgumentError, + "Number of values must match first dimension of indices"): + self.evaluate( + gen_count_ops.SparseCountSparseOutput( + indices=indices, + values=values, + dense_shape=dense_shape, + weights=weights, + binary_output=False)) + + def testRaggedCountSparseOutput(self): + splits = [0, 4, 7] + values = [1, 1, 2, 1, 2, 10, 5] + weights = [1, 2, 3, 4, 5, 6, 7] + output_indices, output_values, output_shape = self.evaluate( + gen_count_ops.RaggedCountSparseOutput( + splits=splits, values=values, weights=weights, binary_output=False)) + self.assertAllEqual([[0, 1], [0, 2], [1, 2], [1, 5], [1, 10]], + output_indices) + self.assertAllEqual([7, 3, 5, 7, 6], output_values) + self.assertAllEqual([2, 11], output_shape) + + def testRaggedCountSparseOutputBadWeightsShape(self): + splits = [0, 4, 7] + values = [1, 1, 2, 1, 2, 10, 5] + weights = [1, 2, 3, 4, 5, 6] + with self.assertRaisesRegex(errors.InvalidArgumentError, + "Weights and values must have the same shape"): + self.evaluate( + gen_count_ops.RaggedCountSparseOutput( + splits=splits, + values=values, + weights=weights, + binary_output=False)) + + def testRaggedCountSparseOutputEmptySplits(self): + splits = [] + values = [1, 1, 2, 1, 2, 10, 5] + weights = [1, 2, 3, 4, 5, 6, 7] + with self.assertRaisesRegex( + errors.InvalidArgumentError, + "Must provide at least 2 elements for the splits argument"): + self.evaluate( + gen_count_ops.RaggedCountSparseOutput( + splits=splits, + values=values, + weights=weights, + binary_output=False)) + + def testRaggedCountSparseOutputBadSplitsStart(self): + splits = [1, 7] + values = [1, 1, 2, 1, 2, 10, 5] + weights = [1, 2, 3, 4, 5, 6, 7] + with self.assertRaisesRegex(errors.InvalidArgumentError, + "Splits must start with 0"): + self.evaluate( + gen_count_ops.RaggedCountSparseOutput( + splits=splits, + values=values, + weights=weights, + binary_output=False)) + + def testRaggedCountSparseOutputBadSplitsEnd(self): + splits = [0, 5] + values = [1, 1, 2, 1, 2, 10, 5] + weights = [1, 2, 3, 4, 5, 6, 7] + with self.assertRaisesRegex(errors.InvalidArgumentError, + "Splits must end with the number of values"): + self.evaluate( + gen_count_ops.RaggedCountSparseOutput( + splits=splits, + values=values, + weights=weights, + binary_output=False)) + + if __name__ == "__main__": test.main() From 156872df9bbf3c237e6eb9ff226837a03ff479ef Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 18:43:38 -0700 Subject: [PATCH 031/256] Fix heap buffer overflow in `tf.raw_ops.SparseFillEmptyRowsGrad`. 
Also add tests as they were lacking PiperOrigin-RevId: 332566071 Change-Id: I44277578e26ff5fb3fdb0dcbba6e91b2ec3e7859 --- .../core/kernels/sparse_fill_empty_rows_op.cc | 12 ++++- tensorflow/python/ops/sparse_ops_test.py | 54 +++++++++++++++++++ 2 files changed, 64 insertions(+), 2 deletions(-) diff --git a/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc b/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc index 8de93cf9b30d74..542069ccd88e18 100644 --- a/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc +++ b/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc @@ -232,6 +232,9 @@ class SparseFillEmptyRowsGradOp : public OpKernel { context, TensorShapeUtils::IsVector(reverse_index_map_t->shape()), errors::InvalidArgument("reverse_index_map must be a vector, saw: ", reverse_index_map_t->shape().DebugString())); + OP_REQUIRES(context, TensorShapeUtils::IsVector(grad_values_t->shape()), + errors::InvalidArgument("grad_values must be a vector, saw: ", + grad_values_t->shape().DebugString())); const auto reverse_index_map = reverse_index_map_t->vec(); const auto grad_values = grad_values_t->vec(); @@ -260,8 +263,13 @@ class SparseFillEmptyRowsGradOp : public OpKernel { // Locate the index of the output of the forward prop associated // with this location in the input of the forward prop. Copy // the gradient into it. Mark it as visited. - d_values(i) = grad_values(reverse_index_map(i)); - visited(reverse_index_map(i)) = true; + int64 reverse_index = reverse_index_map(i); + OP_REQUIRES( + context, 0 <= reverse_index && reverse_index < N_full, + errors::InvalidArgument("Elements in reverse index must be in [0, ", + N_full, ") but got ", reverse_index)); + d_values(i) = grad_values(reverse_index); + visited(reverse_index) = true; } for (int j = 0; j < N_full; ++j) { // The default value gradient gets the accumulated remainder of diff --git a/tensorflow/python/ops/sparse_ops_test.py b/tensorflow/python/ops/sparse_ops_test.py index 91151ba8461c53..0b014b55d10cb7 100644 --- a/tensorflow/python/ops/sparse_ops_test.py +++ b/tensorflow/python/ops/sparse_ops_test.py @@ -21,6 +21,7 @@ from absl.testing import parameterized import numpy as np +from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops @@ -29,6 +30,7 @@ # Need array_grad to register gradient for Identity. from tensorflow.python.ops import array_grad # pylint: disable=unused-import from tensorflow.python.ops import array_ops +from tensorflow.python.ops import gen_sparse_ops from tensorflow.python.ops import gradient_checker_v2 as gradient_checker from tensorflow.python.ops import math_ops # Need sparse_grad to register gradient for SparseToDense. 
@@ -181,5 +183,57 @@ def testDenseSparseTensorMatMul(self): self.assertAllEqual(expected, result) +@test_util.run_all_in_graph_and_eager_modes +class RawOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase): + + def testSparseFillEmptyRowsGrad(self): + reverse_index_map = [2, 1] + grad_values = [0, 1, 2, 3] + d_values, d_default_value = self.evaluate( + gen_sparse_ops.SparseFillEmptyRowsGrad( + reverse_index_map=reverse_index_map, grad_values=grad_values)) + self.assertAllEqual([2, 1], d_values) + self.assertEqual(3, d_default_value) + + def testSparseFillEmptyRowsGradNegativeIndexMapValue(self): + reverse_index_map = [2, -1] + grad_values = [0, 1, 2, 3] + with self.assertRaisesRegex( + errors.InvalidArgumentError, + r'Elements in reverse index must be in \[0, 4\)'): + self.evaluate( + gen_sparse_ops.SparseFillEmptyRowsGrad( + reverse_index_map=reverse_index_map, grad_values=grad_values)) + + def testSparseFillEmptyRowsGradLargeIndexMapValue(self): + reverse_index_map = [2, 10] + grad_values = [0, 1, 2, 3] + with self.assertRaisesRegex( + errors.InvalidArgumentError, + r'Elements in reverse index must be in \[0, 4\)'): + self.evaluate( + gen_sparse_ops.SparseFillEmptyRowsGrad( + reverse_index_map=reverse_index_map, grad_values=grad_values)) + + def testSparseFillEmptyRowsGradMatrix(self): + reverse_index_map = [0, 1] + grad_values = [[0, 1], [2, 3]] + # Note: Eager mode and graph mode throw different errors here. Graph mode + # will fail with a ValueError from the shape checking logic, while Eager + # will fail with an InvalidArgumentError from the kernel itself. + if context.executing_eagerly(): + with self.assertRaisesRegex(errors.InvalidArgumentError, + r'grad_values must be a vector'): + self.evaluate( + gen_sparse_ops.SparseFillEmptyRowsGrad( + reverse_index_map=reverse_index_map, grad_values=grad_values)) + else: + with self.assertRaisesRegex(ValueError, + r'Shape must be rank 1 but is rank 2'): + self.evaluate( + gen_sparse_ops.SparseFillEmptyRowsGrad( + reverse_index_map=reverse_index_map, grad_values=grad_values)) + + if __name__ == '__main__': googletest.main() From d8c69c287fc1c50988ed8b71b4fab3cc5eec4620 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 19:14:17 -0700 Subject: [PATCH 032/256] Fix multiple vulnerabilities in `tf.experimental.dlpack.to_dlpack`. We have a use after free caused by memory coruption, a segmentation fault caused by memory corruption, several memory leaks and an undefined behavior when taking the reference of a nullptr. 
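One of these issues, shown as a standalone sketch (plain C++, unrelated to the dlpack API itself): taking the address of element 0 of an empty std::vector is already undefined behavior, while `data()` is well defined, which is why the shape and stride pointers below are switched over to `data()`:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
      std::vector<int64_t> shape;  // empty, e.g. the dims of a scalar tensor

      // Well defined: data() on an empty vector returns a pointer (possibly
      // nullptr) that simply must not be dereferenced.
      int64_t* fine = shape.data();

      // Undefined behavior: operator[] requires a valid index, so even forming
      // &shape[0] on an empty vector is UB, regardless of whether the result
      // is ever dereferenced.
      // int64_t* broken = &shape[0];  // do not do this

      std::cout << (fine == nullptr ? "data() returned nullptr\n"
                                    : "data() returned a pointer\n");
      return 0;
    }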
PiperOrigin-RevId: 332568894 Change-Id: Ife0fc05e103b35325094ae5d822ee5fdea764572 --- tensorflow/c/eager/dlpack.cc | 28 +++++++++++++++++++------ tensorflow/python/dlpack/dlpack_test.py | 8 +++++++ tensorflow/python/tfe_wrapper.cc | 9 +++++++- 3 files changed, 38 insertions(+), 7 deletions(-) diff --git a/tensorflow/c/eager/dlpack.cc b/tensorflow/c/eager/dlpack.cc index 45048bd6efb0cd..f6d6ee0710a1d3 100644 --- a/tensorflow/c/eager/dlpack.cc +++ b/tensorflow/c/eager/dlpack.cc @@ -248,21 +248,36 @@ void TFE_CallDLManagedTensorDeleter(void* dlm_ptr) { } void* TFE_HandleToDLPack(TFE_TensorHandle* h, TF_Status* status) { + auto tf_dlm_context = GetDlContext(h, status); + if (!status->status.ok()) { + return nullptr; + } + + auto* tf_dlm_data = TFE_TensorHandleDevicePointer(h, status); + if (!status->status.ok()) { + return nullptr; + } + const Tensor* tensor = GetTensorFromHandle(h, status); TF_DataType data_type = static_cast(tensor->dtype()); - TensorReference tensor_ref(*tensor); // This will call buf_->Ref() + auto tf_dlm_type = GetDlDataType(data_type, status); + if (!status->status.ok()) { + return nullptr; + } + + TensorReference tensor_ref(*tensor); // This will call buf_->Ref() auto* tf_dlm_tensor_ctx = new TfDlManagedTensorCtx(tensor_ref); tf_dlm_tensor_ctx->reference = tensor_ref; DLManagedTensor* dlm_tensor = &tf_dlm_tensor_ctx->tensor; dlm_tensor->manager_ctx = tf_dlm_tensor_ctx; dlm_tensor->deleter = &DLManagedTensorDeleter; - dlm_tensor->dl_tensor.ctx = GetDlContext(h, status); + dlm_tensor->dl_tensor.ctx = tf_dlm_context; int ndim = tensor->dims(); dlm_tensor->dl_tensor.ndim = ndim; - dlm_tensor->dl_tensor.data = TFE_TensorHandleDevicePointer(h, status); - dlm_tensor->dl_tensor.dtype = GetDlDataType(data_type, status); + dlm_tensor->dl_tensor.data = tf_dlm_data; + dlm_tensor->dl_tensor.dtype = tf_dlm_type; std::vector* shape_arr = &tf_dlm_tensor_ctx->shape; std::vector* stride_arr = &tf_dlm_tensor_ctx->strides; @@ -275,13 +290,14 @@ void* TFE_HandleToDLPack(TFE_TensorHandle* h, TF_Status* status) { (*stride_arr)[i] = (*shape_arr)[i + 1] * (*stride_arr)[i + 1]; } - dlm_tensor->dl_tensor.shape = &(*shape_arr)[0]; + dlm_tensor->dl_tensor.shape = shape_arr->data(); // There are two ways to represent compact row-major data // 1) nullptr indicates tensor is compact and row-majored. // 2) fill in the strides array as the real case for compact row-major data. // Here we choose option 2, since some frameworks didn't handle the strides // argument properly. 
- dlm_tensor->dl_tensor.strides = &(*stride_arr)[0]; + dlm_tensor->dl_tensor.strides = stride_arr->data(); + dlm_tensor->dl_tensor.byte_offset = 0; // TF doesn't handle the strides and byte_offsets here return static_cast(dlm_tensor); diff --git a/tensorflow/python/dlpack/dlpack_test.py b/tensorflow/python/dlpack/dlpack_test.py index af91da8051284a..df53220849cbd5 100644 --- a/tensorflow/python/dlpack/dlpack_test.py +++ b/tensorflow/python/dlpack/dlpack_test.py @@ -20,9 +20,11 @@ from absl.testing import parameterized import numpy as np + from tensorflow.python.dlpack import dlpack from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.platform import test @@ -95,6 +97,12 @@ def UnsupportedComplex64(): self.assertRaisesRegex(Exception, ".* is not supported by dlpack", UnsupportedComplex64) + def testMustPassTensorArgumentToDLPack(self): + with self.assertRaisesRegex( + errors.InvalidArgumentError, + "The argument to `to_dlpack` must be a TF tensor, not Python object"): + dlpack.to_dlpack([1]) + if __name__ == "__main__": ops.enable_eager_execution() diff --git a/tensorflow/python/tfe_wrapper.cc b/tensorflow/python/tfe_wrapper.cc index 88bb66f189b7e9..3401020ae993b8 100644 --- a/tensorflow/python/tfe_wrapper.cc +++ b/tensorflow/python/tfe_wrapper.cc @@ -1129,9 +1129,16 @@ PYBIND11_MODULE(_pywrap_tfe, m) { // DLPack functions m.def("TFE_ToDlpackCapsule", [](py::handle& o) { PyObject* eager_tensor_pyobject_ptr = o.ptr(); - TFE_TensorHandle* thandle = EagerTensor_Handle(eager_tensor_pyobject_ptr); tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); + + if (!EagerTensor_CheckExact(eager_tensor_pyobject_ptr)) { + status->status = tensorflow::errors::InvalidArgument( + "The argument to `to_dlpack` must be a TF tensor, not Python object"); + tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); + } + + TFE_TensorHandle* thandle = EagerTensor_Handle(eager_tensor_pyobject_ptr); void* dlm_ptr = tensorflow::TFE_HandleToDLPack(thandle, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); From 92d5b97a0ad3190b2e69c870e6d6d910b0d0bcb7 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 21:16:05 -0700 Subject: [PATCH 033/256] Fix undefined behavior in `tf.raw_ops.Switch` in eager mode. 
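In eager mode `Switch` forwards its input to exactly one of its two outputs, so the output slot for the untaken branch is never populated; copying every slot unconditionally then dereferences a null pointer. A minimal sketch of the defensive pattern applied below (stand-in types, not the runtime code):

    #include <iostream>
    #include <vector>

    struct FakeTensor { int value = 0; };  // stand-in for a real tensor type

    int main() {
      FakeTensor taken{42};
      // Slot 0 ("output_false") was never produced, slot 1 ("output_true") was.
      std::vector<const FakeTensor*> slots = {nullptr, &taken};

      std::vector<FakeTensor> outputs;
      for (const FakeTensor* slot : slots) {
        if (slot != nullptr) {
          outputs.push_back(*slot);          // copy the populated output
        } else {
          outputs.push_back(FakeTensor{});   // empty placeholder for the dead branch
        }
      }

      std::cout << outputs[0].value << " " << outputs[1].value << "\n";  // 0 42
      return 0;
    }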
PiperOrigin-RevId: 332578058 Change-Id: I9727571d2f21476b10d8aa27c1b7176564b76ac9 --- tensorflow/core/common_runtime/eager/kernel_and_device.cc | 7 ++++++- .../python/kernel_tests/control_flow_ops_py_test.py | 8 ++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/common_runtime/eager/kernel_and_device.cc b/tensorflow/core/common_runtime/eager/kernel_and_device.cc index 1a56cc3051096a..980a75bf254740 100644 --- a/tensorflow/core/common_runtime/eager/kernel_and_device.cc +++ b/tensorflow/core/common_runtime/eager/kernel_and_device.cc @@ -307,7 +307,12 @@ Status KernelAndDeviceOp::Run( if (outputs != nullptr) { outputs->clear(); for (int i = 0; i < context.num_outputs(); ++i) { - outputs->push_back(Tensor(*context.mutable_output(i))); + const auto* output_tensor = context.mutable_output(i); + if (output_tensor != nullptr) { + outputs->push_back(Tensor(*output_tensor)); + } else { + outputs->push_back(Tensor()); + } } } return Status::OK(); diff --git a/tensorflow/python/kernel_tests/control_flow_ops_py_test.py b/tensorflow/python/kernel_tests/control_flow_ops_py_test.py index eec7165d148c20..0f1485515fb623 100644 --- a/tensorflow/python/kernel_tests/control_flow_ops_py_test.py +++ b/tensorflow/python/kernel_tests/control_flow_ops_py_test.py @@ -4581,6 +4581,14 @@ def testUInt64SwitchMerge(self): result = control_flow_ops.merge([v_f, v_t]) self.evaluate(result) + def testSwitchEagerMode(self): + if not context.executing_eagerly(): + return + input_data = [1, 2, 3, 4] + vf, vt = control_flow_ops.switch(input_data, False) + self.assertAllEqual(vf, input_data) + self.assertAllEqual(vt, []) + @test_util.run_deprecated_v1 def testQIntArgAndRet(self): From 3ed271b0b05b4f1dfd5660944c54b5fe8cc3d8dc Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sun, 20 Sep 2020 15:25:12 -0700 Subject: [PATCH 034/256] Solve leftover from merge conflict --- tensorflow/cc/saved_model/loader.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/cc/saved_model/loader.cc b/tensorflow/cc/saved_model/loader.cc index 1ecc0ab7a50f22..2c1ea2ead14e80 100644 --- a/tensorflow/cc/saved_model/loader.cc +++ b/tensorflow/cc/saved_model/loader.cc @@ -21,7 +21,7 @@ limitations under the License. 
#include "tensorflow/cc/saved_model/loader_util.h" #include "tensorflow/cc/saved_model/reader.h" #include "tensorflow/core/framework/attr_value.pb.h" -#include "tensorflow/core/framework/function.proto.h" +#include "tensorflow/core/framework/function.pb.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/lib/io/path.h" From 9c91596d4d24bc07b6d36ae48581a2e7b2584edf Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sun, 20 Sep 2020 20:45:05 -0700 Subject: [PATCH 035/256] Fix missing import --- tensorflow/python/ops/sparse_ops_test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/python/ops/sparse_ops_test.py b/tensorflow/python/ops/sparse_ops_test.py index 0b014b55d10cb7..3a48a6aefd9c36 100644 --- a/tensorflow/python/ops/sparse_ops_test.py +++ b/tensorflow/python/ops/sparse_ops_test.py @@ -24,6 +24,7 @@ from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util From d99bd631ea9b67ffc39c22b35fbf7deca77ad1f7 Mon Sep 17 00:00:00 2001 From: TensorFlow Release Automation Date: Mon, 21 Sep 2020 17:27:07 -0700 Subject: [PATCH 036/256] Insert release notes place-fill --- RELEASE.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/RELEASE.md b/RELEASE.md index 136e71cd5de000..470a20345b6630 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,3 +1,7 @@ +# Release 2.3.1 + + + # Release 2.3.0 ## Major Features and Improvements From 0d41b1dfc97500e1177cb718a0b14b04914df661 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 21 Sep 2020 18:51:57 -0700 Subject: [PATCH 037/256] Update RELEASE.md --- RELEASE.md | 46 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 45 insertions(+), 1 deletion(-) diff --git a/RELEASE.md b/RELEASE.md index 470a20345b6630..5dc9456da9c8b1 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,6 +1,50 @@ # Release 2.3.1 - +## Bug Fixes and Other Changes +* Fixes an undefined behavior causing a segfault in `tf.raw_ops.Switch` + ([CVE-2020-15190](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15190)) +* Fixes three vulnerabilities in conversion to DLPack format + ([CVE-2020-15191](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15191), + [CVE-2020-15192](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15192), + [CVE-2020-15193](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15193)) +* Fixes two vulnerabilities in `SparseFillEmptyRowsGrad` + ([CVE-2020-15194](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15194), + [CVE-2020-15195](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15195)) +* Fixes several vulnerabilities in `RaggedCountSparseOutput` and + `SparseCountSparseOutput` operations + ([CVE-2020-15196](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15196), + [CVE-2020-15197](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15197), + [CVE-2020-15198](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15198), + [CVE-2020-15199](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15199), + [CVE-2020-15200](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15200), + [CVE-2020-15201](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15201)) +* Fixes an integer truncation vulnerability in code using the work sharder 
API + ([CVE-2020-15202](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15202)) +* Fixes a format string vulnerability in `tf.strings.as_string` + ([CVE-2020-15203](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15203)) +* Fixes segfault raised by calling session-only ops in eager mode + ([CVE-2020-15204](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15204)) +* Fixes data leak and potential ASLR violation from `tf.raw_ops.StringNGrams` + ([CVE-2020-15205](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15205)) +* Fixes segfaults caused by incomplete `SavedModel` validation + ([CVE-2020-15206](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15206)) +* Fixes a data corruption due to a bug in negative indexing support in TFLite + ([CVE-2020-15207](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15207)) +* Fixes a data corruption due to dimension mismatch in TFLite + ([CVE-2020-15208](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15208)) +* Fixes several vulnerabilities in TFLite saved model format + ([CVE-2020-15209](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15209), + [CVE-2020-15210](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15210), + [CVE-2020-15211](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15211)) +* Fixes several vulnerabilities in TFLite implementation of segment sum + ([CVE-2020-15212](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15212), + [CVE-2020-15213](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15213), + [CVE-2020-15214](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15214)) +* Updates `sqlite3` to `3.33.00` to handle + [CVE-2020-15358](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15358). +* Fixes deprecated usage of `collections` API +* Removes `scipy` dependency from `setup.py` since TensorFlow does not need it + to install the pip package # Release 2.3.0 From 4cf223069a94c78b208e6c829d5f938a0fae7d07 Mon Sep 17 00:00:00 2001 From: TensorFlow Release Automation Date: Mon, 21 Sep 2020 18:55:42 -0700 Subject: [PATCH 038/256] Update version numbers to 2.3.1 --- tensorflow/core/public/version.h | 2 +- tensorflow/tensorflow.bzl | 2 +- tensorflow/tools/pip_package/setup.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tensorflow/core/public/version.h b/tensorflow/core/public/version.h index 077fdffa2bfbe4..2e3bf62f6089a9 100644 --- a/tensorflow/core/public/version.h +++ b/tensorflow/core/public/version.h @@ -22,7 +22,7 @@ limitations under the License. // tensorflow/tools/pip_package/setup.py #define TF_MAJOR_VERSION 2 #define TF_MINOR_VERSION 3 -#define TF_PATCH_VERSION 0 +#define TF_PATCH_VERSION 1 // TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1", // "-beta", "-rc", "-rc.1") diff --git a/tensorflow/tensorflow.bzl b/tensorflow/tensorflow.bzl index 7a5e26fc7124c9..3ba217c49309ce 100644 --- a/tensorflow/tensorflow.bzl +++ b/tensorflow/tensorflow.bzl @@ -59,7 +59,7 @@ load( # not contain rc or alpha, only numbers. 
# Also update tensorflow/core/public/version.h # and tensorflow/tools/pip_package/setup.py -VERSION = "2.3.0" +VERSION = "2.3.1" VERSION_MAJOR = VERSION.split(".")[0] # Sanitize a dependency so that it works correctly from code that includes diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py index 47a3830f15a204..594e74f40c0340 100644 --- a/tensorflow/tools/pip_package/setup.py +++ b/tensorflow/tools/pip_package/setup.py @@ -49,7 +49,7 @@ # result for pip. # Also update tensorflow/tensorflow.bzl and # tensorflow/core/public/version.h -_VERSION = '2.3.0' +_VERSION = '2.3.1' REQUIRED_PACKAGES = [ 'absl-py >= 0.7.0', From 4a20f6eb6b560a2f4cabc8c9428170b9235d8a27 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Tue, 8 Sep 2020 13:09:20 -0700 Subject: [PATCH 039/256] Add cuda compute configs to the variable list for libtensorflow GPU builds. PiperOrigin-RevId: 330570975 Change-Id: Iafdd2711c505152cc719b2ce636a9aa18691e00f --- tensorflow/tools/ci_build/linux/libtensorflow_docker.sh | 1 + tensorflow/tools/ci_build/linux/libtensorflow_gpu.sh | 1 + 2 files changed, 2 insertions(+) diff --git a/tensorflow/tools/ci_build/linux/libtensorflow_docker.sh b/tensorflow/tools/ci_build/linux/libtensorflow_docker.sh index 1b255682671a78..fc8fad8eb76d5a 100755 --- a/tensorflow/tools/ci_build/linux/libtensorflow_docker.sh +++ b/tensorflow/tools/ci_build/linux/libtensorflow_docker.sh @@ -58,6 +58,7 @@ ${DOCKER_BINARY} run \ -e "TF_NEED_HDFS=0" \ -e "TF_NEED_CUDA=${TF_NEED_CUDA}" \ -e "TF_NEED_TENSORRT=${TF_NEED_CUDA}" \ + -e "TF_CUDA_COMPUTE_CAPABILITIES=${TF_CUDA_COMPUTE_CAPABILITIES}" \ -e "TF_NEED_ROCM=${TF_NEED_ROCM}" \ -e "TF_NEED_OPENCL_SYCL=0" \ "${DOCKER_IMAGE}" \ diff --git a/tensorflow/tools/ci_build/linux/libtensorflow_gpu.sh b/tensorflow/tools/ci_build/linux/libtensorflow_gpu.sh index 6dca0c37c87017..99397452acf22a 100755 --- a/tensorflow/tools/ci_build/linux/libtensorflow_gpu.sh +++ b/tensorflow/tools/ci_build/linux/libtensorflow_gpu.sh @@ -19,4 +19,5 @@ set -ex SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" export TF_NEED_CUDA=1 +export TF_CUDA_COMPUTE_CAPABILITIES="sm_35,sm_50,sm_60,sm_70,sm_75,compute_80" "${SCRIPT_DIR}/libtensorflow_docker.sh" From 9f72a2cce38712222061e61a6e19485a1710d8f3 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Thu, 10 Sep 2020 12:05:11 -0700 Subject: [PATCH 040/256] Fix libtensorflow CUDA compute capabilities. 
PiperOrigin-RevId: 330987201 Change-Id: I1a10343229216f70da922c163cc9ebcbfe069947 --- tensorflow/tools/ci_build/builds/libtensorflow.sh | 1 + tensorflow/tools/ci_build/linux/libtensorflow_gpu.sh | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/tools/ci_build/builds/libtensorflow.sh b/tensorflow/tools/ci_build/builds/libtensorflow.sh index a281afe7442d21..d0be3284eb45ee 100755 --- a/tensorflow/tools/ci_build/builds/libtensorflow.sh +++ b/tensorflow/tools/ci_build/builds/libtensorflow.sh @@ -56,6 +56,7 @@ function build_libtensorflow_tarball() { if [ "${TF_NEED_CUDA}" == "1" ]; then BAZEL_OPTS="${BAZEL_OPTS} --config=cuda --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain" export TF_NEED_ROCM=0 + export TF_CUDA_COMPUTE_CAPABILITIES="sm_35,sm_50,sm_60,sm_70,sm_75,compute_80" fi bazel clean --expunge yes "" | ./configure diff --git a/tensorflow/tools/ci_build/linux/libtensorflow_gpu.sh b/tensorflow/tools/ci_build/linux/libtensorflow_gpu.sh index 99397452acf22a..6dca0c37c87017 100755 --- a/tensorflow/tools/ci_build/linux/libtensorflow_gpu.sh +++ b/tensorflow/tools/ci_build/linux/libtensorflow_gpu.sh @@ -19,5 +19,4 @@ set -ex SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" export TF_NEED_CUDA=1 -export TF_CUDA_COMPUTE_CAPABILITIES="sm_35,sm_50,sm_60,sm_70,sm_75,compute_80" "${SCRIPT_DIR}/libtensorflow_docker.sh" From 9133b1663a449dee04098d9c62dd20ffb4aec699 Mon Sep 17 00:00:00 2001 From: Geeta Chavan Date: Fri, 2 Oct 2020 11:07:24 -0700 Subject: [PATCH 041/256] Remove Unsupported gpu architecture compute_80 --- tensorflow/tools/ci_build/builds/libtensorflow.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/tools/ci_build/builds/libtensorflow.sh b/tensorflow/tools/ci_build/builds/libtensorflow.sh index d0be3284eb45ee..a6fa334a395345 100755 --- a/tensorflow/tools/ci_build/builds/libtensorflow.sh +++ b/tensorflow/tools/ci_build/builds/libtensorflow.sh @@ -56,7 +56,7 @@ function build_libtensorflow_tarball() { if [ "${TF_NEED_CUDA}" == "1" ]; then BAZEL_OPTS="${BAZEL_OPTS} --config=cuda --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain" export TF_NEED_ROCM=0 - export TF_CUDA_COMPUTE_CAPABILITIES="sm_35,sm_50,sm_60,sm_70,sm_75,compute_80" + export TF_CUDA_COMPUTE_CAPABILITIES="sm_35,sm_50,sm_60,sm_70,sm_75" fi bazel clean --expunge yes "" | ./configure From 1b20c051f4588178446283088d8eb01035e2984b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Oct 2020 07:07:12 +0000 Subject: [PATCH 042/256] Bump junit in /tensorflow/java/maven/tensorflow-hadoop Bumps [junit](https://github.com/junit-team/junit4) from 4.11 to 4.13.1. 
- [Release notes](https://github.com/junit-team/junit4/releases) - [Changelog](https://github.com/junit-team/junit4/blob/main/doc/ReleaseNotes4.11.md) - [Commits](https://github.com/junit-team/junit4/compare/r4.11...r4.13.1) Signed-off-by: dependabot[bot] --- tensorflow/java/maven/tensorflow-hadoop/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/java/maven/tensorflow-hadoop/pom.xml b/tensorflow/java/maven/tensorflow-hadoop/pom.xml index e900d81e5dab50..675a3369cf1ff3 100644 --- a/tensorflow/java/maven/tensorflow-hadoop/pom.xml +++ b/tensorflow/java/maven/tensorflow-hadoop/pom.xml @@ -16,7 +16,7 @@ 1.6 2.6.0 3.5.1 - 4.11 + 4.13.1 From e99de30c599d68114ec8a48f0b31ad147e18735a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Oct 2020 06:46:11 +0000 Subject: [PATCH 043/256] Bump junit in /tensorflow/java/maven/spark-tensorflow-connector Bumps [junit](https://github.com/junit-team/junit4) from 4.11 to 4.13.1. - [Release notes](https://github.com/junit-team/junit4/releases) - [Changelog](https://github.com/junit-team/junit4/blob/main/doc/ReleaseNotes4.11.md) - [Commits](https://github.com/junit-team/junit4/compare/r4.11...r4.13.1) Signed-off-by: dependabot[bot] --- tensorflow/java/maven/spark-tensorflow-connector/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/java/maven/spark-tensorflow-connector/pom.xml b/tensorflow/java/maven/spark-tensorflow-connector/pom.xml index f40090ac45d6d9..19f5e29da2bf38 100644 --- a/tensorflow/java/maven/spark-tensorflow-connector/pom.xml +++ b/tensorflow/java/maven/spark-tensorflow-connector/pom.xml @@ -35,7 +35,7 @@ 1.8 2.4.5 2.7.3 - 4.11 + 4.13.1 From 344778c33f19326db1744600377219caabd3c14b Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Wed, 11 Nov 2020 21:35:49 +0000 Subject: [PATCH 044/256] Bump libjpeg-turbo from 2.0.4 to 2.0.5 It looks like the latest libjpeg-turbo is 2.0.5 so this PR bumps the version (currently on 2.0.4). Signed-off-by: Yong Tang --- third_party/jpeg/workspace.bzl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/third_party/jpeg/workspace.bzl b/third_party/jpeg/workspace.bzl index c458ff12ba8248..60f989df722152 100644 --- a/third_party/jpeg/workspace.bzl +++ b/third_party/jpeg/workspace.bzl @@ -6,11 +6,11 @@ def repo(): third_party_http_archive( name = "libjpeg_turbo", urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/github.com/libjpeg-turbo/libjpeg-turbo/archive/2.0.4.tar.gz", - "https://github.com/libjpeg-turbo/libjpeg-turbo/archive/2.0.4.tar.gz", + "https://storage.googleapis.com/mirror.tensorflow.org/github.com/libjpeg-turbo/libjpeg-turbo/archive/2.0.5.tar.gz", + "https://github.com/libjpeg-turbo/libjpeg-turbo/archive/2.0.5.tar.gz", ], - sha256 = "7777c3c19762940cff42b3ba4d7cd5c52d1671b39a79532050c85efb99079064", - strip_prefix = "libjpeg-turbo-2.0.4", + sha256 = "b3090cd37b5a8b3e4dbd30a1311b3989a894e5d3c668f14cbc6739d77c9402b7", + strip_prefix = "libjpeg-turbo-2.0.5", build_file = "//third_party/jpeg:BUILD.bazel", system_build_file = "//third_party/jpeg:BUILD.system", ) From 05778aea110ba87404d50ae8798c2c5312e958fb Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 24 Nov 2020 11:40:42 -0800 Subject: [PATCH 045/256] Default initialize fixed point Eigen types. In certain cases, tensors are filled with default values of the type. But, for these fixed point types, these values were uninitialized. 
Thus, we would have uninitialized memory access bugs, some of which were caught by MSAN. PiperOrigin-RevId: 344101137 Change-Id: I14555fda74dca3b5f1582da9008901937e3f14e2 --- .../Eigen/CXX11/src/FixedPoint/FixedPointTypes.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/third_party/eigen3/unsupported/Eigen/CXX11/src/FixedPoint/FixedPointTypes.h b/third_party/eigen3/unsupported/Eigen/CXX11/src/FixedPoint/FixedPointTypes.h index ff359cedced961..fd35360da28208 100644 --- a/third_party/eigen3/unsupported/Eigen/CXX11/src/FixedPoint/FixedPointTypes.h +++ b/third_party/eigen3/unsupported/Eigen/CXX11/src/FixedPoint/FixedPointTypes.h @@ -49,7 +49,7 @@ struct scalar_product_traits { // the compiler from silently type cast the mantissa into a bigger or a smaller // representation. struct QInt8 { - QInt8() {} + QInt8() : value(0) {} QInt8(const int8_t v) : value(v) {} QInt8(const QInt32 v); @@ -59,7 +59,7 @@ struct QInt8 { }; struct QUInt8 { - QUInt8() {} + QUInt8() : value(0) {} QUInt8(const uint8_t v) : value(v) {} QUInt8(const QInt32 v); @@ -69,7 +69,7 @@ struct QUInt8 { }; struct QInt16 { - QInt16() {} + QInt16() : value(0) {} QInt16(const int16_t v) : value(v) {} QInt16(const QInt32 v); operator int() const { return static_cast(value); } @@ -78,7 +78,7 @@ struct QInt16 { }; struct QUInt16 { - QUInt16() {} + QUInt16() : value(0) {} QUInt16(const uint16_t v) : value(v) {} QUInt16(const QInt32 v); operator int() const { return static_cast(value); } @@ -87,7 +87,7 @@ struct QUInt16 { }; struct QInt32 { - QInt32() {} + QInt32() : value(0) {} QInt32(const int8_t v) : value(v) {} QInt32(const int32_t v) : value(v) {} QInt32(const uint32_t v) : value(static_cast(v)) {} From 0b289c33bed2e9338d42378ffffeb71552c3caeb Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 7 Dec 2020 11:15:21 -0800 Subject: [PATCH 046/256] Validate that `DataFormat*` attributes form a permutation. The `src_format` and `dst_format` attributes for the `DataFormatDimMap` and `DataFormatVecPermute` raw ops are supposed to determine a permutation. However, this was not validated and could result in unitialized memory accesses as well as writes outside of bounds and potential crashes. While here, we also test that the format attributes have the needed length, add tests for all validation failure cases, remove unnecessary calls to `strings::StrCat`, and fix a few grammar errors. This will be cherry-picked on the supported release branches. PiperOrigin-RevId: 346135579 Change-Id: I1c76392382c89ad8f072d5bc93d70669851eb404 --- tensorflow/core/kernels/data_format_ops.cc | 75 +++++++++++++++-- tensorflow/python/ops/nn_test.py | 95 ++++++++++++++++++++++ 2 files changed, 162 insertions(+), 8 deletions(-) diff --git a/tensorflow/core/kernels/data_format_ops.cc b/tensorflow/core/kernels/data_format_ops.cc index 181aa1b8a2cab2..771986f2ee84d4 100644 --- a/tensorflow/core/kernels/data_format_ops.cc +++ b/tensorflow/core/kernels/data_format_ops.cc @@ -18,16 +18,52 @@ limitations under the License. #define EIGEN_USE_THREADS #include "tensorflow/core/kernels/data_format_ops.h" + +#include + #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" +#include "tensorflow/core/platform/errors.h" namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; +// Ensure that `src` and `dst` define a valid permutation. 
+// Ops defined in this file assume that user specifies a permutation via two +// string attributes. This check validates that these attributes properly define +// it to prevent security vulnerabilities. +static bool IsValidPermutation(const std::string& src, const std::string& dst) { + if (src.size() != dst.size()) { + return false; + } + + std::map characters; + + // Every character in `src` must be present only once + for (const auto c : src) { + if (characters[c]) { + return false; + } + characters[c] = true; + } + + // Every character in `dst` must show up in `src` exactly once + for (const auto c : dst) { + if (!characters[c]) { + return false; + } + characters[c] = false; + } + + // At this point, characters[] has been switched to true and false exactly + // once for all character in `src` (and `dst`) so we have a valid permutation + return true; +} + template class DataFormatDimMapOp : public OpKernel { public: @@ -37,15 +73,20 @@ class DataFormatDimMapOp : public OpKernel { OP_REQUIRES_OK(context, context->GetAttr("src_format", &src_format)); string dst_format; OP_REQUIRES_OK(context, context->GetAttr("dst_format", &dst_format)); - OP_REQUIRES(context, src_format.size() == 4, - errors::InvalidArgument(strings::StrCat( - "Source format must of length 4, received src_format = ", - src_format))); + OP_REQUIRES(context, src_format.size() == 4 || src_format.size() == 5, + errors::InvalidArgument( + "Source format must be of length 4 or 5, received " + "src_format = ", + src_format)); + OP_REQUIRES(context, dst_format.size() == 4 || dst_format.size() == 5, + errors::InvalidArgument("Destination format must be of length " + "4 or 5, received dst_format = ", + dst_format)); OP_REQUIRES( - context, dst_format.size() == 4, - errors::InvalidArgument(strings::StrCat( - "Destination format must of length 4, received dst_format = ", - dst_format))); + context, IsValidPermutation(src_format, dst_format), + errors::InvalidArgument( + "Destination and source format must determine a permutation, got ", + src_format, " and ", dst_format)); dst_idx_ = Tensor(DT_INT32, {static_cast(src_format.size())}); for (int i = 0; i < src_format.size(); ++i) { for (int j = 0; j < dst_format.size(); ++j) { @@ -77,8 +118,22 @@ class DataFormatVecPermuteOp : public OpKernel { : OpKernel(context) { string src_format; OP_REQUIRES_OK(context, context->GetAttr("src_format", &src_format)); + OP_REQUIRES(context, src_format.size() == 4 || src_format.size() == 5, + errors::InvalidArgument( + "Source format must be of length 4 or 5, received " + "src_format = ", + src_format)); string dst_format; OP_REQUIRES_OK(context, context->GetAttr("dst_format", &dst_format)); + OP_REQUIRES(context, dst_format.size() == 4 || dst_format.size() == 5, + errors::InvalidArgument("Destination format must be of length " + "4 or 5, received dst_format = ", + dst_format)); + OP_REQUIRES( + context, IsValidPermutation(src_format, dst_format), + errors::InvalidArgument( + "Destination and source format must determine a permutation, got ", + src_format, " and ", dst_format)); src_format_ = src_format; dst_format_ = dst_format; } @@ -124,6 +179,10 @@ class DataFormatVecPermuteOp : public OpKernel { }; keep_only_spatial_dimensions(&src_format_str); keep_only_spatial_dimensions(&dst_format_str); + OP_REQUIRES(context, + src_format_str.size() == 2 && dst_format_str.size() == 2, + errors::InvalidArgument( + "Format specifier must contain H and W for 2D case")); } ComputeDstIndex(src_format_str, dst_format_str, input.dims(), &dst_idx); diff --git 
a/tensorflow/python/ops/nn_test.py b/tensorflow/python/ops/nn_test.py index bfe11b63eea8d1..345abc53546a6d 100644 --- a/tensorflow/python/ops/nn_test.py +++ b/tensorflow/python/ops/nn_test.py @@ -27,6 +27,7 @@ from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import test_util @@ -1216,6 +1217,46 @@ def testArbitraryASCII(self): y_val = self.evaluate(y) self.assertAllEqual(y_val, y_val_expected) + @test_util.disable_xla("XLA catches the error and rethrows as different one") + def testInvalidLength(self): + x = [-4, -3, -2, -1, 0, 1, 2, 3] + with self.assertRaisesRegex(errors.InvalidArgumentError, + "Source format must be of length 4 or 5"): + op = nn_ops.data_format_dim_map( + x, src_format="12345678", dst_format="87654321") + with test_util.use_gpu(): + self.evaluate(op) + + @test_util.disable_xla("XLA catches the error and rethrows as different one") + def testDuplicateSrc(self): + x = [-4, -3, -2, -1, 0, 1, 2, 3] + with self.assertRaisesRegex( + errors.InvalidArgumentError, + "Destination and source format must determine a permutation"): + op = nn_ops.data_format_dim_map(x, src_format="1233", dst_format="4321") + with test_util.use_gpu(): + self.evaluate(op) + + @test_util.disable_xla("XLA catches the error and rethrows as different one") + def testDuplicateDst(self): + x = [-4, -3, -2, -1, 0, 1, 2, 3] + with self.assertRaisesRegex( + errors.InvalidArgumentError, + "Destination and source format must determine a permutation"): + op = nn_ops.data_format_dim_map(x, src_format="1234", dst_format="3321") + with test_util.use_gpu(): + self.evaluate(op) + + @test_util.disable_xla("XLA catches the error and rethrows as different one") + def testExtraSpecifiers(self): + x = [-4, -3, -2, -1, 0, 1, 2, 3] + with self.assertRaisesRegex( + errors.InvalidArgumentError, + "Destination and source format must determine a permutation"): + op = nn_ops.data_format_dim_map(x, src_format="1234", dst_format="5321") + with test_util.use_gpu(): + self.evaluate(op) + class DataFormatVectorPermuteTest(test_lib.TestCase): @@ -1317,6 +1358,60 @@ def testNCHWToNHWC2D(self): y_val = self.evaluate(y) self.assertAllEqual(y_val, [[7, 4], [4, 5], [5, 1], [9, 3]]) + @test_util.disable_xla("XLA catches the error and rethrows as different one") + def testInvalidLength(self): + x = [0, 1, 2, 3] + with self.assertRaisesRegex(errors.InvalidArgumentError, + "Source format must be of length 4 or 5"): + op = nn_ops.data_format_vec_permute( + x, src_format="12345678", dst_format="87654321") + with test_util.use_gpu(): + self.evaluate(op) + + @test_util.disable_xla("XLA catches the error and rethrows as different one") + def testDuplicateSrc(self): + x = [0, 1, 2, 3] + with self.assertRaisesRegex( + errors.InvalidArgumentError, + "Destination and source format must determine a permutation"): + op = nn_ops.data_format_vec_permute( + x, src_format="1233", dst_format="4321") + with test_util.use_gpu(): + self.evaluate(op) + + @test_util.disable_xla("XLA catches the error and rethrows as different one") + def testDuplicateDst(self): + x = [0, 1, 2, 3] + with self.assertRaisesRegex( + errors.InvalidArgumentError, + "Destination and source format must determine a permutation"): + op = nn_ops.data_format_vec_permute( + x, src_format="1234", dst_format="3321") + with 
test_util.use_gpu(): + self.evaluate(op) + + @test_util.disable_xla("XLA catches the error and rethrows as different one") + def testExtraSpecifiers(self): + x = [0, 1, 2, 3] + with self.assertRaisesRegex( + errors.InvalidArgumentError, + "Destination and source format must determine a permutation"): + op = nn_ops.data_format_vec_permute( + x, src_format="1234", dst_format="5321") + with test_util.use_gpu(): + self.evaluate(op) + + @test_util.disable_xla("XLA catches the error and rethrows as different one") + def test2DNoWH(self): + x = [[0, 1], [2, 3]] + with self.assertRaisesRegex( + errors.InvalidArgumentError, + "Format specifier must contain H and W for 2D case"): + op = nn_ops.data_format_vec_permute( + x, src_format="1234", dst_format="4321") + with test_util.use_gpu(): + self.evaluate(op) + @test_util.run_all_in_graph_and_eager_modes class AvgPoolTest(test_lib.TestCase): From 211469d43feae91e122e098d49666c879382b7b9 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 4 Dec 2020 17:06:23 -0800 Subject: [PATCH 047/256] Mark `MemmappedTensorAllocator` as returning opaque handle. This allocator is used for `ImmutableConstantOp` and it returns a handle to the contents of a memory mapped file which is supposed to represent a tensor. For tensors of complex types (resources, variables and strings), allocators which are not marked as returning opaque handles will call placement new to initialize each element. This means writing to the buffer. However, in our case, the buffer is immutable and already contains the tensor data. Hence, writing to it is both destructive and causes a crash. PiperOrigin-RevId: 345786451 Change-Id: I46369c50fa60b3431709ffe068a728d3061f49c4 --- tensorflow/core/kernels/immutable_constant_op.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tensorflow/core/kernels/immutable_constant_op.cc b/tensorflow/core/kernels/immutable_constant_op.cc index 0dd08c694eb6c5..1cfbdb82778913 100644 --- a/tensorflow/core/kernels/immutable_constant_op.cc +++ b/tensorflow/core/kernels/immutable_constant_op.cc @@ -62,6 +62,12 @@ class MemmappedTensorAllocator : public Allocator { void set_delete_on_deallocate() { delete_on_deallocate_ = true; } + // Make sure tensors or complex types (strings, variants, resources) don't get + // their constructor called via a placement new since that would require + // writing to immutable data. + // See also: tensorflow/core/framework/typed_allocator.h + bool AllocatesOpaqueHandle() const override { return true; } + private: std::unique_ptr memory_region_; // If there is an error during allocation we keep it in this status. From b95ccc06e049078a8deefcc2bfaadf56e1b1c87c Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 7 Dec 2020 20:31:31 -0800 Subject: [PATCH 048/256] Prevent CHECK-fail in LSTM/GRU with zero-length input. 
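A minimal sketch of the scenario this guards against (illustration only; whether a given model actually reaches the cuDNN sequence-descriptor path depends on the device and kernel selection, and the layer configuration below is a hypothetical example):

    import tensorflow as tf

    # A batch whose time dimension is empty, i.e. max_seq_length == 0.
    empty_batch = tf.zeros([8, 0, 5])  # [batch, time, features]

    # On a GPU this layer may lower to the cuDNN RNN kernels. Previously a
    # zero-length sequence tripped CHECK_GT(max_seq_length, 0) and aborted the
    # whole process; with this change the kernel reports InvalidArgument instead.
    outputs = tf.keras.layers.LSTM(4)(empty_batch)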
PiperOrigin-RevId: 346239181 Change-Id: I5f233dbc076aab7bb4e31ba24f5abd4eaf99ea4f --- tensorflow/stream_executor/cuda/cuda_dnn.cc | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tensorflow/stream_executor/cuda/cuda_dnn.cc b/tensorflow/stream_executor/cuda/cuda_dnn.cc index a97850bd8d5348..5ae19f27ec6d53 100644 --- a/tensorflow/stream_executor/cuda/cuda_dnn.cc +++ b/tensorflow/stream_executor/cuda/cuda_dnn.cc @@ -1474,7 +1474,9 @@ class CudnnRnnSequenceTensorDescriptor static port::StatusOr<CudnnRnnSequenceTensorDescriptor> Create( GpuExecutor* parent, int max_seq_length, int batch_size, int data_size, cudnnDataType_t data_type) { - CHECK_GT(max_seq_length, 0); + if (max_seq_length <= 0) { + return port::Status(port::error::INVALID_ARGUMENT, "max_seq_length <= 0"); + } int dims[] = {batch_size, data_size, 1}; int strides[] = {dims[1] * dims[2], dims[2], 1}; TensorDescriptor tensor_desc = CreateTensorDescriptor(); @@ -1495,7 +1497,9 @@ class CudnnRnnSequenceTensorDescriptor const absl::Span<const int>& seq_lengths, bool time_major, cudnnDataType_t data_type) { #if CUDNN_VERSION >= 7201 - CHECK_GT(max_seq_length, 0); + if (max_seq_length <= 0) { + return port::Status(port::error::INVALID_ARGUMENT, "max_seq_length <= 0"); + } int dims[] = {batch_size, data_size, 1}; int strides[] = {dims[1] * dims[2], dims[2], 1}; TensorDescriptor tensor_desc = CreateTensorDescriptor(); From 16bca6296a796108eb3b264c08a7d9ac2d04261b Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 8 Dec 2020 09:31:57 -0800 Subject: [PATCH 049/256] Prevent uninitialized memory access in `GraphConstructor::MakeEdge` The `MakeEdge` implementation assumes that there exists an output at `output_index` of `src` node and an input at `input_index` of `dst` node. However, if this is not the case, this results in accessing data out of bounds. Because we are accessing an array that is a private member of a class and only in read-only mode, this usually results only in uninitialized memory access. However, it is reasonable to think that malicious users could manipulate these indexes to actually read data outside the class, thus resulting in information leakage and further exploits. PiperOrigin-RevId: 346343288 Change-Id: I2127da27c2023d27f26efd39afa6c853385cab6f --- tensorflow/core/common_runtime/graph_constructor.cc | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tensorflow/core/common_runtime/graph_constructor.cc b/tensorflow/core/common_runtime/graph_constructor.cc index ab5b086b25c55b..4456c28be9ea21 100644 --- a/tensorflow/core/common_runtime/graph_constructor.cc +++ b/tensorflow/core/common_runtime/graph_constructor.cc @@ -44,6 +44,7 @@ limitations under the License. #include "tensorflow/core/lib/gtl/inlined_vector.h" #include "tensorflow/core/lib/strings/scanner.h" #include "tensorflow/core/lib/strings/str_util.h" +#include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/public/version.h" @@ -1425,6 +1426,17 @@ void GraphConstructor::Undo() { Status GraphConstructor::MakeEdge(Node* src, int output_index, Node* dst, int input_index) { + if (output_index >= src->num_outputs()) { + return errors::InvalidArgument( + "Output ", output_index, " of node ", src->name(), + " does not exist. Node only has ", src->num_outputs(), " outputs."); + } + if (input_index >= dst->num_inputs()) { + return errors::InvalidArgument( + "Input ", input_index, " of node ", dst->name(), + " does not exist.
Node only has ", dst->num_inputs(), " inputs."); + } + DataType src_out = src->output_type(output_index); DataType dst_in = dst->input_type(input_index); if (!TypesCompatible(dst_in, src_out)) { From 948cde7077027bae320b16e08d080c3e175d189c Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Fri, 20 Nov 2020 04:00:33 +0000 Subject: [PATCH 050/256] Update PCRE library from 8.42 to 8.44 This PR updates PCRE library from 8.42 to 8.44. Note there is a CVS related to old 8.42 (https://nvd.nist.gov/vuln/detail/CVE-2019-20838#VulnChangeHistorySection) Signed-off-by: Yong Tang --- tensorflow/workspace.bzl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl index c9615431456ac2..1262aff4182635 100755 --- a/tensorflow/workspace.bzl +++ b/tensorflow/workspace.bzl @@ -647,12 +647,12 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""): tf_http_archive( name = "pcre", build_file = clean_dep("//third_party:pcre.BUILD"), - sha256 = "69acbc2fbdefb955d42a4c606dfde800c2885711d2979e356c0636efde9ec3b5", - strip_prefix = "pcre-8.42", + sha256 = "aecafd4af3bd0f3935721af77b889d9024b2e01d96b58471bd91a3063fb47728", + strip_prefix = "pcre-8.44", system_build_file = clean_dep("//third_party/systemlibs:pcre.BUILD"), urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/ftp.exim.org/pub/pcre/pcre-8.42.tar.gz", - "https://ftp.exim.org/pub/pcre/pcre-8.42.tar.gz", + "https://storage.googleapis.com/mirror.tensorflow.org/ftp.exim.org/pub/pcre/pcre-8.44.tar.gz", + "https://ftp.exim.org/pub/pcre/pcre-8.44.tar.gz", ], ) From a260612a4cc6f4cc9c2ffdd06408e94a96bf51fb Mon Sep 17 00:00:00 2001 From: Austin Anderson Date: Wed, 9 Dec 2020 15:16:22 -0800 Subject: [PATCH 051/256] Pin pip version and fix apt-get --- .../tools/dockerfiles/dockerfiles/cpu-jupyter.Dockerfile | 6 ++---- tensorflow/tools/dockerfiles/dockerfiles/cpu.Dockerfile | 2 +- .../dockerfiles/dockerfiles/devel-cpu-jupyter.Dockerfile | 6 ++---- .../tools/dockerfiles/dockerfiles/devel-cpu.Dockerfile | 2 +- .../dockerfiles/dockerfiles/devel-gpu-jupyter.Dockerfile | 6 ++---- .../tools/dockerfiles/dockerfiles/devel-gpu.Dockerfile | 2 +- .../tools/dockerfiles/dockerfiles/gpu-jupyter.Dockerfile | 6 ++---- tensorflow/tools/dockerfiles/dockerfiles/gpu.Dockerfile | 2 +- .../mkl_horovod/devel-horovod-jupyter.Dockerfile | 6 ++---- .../dockerfiles/mkl_horovod/devel-horovod.Dockerfile | 2 +- .../dockerfiles/mkl_horovod/horovod-jupyter.Dockerfile | 6 ++---- .../dockerfiles/dockerfiles/mkl_horovod/horovod.Dockerfile | 2 +- .../dockerfiles/ppc64le/cpu-ppc64le-jupyter.Dockerfile | 6 ++---- .../dockerfiles/dockerfiles/ppc64le/cpu-ppc64le.Dockerfile | 2 +- .../ppc64le/devel-cpu-ppc64le-jupyter.Dockerfile | 6 ++---- .../dockerfiles/ppc64le/devel-cpu-ppc64le.Dockerfile | 2 +- .../ppc64le/devel-gpu-ppc64le-jupyter.Dockerfile | 6 ++---- .../dockerfiles/ppc64le/devel-gpu-ppc64le.Dockerfile | 2 +- .../dockerfiles/ppc64le/gpu-ppc64le-jupyter.Dockerfile | 6 ++---- .../dockerfiles/dockerfiles/ppc64le/gpu-ppc64le.Dockerfile | 2 +- .../tools/dockerfiles/partials/jupyter.partial.Dockerfile | 4 +--- .../dockerfiles/partials/ubuntu/python.partial.Dockerfile | 2 +- 22 files changed, 32 insertions(+), 54 deletions(-) diff --git a/tensorflow/tools/dockerfiles/dockerfiles/cpu-jupyter.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/cpu-jupyter.Dockerfile index 107d1b426c1722..deec0d21cb0c15 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/cpu-jupyter.Dockerfile +++ 
b/tensorflow/tools/dockerfiles/dockerfiles/cpu-jupyter.Dockerfile @@ -33,7 +33,7 @@ RUN apt-get update && apt-get install -y \ python3-pip RUN python3 -m pip --no-cache-dir install --upgrade \ - pip \ + "pip<20.3" \ setuptools # Some TF tools expect a "python" binary @@ -60,9 +60,7 @@ RUN jupyter serverextension enable --py jupyter_http_over_ws RUN mkdir -p /tf/tensorflow-tutorials && chmod -R a+rwx /tf/ RUN mkdir /.local && chmod a+rwx /.local -RUN apt-get install -y --no-install-recommends wget -# some examples require git to fetch dependencies -RUN apt-get install -y --no-install-recommends git +RUN apt-get update && apt-get install -y --no-install-recommends wget git WORKDIR /tf/tensorflow-tutorials RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/classification.ipynb RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/overfit_and_underfit.ipynb diff --git a/tensorflow/tools/dockerfiles/dockerfiles/cpu.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/cpu.Dockerfile index e83592c5fd2408..e12571e34c1f22 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/cpu.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/cpu.Dockerfile @@ -33,7 +33,7 @@ RUN apt-get update && apt-get install -y \ python3-pip RUN python3 -m pip --no-cache-dir install --upgrade \ - pip \ + "pip<20.3" \ setuptools # Some TF tools expect a "python" binary diff --git a/tensorflow/tools/dockerfiles/dockerfiles/devel-cpu-jupyter.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/devel-cpu-jupyter.Dockerfile index 78ec4416f47bc1..a496ad79df2cc4 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/devel-cpu-jupyter.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/devel-cpu-jupyter.Dockerfile @@ -62,7 +62,7 @@ RUN apt-get update && apt-get install -y \ python3-pip RUN python3 -m pip --no-cache-dir install --upgrade \ - pip \ + "pip<20.3" \ setuptools # Some TF tools expect a "python" binary @@ -111,9 +111,7 @@ RUN jupyter serverextension enable --py jupyter_http_over_ws RUN mkdir -p /tf/tensorflow-tutorials && chmod -R a+rwx /tf/ RUN mkdir /.local && chmod a+rwx /.local -RUN apt-get install -y --no-install-recommends wget -# some examples require git to fetch dependencies -RUN apt-get install -y --no-install-recommends git +RUN apt-get update && apt-get install -y --no-install-recommends wget git WORKDIR /tf/tensorflow-tutorials RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/classification.ipynb RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/overfit_and_underfit.ipynb diff --git a/tensorflow/tools/dockerfiles/dockerfiles/devel-cpu.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/devel-cpu.Dockerfile index 018b7bb35bac12..4973ddd8026349 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/devel-cpu.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/devel-cpu.Dockerfile @@ -62,7 +62,7 @@ RUN apt-get update && apt-get install -y \ python3-pip RUN python3 -m pip --no-cache-dir install --upgrade \ - pip \ + "pip<20.3" \ setuptools # Some TF tools expect a "python" binary diff --git a/tensorflow/tools/dockerfiles/dockerfiles/devel-gpu-jupyter.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/devel-gpu-jupyter.Dockerfile index b99c384fe20e38..d7eca09e5a322e 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/devel-gpu-jupyter.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/devel-gpu-jupyter.Dockerfile @@ -104,7 +104,7 
@@ RUN apt-get update && apt-get install -y \ python3-pip RUN python3 -m pip --no-cache-dir install --upgrade \ - pip \ + "pip<20.3" \ setuptools # Some TF tools expect a "python" binary @@ -153,9 +153,7 @@ RUN jupyter serverextension enable --py jupyter_http_over_ws RUN mkdir -p /tf/tensorflow-tutorials && chmod -R a+rwx /tf/ RUN mkdir /.local && chmod a+rwx /.local -RUN apt-get install -y --no-install-recommends wget -# some examples require git to fetch dependencies -RUN apt-get install -y --no-install-recommends git +RUN apt-get update && apt-get install -y --no-install-recommends wget git WORKDIR /tf/tensorflow-tutorials RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/classification.ipynb RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/overfit_and_underfit.ipynb diff --git a/tensorflow/tools/dockerfiles/dockerfiles/devel-gpu.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/devel-gpu.Dockerfile index 4493964cffc523..9602892bbf456e 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/devel-gpu.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/devel-gpu.Dockerfile @@ -104,7 +104,7 @@ RUN apt-get update && apt-get install -y \ python3-pip RUN python3 -m pip --no-cache-dir install --upgrade \ - pip \ + "pip<20.3" \ setuptools # Some TF tools expect a "python" binary diff --git a/tensorflow/tools/dockerfiles/dockerfiles/gpu-jupyter.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/gpu-jupyter.Dockerfile index d4d913ce34a1c0..05f49d31fce62c 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/gpu-jupyter.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/gpu-jupyter.Dockerfile @@ -82,7 +82,7 @@ RUN apt-get update && apt-get install -y \ python3-pip RUN python3 -m pip --no-cache-dir install --upgrade \ - pip \ + "pip<20.3" \ setuptools # Some TF tools expect a "python" binary @@ -109,9 +109,7 @@ RUN jupyter serverextension enable --py jupyter_http_over_ws RUN mkdir -p /tf/tensorflow-tutorials && chmod -R a+rwx /tf/ RUN mkdir /.local && chmod a+rwx /.local -RUN apt-get install -y --no-install-recommends wget -# some examples require git to fetch dependencies -RUN apt-get install -y --no-install-recommends git +RUN apt-get update && apt-get install -y --no-install-recommends wget git WORKDIR /tf/tensorflow-tutorials RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/classification.ipynb RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/overfit_and_underfit.ipynb diff --git a/tensorflow/tools/dockerfiles/dockerfiles/gpu.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/gpu.Dockerfile index f563f2fc909804..4730a0dc145edf 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/gpu.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/gpu.Dockerfile @@ -82,7 +82,7 @@ RUN apt-get update && apt-get install -y \ python3-pip RUN python3 -m pip --no-cache-dir install --upgrade \ - pip \ + "pip<20.3" \ setuptools # Some TF tools expect a "python" binary diff --git a/tensorflow/tools/dockerfiles/dockerfiles/mkl_horovod/devel-horovod-jupyter.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/mkl_horovod/devel-horovod-jupyter.Dockerfile index 5ed856259a9170..baa5e7ce863562 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/mkl_horovod/devel-horovod-jupyter.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/mkl_horovod/devel-horovod-jupyter.Dockerfile @@ -62,7 +62,7 @@ RUN apt-get update && apt-get 
install -y \ python3-pip RUN python3 -m pip --no-cache-dir install --upgrade \ - pip \ + "pip<20.3" \ setuptools # Some TF tools expect a "python" binary @@ -163,9 +163,7 @@ RUN jupyter serverextension enable --py jupyter_http_over_ws RUN mkdir -p /tf/tensorflow-tutorials && chmod -R a+rwx /tf/ RUN mkdir /.local && chmod a+rwx /.local -RUN apt-get install -y --no-install-recommends wget -# some examples require git to fetch dependencies -RUN apt-get install -y --no-install-recommends git +RUN apt-get update && apt-get install -y --no-install-recommends wget git WORKDIR /tf/tensorflow-tutorials RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/classification.ipynb RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/overfit_and_underfit.ipynb diff --git a/tensorflow/tools/dockerfiles/dockerfiles/mkl_horovod/devel-horovod.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/mkl_horovod/devel-horovod.Dockerfile index a4a0bee0bc6b5c..0dfa5764537c02 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/mkl_horovod/devel-horovod.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/mkl_horovod/devel-horovod.Dockerfile @@ -62,7 +62,7 @@ RUN apt-get update && apt-get install -y \ python3-pip RUN python3 -m pip --no-cache-dir install --upgrade \ - pip \ + "pip<20.3" \ setuptools # Some TF tools expect a "python" binary diff --git a/tensorflow/tools/dockerfiles/dockerfiles/mkl_horovod/horovod-jupyter.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/mkl_horovod/horovod-jupyter.Dockerfile index 00c21e287f1393..68a1e3a432f31f 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/mkl_horovod/horovod-jupyter.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/mkl_horovod/horovod-jupyter.Dockerfile @@ -33,7 +33,7 @@ RUN apt-get update && apt-get install -y \ python3-pip RUN python3 -m pip --no-cache-dir install --upgrade \ - pip \ + "pip<20.3" \ setuptools # Some TF tools expect a "python" binary @@ -112,9 +112,7 @@ RUN jupyter serverextension enable --py jupyter_http_over_ws RUN mkdir -p /tf/tensorflow-tutorials && chmod -R a+rwx /tf/ RUN mkdir /.local && chmod a+rwx /.local -RUN apt-get install -y --no-install-recommends wget -# some examples require git to fetch dependencies -RUN apt-get install -y --no-install-recommends git +RUN apt-get update && apt-get install -y --no-install-recommends wget git WORKDIR /tf/tensorflow-tutorials RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/classification.ipynb RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/overfit_and_underfit.ipynb diff --git a/tensorflow/tools/dockerfiles/dockerfiles/mkl_horovod/horovod.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/mkl_horovod/horovod.Dockerfile index bef75f1e495432..c76e143b1850d0 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/mkl_horovod/horovod.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/mkl_horovod/horovod.Dockerfile @@ -33,7 +33,7 @@ RUN apt-get update && apt-get install -y \ python3-pip RUN python3 -m pip --no-cache-dir install --upgrade \ - pip \ + "pip<20.3" \ setuptools # Some TF tools expect a "python" binary diff --git a/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/cpu-ppc64le-jupyter.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/cpu-ppc64le-jupyter.Dockerfile index 0a284f4dcb07e9..16163aeb1e538b 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/cpu-ppc64le-jupyter.Dockerfile +++ 
b/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/cpu-ppc64le-jupyter.Dockerfile @@ -33,7 +33,7 @@ RUN apt-get update && apt-get install -y \ python3-pip RUN python3 -m pip --no-cache-dir install --upgrade \ - pip \ + "pip<20.3" \ setuptools # Some TF tools expect a "python" binary @@ -78,9 +78,7 @@ RUN jupyter serverextension enable --py jupyter_http_over_ws RUN mkdir -p /tf/tensorflow-tutorials && chmod -R a+rwx /tf/ RUN mkdir /.local && chmod a+rwx /.local -RUN apt-get install -y --no-install-recommends wget -# some examples require git to fetch dependencies -RUN apt-get install -y --no-install-recommends git +RUN apt-get update && apt-get install -y --no-install-recommends wget git WORKDIR /tf/tensorflow-tutorials RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/classification.ipynb RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/overfit_and_underfit.ipynb diff --git a/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/cpu-ppc64le.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/cpu-ppc64le.Dockerfile index 831e5aead0511d..cbcd2e0a8e00ee 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/cpu-ppc64le.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/cpu-ppc64le.Dockerfile @@ -33,7 +33,7 @@ RUN apt-get update && apt-get install -y \ python3-pip RUN python3 -m pip --no-cache-dir install --upgrade \ - pip \ + "pip<20.3" \ setuptools # Some TF tools expect a "python" binary diff --git a/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/devel-cpu-ppc64le-jupyter.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/devel-cpu-ppc64le-jupyter.Dockerfile index 14ae948c31a1a4..a222f8d51e5dff 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/devel-cpu-ppc64le-jupyter.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/devel-cpu-ppc64le-jupyter.Dockerfile @@ -62,7 +62,7 @@ RUN apt-get update && apt-get install -y \ python3-pip RUN python3 -m pip --no-cache-dir install --upgrade \ - pip \ + "pip<20.3" \ setuptools # Some TF tools expect a "python" binary @@ -112,9 +112,7 @@ RUN jupyter serverextension enable --py jupyter_http_over_ws RUN mkdir -p /tf/tensorflow-tutorials && chmod -R a+rwx /tf/ RUN mkdir /.local && chmod a+rwx /.local -RUN apt-get install -y --no-install-recommends wget -# some examples require git to fetch dependencies -RUN apt-get install -y --no-install-recommends git +RUN apt-get update && apt-get install -y --no-install-recommends wget git WORKDIR /tf/tensorflow-tutorials RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/classification.ipynb RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/overfit_and_underfit.ipynb diff --git a/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/devel-cpu-ppc64le.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/devel-cpu-ppc64le.Dockerfile index c098b863eaa03c..81c67d90983de0 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/devel-cpu-ppc64le.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/devel-cpu-ppc64le.Dockerfile @@ -62,7 +62,7 @@ RUN apt-get update && apt-get install -y \ python3-pip RUN python3 -m pip --no-cache-dir install --upgrade \ - pip \ + "pip<20.3" \ setuptools # Some TF tools expect a "python" binary diff --git a/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/devel-gpu-ppc64le-jupyter.Dockerfile 
b/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/devel-gpu-ppc64le-jupyter.Dockerfile index 1967c20419c03d..5dae92c82e3975 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/devel-gpu-ppc64le-jupyter.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/devel-gpu-ppc64le-jupyter.Dockerfile @@ -104,7 +104,7 @@ RUN apt-get update && apt-get install -y \ python3-pip RUN python3 -m pip --no-cache-dir install --upgrade \ - pip \ + "pip<20.3" \ setuptools # Some TF tools expect a "python" binary @@ -154,9 +154,7 @@ RUN jupyter serverextension enable --py jupyter_http_over_ws RUN mkdir -p /tf/tensorflow-tutorials && chmod -R a+rwx /tf/ RUN mkdir /.local && chmod a+rwx /.local -RUN apt-get install -y --no-install-recommends wget -# some examples require git to fetch dependencies -RUN apt-get install -y --no-install-recommends git +RUN apt-get update && apt-get install -y --no-install-recommends wget git WORKDIR /tf/tensorflow-tutorials RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/classification.ipynb RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/overfit_and_underfit.ipynb diff --git a/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/devel-gpu-ppc64le.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/devel-gpu-ppc64le.Dockerfile index ffd74c52efa1c8..b8325567d4a413 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/devel-gpu-ppc64le.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/devel-gpu-ppc64le.Dockerfile @@ -104,7 +104,7 @@ RUN apt-get update && apt-get install -y \ python3-pip RUN python3 -m pip --no-cache-dir install --upgrade \ - pip \ + "pip<20.3" \ setuptools # Some TF tools expect a "python" binary diff --git a/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/gpu-ppc64le-jupyter.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/gpu-ppc64le-jupyter.Dockerfile index 6ef081013047f5..7de36582fc3554 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/gpu-ppc64le-jupyter.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/gpu-ppc64le-jupyter.Dockerfile @@ -82,7 +82,7 @@ RUN apt-get update && apt-get install -y \ python3-pip RUN python3 -m pip --no-cache-dir install --upgrade \ - pip \ + "pip<20.3" \ setuptools # Some TF tools expect a "python" binary @@ -127,9 +127,7 @@ RUN jupyter serverextension enable --py jupyter_http_over_ws RUN mkdir -p /tf/tensorflow-tutorials && chmod -R a+rwx /tf/ RUN mkdir /.local && chmod a+rwx /.local -RUN apt-get install -y --no-install-recommends wget -# some examples require git to fetch dependencies -RUN apt-get install -y --no-install-recommends git +RUN apt-get update && apt-get install -y --no-install-recommends wget git WORKDIR /tf/tensorflow-tutorials RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/classification.ipynb RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/overfit_and_underfit.ipynb diff --git a/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/gpu-ppc64le.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/gpu-ppc64le.Dockerfile index f10e9f95182224..4e43bdc638a4e8 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/gpu-ppc64le.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/gpu-ppc64le.Dockerfile @@ -82,7 +82,7 @@ RUN apt-get update && apt-get install -y \ python3-pip RUN python3 -m pip --no-cache-dir install --upgrade \ - pip \ + 
"pip<20.3" \ setuptools # Some TF tools expect a "python" binary diff --git a/tensorflow/tools/dockerfiles/partials/jupyter.partial.Dockerfile b/tensorflow/tools/dockerfiles/partials/jupyter.partial.Dockerfile index cd84872a9864d7..49905e7289a7da 100644 --- a/tensorflow/tools/dockerfiles/partials/jupyter.partial.Dockerfile +++ b/tensorflow/tools/dockerfiles/partials/jupyter.partial.Dockerfile @@ -5,9 +5,7 @@ RUN jupyter serverextension enable --py jupyter_http_over_ws RUN mkdir -p /tf/tensorflow-tutorials && chmod -R a+rwx /tf/ RUN mkdir /.local && chmod a+rwx /.local -RUN apt-get install -y --no-install-recommends wget -# some examples require git to fetch dependencies -RUN apt-get install -y --no-install-recommends git +RUN apt-get update && apt-get install -y --no-install-recommends wget git WORKDIR /tf/tensorflow-tutorials RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/classification.ipynb RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/overfit_and_underfit.ipynb diff --git a/tensorflow/tools/dockerfiles/partials/ubuntu/python.partial.Dockerfile b/tensorflow/tools/dockerfiles/partials/ubuntu/python.partial.Dockerfile index a3c07385cc892f..6318a5fb7ed11c 100644 --- a/tensorflow/tools/dockerfiles/partials/ubuntu/python.partial.Dockerfile +++ b/tensorflow/tools/dockerfiles/partials/ubuntu/python.partial.Dockerfile @@ -6,7 +6,7 @@ RUN apt-get update && apt-get install -y \ python3-pip RUN python3 -m pip --no-cache-dir install --upgrade \ - pip \ + "pip<20.3" \ setuptools # Some TF tools expect a "python" binary From 62cdfd542ca8bfba5aa30d06e4b34444146bf40e Mon Sep 17 00:00:00 2001 From: Ruoxin Sang Date: Tue, 18 Aug 2020 13:24:04 -0700 Subject: [PATCH 052/256] Don't do fused average updates inside XLA context as it may create extra tf.cond which causes OOM on TPUs. 
PiperOrigin-RevId: 327294174 Change-Id: I7caa62d77e5c86a6afe7aaca22c7231d8f2304b6 --- .../python/keras/layers/normalization.py | 21 +++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/tensorflow/python/keras/layers/normalization.py b/tensorflow/python/keras/layers/normalization.py index e5723a3ef98353..9ab606d8038785 100644 --- a/tensorflow/python/keras/layers/normalization.py +++ b/tensorflow/python/keras/layers/normalization.py @@ -30,12 +30,12 @@ from tensorflow.python.keras.engine.input_spec import InputSpec from tensorflow.python.keras.utils import tf_utils from tensorflow.python.ops import array_ops +from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import state_ops from tensorflow.python.ops import variables as tf_variables -from tensorflow.python.platform import device_context from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util.tf_export import keras_export @@ -514,7 +514,7 @@ def _fused_batch_norm(self, inputs, training): use_fused_avg_updates = ( ops.executing_eagerly_outside_functions() and isinstance(self.momentum, (float, int)) and - device_context.enclosing_tpu_context() is None) + enclosing_xla_context() is None) if use_fused_avg_updates: exponential_avg_factor = 1.0 - self.momentum else: @@ -930,6 +930,23 @@ def replace_in_base_docstring(replacements): return string +def enclosing_xla_context(): + """Recursively find and return the XLAControlFlowContext.""" + graph = ops.get_default_graph() + while graph is not None: + # pylint: disable=protected-access + context_ = graph._get_control_flow_context() + # pylint: enable=protected-access + while context_ is not None: + if isinstance(context_, control_flow_ops.XLAControlFlowContext): + return context_ + context_ = context_.outer_context + # This may be a FuncGraph due to defuns or v2 control flow. We need to + # find the original graph with the XLAControlFlowContext. + graph = getattr(graph, 'outer_graph', None) + return None + + @keras_export(v1=['keras.layers.BatchNormalization']) # pylint: disable=missing-docstring class BatchNormalization(BatchNormalizationBase): From 36cb57fc7545d7c22bf1d68d6cca69f03de2ed79 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 15 Dec 2020 15:18:59 -0800 Subject: [PATCH 053/256] Disable a few tests. These tests now segfault after some dependency updated below us. 
--- tensorflow/python/feature_column/BUILD | 5 +- tensorflow/python/kernel_tests/BUILD | 1 + .../python/kernel_tests/boosted_trees/BUILD | 3 + tensorflow/tools/pip_package/setup.py.orig | 313 ++++++++++++++++++ 4 files changed, 321 insertions(+), 1 deletion(-) create mode 100644 tensorflow/tools/pip_package/setup.py.orig diff --git a/tensorflow/python/feature_column/BUILD b/tensorflow/python/feature_column/BUILD index b3c6e061c22460..a2e7082219abd1 100644 --- a/tensorflow/python/feature_column/BUILD +++ b/tensorflow/python/feature_column/BUILD @@ -231,7 +231,10 @@ py_test( srcs = ["sequence_feature_column_integration_test.py"], python_version = "PY3", srcs_version = "PY2AND3", - tags = ["no_pip"], + tags = [ + "no_mac", + "no_pip", + ], deps = [ ":feature_column_v2", "//tensorflow/python:client_testlib", diff --git a/tensorflow/python/kernel_tests/BUILD b/tensorflow/python/kernel_tests/BUILD index d37b928ad79211..874482d3339565 100644 --- a/tensorflow/python/kernel_tests/BUILD +++ b/tensorflow/python/kernel_tests/BUILD @@ -801,6 +801,7 @@ tf_py_test( "//tensorflow/python:platform", "//third_party/py/numpy", ], + tags = ["no_mac"], ) tf_py_test( diff --git a/tensorflow/python/kernel_tests/boosted_trees/BUILD b/tensorflow/python/kernel_tests/boosted_trees/BUILD index 5b318324d4cd2e..cbb60de7b9cf13 100644 --- a/tensorflow/python/kernel_tests/boosted_trees/BUILD +++ b/tensorflow/python/kernel_tests/boosted_trees/BUILD @@ -33,6 +33,7 @@ tf_py_test( "//tensorflow/python:training", "//tensorflow/python:variables", ], + tags = ["no_mac"], ) tf_py_test( @@ -47,6 +48,7 @@ tf_py_test( "//tensorflow/python:framework_test_lib", "//tensorflow/python:resources", ], + tags = ["no_mac"], ) tf_py_test( @@ -77,6 +79,7 @@ tf_py_test( "//tensorflow/python:framework_test_lib", "//tensorflow/python:resources", ], + tags = ["no_mac"], ) tf_py_test( diff --git a/tensorflow/tools/pip_package/setup.py.orig b/tensorflow/tools/pip_package/setup.py.orig new file mode 100644 index 00000000000000..6f158a8c84db91 --- /dev/null +++ b/tensorflow/tools/pip_package/setup.py.orig @@ -0,0 +1,313 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""TensorFlow is an open source machine learning framework for everyone. + +TensorFlow is an open source software library for high performance numerical +computation. Its flexible architecture allows easy deployment of computation +across a variety of platforms (CPUs, GPUs, TPUs), and from desktops to clusters +of servers to mobile and edge devices. + +Originally developed by researchers and engineers from the Google Brain team +within Google's AI organization, it comes with strong support for machine +learning and deep learning and the flexible numerical computation core is used +across many other scientific domains. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import fnmatch +import os +import re +import sys + +from setuptools import Command +from setuptools import find_packages +from setuptools import setup +from setuptools.command.install import install as InstallCommandBase +from setuptools.dist import Distribution + +DOCLINES = __doc__.split('\n') + +# This version string is semver compatible, but incompatible with pip. +# For pip, we will remove all '-' characters from this string, and use the +# result for pip. +# Also update tensorflow/tensorflow.bzl and +# tensorflow/core/public/version.h +_VERSION = '2.1.2' + +REQUIRED_PACKAGES = [ + 'absl-py >= 0.7.0', + 'astor >= 0.6.0', + 'backports.weakref >= 1.0rc1;python_version<"3.4"', + 'enum34 >= 1.1.6;python_version<"3.4"', + 'gast == 0.2.2', + 'google_pasta >= 0.1.6', + 'keras_applications >= 1.0.8', + 'keras_preprocessing == 1.1.0', + 'numpy >= 1.16.0, < 1.19.0', + 'opt_einsum >= 2.3.2', + 'protobuf >= 3.8.0', + 'tensorboard >= 2.1.0, < 2.2.0', + 'tensorflow_estimator >= 2.1.0rc0, < 2.2.0', + 'termcolor >= 1.1.0', + 'wrapt >= 1.11.1', + # python3 requires wheel 0.26 + 'wheel >= 0.26;python_version>="3"', + 'wheel;python_version<"3"', +<<<<<<< HEAD + # mock comes with unittest.mock for python3, need to install for python2 + 'mock >= 2.0.0;python_version<"3"', + # functools comes with python3, need to install the backport for python2 + 'functools32 >= 3.2.3;python_version<"3"', + 'six >= 1.12.0', +======= + 'wrapt >= 1.11.1', + # Pin h5py to at most 2.10.0 as newer versions break old keras tests + 'h5py <= 2.10.0', +>>>>>>> 03d7ca7871b (Add upper bound to `h5py`.) +] + +if sys.byteorder == 'little': + # grpcio does not build correctly on big-endian machines due to lack of + # BoringSSL support. + # See https://github.com/tensorflow/tensorflow/issues/17882. + REQUIRED_PACKAGES.append('grpcio >= 1.8.6') + +project_name = 'tensorflow' +if '--project_name' in sys.argv: + project_name_idx = sys.argv.index('--project_name') + project_name = sys.argv[project_name_idx + 1] + sys.argv.remove('--project_name') + sys.argv.pop(project_name_idx) + +# tf-nightly should depend on tb-nightly +if 'tf_nightly' in project_name: + for i, pkg in enumerate(REQUIRED_PACKAGES): + if 'tensorboard' in pkg: + REQUIRED_PACKAGES[i] = 'tb-nightly >= 2.1.0a0, < 2.2.0a0' + elif 'tensorflow_estimator' in pkg and '2.0' in project_name: + REQUIRED_PACKAGES[i] = 'tensorflow-estimator-2.0-preview' + elif 'tensorflow_estimator' in pkg: + REQUIRED_PACKAGES[i] = 'tf-estimator-nightly' + +# pylint: disable=line-too-long +CONSOLE_SCRIPTS = [ + 'toco_from_protos = tensorflow.lite.toco.python.toco_from_protos:main', + 'tflite_convert = tensorflow.lite.python.tflite_convert:main', + 'toco = tensorflow.lite.python.tflite_convert:main', + 'saved_model_cli = tensorflow.python.tools.saved_model_cli:main', + # We need to keep the TensorBoard command, even though the console script + # is now declared by the tensorboard pip package. If we remove the + # TensorBoard command, pip will inappropriately remove it during install, + # even though the command is not removed, just moved to a different wheel. + 'tensorboard = tensorboard.main:run_main', + 'tf_upgrade_v2 = tensorflow.tools.compatibility.tf_upgrade_v2_main:main', + 'estimator_ckpt_converter = tensorflow_estimator.python.estimator.tools.checkpoint_converter:main', +] +# pylint: enable=line-too-long + +# Only keep freeze_graph console script in 1.X. 
+if _VERSION.startswith('1.') and '_2.0' not in project_name: + CONSOLE_SCRIPTS.append( + 'freeze_graph = tensorflow.python.tools.freeze_graph:run_main') + +# remove the tensorboard console script if building tf_nightly +if 'tf_nightly' in project_name: + CONSOLE_SCRIPTS.remove('tensorboard = tensorboard.main:run_main') + +TEST_PACKAGES = [ + 'scipy >= 0.15.1', +] + + +class BinaryDistribution(Distribution): + + def has_ext_modules(self): + return True + + +class InstallCommand(InstallCommandBase): + """Override the dir where the headers go.""" + + def finalize_options(self): + ret = InstallCommandBase.finalize_options(self) + self.install_headers = os.path.join(self.install_purelib, 'tensorflow_core', + 'include') + self.install_lib = self.install_platlib + return ret + + +class InstallHeaders(Command): + """Override how headers are copied. + + The install_headers that comes with setuptools copies all files to + the same directory. But we need the files to be in a specific directory + hierarchy for -I to work correctly. + """ + description = 'install C/C++ header files' + + user_options = [('install-dir=', 'd', + 'directory to install header files to'), + ('force', 'f', + 'force installation (overwrite existing files)'), + ] + + boolean_options = ['force'] + + def initialize_options(self): + self.install_dir = None + self.force = 0 + self.outfiles = [] + + def finalize_options(self): + self.set_undefined_options('install', + ('install_headers', 'install_dir'), + ('force', 'force')) + + def mkdir_and_copy_file(self, header): + install_dir = os.path.join(self.install_dir, os.path.dirname(header)) + # Get rid of some extra intervening directories so we can have fewer + # directories for -I + install_dir = re.sub('/google/protobuf_archive/src', '', install_dir) + install_dir = re.sub('/include/tensorflow_core/', '/include/tensorflow/', + install_dir) + + # Copy external code headers into tensorflow_core/include. + # A symlink would do, but the wheel file that gets created ignores + # symlink within the directory hierarchy. + # NOTE(keveman): Figure out how to customize bdist_wheel package so + # we can do the symlink. 
+ external_header_locations = [ + 'tensorflow_core/include/external/eigen_archive/', + 'tensorflow_core/include/external/com_google_absl/', + ] + for location in external_header_locations: + if location in install_dir: + extra_dir = install_dir.replace(location, '') + if not os.path.exists(extra_dir): + self.mkpath(extra_dir) + self.copy_file(header, extra_dir) + + if not os.path.exists(install_dir): + self.mkpath(install_dir) + return self.copy_file(header, install_dir) + + def run(self): + hdrs = self.distribution.headers + if not hdrs: + return + + self.mkpath(self.install_dir) + for header in hdrs: + (out, _) = self.mkdir_and_copy_file(header) + self.outfiles.append(out) + + def get_inputs(self): + return self.distribution.headers or [] + + def get_outputs(self): + return self.outfiles + + +def find_files(pattern, root): + """Return all the files matching pattern below root dir.""" + for dirpath, _, files in os.walk(root): + for filename in fnmatch.filter(files, pattern): + yield os.path.join(dirpath, filename) + + +so_lib_paths = [ + i for i in os.listdir('.') + if os.path.isdir(i) and fnmatch.fnmatch(i, '_solib_*') +] + +matches = [] +for path in so_lib_paths: + matches.extend( + ['../' + x for x in find_files('*', path) if '.py' not in x] + ) + +if os.name == 'nt': + EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.pyd' +else: + EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.so' + +headers = ( + list(find_files('*.h', 'tensorflow_core/core')) + + list(find_files('*.h', 'tensorflow_core/stream_executor')) + + list(find_files('*.h', 'google/com_google_protobuf/src')) + + list(find_files('*.inc', 'google/com_google_protobuf/src')) + + list(find_files('*', 'third_party/eigen3')) + list( + find_files('*.h', 'tensorflow_core/include/external/com_google_absl')) + + list( + find_files('*.inc', 'tensorflow_core/include/external/com_google_absl')) + + list(find_files('*', 'tensorflow_core/include/external/eigen_archive'))) + +setup( + name=project_name, + version=_VERSION.replace('-', ''), + description=DOCLINES[0], + long_description='\n'.join(DOCLINES[2:]), + url='https://www.tensorflow.org/', + download_url='https://github.com/tensorflow/tensorflow/tags', + author='Google Inc.', + author_email='packages@tensorflow.org', + # Contained modules and scripts. + packages=find_packages(), + entry_points={ + 'console_scripts': CONSOLE_SCRIPTS, + }, + headers=headers, + install_requires=REQUIRED_PACKAGES, + tests_require=REQUIRED_PACKAGES + TEST_PACKAGES, + # Add in any packaged data. + include_package_data=True, + package_data={ + 'tensorflow': [ + EXTENSION_NAME, + ] + matches, + }, + zip_safe=False, + distclass=BinaryDistribution, + cmdclass={ + 'install_headers': InstallHeaders, + 'install': InstallCommand, + }, + # PyPI package information. 
+ classifiers=[ + 'Development Status :: 5 - Production/Stable', + 'Intended Audience :: Developers', + 'Intended Audience :: Education', + 'Intended Audience :: Science/Research', + 'License :: OSI Approved :: Apache Software License', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Topic :: Scientific/Engineering', + 'Topic :: Scientific/Engineering :: Mathematics', + 'Topic :: Scientific/Engineering :: Artificial Intelligence', + 'Topic :: Software Development', + 'Topic :: Software Development :: Libraries', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], + license='Apache 2.0', + keywords='tensorflow tensor machine learning', +) From 6265eea3c68159bafa53d5a96403ffb9e7fd2575 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 15 Dec 2020 17:09:16 -0800 Subject: [PATCH 054/256] Fix sanity build --- tensorflow/python/kernel_tests/BUILD | 2 +- tensorflow/python/kernel_tests/boosted_trees/BUILD | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tensorflow/python/kernel_tests/BUILD b/tensorflow/python/kernel_tests/BUILD index 874482d3339565..efad3df7c1a3de 100644 --- a/tensorflow/python/kernel_tests/BUILD +++ b/tensorflow/python/kernel_tests/BUILD @@ -790,6 +790,7 @@ tf_py_test( name = "parsing_ops_test", size = "medium", srcs = ["parsing_ops_test.py"], + tags = ["no_mac"], deps = [ "//tensorflow/core:protos_all_py", "//tensorflow/python:array_ops", @@ -801,7 +802,6 @@ tf_py_test( "//tensorflow/python:platform", "//third_party/py/numpy", ], - tags = ["no_mac"], ) tf_py_test( diff --git a/tensorflow/python/kernel_tests/boosted_trees/BUILD b/tensorflow/python/kernel_tests/boosted_trees/BUILD index cbb60de7b9cf13..68b27849773bf7 100644 --- a/tensorflow/python/kernel_tests/boosted_trees/BUILD +++ b/tensorflow/python/kernel_tests/boosted_trees/BUILD @@ -24,6 +24,7 @@ tf_py_test( name = "resource_ops_test", size = "small", srcs = ["resource_ops_test.py"], + tags = ["no_mac"], deps = [ "//tensorflow/core/kernels/boosted_trees:boosted_trees_proto_py", "//tensorflow/python:boosted_trees_ops", @@ -33,13 +34,13 @@ tf_py_test( "//tensorflow/python:training", "//tensorflow/python:variables", ], - tags = ["no_mac"], ) tf_py_test( name = "prediction_ops_test", size = "small", srcs = ["prediction_ops_test.py"], + tags = ["no_mac"], deps = [ "//tensorflow/core/kernels/boosted_trees:boosted_trees_proto_py", "//tensorflow/python:array_ops", @@ -48,7 +49,6 @@ tf_py_test( "//tensorflow/python:framework_test_lib", "//tensorflow/python:resources", ], - tags = ["no_mac"], ) tf_py_test( @@ -71,6 +71,7 @@ tf_py_test( name = "training_ops_test", size = "small", srcs = ["training_ops_test.py"], + tags = ["no_mac"], deps = [ "//tensorflow/core/kernels/boosted_trees:boosted_trees_proto_py", "//tensorflow/python:array_ops", @@ -79,7 +80,6 @@ tf_py_test( "//tensorflow/python:framework_test_lib", "//tensorflow/python:resources", ], - tags = ["no_mac"], ) tf_py_test( From 001d766a4b068dfc1a2396e80e32f146b14e9daa Mon Sep 17 00:00:00 2001 From: TensorFlow Release Automation Date: Wed, 16 Dec 2020 16:35:03 -0800 Subject: [PATCH 055/256] Insert release notes place-fill --- RELEASE.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/RELEASE.md b/RELEASE.md index 5dc9456da9c8b1..40b4578d646d5a 100644 --- a/RELEASE.md +++ 
b/RELEASE.md @@ -1,3 +1,7 @@ +# Release 2.3.2 + + + # Release 2.3.1 ## Bug Fixes and Other Changes From e9cbe6a5f4294786ba4bae9f157310c2b5ab6123 Mon Sep 17 00:00:00 2001 From: TensorFlow Release Automation Date: Wed, 16 Dec 2020 16:48:59 -0800 Subject: [PATCH 056/256] Update version numbers to 2.3.2 --- tensorflow/core/public/version.h | 2 +- tensorflow/tensorflow.bzl | 2 +- tensorflow/tools/pip_package/setup.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tensorflow/core/public/version.h b/tensorflow/core/public/version.h index 2e3bf62f6089a9..7c34667b2ef9a4 100644 --- a/tensorflow/core/public/version.h +++ b/tensorflow/core/public/version.h @@ -22,7 +22,7 @@ limitations under the License. // tensorflow/tools/pip_package/setup.py #define TF_MAJOR_VERSION 2 #define TF_MINOR_VERSION 3 -#define TF_PATCH_VERSION 1 +#define TF_PATCH_VERSION 2 // TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1", // "-beta", "-rc", "-rc.1") diff --git a/tensorflow/tensorflow.bzl b/tensorflow/tensorflow.bzl index 3ba217c49309ce..ac839f3cbe0e3b 100644 --- a/tensorflow/tensorflow.bzl +++ b/tensorflow/tensorflow.bzl @@ -59,7 +59,7 @@ load( # not contain rc or alpha, only numbers. # Also update tensorflow/core/public/version.h # and tensorflow/tools/pip_package/setup.py -VERSION = "2.3.1" +VERSION = "2.3.2" VERSION_MAJOR = VERSION.split(".")[0] # Sanitize a dependency so that it works correctly from code that includes diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py index 594e74f40c0340..b7cde30d2e1f0d 100644 --- a/tensorflow/tools/pip_package/setup.py +++ b/tensorflow/tools/pip_package/setup.py @@ -49,7 +49,7 @@ # result for pip. # Also update tensorflow/tensorflow.bzl and # tensorflow/core/public/version.h -_VERSION = '2.3.1' +_VERSION = '2.3.2' REQUIRED_PACKAGES = [ 'absl-py >= 0.7.0', From 603164448be2839fbc955aa112f98ac763de6d4d Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Mon, 21 Dec 2020 08:14:56 -0800 Subject: [PATCH 057/256] Update SQLite to the lastest sqlite-amalgamation-3340000 This PR updates SQLite to the latest sqlite-amalgamation-3340000 Signed-off-by: Yong Tang --- tensorflow/workspace.bzl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl index 1262aff4182635..61ac9f791410de 100755 --- a/tensorflow/workspace.bzl +++ b/tensorflow/workspace.bzl @@ -409,12 +409,12 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""): tf_http_archive( name = "org_sqlite", build_file = clean_dep("//third_party:sqlite.BUILD"), - sha256 = "b34f4c0c0eefad9a7e515c030c18702e477f4ef7d8ade6142bdab8011b487ac6", - strip_prefix = "sqlite-amalgamation-3330000", + sha256 = "8ff0b79fd9118af7a760f1f6a98cac3e69daed325c8f9f0a581ecb62f797fd64", + strip_prefix = "sqlite-amalgamation-3340000", system_build_file = clean_dep("//third_party/systemlibs:sqlite.BUILD"), urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/www.sqlite.org/2020/sqlite-amalgamation-3330000.zip", - "https://www.sqlite.org/2020/sqlite-amalgamation-3330000.zip", + "https://storage.googleapis.com/mirror.tensorflow.org/www.sqlite.org/2020/sqlite-amalgamation-3340000.zip", + "https://www.sqlite.org/2020/sqlite-amalgamation-3340000.zip", ], ) From eedec23bd140363e84a4b0c82298b9fda4cba70d Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 4 Jan 2021 12:17:48 -0800 Subject: [PATCH 058/256] Update RELEASE.md --- RELEASE.md | 25 ++++++++++++++++++++++++- 1 file changed, 24 
insertions(+), 1 deletion(-) diff --git a/RELEASE.md b/RELEASE.md index 40b4578d646d5a..0e47a5b6601a96 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,6 +1,29 @@ # Release 2.3.2 - +## Bug Fixes and Other Changes +* Fixes an access to unitialized memory in Eigen code + ([CVE-2020-26266](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26266)) +* Fixes a security vulnerability caused by lack of validation in + `tf.raw_ops.DataFormatVecPermute` and `tf.raw_ops.DataFormatDimMap` + ([CVE-2020-26267](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26267)) +* Fixes a vulnerability caused by attempting to write to immutable memory region in + `tf.raw_ops.ImmutableConst` + ([CVE-2020-26268](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26268) +* Fixes a `CHECK`-fail in LSTM with zero-length input + ([CVE-2020-26270](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26270)) +* Fixes a security vulnerability caused by accessing heap data outside of bounds + when loading a specially crafted `SavedModel` + ([CVE-2020-26271](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26271)) +* Solves an OOM issue on TPUs when XLA contexts use fused average updates +* Updates `libjpeg-turbo` to `2.0.5` to handle + [CVE-2020-13790](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-13790). +* Updates `junit` to `4.13.1` to handle + [CVE-2020-15250](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15250). +* Updates `PCRE` to `8.44` to handle + [CVE-2019-20838](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-20838) + and + [CVE-2020-14155](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-14155). +* Updates `sqlite3` to `3.44.0` to keep in sync with master branch. # Release 2.3.1 From 226601de13f92a387ce6cb6bb722c399098e10a3 Mon Sep 17 00:00:00 2001 From: Austin Anderson Date: Tue, 18 May 2021 12:10:50 -0700 Subject: [PATCH 059/256] Add .zenodo.json for clean automated DOI numbers. See the link on the TensorFlow README for the DOI page. PiperOrigin-RevId: 374474720 Change-Id: I739c9fc95c03648c50c6a0fc1931308507cdf12c --- .zenodo.json | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 .zenodo.json diff --git a/.zenodo.json b/.zenodo.json new file mode 100644 index 00000000000000..7161180c51ae3e --- /dev/null +++ b/.zenodo.json @@ -0,0 +1,13 @@ +{ + "description": "TensorFlow is an end-to-end open source platform for machine learning. It has a comprehensive, flexible ecosystem of tools, libraries, and community resources that lets researchers push the state-of-the-art in ML and developers easily build and deploy ML-powered applications.", + "license": "Apache-2.0", + "title": "TensorFlow", + "upload_type": "software", + "creators": [ + { + "name": "TensorFlow Developers" + } + ], + "access_right": "open", + "notes": "Specific TensorFlow versions can be found in the \"Versions\" list on the right side of this page.
See the full list of authors on GitHub." +} From 1d205a0cd3438f206bd503dbf91e39c0148c5de3 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Tue, 13 Apr 2021 14:18:51 -0700 Subject: [PATCH 060/256] Fix an invalid address vulnerability in `tf.raw_ops.RaggedBincount`. PiperOrigin-RevId: 368293153 Change-Id: I4b4e493d3fd05e7dc55a55de3a041a80a4f275c3 --- tensorflow/core/kernels/bincount_op.cc | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tensorflow/core/kernels/bincount_op.cc b/tensorflow/core/kernels/bincount_op.cc index a84b25f2541013..c75f67bb56009a 100644 --- a/tensorflow/core/kernels/bincount_op.cc +++ b/tensorflow/core/kernels/bincount_op.cc @@ -414,6 +414,15 @@ class RaggedBincountOp : public OpKernel { int num_values = values.size(); int batch_idx = 0; + OP_REQUIRES(ctx, splits(0) == 0, + errors::InvalidArgument("Splits must start with 0, not with ", + splits(0))); + + OP_REQUIRES(ctx, splits(num_rows) == num_values, + errors::InvalidArgument( + "Splits must end with the number of values, got ", + splits(num_rows), " instead of ", num_values)); + Tensor* out_t; OP_REQUIRES_OK( ctx, ctx->allocate_output(0, TensorShape({num_rows, size}), &out_t)); From 616a3c1ba0a80ba17f6ce832b3125720b35281b9 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Mon, 8 Feb 2021 12:29:30 -0800 Subject: [PATCH 061/256] Allowlist certain data types to avoid a seg fault. PiperOrigin-RevId: 356326671 Change-Id: I23b65b52e93798cb5a6744632d31b0f88c6b6b31 --- tensorflow/core/kernels/immutable_constant_op.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/core/kernels/immutable_constant_op.cc b/tensorflow/core/kernels/immutable_constant_op.cc index 1cfbdb82778913..19aa865c1fbe4d 100644 --- a/tensorflow/core/kernels/immutable_constant_op.cc +++ b/tensorflow/core/kernels/immutable_constant_op.cc @@ -17,6 +17,8 @@ limitations under the License. #include +#include "tensorflow/core/framework/types.pb.h" + namespace tensorflow { namespace { @@ -86,6 +88,9 @@ ImmutableConstantOp::ImmutableConstantOp(OpKernelConstruction* context) OP_REQUIRES_OK(context, context->GetAttr(kMemoryRegionNameAttr, ®ion_name_)); OP_REQUIRES_OK(context, context->GetAttr(kDTypeAttr, &dtype_)); + OP_REQUIRES(context, dtype_ != DT_RESOURCE && dtype_ != DT_VARIANT, + errors::InvalidArgument( + "Resource and variant dtypes are invalid for this op.")); OP_REQUIRES_OK(context, context->GetAttr(kShapeAttr, &shape_)); } From 38e984039c904fa31b0ac234192a72d4b3209a4b Mon Sep 17 00:00:00 2001 From: Geeta Chavan Date: Tue, 18 May 2021 18:09:13 -0700 Subject: [PATCH 062/256] CherryPick:2.3:PR #46974: Fix crash of tf.strings.substr when pos and len have different shapes --- tensorflow/core/kernels/substr_op.cc | 5 +++++ tensorflow/python/kernel_tests/substr_op_test.py | 9 +++++++++ 2 files changed, 14 insertions(+) diff --git a/tensorflow/core/kernels/substr_op.cc b/tensorflow/core/kernels/substr_op.cc index e382381e122324..ab83efda2a2e17 100644 --- a/tensorflow/core/kernels/substr_op.cc +++ b/tensorflow/core/kernels/substr_op.cc @@ -51,6 +51,11 @@ class SubstrOp : public OpKernel { const Tensor& len_tensor = context->input(2); const TensorShape& input_shape = input_tensor.shape(); const TensorShape& pos_shape = pos_tensor.shape(); + const TensorShape& len_shape = len_tensor.shape(); + OP_REQUIRES(context, (pos_shape == len_shape), + errors::InvalidArgument( + "pos and len should have the same shape, got: ", + pos_shape.DebugString(), " vs. 
", len_shape.DebugString())); bool is_scalar = TensorShapeUtils::IsScalar(pos_shape); diff --git a/tensorflow/python/kernel_tests/substr_op_test.py b/tensorflow/python/kernel_tests/substr_op_test.py index 9302152e82bfa9..eae4e10f378567 100644 --- a/tensorflow/python/kernel_tests/substr_op_test.py +++ b/tensorflow/python/kernel_tests/substr_op_test.py @@ -492,6 +492,15 @@ def testInvalidUnit(self): with self.assertRaises(ValueError): string_ops.substr(b"test", 3, 1, unit="UTF8") + def testInvalidPos(self): + # Test case for GitHub issue 46900. + with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)): + x = string_ops.substr(b"abc", len=1, pos=[1, -1]) + self.evaluate(x) + + with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)): + x = string_ops.substr(b"abc", len=1, pos=[1, 2]) + self.evaluate(x) if __name__ == "__main__": test.main() From 16fe89a3a12b70e6a798afdd4dda01d94d4deafc Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Tue, 2 Mar 2021 17:02:03 -0800 Subject: [PATCH 063/256] Fix segfaults in `tf.raw_ops.SparseCountSparseOutput`. PiperOrigin-RevId: 360547563 Change-Id: I781c7af4b54a63d867c6e18d43a44d64a5c4e7c9 --- tensorflow/core/kernels/count_ops.cc | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tensorflow/core/kernels/count_ops.cc b/tensorflow/core/kernels/count_ops.cc index 087deef0812f00..d6ab68c2c70bd3 100644 --- a/tensorflow/core/kernels/count_ops.cc +++ b/tensorflow/core/kernels/count_ops.cc @@ -192,6 +192,10 @@ class SparseCount : public OpKernel { "; values shape: ", values.shape().DebugString())); } + OP_REQUIRES(context, shape.NumElements() != 0, + errors::InvalidArgument( + "The shape argument requires at least one element.")); + bool is_1d = shape.NumElements() == 1; int num_batches = is_1d ? 1 : shape.flat()(0); int num_values = values.NumElements(); @@ -212,6 +216,14 @@ class SparseCount : public OpKernel { for (int idx = 0; idx < num_values; ++idx) { int batch = is_1d ? 0 : indices_values(idx, 0); + if (batch >= num_batches) { + OP_REQUIRES(context, batch < num_batches, + errors::InvalidArgument( + "Indices value along the first dimension must be ", + "lower than the first index of the shape.", "Got ", + batch, " as batch and ", num_batches, + " as the first dimension of the shape.")); + } const auto& value = values_values(idx); if (value >= 0 && (maxlength_ <= 0 || value < maxlength_)) { if (binary_output_) { From a26c43cebfeb622df23e5e55f5731ffc5dcdf62c Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Tue, 13 Apr 2021 14:25:01 -0700 Subject: [PATCH 064/256] Fix `tf.raw_ops.ResourceCountUpTo` null pointer dereference. PiperOrigin-RevId: 368294347 Change-Id: I2c16fbfc9b4966c402c3d8e311f0d665a9c852d8 --- tensorflow/python/lib/core/ndarray_tensor.cc | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tensorflow/python/lib/core/ndarray_tensor.cc b/tensorflow/python/lib/core/ndarray_tensor.cc index 2afd2888e8fd3e..5f1bfc8c7485bb 100644 --- a/tensorflow/python/lib/core/ndarray_tensor.cc +++ b/tensorflow/python/lib/core/ndarray_tensor.cc @@ -16,6 +16,7 @@ limitations under the License. #include "tensorflow/python/lib/core/ndarray_tensor.h" #include +#include #include "tensorflow/c/eager/tfe_context_internal.h" #include "tensorflow/c/tf_tensor_internal.h" @@ -74,6 +75,13 @@ Status PyArrayDescr_to_TF_DataType(PyArray_Descr* descr, PyObject* key; PyObject* value; Py_ssize_t pos = 0; + + // Return an error if the fields attribute is null. + // Occurs with an improper conversion attempt to resource. 
+ if (descr->fields == nullptr) { + return errors::Internal("Unexpected numpy data type"); + } + if (PyDict_Next(descr->fields, &pos, &key, &value)) { // In Python 3, the keys of numpy custom struct types are unicode, unlike // Python 2, where the keys are bytes. From 22bb02d16a91cbbe347679c1e0f7019e8b12a73c Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sat, 17 Apr 2021 20:55:53 -0700 Subject: [PATCH 065/256] Validate `MatrixDiagV{2,3}` arguments to prevent breakage. PiperOrigin-RevId: 369056033 Change-Id: Ic2018c297d3dd6f252dc1dd3667f1ed5cb1eaa42 --- tensorflow/core/kernels/matrix_diag_op.cc | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/tensorflow/core/kernels/matrix_diag_op.cc b/tensorflow/core/kernels/matrix_diag_op.cc index 05d7e4e6f86752..016356dfa5336f 100644 --- a/tensorflow/core/kernels/matrix_diag_op.cc +++ b/tensorflow/core/kernels/matrix_diag_op.cc @@ -192,9 +192,22 @@ class MatrixDiagOp : public OpKernel { upper_diag_index = diag_index.flat()(1); } } - num_rows = context->input(2).flat()(0); - num_cols = context->input(3).flat()(0); - padding_value = context->input(4).flat()(0); + + auto& num_rows_tensor = context->input(2); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(num_rows_tensor.shape()), + errors::InvalidArgument("num_rows must be a scalar")); + num_rows = num_rows_tensor.flat()(0); + + auto& num_cols_tensor = context->input(3); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(num_cols_tensor.shape()), + errors::InvalidArgument("num_cols must be a scalar")); + num_cols = num_cols_tensor.flat()(0); + + auto& padding_value_tensor = context->input(4); + OP_REQUIRES(context, + TensorShapeUtils::IsScalar(padding_value_tensor.shape()), + errors::InvalidArgument("padding_value must be a scalar")); + padding_value = padding_value_tensor.flat()(0); } // Size validations. From 300d27b1a2a69e1b38b049a67f791d087972e9c3 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 19 Apr 2021 09:57:01 -0700 Subject: [PATCH 066/256] Handle a special grappler case resulting in crash. It might happen that a malformed input could be used to trick Grappler into trying to optimize a node with no inputs. This, in turn, would produce a null pointer dereference and a segfault. 
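For illustration, the guard the optimizer stages adopt below is simply to confirm a node carries the inputs it is expected to have before dereferencing them. A minimal sketch of that pattern, using a stand-in `FakeNode` struct rather than the real `NodeDef` proto API:

    #include <string>
    #include <vector>

    // Stand-in for a graph node; the real optimizers inspect NodeDef protos.
    struct FakeNode {
      std::vector<std::string> inputs;
    };

    // True only when it is safe to read the node's first `required` inputs.
    bool HasRequiredInputs(const FakeNode* node, int required) {
      if (node == nullptr) return false;
      return static_cast<int>(node->inputs.size()) >= required;
    }

    // Callers skip malformed nodes instead of crashing:
    //   if (!HasRequiredInputs(&node, 1)) return;  // leave the node untouched

In the actual change, `ReorderCastLikeAndValuePreserving` reports a `FailedPrecondition` for such nodes, while `ReplaceMulWithSquare` and `SafeToRemoveIdentity` simply treat them as unsupported.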
PiperOrigin-RevId: 369242852 Change-Id: I2e5cbe7aec243d34a6d60220ac8ac9b16f136f6b --- .../core/grappler/optimizers/arithmetic_optimizer.cc | 11 +++++++++++ .../core/grappler/optimizers/dependency_optimizer.cc | 6 ++++++ 2 files changed, 17 insertions(+) diff --git a/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc b/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc index 520346b0166a33..2aeeed75ef0b22 100644 --- a/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc +++ b/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc @@ -2000,6 +2000,12 @@ class ReorderCastLikeAndValuePreserving : public ArithmeticOptimizerStage { Status TrySimplify(NodeDef* consumer, string* simplified_node_name) override { NodeDef* producer; + + if (consumer->input_size() < 1) { + return errors::FailedPrecondition("Node ", simplified_node_name, + " lacks inputs"); + } + TF_RETURN_IF_ERROR(GetInputNode(consumer->input(0), &producer)); const bool producer_is_cast = IsCastLike(*producer); const bool can_optimize = @@ -2402,6 +2408,11 @@ class ReplaceMulWithSquare : public ArithmeticOptimizerStage { ~ReplaceMulWithSquare() override = default; bool IsSupported(const NodeDef* node) const override { + if (!node || node->input_size() < 2) { + // Invalid node + return false; + } + return IsAnyMul(*node) && node->input(0) == node->input(1); } diff --git a/tensorflow/core/grappler/optimizers/dependency_optimizer.cc b/tensorflow/core/grappler/optimizers/dependency_optimizer.cc index 58ef14e3d3d60f..1febfc01e2d741 100644 --- a/tensorflow/core/grappler/optimizers/dependency_optimizer.cc +++ b/tensorflow/core/grappler/optimizers/dependency_optimizer.cc @@ -68,6 +68,12 @@ bool DependencyOptimizer::SafeToRemoveIdentity(const NodeDef& node) const { // The output values of this node may be needed. return false; } + + if (node.input_size() < 1) { + // Node lacks input, is invalid + return false; + } + const NodeDef* input = node_map_->GetNode(NodeName(node.input(0))); CHECK(input != nullptr) << "node = " << node.name() << " input = " << node.input(0); From 6df14c6f0d6dedfb0de0156e082b5d35ce7646f6 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Tue, 13 Apr 2021 14:24:00 -0700 Subject: [PATCH 067/256] Fix and null pointer dereferences. 
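The two kernels patched below are `GetSessionTensorOp` and `DeleteSessionTensorOp`; both used to call straight through `ctx->session_state()`, so the pointer is now checked with `OP_REQUIRES` before `GetTensor`/`DeleteTensor` is invoked on it. A minimal sketch of the same check-before-use pattern, with `LookupState` standing in for the real kernel-context accessor:

    #include <cstdio>

    struct SessionState { /* owns the named session tensors */ };

    // Stand-in for ctx->session_state(); it may legitimately be null.
    SessionState* LookupState(bool available) {
      static SessionState state;
      return available ? &state : nullptr;
    }

    bool DeleteNamedTensor(bool available, const char* name) {
      SessionState* state = LookupState(available);
      if (state == nullptr) {
        std::fprintf(stderr, "no session state; cannot delete %s\n", name);
        return false;  // fail the op cleanly instead of dereferencing null
      }
      // ... state->DeleteTensor(name) would run here in the real kernel ...
      return true;
    }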
--- tensorflow/core/kernels/session_ops.cc | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/tensorflow/core/kernels/session_ops.cc b/tensorflow/core/kernels/session_ops.cc index e7e73549bc32f3..2403fad55a43ed 100644 --- a/tensorflow/core/kernels/session_ops.cc +++ b/tensorflow/core/kernels/session_ops.cc @@ -118,7 +118,11 @@ class GetSessionTensorOp : public OpKernel { const Tensor& handle = ctx->input(0); const string& name = handle.scalar()(); Tensor val; - OP_REQUIRES_OK(ctx, ctx->session_state()->GetTensor(name, &val)); + auto session_state = ctx->session_state(); + OP_REQUIRES(ctx, session_state != nullptr, + errors::FailedPrecondition( + "GetSessionTensor called on null session state")); + OP_REQUIRES_OK(ctx, session_state->GetTensor(name, &val)); ctx->set_output(0, val); } @@ -160,7 +164,11 @@ class DeleteSessionTensorOp : public OpKernel { void Compute(OpKernelContext* ctx) override { const Tensor& handle = ctx->input(0); const string& name = handle.scalar()(); - OP_REQUIRES_OK(ctx, ctx->session_state()->DeleteTensor(name)); + auto session_state = ctx->session_state(); + OP_REQUIRES(ctx, session_state != nullptr, + errors::FailedPrecondition( + "DeleteSessionTensor called on null session state")); + OP_REQUIRES_OK(ctx, session_state->DeleteTensor(name)); } TF_DISALLOW_COPY_AND_ASSIGN(DeleteSessionTensorOp); From 1bcb7f181ba7f26e093f16c383d1812032c78b65 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Thu, 15 Apr 2021 13:28:49 -0700 Subject: [PATCH 068/256] Fix `tf.raw_ops.RaggedTensorToTensor` failing CHECK. PiperOrigin-RevId: 368706628 Change-Id: I5c9ea4833f38835ee183ca50d63251dc89c9f3bc --- .../kernels/ragged_tensor_to_tensor_op.cc | 20 ++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc b/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc index d729c43f25a4b8..d409d1b337f280 100644 --- a/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc +++ b/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc @@ -208,7 +208,7 @@ class RaggedTensorToTensorBaseOp : public OpKernel { } void CalculateOutputIndexRowSplit( - const RowPartitionTensor& row_split, + OpKernelContext* context, const RowPartitionTensor& row_split, const vector& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector* result) { @@ -233,7 +233,8 @@ class RaggedTensorToTensorBaseOp : public OpKernel { } } if (row_split_size > 0) { - DCHECK_EQ(result->size(), row_split(row_split_size - 1)); + OP_REQUIRES(context, result->size() == row_split(row_split_size - 1), + errors::InvalidArgument("Invalid row split size.")); } } @@ -259,7 +260,7 @@ class RaggedTensorToTensorBaseOp : public OpKernel { // result[7] = -1 because parent_output_index[value_rowids[6]] == -1 // result[8] = parent_output_index[value_rowids[7]] void CalculateOutputIndexValueRowID( - const RowPartitionTensor& value_rowids, + OpKernelContext* context, const RowPartitionTensor& value_rowids, const vector& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector* result) { @@ -293,7 +294,8 @@ class RaggedTensorToTensorBaseOp : public OpKernel { } result->push_back(current_output_index); } - DCHECK_EQ(result->size(), value_rowids.size()); + OP_REQUIRES(context, result->size() == value_rowids.size(), + errors::InvalidArgument("Invalid row ids.")); } Status CalculateOutputIndex(OpKernelContext* context, int dimension, @@ -307,13 +309,13 @@ class RaggedTensorToTensorBaseOp : public 
OpKernel { switch (partition_type) { case RowPartitionType::VALUE_ROWIDS: CalculateOutputIndexValueRowID( - row_partition_tensor, parent_output_index, output_index_multiplier, - output_size, result); + context, row_partition_tensor, parent_output_index, + output_index_multiplier, output_size, result); return tensorflow::Status::OK(); case RowPartitionType::ROW_SPLITS: - CalculateOutputIndexRowSplit(row_partition_tensor, parent_output_index, - output_index_multiplier, output_size, - result); + CalculateOutputIndexRowSplit( + context, row_partition_tensor, parent_output_index, + output_index_multiplier, output_size, result); return tensorflow::Status::OK(); default: return errors::InvalidArgument( From 98823115253b14ce6bf0e327593907e6c984af98 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Tue, 13 Apr 2021 14:54:18 -0700 Subject: [PATCH 069/256] Fix `tf.raw_ops.RaggedTensorToTensor` failing CHECK in `tensor.cc`. PiperOrigin-RevId: 368300502 Change-Id: I91255d23c4bfd3aa3c029aac773937c09daf3c64 --- tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc b/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc index d729c43f25a4b8..28f797dc74b778 100644 --- a/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc +++ b/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc @@ -345,6 +345,11 @@ class RaggedTensorToTensorBaseOp : public OpKernel { void Compute(OpKernelContext* context) override { INDEX_TYPE first_dimension; + const Tensor first_partition_tensor = + context->input(kFirstPartitionInputIndex); + OP_REQUIRES(context, first_partition_tensor.NumElements() > 0, + errors::InvalidArgument("Invalid first partition input. Tensor " + "requires at least one element.")); OP_REQUIRES_OK(context, GetFirstDimensionSize(context, &first_dimension)); vector output_size; OP_REQUIRES_OK(context, From 3d73571f03aad57af7a16b897093c6dbda98793a Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Thu, 15 Apr 2021 13:03:19 -0700 Subject: [PATCH 070/256] Fix `tf.raw_ops.SparseCross` failing CHECK. PiperOrigin-RevId: 368701671 Change-Id: Id805729dd9ba0bda36e4bb309408129b55fb649d --- tensorflow/core/kernels/sparse_cross_op.cc | 55 +++++++++++++++++++--- 1 file changed, 48 insertions(+), 7 deletions(-) diff --git a/tensorflow/core/kernels/sparse_cross_op.cc b/tensorflow/core/kernels/sparse_cross_op.cc index 9a80aad5d04fae..78e7561825a8a1 100644 --- a/tensorflow/core/kernels/sparse_cross_op.cc +++ b/tensorflow/core/kernels/sparse_cross_op.cc @@ -27,6 +27,7 @@ limitations under the License. #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.h" +#include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/lib/core/stringpiece.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/fingerprint.h" @@ -460,10 +461,19 @@ int64 CalculateBatchSize(const OpInputList& shapes_list_in, Status ValidateInput(const OpInputList& indices_list_in, const OpInputList& values_list_in, const OpInputList& shapes_list_in, - const OpInputList& dense_list_in) { + const OpInputList& dense_list_in, + const DataType& internal_type) { const auto size = indices_list_in.size(); + // Only perform internal_type check for SparseCrossOp. + // Check if the internal_type is not invalid before doing so. + bool check_type = internal_type != DT_INVALID; // Validates indices_list_in OpInputList. 
for (int i = 0; i < size; i++) { + if (check_type && indices_list_in[i].dtype() != DT_INT64) { + return errors::InvalidArgument("Input indices should be of type ", + DT_INT64, " but received ", + indices_list_in[i].dtype()); + } if (!TensorShapeUtils::IsMatrix(indices_list_in[i].shape())) { return errors::InvalidArgument( "Input indices should be a matrix but received shape ", @@ -482,6 +492,14 @@ Status ValidateInput(const OpInputList& indices_list_in, values_list_in.size()); } for (int i = 0; i < size; i++) { + // Make sure to avoid the expected type to be string, but input values to be + // int64. + if (check_type && internal_type == DT_STRING && + values_list_in[i].dtype() == DT_INT64) { + return errors::InvalidArgument("Input values should be of internal type ", + internal_type, " but received ", + values_list_in[i].dtype()); + } if (!TensorShapeUtils::IsVector(values_list_in[i].shape())) { return errors::InvalidArgument( "Input values should be a vector but received shape ", @@ -502,6 +520,11 @@ Status ValidateInput(const OpInputList& indices_list_in, shapes_list_in.size()); } for (int i = 0; i < size; i++) { + if (check_type && shapes_list_in[i].dtype() != DT_INT64) { + return errors::InvalidArgument("Input shape should be of type ", DT_INT64, + " but received ", + shapes_list_in[i].dtype()); + } if (!TensorShapeUtils::IsVector(shapes_list_in[i].shape())) { return errors::InvalidArgument( "Input shapes should be a vector but received shape ", @@ -517,6 +540,14 @@ Status ValidateInput(const OpInputList& indices_list_in, // Validates dense_list_in OpInputList for (int i = 0; i < dense_list_in.size(); ++i) { + // Make sure to avoid the expected type to be string, but input values to be + // int64. + if (check_type && internal_type == DT_STRING && + dense_list_in[i].dtype() == DT_INT64) { + return errors::InvalidArgument("Dense inputs should be of internal type ", + internal_type, " but received ", + dense_list_in[i].dtype()); + } if (!TensorShapeUtils::IsMatrix(dense_list_in[i].shape())) { return errors::InvalidArgument( "Dense inputs should be a matrix but received shape ", @@ -698,6 +729,7 @@ class SparseCrossOp : public OpKernel { int64 signed_hash_key_; OP_REQUIRES_OK(context, context->GetAttr("hash_key", &signed_hash_key_)); hash_key_ = static_cast(signed_hash_key_); + OP_REQUIRES_OK(context, context->GetAttr("internal_type", &internal_type_)); } void Compute(OpKernelContext* context) override { @@ -711,8 +743,10 @@ class SparseCrossOp : public OpKernel { OP_REQUIRES_OK(context, context->input_list("dense_inputs", &dense_list_in)); - OP_REQUIRES_OK(context, ValidateInput(indices_list_in, values_list_in, - shapes_list_in, dense_list_in)); + DataType internal_type = internal_type_; + OP_REQUIRES_OK( + context, ValidateInput(indices_list_in, values_list_in, shapes_list_in, + dense_list_in, internal_type)); std::vector>> columns = GenerateColumnsFromInput(indices_list_in, values_list_in, @@ -756,6 +790,7 @@ class SparseCrossOp : public OpKernel { private: int64 num_buckets_; uint64 hash_key_; + DataType internal_type_; }; class SparseCrossV2Op : public OpKernel { @@ -773,8 +808,11 @@ class SparseCrossV2Op : public OpKernel { OP_REQUIRES_OK(context, context->input_list("dense_inputs", &dense_list_in)); - OP_REQUIRES_OK(context, ValidateInput(indices_list_in, values_list_in, - shapes_list_in, dense_list_in)); + // Set internal_type to invalid_type so that the check will be ignored. 
+ DataType internal_type = DT_INVALID; + OP_REQUIRES_OK( + context, ValidateInput(indices_list_in, values_list_in, shapes_list_in, + dense_list_in, internal_type)); const Tensor* sep_t; OP_REQUIRES_OK(context, context->input("sep", &sep_t)); @@ -832,8 +870,11 @@ class SparseCrossHashedOp : public OpKernel { OP_REQUIRES_OK(context, context->input_list("dense_inputs", &dense_list_in)); - OP_REQUIRES_OK(context, ValidateInput(indices_list_in, values_list_in, - shapes_list_in, dense_list_in)); + // Set internal_type to invalid_type so that the check will be ignored. + DataType internal_type = DT_INVALID; + OP_REQUIRES_OK( + context, ValidateInput(indices_list_in, values_list_in, shapes_list_in, + dense_list_in, internal_type)); const Tensor* num_buckets_t; OP_REQUIRES_OK(context, context->input("num_buckets", &num_buckets_t)); From 370e714d7affe3463fba84c0b95d947f43b00380 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Mon, 19 Apr 2021 11:33:50 -0700 Subject: [PATCH 071/256] Fix the segfault in `tf.raw_ops.SparseCountSparseOutput`. PiperOrigin-RevId: 369264941 Change-Id: I23a96a15b8370c01ee21ba3841e1c7dcbf55e93d --- tensorflow/core/kernels/count_ops.cc | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/count_ops.cc b/tensorflow/core/kernels/count_ops.cc index 087deef0812f00..fa48c49a5f92ee 100644 --- a/tensorflow/core/kernels/count_ops.cc +++ b/tensorflow/core/kernels/count_ops.cc @@ -193,9 +193,17 @@ class SparseCount : public OpKernel { } bool is_1d = shape.NumElements() == 1; - int num_batches = is_1d ? 1 : shape.flat()(0); + auto shape_vector = shape.flat(); + int num_batches = is_1d ? 1 : shape_vector(0); int num_values = values.NumElements(); + for (int b = 0; b < shape_vector.size(); b++) { + OP_REQUIRES(context, shape_vector(b) >= 0, + errors::InvalidArgument( + "Elements in dense_shape must be >= 0. Instead got:", + shape.DebugString())); + } + OP_REQUIRES(context, num_values == indices.shape().dim_size(0), errors::InvalidArgument( "Number of values must match first dimension of indices.", From c51c97d5fc484d1edb06aaa5d1c7d7304c08dca3 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 19 Apr 2021 13:46:32 -0700 Subject: [PATCH 072/256] Validate some shape requirements for `Conv3DBackpropFilter*` and `Conv3DBackpropInput*` ops. Older versions of Eigen might otherwise crash / produce OOB read on specially crafted inputs. PiperOrigin-RevId: 369293977 Change-Id: I58f51445a93936d7cf8e616f75de17677df36718 --- tensorflow/core/kernels/conv_grad_ops_3d.cc | 56 +++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/tensorflow/core/kernels/conv_grad_ops_3d.cc b/tensorflow/core/kernels/conv_grad_ops_3d.cc index 322da2537f0da5..e39416e61c369a 100644 --- a/tensorflow/core/kernels/conv_grad_ops_3d.cc +++ b/tensorflow/core/kernels/conv_grad_ops_3d.cc @@ -239,6 +239,20 @@ class Conv3DBackpropInputOp : public OpKernel { input_shape = context->input(0).shape(); } + OP_REQUIRES( + context, input_shape.dim_size(4) == filter_shape.dim_size(3), + errors::InvalidArgument("input and filter_sizes must have the same " + "number of channels. Got ", + input_shape.dim_size(4), " for input and ", + filter_shape.dim_size(3), " for filter_sizes")); + OP_REQUIRES( + context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4), + errors::InvalidArgument("out_backprop and filter_sizes must have the " + "same number of channels. 
Got ", + out_backprop_shape.dim_size(4), + " for out_backprop and ", + filter_shape.dim_size(4), " for filter_sizes")); + ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( "Conv3DBackpropInputOp", /*num_spatial_dims=*/3, @@ -346,6 +360,20 @@ class Conv3DCustomBackpropInputOp : public OpKernel { input_shape = context->input(0).shape(); } + OP_REQUIRES( + context, input_shape.dim_size(4) == filter_shape.dim_size(3), + errors::InvalidArgument("input and filter_sizes must have the same " + "number of channels. Got ", + input_shape.dim_size(4), " for input and ", + filter_shape.dim_size(3), " for filter_sizes")); + OP_REQUIRES( + context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4), + errors::InvalidArgument("out_backprop and filter_sizes must have the " + "same number of channels. Got ", + out_backprop_shape.dim_size(4), + " for out_backprop and ", + filter_shape.dim_size(4), " for filter_sizes")); + ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( "Conv3DBackpropInputOp", /*num_spatial_dims=*/3, @@ -696,6 +724,20 @@ class Conv3DBackpropFilterOp : public OpKernel { filter_shape = context->input(1).shape(); } + OP_REQUIRES( + context, input_shape.dim_size(4) == filter_shape.dim_size(3), + errors::InvalidArgument("input and filter_sizes must have the same " + "number of channels. Got ", + input_shape.dim_size(4), " for input and ", + filter_shape.dim_size(3), " for filter_sizes")); + OP_REQUIRES( + context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4), + errors::InvalidArgument("out_backprop and filter_sizes must have the " + "same number of channels. Got ", + out_backprop_shape.dim_size(4), + " for out_backprop and ", + filter_shape.dim_size(4), " for filter_sizes")); + ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( @@ -808,6 +850,20 @@ class Conv3DCustomBackpropFilterOp : public OpKernel { filter_shape = context->input(1).shape(); } + OP_REQUIRES( + context, input_shape.dim_size(4) == filter_shape.dim_size(3), + errors::InvalidArgument("input and filter_sizes must have the same " + "number of channels. Got ", + input_shape.dim_size(4), " for input and ", + filter_shape.dim_size(3), " for filter_sizes")); + OP_REQUIRES( + context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4), + errors::InvalidArgument("out_backprop and filter_sizes must have the " + "same number of channels. Got ", + out_backprop_shape.dim_size(4), + " for out_backprop and ", + filter_shape.dim_size(4), " for filter_sizes")); + ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( From 4ec246e2d68702655ded527a339b31b6bbf80927 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 19 Apr 2021 16:00:40 -0700 Subject: [PATCH 073/256] Eliminate a division by 0 in 3D convolutions. 
--- tensorflow/core/kernels/conv_grad_ops_3d.cc | 104 ++++++++++++++++++++ 1 file changed, 104 insertions(+) diff --git a/tensorflow/core/kernels/conv_grad_ops_3d.cc b/tensorflow/core/kernels/conv_grad_ops_3d.cc index 322da2537f0da5..ffe49f91c23bd2 100644 --- a/tensorflow/core/kernels/conv_grad_ops_3d.cc +++ b/tensorflow/core/kernels/conv_grad_ops_3d.cc @@ -239,6 +239,28 @@ class Conv3DBackpropInputOp : public OpKernel { input_shape = context->input(0).shape(); } + OP_REQUIRES(context, input_shape.dims() == 5, + errors::InvalidArgument("input tensor must have 5 dimensions")); + OP_REQUIRES( + context, filter_shape.dims() == 5, + errors::InvalidArgument("filter_sizes tensor must have 5 dimensions")); + OP_REQUIRES( + context, out_backprop_shape.dims() == 5, + errors::InvalidArgument("out_backprop tensor must have 5 dimensions")); + OP_REQUIRES( + context, input_shape.dim_size(4) == filter_shape.dim_size(3), + errors::InvalidArgument("input and filter_sizes must have the same " + "number of channels. Got ", + input_shape.dim_size(4), " for input and ", + filter_shape.dim_size(3), " for filter_sizes")); + OP_REQUIRES( + context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4), + errors::InvalidArgument("out_backprop and filter_sizes must have the " + "same number of channels. Got ", + out_backprop_shape.dim_size(4), + " for out_backprop and ", + filter_shape.dim_size(4), " for filter_sizes")); + ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( "Conv3DBackpropInputOp", /*num_spatial_dims=*/3, @@ -346,6 +368,28 @@ class Conv3DCustomBackpropInputOp : public OpKernel { input_shape = context->input(0).shape(); } + OP_REQUIRES(context, input_shape.dims() == 5, + errors::InvalidArgument("input tensor must have 5 dimensions")); + OP_REQUIRES( + context, filter_shape.dims() == 5, + errors::InvalidArgument("filter_sizes tensor must have 5 dimensions")); + OP_REQUIRES( + context, out_backprop_shape.dims() == 5, + errors::InvalidArgument("out_backprop tensor must have 5 dimensions")); + OP_REQUIRES( + context, input_shape.dim_size(4) == filter_shape.dim_size(3), + errors::InvalidArgument("input and filter_sizes must have the same " + "number of channels. Got ", + input_shape.dim_size(4), " for input and ", + filter_shape.dim_size(3), " for filter_sizes")); + OP_REQUIRES( + context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4), + errors::InvalidArgument("out_backprop and filter_sizes must have the " + "same number of channels. Got ", + out_backprop_shape.dim_size(4), + " for out_backprop and ", + filter_shape.dim_size(4), " for filter_sizes")); + ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( "Conv3DBackpropInputOp", /*num_spatial_dims=*/3, @@ -416,6 +460,11 @@ class Conv3DCustomBackpropInputOp : public OpKernel { // contraction compared to sharding and matmuls. const bool use_parallel_contraction = dims.batch_size == 1; + OP_REQUIRES( + context, work_unit_size > 0, + errors::InvalidArgument("input, filter_sizes and out_backprop tensors " + "must all have at least 1 element")); + const size_t shard_size = use_parallel_contraction ? 
1 @@ -696,6 +745,31 @@ class Conv3DBackpropFilterOp : public OpKernel { filter_shape = context->input(1).shape(); } +<<<<<<< HEAD +======= + OP_REQUIRES(context, input_shape.dims() == 5, + errors::InvalidArgument("input tensor must have 5 dimensions")); + OP_REQUIRES( + context, filter_shape.dims() == 5, + errors::InvalidArgument("filter_sizes tensor must have 5 dimensions")); + OP_REQUIRES( + context, out_backprop_shape.dims() == 5, + errors::InvalidArgument("out_backprop tensor must have 5 dimensions")); + OP_REQUIRES( + context, input_shape.dim_size(4) == filter_shape.dim_size(3), + errors::InvalidArgument("input and filter_sizes must have the same " + "number of channels. Got ", + input_shape.dim_size(4), " for input and ", + filter_shape.dim_size(3), " for filter_sizes")); + OP_REQUIRES( + context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4), + errors::InvalidArgument("out_backprop and filter_sizes must have the " + "same number of channels. Got ", + out_backprop_shape.dim_size(4), + " for out_backprop and ", + filter_shape.dim_size(4), " for filter_sizes")); + +>>>>>>> 311403edbc9 (Eliminate a division by 0 in 3D convolutions.) ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( @@ -808,6 +882,31 @@ class Conv3DCustomBackpropFilterOp : public OpKernel { filter_shape = context->input(1).shape(); } +<<<<<<< HEAD +======= + OP_REQUIRES(context, input_shape.dims() == 5, + errors::InvalidArgument("input tensor must have 5 dimensions")); + OP_REQUIRES( + context, filter_shape.dims() == 5, + errors::InvalidArgument("filter_sizes tensor must have 5 dimensions")); + OP_REQUIRES( + context, out_backprop_shape.dims() == 5, + errors::InvalidArgument("out_backprop tensor must have 5 dimensions")); + OP_REQUIRES( + context, input_shape.dim_size(4) == filter_shape.dim_size(3), + errors::InvalidArgument("input and filter_sizes must have the same " + "number of channels. Got ", + input_shape.dim_size(4), " for input and ", + filter_shape.dim_size(3), " for filter_sizes")); + OP_REQUIRES( + context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4), + errors::InvalidArgument("out_backprop and filter_sizes must have the " + "same number of channels. Got ", + out_backprop_shape.dim_size(4), + " for out_backprop and ", + filter_shape.dim_size(4), " for filter_sizes")); + +>>>>>>> 311403edbc9 (Eliminate a division by 0 in 3D convolutions.) ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( @@ -880,6 +979,11 @@ class Conv3DCustomBackpropFilterOp : public OpKernel { const int64 work_unit_size = size_A + size_B + size_C; + OP_REQUIRES( + context, work_unit_size > 0, + errors::InvalidArgument("input, filter_sizes and out_backprop tensors " + "must all have at least 1 element")); + const size_t shard_size = (target_working_set_size + work_unit_size - 1) / work_unit_size; From 0dce83fdb7c30dd75d42191e9e2011863877641c Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Tue, 20 Apr 2021 12:14:41 -0700 Subject: [PATCH 074/256] Fix overflow CHECK issue with `tf.raw_ops.AddManySparseToTensorsMap`. 
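A note on the approach, as an illustrative sketch only: the change below accumulates the element count implied by the input shape with `MultiplyWithoutOverflow` (from `tensorflow/core/util/overflow.h`, which the diff adds as an include) and rejects shapes whose product overflows, instead of letting a later `CHECK` fail. The same idea with plain `int64_t` values and no TensorFlow types:

    #include <cstdint>
    #include <limits>
    #include <vector>

    // Product of the dims, or -1 if it cannot be represented in int64,
    // letting the caller reject the shape up front.
    int64_t CheckedNumElements(const std::vector<int64_t>& dims) {
      int64_t total = 1;
      for (int64_t d : dims) {
        if (d < 0) return -1;
        if (d != 0 && total > std::numeric_limits<int64_t>::max() / d) {
          return -1;  // multiplication would overflow
        }
        total *= d;
      }
      return total;
    }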
PiperOrigin-RevId: 369492969 Change-Id: I1d70d6c0c92e3d7a25bc3b3aa2a0c0ac9688bf81 --- .../core/kernels/sparse_tensors_map_ops.cc | 26 ++++++++++++++----- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/tensorflow/core/kernels/sparse_tensors_map_ops.cc b/tensorflow/core/kernels/sparse_tensors_map_ops.cc index c2c0e43ca2ba8d..5ea5fca544d3e9 100644 --- a/tensorflow/core/kernels/sparse_tensors_map_ops.cc +++ b/tensorflow/core/kernels/sparse_tensors_map_ops.cc @@ -21,9 +21,6 @@ limitations under the License. #include #include -#include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/framework/register_types.h" - #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/resource_mgr.h" @@ -31,6 +28,7 @@ limitations under the License. #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/gtl/inlined_vector.h" +#include "tensorflow/core/util/overflow.h" #include "tensorflow/core/util/sparse/sparse_tensor.h" namespace tensorflow { @@ -254,7 +252,22 @@ class AddManySparseToTensorsMapOp : public SparseTensorAccessingOp { errors::InvalidArgument( "Rank of input SparseTensor should be > 1, but saw rank: ", rank)); - TensorShape tensor_input_shape(input_shape->vec()); + auto input_shape_vec = input_shape->vec(); + int new_num_elements = 1; + bool overflow_ocurred = false; + for (int i = 0; i < input_shape_vec.size(); i++) { + new_num_elements = + MultiplyWithoutOverflow(new_num_elements, input_shape_vec(i)); + if (new_num_elements < 0) { + overflow_ocurred = true; + } + } + + OP_REQUIRES( + context, !overflow_ocurred, + errors::Internal("Encountered overflow from large input shape.")); + + TensorShape tensor_input_shape(input_shape_vec); gtl::InlinedVector std_order(rank); std::iota(std_order.begin(), std_order.end(), 0); SparseTensor input_st; @@ -262,8 +275,7 @@ class AddManySparseToTensorsMapOp : public SparseTensorAccessingOp { tensor_input_shape, std_order, &input_st)); - auto input_shape_t = input_shape->vec(); - const int64 N = input_shape_t(0); + const int64 N = input_shape_vec(0); Tensor sparse_handles(DT_INT64, TensorShape({N})); auto sparse_handles_t = sparse_handles.vec(); @@ -274,7 +286,7 @@ class AddManySparseToTensorsMapOp : public SparseTensorAccessingOp { // minibatch entries. TensorShape output_shape; OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape( - input_shape_t.data() + 1, + input_shape_vec.data() + 1, input_shape->NumElements() - 1, &output_shape)); // Get groups by minibatch dimension From 2f1d8532d622c9319492705d1589f02ee8c9b7d0 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 19 Apr 2021 17:33:11 -0700 Subject: [PATCH 075/256] Prevent another division by zero. PiperOrigin-RevId: 369338598 Change-Id: I55471d363e401fdcf8d259670ad4eef672b731e2 --- tensorflow/core/kernels/conv_grad_shape_utils.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/core/kernels/conv_grad_shape_utils.cc b/tensorflow/core/kernels/conv_grad_shape_utils.cc index acb052968e1708..942e085b8ac3b2 100644 --- a/tensorflow/core/kernels/conv_grad_shape_utils.cc +++ b/tensorflow/core/kernels/conv_grad_shape_utils.cc @@ -127,6 +127,10 @@ Status ConvBackpropComputeDimensionsV2( // dimensions of the filter Tensor. 
VLOG(2) << "input vs filter_in depth " << dims->in_depth << " " << filter_shape.dim_size(num_dims - 2); + if (filter_shape.dim_size(num_dims - 2) <= 0) { + return errors ::InvalidArgument( + label, ": filter depth must be strictly greated than zero"); + } if (dims->in_depth % filter_shape.dim_size(num_dims - 2)) { return errors::InvalidArgument( label, ": input depth must be evenly divisible by filter depth"); From 50f3b08159c6b238e666ec34eeb948bf8ff68974 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 19 Apr 2021 18:58:47 -0700 Subject: [PATCH 076/256] Fix one FPE and remove two CHECK-fails. PiperOrigin-RevId: 369349640 Change-Id: I1fedbfc2b5bab635c5cb51f103d7c9176f79831a --- tensorflow/core/kernels/quantized_conv_ops.cc | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/tensorflow/core/kernels/quantized_conv_ops.cc b/tensorflow/core/kernels/quantized_conv_ops.cc index a4d36cca3e4088..a339de8cfc8fa3 100644 --- a/tensorflow/core/kernels/quantized_conv_ops.cc +++ b/tensorflow/core/kernels/quantized_conv_ops.cc @@ -18,6 +18,8 @@ limitations under the License. #include #include +#include "tensorflow/core/platform/errors.h" + #define EIGEN_USE_THREADS #define GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK @@ -227,8 +229,12 @@ class Im2ColConvFunctor { return; } - CHECK_GT(output_width, 0); - CHECK_GT(output_height, 0); + OP_REQUIRES( + context, output_width > 0, + errors::InvalidArgument("output_width must be strictly positive")); + OP_REQUIRES( + context, output_height > 0, + errors::InvalidArgument("output_height must be strictly positive")); int filter_left_offset; int filter_top_offset; if (padding == VALID) { @@ -255,6 +261,9 @@ class Im2ColConvFunctor { // by the width, then the height. This is the standard memory order in the // image world if it helps to visualize it. const int filter_value_count = filter_width * filter_height * input_depth; + OP_REQUIRES(context, filter_value_count > 0, + errors::InvalidArgument( + "filter patch must contain at least one element")); const int64 patches_per_chunk = kMaxChunkSize / (filter_value_count * sizeof(T1)); const int64 chunk_value_count = From 098f20431384da2e6a407b650f0f84b3457d4304 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 20 Apr 2021 10:52:46 -0700 Subject: [PATCH 077/256] Fix one division by zero PiperOrigin-RevId: 369474832 Change-Id: I1082858ed78d9b2e4738ce30b231955973d49e1e --- tensorflow/core/kernels/quantized_mul_op.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/core/kernels/quantized_mul_op.cc b/tensorflow/core/kernels/quantized_mul_op.cc index 4e191f162662bb..fb56f68bf14dbb 100644 --- a/tensorflow/core/kernels/quantized_mul_op.cc +++ b/tensorflow/core/kernels/quantized_mul_op.cc @@ -347,6 +347,11 @@ class QuantizedMulOp : public OpKernel { tensor_num_elements = x.NumElements(); tensor_offset = offset_x; } + if (vector_num_elements == 0) { + context->SetStatus( + errors::InvalidArgument("vector must have at least 1 element")); + return; + } VectorTensorMultiply( vector_data, vector_offset, vector_num_elements, tensor_data, tensor_offset, tensor_num_elements, z_data); From 0b36c628ec83d6d16230ffebcb287a9ab4a4b896 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 20 Apr 2021 14:45:33 -0700 Subject: [PATCH 078/256] Remove `OP_REQUIRES` call from helper function. Since `OP_REQUIRES` macro expands to a `return;` (among other), calling it in a helper function only ends the helper function's execution earlier, but the kernel will still run from start to end. 
Thus, all the expected validations are actually broken/useless as the code ploughs through the next crash anyway. PiperOrigin-RevId: 369524386 Change-Id: I54f6cf9328445675ccc392e661b04336b229c9da --- .../core/kernels/sparse/sparse_cholesky_op.cc | 67 ++++++++++--------- 1 file changed, 34 insertions(+), 33 deletions(-) diff --git a/tensorflow/core/kernels/sparse/sparse_cholesky_op.cc b/tensorflow/core/kernels/sparse/sparse_cholesky_op.cc index 9a939276f0b6cb..47ab252317de5e 100644 --- a/tensorflow/core/kernels/sparse/sparse_cholesky_op.cc +++ b/tensorflow/core/kernels/sparse/sparse_cholesky_op.cc @@ -17,6 +17,8 @@ limitations under the License. #include #include +#include "tensorflow/core/framework/op_requires.h" + #define EIGEN_USE_THREADS #include "third_party/eigen3/Eigen/Core" @@ -82,8 +84,8 @@ class CSRSparseCholeskyCPUOp : public OpKernel { int64 num_rows; int batch_size; - ValidateInputs(ctx, *input_matrix, input_permutation_indices, &batch_size, - &num_rows); + OP_REQUIRES_OK(ctx, ValidateInputs(*input_matrix, input_permutation_indices, + &batch_size, &num_rows)); // Allocate batch pointers. Tensor batch_ptr(cpu_allocator(), DT_INT32, TensorShape({batch_size + 1})); @@ -226,49 +228,48 @@ class CSRSparseCholeskyCPUOp : public OpKernel { } private: - void ValidateInputs(OpKernelContext* ctx, - const CSRSparseMatrix& sparse_matrix, - const Tensor& permutation_indices, int* batch_size, - int64* num_rows) { - OP_REQUIRES(ctx, sparse_matrix.dtype() == DataTypeToEnum::value, - errors::InvalidArgument( - "Asked for a CSRSparseMatrix of type ", - DataTypeString(DataTypeToEnum::value), - " but saw dtype: ", DataTypeString(sparse_matrix.dtype()))); + Status ValidateInputs(const CSRSparseMatrix& sparse_matrix, + const Tensor& permutation_indices, int* batch_size, + int64* num_rows) { + if (sparse_matrix.dtype() != DataTypeToEnum::value) + return errors::InvalidArgument( + "Asked for a CSRSparseMatrix of type ", + DataTypeString(DataTypeToEnum::value), + " but saw dtype: ", DataTypeString(sparse_matrix.dtype())); const Tensor& dense_shape = sparse_matrix.dense_shape(); const int rank = dense_shape.dim_size(0); - OP_REQUIRES(ctx, rank == 2 || rank == 3, - errors::InvalidArgument("sparse matrix must have rank 2 or 3; ", - "but dense_shape has size ", rank)); + if (rank < 2 || rank > 3) + return errors::InvalidArgument("sparse matrix must have rank 2 or 3; ", + "but dense_shape has size ", rank); const int row_dim = (rank == 2) ? 
0 : 1; auto dense_shape_vec = dense_shape.vec(); *num_rows = dense_shape_vec(row_dim); const int64 num_cols = dense_shape_vec(row_dim + 1); - OP_REQUIRES(ctx, *num_rows == num_cols, - errors::InvalidArgument("sparse matrix must be square; got: ", - *num_rows, " != ", num_cols)); + if (*num_rows != num_cols) + return errors::InvalidArgument( + "sparse matrix must be square; got: ", *num_rows, " != ", num_cols); const TensorShape& perm_shape = permutation_indices.shape(); - OP_REQUIRES( - ctx, perm_shape.dims() + 1 == rank, - errors::InvalidArgument( - "sparse matrix must have the same rank as permutation; got: ", rank, - " != ", perm_shape.dims(), " + 1.")); - OP_REQUIRES( - ctx, perm_shape.dim_size(rank - 2) == *num_rows, - errors::InvalidArgument( - "permutation must have the same number of elements in each batch " - "as the number of rows in sparse matrix; got: ", - perm_shape.dim_size(rank - 2), " != ", *num_rows)); + if (perm_shape.dims() + 1 != rank) + return errors::InvalidArgument( + "sparse matrix must have the same rank as permutation; got: ", rank, + " != ", perm_shape.dims(), " + 1."); + if (perm_shape.dim_size(rank - 2) != *num_rows) + return errors::InvalidArgument( + "permutation must have the same number of elements in each batch " + "as the number of rows in sparse matrix; got: ", + perm_shape.dim_size(rank - 2), " != ", *num_rows); *batch_size = sparse_matrix.batch_size(); if (*batch_size > 1) { - OP_REQUIRES( - ctx, perm_shape.dim_size(0) == *batch_size, - errors::InvalidArgument("permutation must have the same batch size " - "as sparse matrix; got: ", - perm_shape.dim_size(0), " != ", *batch_size)); + if (perm_shape.dim_size(0) != *batch_size) + return errors::InvalidArgument( + "permutation must have the same batch size " + "as sparse matrix; got: ", + perm_shape.dim_size(0), " != ", *batch_size); } + + return Status::OK(); } }; From 2d3554564210e91fa10c95df6d775d0535033910 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 21 Apr 2021 16:20:48 -0700 Subject: [PATCH 079/256] Fix heap buffer overflow caused by rounding. This was hard to fix. Due to the way we compute the pixels that influence an output pixel in resized images, for certain input configuration we might have issued a read to a pixel that is outside of boundary of the original image. This is because of floating errors that affected truncation results. PiperOrigin-RevId: 369757871 Change-Id: If89425fff930983829a2168203c11858883eebc9 --- tensorflow/core/kernels/quantized_resize_bilinear_op.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/core/kernels/quantized_resize_bilinear_op.cc b/tensorflow/core/kernels/quantized_resize_bilinear_op.cc index 4da56cde5478d5..8270fc11078ba4 100644 --- a/tensorflow/core/kernels/quantized_resize_bilinear_op.cc +++ b/tensorflow/core/kernels/quantized_resize_bilinear_op.cc @@ -64,6 +64,8 @@ inline void ComputeInterpolationWeights( std::max(static_cast(in_f), static_cast(0)); interpolation->upper[i] = std::min(static_cast(std::ceil(in)), in_size - 1); + interpolation->lower[i] = + std::min(interpolation->lower[i], interpolation->upper[i]); interpolation->lerp[i] = in - in_f; interpolation->ilerp[i] = static_cast((in - in_f) * (1 << resolution)); From 2b078fe6af3670bc9263710230341bbd8cbe9e6a Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Wed, 21 Apr 2021 13:03:47 -0700 Subject: [PATCH 080/256] Fix null CHECK issue with `tf.raw_ops.EncodePng`. 
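The fix below simply requires a non-empty image (`image.NumElements() > 0`) before the tensor reaches the PNG encoder. A tiny sketch of the same validation, with plain integers standing in for the real tensor shape:

    #include <cstdint>

    // Only hand an image to the encoder if it actually contains pixels.
    bool CanEncode(int64_t height, int64_t width, int64_t channels) {
      return height > 0 && width > 0 && channels > 0;
    }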
PiperOrigin-RevId: 369717714 Change-Id: I24136cd99c20b8466671f4f93b670ef6f6dd1250 --- tensorflow/core/kernels/encode_png_op.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/core/kernels/encode_png_op.cc b/tensorflow/core/kernels/encode_png_op.cc index 8dbe1d377df5c6..09bcdbe5e3db0b 100644 --- a/tensorflow/core/kernels/encode_png_op.cc +++ b/tensorflow/core/kernels/encode_png_op.cc @@ -54,6 +54,8 @@ class EncodePngOp : public OpKernel { OP_REQUIRES(context, image.dims() == 3, errors::InvalidArgument("image must be 3-dimensional", image.shape().DebugString())); + OP_REQUIRES(context, image.NumElements() > 0, + errors::Internal("Invalid image provided.")); OP_REQUIRES( context, FastBoundsCheck(image.NumElements(), std::numeric_limits::max()), From a6e729acf7ae9769d0d115526b90a1d404e22b5b Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 21 Apr 2021 16:19:54 -0700 Subject: [PATCH 081/256] Fix out of bounds read in `ragged_cross_op.cc`. PiperOrigin-RevId: 369757702 Change-Id: Ie6e5d2c21513a8d56bf41fcf35960caf76e890f9 --- tensorflow/core/kernels/ragged_cross_op.cc | 30 ++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/tensorflow/core/kernels/ragged_cross_op.cc b/tensorflow/core/kernels/ragged_cross_op.cc index ea65c0ee2b5b21..5dfe93f4166592 100644 --- a/tensorflow/core/kernels/ragged_cross_op.cc +++ b/tensorflow/core/kernels/ragged_cross_op.cc @@ -21,6 +21,7 @@ limitations under the License. #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" +#include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/fingerprint.h" #include "tensorflow/core/util/util.h" #include "tensorflow/core/util/work_sharder.h" @@ -466,16 +467,45 @@ class RaggedCrossOp : public OpKernel { int next_dense = 0; for (char c : input_order_) { if (c == 'R') { + if (next_ragged >= ragged_values_list.size()) + return errors::InvalidArgument( + "input_order \"", input_order_, + "\" specifies reading a ragged tensor value at index ", + next_ragged, " from a list of ", ragged_values_list.size(), + " values."); + if (next_ragged >= ragged_splits_list.size()) + return errors::InvalidArgument( + "input_order \"", input_order_, + "\" specifies reading a ragged tensor split at index ", + next_ragged, " from a list of ", ragged_splits_list.size(), + " splits."); TF_RETURN_IF_ERROR(BuildRaggedFeatureReader( ragged_values_list[next_ragged], ragged_splits_list[next_ragged], features)); next_ragged++; } else if (c == 'S') { + if (next_sparse >= sparse_values_list.size()) + return errors::InvalidArgument( + "input_order \"", input_order_, + "\" specifies reading a sparse tensor value at index ", + next_sparse, " from a list of ", sparse_values_list.size(), + " values."); + if (next_sparse >= sparse_indices_list.size()) + return errors::InvalidArgument( + "input_order \"", input_order_, + "\" specifies reading a sparse tensor index at index ", + next_sparse, " from a list of ", sparse_indices_list.size(), + " indices."); TF_RETURN_IF_ERROR(BuildSparseFeatureReader( sparse_indices_list[next_sparse], sparse_values_list[next_sparse], batch_size, features)); next_sparse++; } else if (c == 'D') { + if (next_dense >= dense_list.size()) + return errors::InvalidArgument( + "input_order \"", input_order_, + "\" specifies reading a dense tensor at index ", next_dense, + " from a list of ", dense_list.size(), " tensors."); TF_RETURN_IF_ERROR( BuildDenseFeatureReader(dense_list[next_dense++], 
features)); } else { From 65edfe109383648b90464679fddf71860390d26c Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Wed, 21 Apr 2021 15:57:36 -0700 Subject: [PATCH 082/256] Fix overflow CHECK issue with `tf.raw_ops.DrawBoundingBoxes`. PiperOrigin-RevId: 369753591 Change-Id: I3b45fc98ee0d28a3c20b7e9c995aa647c976ec40 --- .../core/kernels/draw_bounding_box_op.cc | 48 ++++++++++++++----- 1 file changed, 36 insertions(+), 12 deletions(-) diff --git a/tensorflow/core/kernels/draw_bounding_box_op.cc b/tensorflow/core/kernels/draw_bounding_box_op.cc index 30de99b7d560a2..73db76333f0862 100644 --- a/tensorflow/core/kernels/draw_bounding_box_op.cc +++ b/tensorflow/core/kernels/draw_bounding_box_op.cc @@ -147,22 +147,46 @@ class DrawBoundingBoxesOp : public OpKernel { // At this point, {min,max}_box_{row,col}_clamp are inside the // image. - CHECK_GE(min_box_row_clamp, 0); - CHECK_GE(max_box_row_clamp, 0); - CHECK_LT(min_box_row_clamp, height); - CHECK_LT(max_box_row_clamp, height); - CHECK_GE(min_box_col_clamp, 0); - CHECK_GE(max_box_col_clamp, 0); - CHECK_LT(min_box_col_clamp, width); - CHECK_LT(max_box_col_clamp, width); + OP_REQUIRES( + context, min_box_row_clamp >= 0, + errors::InvalidArgument("Min box row clamp is less than 0.")); + OP_REQUIRES( + context, max_box_row_clamp >= 0, + errors::InvalidArgument("Max box row clamp is less than 0.")); + OP_REQUIRES(context, min_box_row_clamp <= height, + errors::InvalidArgument( + "Min box row clamp is greater than height.")); + OP_REQUIRES(context, max_box_row_clamp <= height, + errors::InvalidArgument( + "Max box row clamp is greater than height.")); + + OP_REQUIRES( + context, min_box_col_clamp >= 0, + errors::InvalidArgument("Min box col clamp is less than 0.")); + OP_REQUIRES( + context, max_box_col_clamp >= 0, + errors::InvalidArgument("Max box col clamp is less than 0.")); + OP_REQUIRES(context, min_box_col_clamp <= width, + errors::InvalidArgument( + "Min box col clamp is greater than width.")); + OP_REQUIRES(context, max_box_col_clamp <= width, + errors::InvalidArgument( + "Max box col clamp is greater than width.")); // At this point, the min_box_row and min_box_col are either // in the image or above/left of it, and max_box_row and // max_box_col are either in the image or below/right or it. - CHECK_LT(min_box_row, height); - CHECK_GE(max_box_row, 0); - CHECK_LT(min_box_col, width); - CHECK_GE(max_box_col, 0); + + OP_REQUIRES( + context, min_box_row <= height, + errors::InvalidArgument("Min box row is greater than height.")); + OP_REQUIRES(context, max_box_row >= 0, + errors::InvalidArgument("Max box row is less than 0.")); + OP_REQUIRES( + context, min_box_col <= width, + errors::InvalidArgument("Min box col is greater than width.")); + OP_REQUIRES(context, max_box_col >= 0, + errors::InvalidArgument("Max box col is less than 0.")); // Draw top line. 
if (min_box_row >= 0) { From 116ed85a1a51408e6c2ed4324a4116f7c816b5c6 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 21 Apr 2021 16:15:46 -0700 Subject: [PATCH 083/256] Validate inputs to `QuantizedMul` PiperOrigin-RevId: 369756982 Change-Id: I00d960cc3b9316fd7a86bd37a44e341c96e17624 --- tensorflow/core/kernels/quantized_mul_op.cc | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/tensorflow/core/kernels/quantized_mul_op.cc b/tensorflow/core/kernels/quantized_mul_op.cc index 4e191f162662bb..b5353822396230 100644 --- a/tensorflow/core/kernels/quantized_mul_op.cc +++ b/tensorflow/core/kernels/quantized_mul_op.cc @@ -284,10 +284,22 @@ class QuantizedMulOp : public OpKernel { void Compute(OpKernelContext* context) override { const Tensor& x = context->input(0); const Tensor& y = context->input(1); - const float min_x = context->input(2).flat()(0); - const float max_x = context->input(3).flat()(0); - const float min_y = context->input(4).flat()(0); - const float max_y = context->input(5).flat()(0); + auto& min_x_tensor = context->input(2); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_x_tensor.shape()), + errors::InvalidArgument("min_x must be a scalar")); + const float min_x = min_x_tensor.flat()(0); + auto& max_x_tensor = context->input(3); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_x_tensor.shape()), + errors::InvalidArgument("max_x must be a scalar")); + const float max_x = max_x_tensor.flat()(0); + auto& min_y_tensor = context->input(4); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_y_tensor.shape()), + errors::InvalidArgument("min_y must be a scalar")); + const float min_y = min_y_tensor.flat()(0); + auto& max_y_tensor = context->input(5); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_y_tensor.shape()), + errors::InvalidArgument("max_y must be a scalar")); + const float max_y = max_y_tensor.flat()(0); BCast bcast(BCast::FromShape(x.shape()), BCast::FromShape(y.shape())); if (!bcast.IsValid()) { From e52a03f16d5aeebdc7f158e6a7a75d68246a9d2a Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Wed, 21 Apr 2021 16:04:48 -0700 Subject: [PATCH 084/256] Fix overflow CHECK issue with `tf.raw_ops.AddManySparseToTensorsMap`. PiperOrigin-RevId: 369755048 Change-Id: Ia1663e49ef8387d84baa2c15dccf3506adffde84 --- tensorflow/core/kernels/sparse_concat_op.cc | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/tensorflow/core/kernels/sparse_concat_op.cc b/tensorflow/core/kernels/sparse_concat_op.cc index 3b2a0cb0f34ed3..d49f92ea556eb2 100644 --- a/tensorflow/core/kernels/sparse_concat_op.cc +++ b/tensorflow/core/kernels/sparse_concat_op.cc @@ -27,6 +27,7 @@ limitations under the License. 
#include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/gtl/inlined_vector.h" +#include "tensorflow/core/util/overflow.h" #include "tensorflow/core/util/sparse/sparse_tensor.h" namespace tensorflow { @@ -66,13 +67,32 @@ class SparseConcatOp : public OpKernel { OP_REQUIRES(context, shapes.size() == N, errors::InvalidArgument("Expected ", N, " input shapes, got ", shapes.size())); + bool overflow_ocurred = false; for (int i = 0; i < N; i++) { + int64 new_num_elements = 1; OP_REQUIRES(context, TensorShapeUtils::IsVector(shapes[i].shape()), errors::InvalidArgument( "Input shapes should be a vector but received shape ", shapes[i].shape().DebugString(), " at position ", i)); + auto input_shape_vector = shapes[i].vec(); + for (int j = 0; j < input_shape_vector.size(); j++) { + new_num_elements = + MultiplyWithoutOverflow(new_num_elements, input_shape_vector(j)); + if (new_num_elements < 0) { + overflow_ocurred = true; + break; + } + } + + if (overflow_ocurred) { + break; + } } + OP_REQUIRES( + context, !overflow_ocurred, + errors::Internal("Encountered overflow from large input shape.")); + const TensorShape input_shape(shapes[0].vec()); const int input_rank = input_shape.dims(); const int concat_dim = (concat_dim_attr_ < 0) From 2261a3ec3c70bd774789370b7d40f84d9e95e008 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 21 Apr 2021 17:00:39 -0700 Subject: [PATCH 085/256] Cherrypick:Validate min and max arguments to . --- .../core/kernels/quantized_resize_bilinear_op.cc | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tensorflow/core/kernels/quantized_resize_bilinear_op.cc b/tensorflow/core/kernels/quantized_resize_bilinear_op.cc index 4da56cde5478d5..cad87d5772313c 100644 --- a/tensorflow/core/kernels/quantized_resize_bilinear_op.cc +++ b/tensorflow/core/kernels/quantized_resize_bilinear_op.cc @@ -701,8 +701,14 @@ class QuantizedResizeBilinearOp : public OpKernel { void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); - const float in_min = context->input(2).flat()(0); - const float in_max = context->input(3).flat()(0); + const auto& in_min_tensor = context->input(2); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(in_min_tensor.shape()), + errors::InvalidArgument("min must be a scalar")); + const float in_min = in_min_tensor.flat()(0); + const auto& in_max_tensor = context->input(3); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(in_max_tensor.shape()), + errors::InvalidArgument("max must be a scalar")); + const float in_max = in_max_tensor.flat()(0); ImageResizerState st(align_corners_, false); st.ValidateAndCreateOutput(context, input); From a451bde6ad90d3ac8593d6d3279b4df51308593d Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 21 Apr 2021 18:11:15 -0700 Subject: [PATCH 086/256] Validate arguments to `QuantizedReshape`. Ensure that validations from `Reshape` also terminate `QuantizedReshape` on failure. PiperOrigin-RevId: 369775421 Change-Id: If8c5342267aceea65b7cb83a4b183304886f1ce8 --- .../core/kernels/quantized_reshape_op.cc | 25 +++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/tensorflow/core/kernels/quantized_reshape_op.cc b/tensorflow/core/kernels/quantized_reshape_op.cc index bd76c94edeea7a..682f4aaa1f79e7 100644 --- a/tensorflow/core/kernels/quantized_reshape_op.cc +++ b/tensorflow/core/kernels/quantized_reshape_op.cc @@ -17,6 +17,7 @@ limitations under the License. 
#include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" +#include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/reshape_op.h" @@ -30,9 +31,29 @@ class QuantizedReshapeOp : public ReshapeOp { void Compute(OpKernelContext* ctx) override { // This call processes inputs 1 and 2 to write output 0. ReshapeOp::Compute(ctx); + if (!ctx->status().ok()) { + return; + } + + const auto& input_min_float_tensor = ctx->input(2); + const auto& input_min_float_shape = input_min_float_tensor.shape(); + OP_REQUIRES(ctx, + TensorShapeUtils::IsScalar(input_min_float_shape) || + (TensorShapeUtils::IsVector(input_min_float_shape) && + (input_min_float_shape.dim_size(0) == 1)), + errors::InvalidArgument( + "input_min must be a scalar or a vector of 1 element")); + const float input_min_float = input_min_float_tensor.flat()(0); + const auto& input_max_float_tensor = ctx->input(3); + const auto& input_max_float_shape = input_max_float_tensor.shape(); + OP_REQUIRES(ctx, + TensorShapeUtils::IsScalar(input_max_float_shape) || + (TensorShapeUtils::IsVector(input_max_float_shape) && + (input_max_float_shape.dim_size(0) == 1)), + errors::InvalidArgument( + "input_max must be a scalar or a vector of 1 element")); + const float input_max_float = input_max_float_tensor.flat()(0); - const float input_min_float = ctx->input(2).flat()(0); - const float input_max_float = ctx->input(3).flat()(0); Tensor* output_min = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(1, TensorShape({}), &output_min)); output_min->flat()(0) = input_min_float; From 0bc653768f0e0dfe613cab5b7bc6447129d54680 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 21 Apr 2021 17:50:10 -0700 Subject: [PATCH 087/256] Fix issues in Conv2DBackpropFilter. PiperOrigin-RevId: 369772454 Change-Id: I49b465f2ae2ce91def61b56cea8000197d5177d8 --- tensorflow/core/kernels/conv_grad_filter_ops.cc | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tensorflow/core/kernels/conv_grad_filter_ops.cc b/tensorflow/core/kernels/conv_grad_filter_ops.cc index b16d3c7270fde0..d37ac5af59470a 100644 --- a/tensorflow/core/kernels/conv_grad_filter_ops.cc +++ b/tensorflow/core/kernels/conv_grad_filter_ops.cc @@ -496,6 +496,14 @@ class Conv2DCustomBackpropFilterOp : public OpKernel { const int filter_total_size = dims.spatial_dims[0].filter_size * dims.spatial_dims[1].filter_size * dims.in_depth; + OP_REQUIRES( + context, + filter_total_size * dims.out_depth == filter_backprop->NumElements(), + errors::InvalidArgument( + "filter_size does not have enough elements, requested ", + filter_total_size * dims.out_depth, ", got ", + filter_backprop->NumElements())); + // The output image size is the spatial size of the output. const int output_image_size = dims.spatial_dims[0].output_size * dims.spatial_dims[1].output_size; @@ -519,6 +527,11 @@ class Conv2DCustomBackpropFilterOp : public OpKernel { const size_t work_unit_size = size_A + size_B + size_C; + OP_REQUIRES( + context, work_unit_size != 0, + errors::InvalidArgument( + "Work size for convolution would be 0, which is not acceptable")); + const size_t shard_size = (target_working_set_size + work_unit_size - 1) / work_unit_size; From 63ed24119c6a1d1f7806e854b428ebe658ce92a7 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 22 Apr 2021 13:29:54 -0700 Subject: [PATCH 088/256] Enhance validation of ngram op and handle case of 0 tokens. 
PiperOrigin-RevId: 369940178 Change-Id: Ia82f42c09d14efe76e7dc013505b832a42282f0b --- tensorflow/core/kernels/string_ngrams_op.cc | 52 +++++++++++++++---- .../core/kernels/string_ngrams_op_test.cc | 34 ++++++++++++ 2 files changed, 75 insertions(+), 11 deletions(-) diff --git a/tensorflow/core/kernels/string_ngrams_op.cc b/tensorflow/core/kernels/string_ngrams_op.cc index 8aed2b3831a2f4..7008a1d766af25 100644 --- a/tensorflow/core/kernels/string_ngrams_op.cc +++ b/tensorflow/core/kernels/string_ngrams_op.cc @@ -61,16 +61,28 @@ class StringNGramsOp : public tensorflow::OpKernel { OP_REQUIRES_OK(context, context->input("data_splits", &splits)); const auto& splits_vec = splits->flat(); - // Validate that the splits are valid indices into data + // Validate that the splits are valid indices into data, only if there are + // splits specified. const int input_data_size = data->flat().size(); const int splits_vec_size = splits_vec.size(); - for (int i = 0; i < splits_vec_size; ++i) { - bool valid_splits = splits_vec(i) >= 0; - valid_splits = valid_splits && (splits_vec(i) <= input_data_size); - OP_REQUIRES( - context, valid_splits, - errors::InvalidArgument("Invalid split value ", splits_vec(i), - ", must be in [0,", input_data_size, "]")); + if (splits_vec_size > 0) { + int prev_split = splits_vec(0); + OP_REQUIRES(context, prev_split == 0, + errors::InvalidArgument("First split value must be 0, got ", + prev_split)); + for (int i = 1; i < splits_vec_size; ++i) { + bool valid_splits = splits_vec(i) >= prev_split; + valid_splits = valid_splits && (splits_vec(i) <= input_data_size); + OP_REQUIRES(context, valid_splits, + errors::InvalidArgument( + "Invalid split value ", splits_vec(i), ", must be in [", + prev_split, ", ", input_data_size, "]")); + prev_split = splits_vec(i); + } + OP_REQUIRES(context, prev_split == input_data_size, + errors::InvalidArgument( + "Last split value must be data size. Expected ", + input_data_size, ", got ", prev_split)); } int num_batch_items = splits_vec.size() - 1; @@ -174,13 +186,31 @@ class StringNGramsOp : public tensorflow::OpKernel { ngram->append(left_pad_); ngram->append(separator_); } + // Only output first num_tokens - 1 pairs of data and separator for (int n = 0; n < num_tokens - 1; ++n) { ngram->append(data[data_start_index + n]); ngram->append(separator_); } - ngram->append(data[data_start_index + num_tokens - 1]); - for (int n = 0; n < right_padding; ++n) { - ngram->append(separator_); + // Handle case when there are no tokens or no right padding as these can + // result in consecutive separators. + if (num_tokens > 0) { + // If we have tokens, then output last and then pair each separator with + // the right padding that follows, to ensure ngram ends either with the + // token or with the right pad. + ngram->append(data[data_start_index + num_tokens - 1]); + for (int n = 0; n < right_padding; ++n) { + ngram->append(separator_); + ngram->append(right_pad_); + } + } else { + // If we don't have tokens, then the last item inserted into the ngram + // has been the separator from the left padding loop above. Hence, + // output right pad and separator and make sure to finish with a + // padding, not a separator. 
+ for (int n = 0; n < right_padding - 1; ++n) { + ngram->append(right_pad_); + ngram->append(separator_); + } ngram->append(right_pad_); } diff --git a/tensorflow/core/kernels/string_ngrams_op_test.cc b/tensorflow/core/kernels/string_ngrams_op_test.cc index b89de9ad16dab8..0d52283bd8fb9d 100644 --- a/tensorflow/core/kernels/string_ngrams_op_test.cc +++ b/tensorflow/core/kernels/string_ngrams_op_test.cc @@ -542,6 +542,40 @@ TEST_F(NgramKernelTest, TestEmptyInput) { assert_int64_equal(expected_splits, *GetOutput(1)); } +TEST_F(NgramKernelTest, TestNoTokens) { + MakeOp("|", {3}, "L", "R", -1, false); + // Batch items are: + // 0: + // 1: "a" + AddInputFromArray(TensorShape({1}), {"a"}); + AddInputFromArray(TensorShape({3}), {0, 0, 1}); + TF_ASSERT_OK(RunOpKernel()); + + std::vector expected_values( + {"L|L|R", "L|R|R", // no input in first split + "L|L|a", "L|a|R", "a|R|R"}); // second split + std::vector expected_splits({0, 2, 5}); + + assert_string_equal(expected_values, *GetOutput(0)); + assert_int64_equal(expected_splits, *GetOutput(1)); +} + +TEST_F(NgramKernelTest, TestNoTokensNoPad) { + MakeOp("|", {3}, "", "", 0, false); + // Batch items are: + // 0: + // 1: "a" + AddInputFromArray(TensorShape({1}), {"a"}); + AddInputFromArray(TensorShape({3}), {0, 0, 1}); + TF_ASSERT_OK(RunOpKernel()); + + std::vector expected_values({}); + std::vector expected_splits({0, 0, 0}); + + assert_string_equal(expected_values, *GetOutput(0)); + assert_int64_equal(expected_splits, *GetOutput(1)); +} + TEST_F(NgramKernelTest, ShapeFn) { ShapeInferenceTestOp op("StringNGrams"); INFER_OK(op, "?;?", "[?];[?]"); From 1aec407cf52fb61ddead3aa855feeb4b1705f69b Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Thu, 22 Apr 2021 15:11:05 -0700 Subject: [PATCH 089/256] Fix `tf.raw_ops.CTCGreedyDecoder` CHECK failure. PiperOrigin-RevId: 369960465 Change-Id: If0b8b3264d5a47a24ac0970ed7b81ce6b4921fae --- tensorflow/core/kernels/ctc_decoder_ops.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/core/kernels/ctc_decoder_ops.cc b/tensorflow/core/kernels/ctc_decoder_ops.cc index d62aef2d03b988..22681f97437f0c 100644 --- a/tensorflow/core/kernels/ctc_decoder_ops.cc +++ b/tensorflow/core/kernels/ctc_decoder_ops.cc @@ -232,6 +232,8 @@ class CTCGreedyDecoderOp : public OpKernel { int prev_indices = -1; for (int t = 0; t < seq_len_t(b); ++t) { int max_class_indices; + OP_REQUIRES(ctx, input_list_t[t].dimension(1) > 0, + errors::InvalidArgument("Invalid input dimensions.")); log_prob_t(b, 0) += -RowMax(input_list_t[t], b, &max_class_indices); if (max_class_indices != blank_index && From fe0e031acfb3330168b63d7b48b9ac56ef878644 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 23 Apr 2021 10:41:12 -0700 Subject: [PATCH 090/256] Fix crash in `SparseTensorToCSRSparseMatrixCPUFunctor` PiperOrigin-RevId: 370110290 Change-Id: I4451e92661a55c2180f80d38b67a9b50bf5edec5 --- tensorflow/core/kernels/sparse/kernels.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tensorflow/core/kernels/sparse/kernels.cc b/tensorflow/core/kernels/sparse/kernels.cc index 0eea9f1feed5c3..dff9aeb83ccfec 100644 --- a/tensorflow/core/kernels/sparse/kernels.cc +++ b/tensorflow/core/kernels/sparse/kernels.cc @@ -22,6 +22,7 @@ limitations under the License. 
#include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" +#include "tensorflow/core/platform/errors.h" namespace tensorflow { namespace functor { @@ -63,6 +64,11 @@ Status SparseTensorToCSRSparseMatrixCPUFunctor::operator()( for (int64 i = 0; i < total_nnz; ++i) { // For now, the rows pointers store the corresponding row counts. + int64 ix = indices(i, 0) + 1; + if (ix >= csr_row_ptr.size()) { + return errors::InvalidArgument("Got an index ", ix, + " that is outside of csr_row_ptr"); + } csr_row_ptr(indices(i, 0) + 1) += 1; csr_col_ind(i) = indices(i, 1); } From 82c8fa744bcf441f0f8a8aef062d454e4fa69f42 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 23 Apr 2021 11:11:39 -0700 Subject: [PATCH 091/256] Prevent division by 0 in `QuantizedBiasAdd`. PiperOrigin-RevId: 370117454 Change-Id: I3804e2ac8dcc6d3afcc92e27853e2325a017ca4d --- tensorflow/core/kernels/quantized_bias_add_op.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/core/kernels/quantized_bias_add_op.cc b/tensorflow/core/kernels/quantized_bias_add_op.cc index 5457d290c2559c..db0e21a498011d 100644 --- a/tensorflow/core/kernels/quantized_bias_add_op.cc +++ b/tensorflow/core/kernels/quantized_bias_add_op.cc @@ -56,6 +56,8 @@ class QuantizedBiasAddOp : public OpKernel { "Must provide as many biases as the last dimension " "of the input tensor: ", bias.shape().DebugString(), " vs. ", input.shape().DebugString())); + OP_REQUIRES(context, bias.NumElements() > 0, + errors::InvalidArgument("Must provide at least 1 bias")); Tensor* output = nullptr; OP_REQUIRES_OK(context, From 69521621908f6623bf23d1dcca013e93848cba08 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 23 Apr 2021 11:40:06 -0700 Subject: [PATCH 092/256] Add missing validation in `QuantizedBatchNormWithGlobalNormalization` PiperOrigin-RevId: 370123451 Change-Id: Id234d6dab1ec21230bb8e503dba30f899af87f33 --- .../core/kernels/quantized_batch_norm_op.cc | 77 ++++++++++++++++--- 1 file changed, 67 insertions(+), 10 deletions(-) diff --git a/tensorflow/core/kernels/quantized_batch_norm_op.cc b/tensorflow/core/kernels/quantized_batch_norm_op.cc index b03da7ad17fab4..6dfe07f97a4007 100644 --- a/tensorflow/core/kernels/quantized_batch_norm_op.cc +++ b/tensorflow/core/kernels/quantized_batch_norm_op.cc @@ -173,20 +173,50 @@ class QuantizedBatchNormOp : public OpKernel { void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); - const float input_min = context->input(1).flat()(0); - const float input_max = context->input(2).flat()(0); + const auto& input_min_tensor = context->input(1); + OP_REQUIRES(context, input_min_tensor.NumElements() == 1, + errors::InvalidArgument("input_min must have 1 element")); + const float input_min = input_min_tensor.flat()(0); + const auto& input_max_tensor = context->input(2); + OP_REQUIRES(context, input_max_tensor.NumElements() == 1, + errors::InvalidArgument("input_max must have 1 element")); + const float input_max = input_max_tensor.flat()(0); const Tensor& mean = context->input(3); - const float mean_min = context->input(4).flat()(0); - const float mean_max = context->input(5).flat()(0); + const auto& mean_min_tensor = context->input(4); + OP_REQUIRES(context, mean_min_tensor.NumElements() == 1, + errors::InvalidArgument("mean_min must have 1 element")); + const float mean_min = mean_min_tensor.flat()(0); + const auto& mean_max_tensor = context->input(5); + OP_REQUIRES(context, 
mean_max_tensor.NumElements() == 1, + errors::InvalidArgument("mean_max must have 1 element")); + const float mean_max = mean_max_tensor.flat()(0); const Tensor& var = context->input(6); - const float var_min = context->input(7).flat()(0); - const float var_max = context->input(8).flat()(0); + const auto& var_min_tensor = context->input(7); + OP_REQUIRES(context, var_min_tensor.NumElements() == 1, + errors::InvalidArgument("var_min must have 1 element")); + const float var_min = var_min_tensor.flat()(0); + const auto& var_max_tensor = context->input(8); + OP_REQUIRES(context, var_max_tensor.NumElements() == 1, + errors::InvalidArgument("var_max must have 1 element")); + const float var_max = var_max_tensor.flat()(0); const Tensor& beta = context->input(9); - const float beta_min = context->input(10).flat()(0); - const float beta_max = context->input(11).flat()(0); + const auto& beta_min_tensor = context->input(10); + OP_REQUIRES(context, beta_min_tensor.NumElements() == 1, + errors::InvalidArgument("beta_min must have 1 element")); + const float beta_min = beta_min_tensor.flat()(0); + const auto& beta_max_tensor = context->input(11); + OP_REQUIRES(context, beta_max_tensor.NumElements() == 1, + errors::InvalidArgument("beta_max must have 1 element")); + const float beta_max = beta_max_tensor.flat()(0); const Tensor& gamma = context->input(12); - const float gamma_min = context->input(13).flat()(0); - const float gamma_max = context->input(14).flat()(0); + const auto& gamma_min_tensor = context->input(13); + OP_REQUIRES(context, gamma_min_tensor.NumElements() == 1, + errors::InvalidArgument("gamma_min must have 1 element")); + const float gamma_min = gamma_min_tensor.flat()(0); + const auto& gamma_max_tensor = context->input(14); + OP_REQUIRES(context, gamma_max_tensor.NumElements() == 1, + errors::InvalidArgument("gamma_max must have 1 element")); + const float gamma_max = gamma_max_tensor.flat()(0); OP_REQUIRES(context, input.dims() == 4, errors::InvalidArgument("input must be 4-dimensional", @@ -203,6 +233,33 @@ class QuantizedBatchNormOp : public OpKernel { OP_REQUIRES(context, gamma.dims() == 1, errors::InvalidArgument("gamma must be 1-dimensional", gamma.shape().DebugString())); + OP_REQUIRES(context, mean.NumElements() > 1, + errors::InvalidArgument("Must have at least a mean value", + gamma.shape().DebugString())); + OP_REQUIRES(context, mean.NumElements() > 1, + errors::InvalidArgument("Must have at least a mean value")); + const auto last_dim = input.shape().dims() - 1; + OP_REQUIRES(context, + mean.shape().dim_size(0) == input.shape().dim_size(last_dim), + errors::InvalidArgument("Must provide as many means as the " + "last dimension of the input tensor: ", + mean.shape().DebugString(), " vs. ", + input.shape().DebugString())); + OP_REQUIRES( + context, mean.shape().dim_size(0) == var.shape().dim_size(0), + errors::InvalidArgument( + "Mean and variance tensors must have the same shape: ", + mean.shape().DebugString(), " vs. ", var.shape().DebugString())); + OP_REQUIRES( + context, mean.shape().dim_size(0) == beta.shape().dim_size(0), + errors::InvalidArgument( + "Mean and beta tensors must have the same shape: ", + mean.shape().DebugString(), " vs. ", beta.shape().DebugString())); + OP_REQUIRES( + context, mean.shape().dim_size(0) == gamma.shape().dim_size(0), + errors::InvalidArgument( + "Mean and gamma tensors must have the same shape: ", + mean.shape().DebugString(), " vs. 
", gamma.shape().DebugString())); Tensor* output = nullptr; OP_REQUIRES_OK(context, From 1e3aaf668d3d29d742fa0dc330bc0f017ba3d990 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 23 Apr 2021 12:00:12 -0700 Subject: [PATCH 093/256] Validate work in `QuantizedAdd`, ensure at least one element. PiperOrigin-RevId: 370127996 Change-Id: I57c6f3e01afdeada84737820a131590137463855 --- tensorflow/core/kernels/quantized_add_op.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/core/kernels/quantized_add_op.cc b/tensorflow/core/kernels/quantized_add_op.cc index 55c69de7d3ea6c..b186f00f15c061 100644 --- a/tensorflow/core/kernels/quantized_add_op.cc +++ b/tensorflow/core/kernels/quantized_add_op.cc @@ -538,6 +538,8 @@ class QuantizedAddOp : public OpKernel { tensor_min = min_x; tensor_max = max_x; } + OP_REQUIRES(context, vector_num_elements > 0, + errors::InvalidArgument("Must have some elements to add")); VectorTensorAddition( vector_data, vector_min, vector_max, vector_num_elements, tensor_data, tensor_min, tensor_max, tensor_num_elements, min_z_value, max_z_value, From 1850de8166b8cc80a14e6aa7a0a0a2640f504c70 Mon Sep 17 00:00:00 2001 From: Laura Pak Date: Thu, 29 Apr 2021 08:38:16 -0700 Subject: [PATCH 094/256] Fix divide by zero error in `fractional_pool_common.cc`. PiperOrigin-RevId: 371126221 Change-Id: Iea4b2f363aaeb116ab460e3bc592c687484af344 --- tensorflow/core/kernels/fractional_avg_pool_op.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/core/kernels/fractional_avg_pool_op.cc b/tensorflow/core/kernels/fractional_avg_pool_op.cc index dfc2382624e3fa..b8a5083e5340f1 100644 --- a/tensorflow/core/kernels/fractional_avg_pool_op.cc +++ b/tensorflow/core/kernels/fractional_avg_pool_op.cc @@ -80,6 +80,10 @@ class FractionalAvgPoolOp : public OpKernel { std::vector output_size(tensor_in_and_out_dims); for (int i = 0; i < tensor_in_and_out_dims; ++i) { input_size[i] = tensor_in.dim_size(i); + OP_REQUIRES( + context, pooling_ratio_[i] <= input_size[i], + errors::InvalidArgument( + "Pooling ratio cannot be bigger than input tensor dim size.")); } // Output size. for (int i = 0; i < tensor_in_and_out_dims; ++i) { From 13aeb459abd15fc4efaeebe67d09d53c542146af Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sun, 23 May 2021 16:21:09 -0700 Subject: [PATCH 095/256] Apply suggestions from code review --- tensorflow/core/kernels/conv_grad_ops_3d.cc | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tensorflow/core/kernels/conv_grad_ops_3d.cc b/tensorflow/core/kernels/conv_grad_ops_3d.cc index ffe49f91c23bd2..1ef931a97d93da 100644 --- a/tensorflow/core/kernels/conv_grad_ops_3d.cc +++ b/tensorflow/core/kernels/conv_grad_ops_3d.cc @@ -745,8 +745,6 @@ class Conv3DBackpropFilterOp : public OpKernel { filter_shape = context->input(1).shape(); } -<<<<<<< HEAD -======= OP_REQUIRES(context, input_shape.dims() == 5, errors::InvalidArgument("input tensor must have 5 dimensions")); OP_REQUIRES( @@ -769,7 +767,6 @@ class Conv3DBackpropFilterOp : public OpKernel { " for out_backprop and ", filter_shape.dim_size(4), " for filter_sizes")); ->>>>>>> 311403edbc9 (Eliminate a division by 0 in 3D convolutions.) 
ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( @@ -882,8 +879,6 @@ class Conv3DCustomBackpropFilterOp : public OpKernel { filter_shape = context->input(1).shape(); } -<<<<<<< HEAD -======= OP_REQUIRES(context, input_shape.dims() == 5, errors::InvalidArgument("input tensor must have 5 dimensions")); OP_REQUIRES( @@ -906,7 +901,6 @@ class Conv3DCustomBackpropFilterOp : public OpKernel { " for out_backprop and ", filter_shape.dim_size(4), " for filter_sizes")); ->>>>>>> 311403edbc9 (Eliminate a division by 0 in 3D convolutions.) ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( From 02fd9d38c4431d2f911377e83d34fac6d6a11382 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sat, 24 Apr 2021 16:47:25 -0700 Subject: [PATCH 096/256] Validate (and ensure validation sticks) inputs for `MatrixTriangularSolve`. PiperOrigin-RevId: 370282444 Change-Id: Iaed61a0b0727cc42c830658b72eb69f785f48dc5 --- .../kernels/matrix_triangular_solve_op_impl.h | 20 +++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/tensorflow/core/kernels/matrix_triangular_solve_op_impl.h b/tensorflow/core/kernels/matrix_triangular_solve_op_impl.h index fb7e6f0f5ffe1d..d4ffcf18a52290 100644 --- a/tensorflow/core/kernels/matrix_triangular_solve_op_impl.h +++ b/tensorflow/core/kernels/matrix_triangular_solve_op_impl.h @@ -162,6 +162,9 @@ class BaseMatrixTriangularSolveOp : public OpKernel { const Tensor& in1 = ctx->input(1); ValidateInputTensors(ctx, in0, in1); + if (!ctx->status().ok()) { + return; + } MatMulBCast bcast(in0.shape().dim_sizes(), in1.shape().dim_sizes()); OP_REQUIRES( @@ -230,13 +233,22 @@ class MatrixTriangularSolveOp private: void ValidateInputTensors(OpKernelContext* ctx, const Tensor& in0, const Tensor& in1) override { + const auto in0_num_dims = in0.dims(); OP_REQUIRES( - ctx, in0.dims() >= 2, - errors::InvalidArgument("In[0] ndims must be >= 2: ", in0.dims())); + ctx, in0_num_dims >= 2, + errors::InvalidArgument("In[0] ndims must be >= 2: ", in0_num_dims)); + const auto in1_num_dims = in1.dims(); OP_REQUIRES( - ctx, in1.dims() >= 2, - errors::InvalidArgument("In[0] ndims must be >= 2: ", in1.dims())); + ctx, in1_num_dims >= 2, + errors::InvalidArgument("In[1] ndims must be >= 2: ", in1_num_dims)); + + const auto in0_last_dim = in0.dim_size(in0_num_dims - 1); + const auto in0_prev_dim = in0.dim_size(in0_num_dims - 2); + OP_REQUIRES(ctx, in0_last_dim == in0_prev_dim, + errors::InvalidArgument( + "In[0] matrices in the last dimensions must be square (", + in0_last_dim, " =/= ", in0_prev_dim, ")")); } }; From f5eaa14ef153cab1b76f6b162f33190886069a09 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Mon, 26 Apr 2021 16:40:49 -0700 Subject: [PATCH 097/256] Fix `tf.raw_ops.SparseAdd ` invalid memory access failure. PiperOrigin-RevId: 370568774 Change-Id: I5f73b31c865f2948a1c8dfb7ebd22b3cfb6405bf --- tensorflow/core/kernels/sparse_add_op.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/core/kernels/sparse_add_op.cc b/tensorflow/core/kernels/sparse_add_op.cc index 0cf40a709a39a7..346206365af8d5 100644 --- a/tensorflow/core/kernels/sparse_add_op.cc +++ b/tensorflow/core/kernels/sparse_add_op.cc @@ -14,6 +14,7 @@ limitations under the License. 
==============================================================================*/ #include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_util.h" @@ -101,6 +102,10 @@ class SparseAddOp : public OpKernel { std::vector out_values; const int num_dims = a_shape->dim_size(0); + OP_REQUIRES(ctx, num_dims > 0, + errors::InvalidArgument("Invalid input_a shape. Received: ", + a_shape->DebugString())); + // The input and output sparse tensors are assumed to be ordered along // increasing dimension number. int64 i = 0, j = 0; From 2e5b9d712b8386caf6ccc2b979178522c1162ec7 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Mon, 26 Apr 2021 17:32:41 -0700 Subject: [PATCH 098/256] Fix `tf.raw_ops.QuantizeAndDequantizeV3` array index failure. PiperOrigin-RevId: 370577691 Change-Id: Ifeae64212f6bcd139435824fa2748d1329213c4c --- tensorflow/core/kernels/quantize_and_dequantize_op.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/core/kernels/quantize_and_dequantize_op.cc b/tensorflow/core/kernels/quantize_and_dequantize_op.cc index 8f71d09c0832e7..a66ba66a98a84f 100644 --- a/tensorflow/core/kernels/quantize_and_dequantize_op.cc +++ b/tensorflow/core/kernels/quantize_and_dequantize_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ +#include "tensorflow/core/framework/op_requires.h" #define EIGEN_USE_THREADS #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \ @@ -151,6 +152,10 @@ class QuantizeAndDequantizeV3Op : public OpKernel { void Compute(OpKernelContext* ctx) override { const Tensor& input = ctx->input(0); + OP_REQUIRES(ctx, axis_ < input.dims(), + errors::InvalidArgument( + "Axis requested is larger than input dimensions. Axis: ", + axis_, " Input Dimensions: ", input.dims())); const int depth = (axis_ == -1) ? 1 : input.dim_size(axis_); Tensor* output = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, input.shape(), &output)); From f92462f3914ed3bf66c23e6a61fe3db5627db6bc Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Tue, 27 Apr 2021 14:41:40 -0700 Subject: [PATCH 099/256] Fix overflow CHECK issue with `tf.raw_ops.UnsortedSegmentJoin`. PiperOrigin-RevId: 370766155 Change-Id: I33e7c6626224e1060a8a4ab51ad5d861c6d4c63e --- tensorflow/core/kernels/unsorted_segment_join_op.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/core/kernels/unsorted_segment_join_op.cc b/tensorflow/core/kernels/unsorted_segment_join_op.cc index 7464e165e46c8b..9acfe7fb1e4952 100644 --- a/tensorflow/core/kernels/unsorted_segment_join_op.cc +++ b/tensorflow/core/kernels/unsorted_segment_join_op.cc @@ -90,6 +90,8 @@ class UnsortedSegmentJoinOp : public OpKernel { const int32 segment_dims = segment_id_shape.dims(); const Tensor& num_segments_tensor = context->input(2); + OP_REQUIRES(context, num_segments_tensor.NumElements() != 0, + errors::InvalidArgument("Number of segments cannot be empty.")); auto num_segments = num_segments_tensor.scalar()(); OP_REQUIRES(context, segment_dims != 0, From b86345ef5092d56a0d7f255aefa80ae260ed4bd6 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Wed, 28 Apr 2021 11:24:45 -0700 Subject: [PATCH 100/256] Fix FPE issue with `tf.raw_ops.DenseCountSparseOutput`. 
PiperOrigin-RevId: 370946862 Change-Id: I3752584ad04aaecb327ff6793a9640ac56acfe7a --- tensorflow/core/kernels/count_ops.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/core/kernels/count_ops.cc b/tensorflow/core/kernels/count_ops.cc index eeb0b853c7aad2..40aa1fe458c1ee 100644 --- a/tensorflow/core/kernels/count_ops.cc +++ b/tensorflow/core/kernels/count_ops.cc @@ -122,6 +122,9 @@ class DenseCount : public OpKernel { int num_batch_elements = 1; for (int i = 0; i < num_batch_dimensions; ++i) { + OP_REQUIRES(context, data.shape().dim_size(i) != 0, + errors::InvalidArgument( + "Invalid input: Shapes dimension cannot be 0.")); num_batch_elements *= data.shape().dim_size(i); } int num_value_elements = data.shape().num_elements() / num_batch_elements; From 5ae65f75c50188468667f2e3a49b8ff2c6ab30ec Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Wed, 28 Apr 2021 11:30:28 -0700 Subject: [PATCH 101/256] Fix FPE issue with `tf.raw_ops.FusedBatchNorm`. PiperOrigin-RevId: 370948185 Change-Id: If0c8e0320062ed6363e94ff5fe38e6a301f69ac2 --- tensorflow/core/kernels/fused_batch_norm_op.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/core/kernels/fused_batch_norm_op.cc b/tensorflow/core/kernels/fused_batch_norm_op.cc index 00ac9be6dcd17f..59470c8ac6e027 100644 --- a/tensorflow/core/kernels/fused_batch_norm_op.cc +++ b/tensorflow/core/kernels/fused_batch_norm_op.cc @@ -293,6 +293,9 @@ struct FusedBatchNorm { const CPUDevice& d = context->eigen_device(); const int depth = x.dimension(3); + OP_REQUIRES( + context, depth != 0, + errors::Internal("The 4th element in the input shape cannot be 0.")); const int size = x.size(); const int rest_size = size / depth; Eigen::DSizes rest_by_depth(rest_size, depth); From 099197e2374f72cec89efc16316cbe675aaabdbf Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Wed, 28 Apr 2021 15:00:39 -0700 Subject: [PATCH 102/256] Fix FPE issue in external Eigen source code issue with `tf.raw_ops.SparseMatMul`. PiperOrigin-RevId: 370992919 Change-Id: Icfb276fef5fb40928b27c3e44608d2aca72c9fd7 --- tensorflow/core/kernels/sparse_matmul_op.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/core/kernels/sparse_matmul_op.cc b/tensorflow/core/kernels/sparse_matmul_op.cc index eb460147d71f39..1f66f8058a613a 100644 --- a/tensorflow/core/kernels/sparse_matmul_op.cc +++ b/tensorflow/core/kernels/sparse_matmul_op.cc @@ -1039,6 +1039,10 @@ class SparseMatMulOp : public OpKernel { if (transpose_b) { // TODO(agarwal): avoid transposing the matrix here and directly handle // transpose in CreateDenseSlices. + OP_REQUIRES(ctx, right->dim_size(0) != 0, + errors::InvalidArgument("b has an entry 0 in it's shape.")); + OP_REQUIRES(ctx, right->dim_size(1) != 0, + errors::InvalidArgument("b has an entry 0 in it's shape.")); right_tr.reset( new Tensor(right->dtype(), TensorShape({right->dim_size(1), right->dim_size(0)}))); From 903110714b2c7840f05023092b648ea5107e98d7 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Thu, 29 Apr 2021 12:24:18 -0700 Subject: [PATCH 103/256] Fix FPE issue with `tf.raw_ops.Reverse`. 
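The failure mode fixed here involves a tensor with an empty leading dimension reaching the kernel. A minimal, illustrative Python sketch (assuming the standard `tf.raw_ops.Reverse` signature) of an input that is now rejected up front:

    import tensorflow as tf

    x = tf.zeros([0, 2], dtype=tf.float32)   # empty leading dimension
    dims = tf.constant([True, False])        # reverse along axis 0
    # Previously this reached the kernel and died with a floating point
    # exception; it now fails with InvalidArgument.
    tf.raw_ops.Reverse(tensor=x, dims=dims)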
PiperOrigin-RevId: 371176973 Change-Id: Ic6d483bfc95313ec2299c2d1c956cfe96c96626c --- tensorflow/core/kernels/reverse_op.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tensorflow/core/kernels/reverse_op.cc b/tensorflow/core/kernels/reverse_op.cc index d551d1ee4bc66c..a7605be4660357 100644 --- a/tensorflow/core/kernels/reverse_op.cc +++ b/tensorflow/core/kernels/reverse_op.cc @@ -158,6 +158,12 @@ class ReverseOp : public OpKernel { void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); + // If input is provided, check to make sure the first dimension is valid. + if (input.dims() > 0) { + OP_REQUIRES( + context, input.dim_size(0) != 0, + errors::InvalidArgument("Invalid input first dimension. Found 0.")); + } const Tensor& dims = context->input(1); if (TensorShapeUtils::IsScalar(input.shape())) { From de904d9c3a075d86914b931d633e42081b7ca49e Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Wed, 28 Apr 2021 16:06:54 -0700 Subject: [PATCH 104/256] Fix OOB issue with `tf.raw_ops.SparseSparseMinimum`. PiperOrigin-RevId: 371005787 Change-Id: Ib686ccc077836e8b980b8b5a03936d36a8ecaf71 --- tensorflow/core/kernels/sparse_sparse_binary_op_shared.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/core/kernels/sparse_sparse_binary_op_shared.cc b/tensorflow/core/kernels/sparse_sparse_binary_op_shared.cc index 43dc9ae70cd627..9fe42e05d879ee 100644 --- a/tensorflow/core/kernels/sparse_sparse_binary_op_shared.cc +++ b/tensorflow/core/kernels/sparse_sparse_binary_op_shared.cc @@ -180,6 +180,11 @@ class SparseSparseBinaryOpShared : public OpKernel { " for dimension ", i)); } + OP_REQUIRES( + ctx, a_indices_t->dim_size(1) == b_indices_t->dim_size(1), + errors::InvalidArgument( + "Indices' dimensions do not match: got ", a_indices_t->dim_size(1), + " and ", b_indices_t->dim_size(1), " for the second dimension.")); const int num_dims = a_indices_t->dim_size(1); const auto a_indices_mat = a_indices_t->matrix(); const auto b_indices_mat = b_indices_t->matrix(); From 6ec87e02d582e555d403223d50a0281e31f8939e Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Thu, 29 Apr 2021 17:58:08 -0700 Subject: [PATCH 105/256] Fix heap-buffer-overflow issue with `tf.raw_ops.SparseSplit`. PiperOrigin-RevId: 371242872 Change-Id: I482bb3d12602c7c3cc9446f97fb9f584bb98e9a4 --- tensorflow/core/util/sparse/sparse_tensor.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/core/util/sparse/sparse_tensor.h b/tensorflow/core/util/sparse/sparse_tensor.h index bc4e2c88f1c99e..dac51aac08b7a3 100644 --- a/tensorflow/core/util/sparse/sparse_tensor.h +++ b/tensorflow/core/util/sparse/sparse_tensor.h @@ -527,6 +527,10 @@ inline Status SparseTensor::Split(const SparseTensor& input_tensor, for (int i = 0; i < input_tensor.indices().dim_size(0); ++i) { const int dim = input_tensor.indices().matrix()(i, split_dim); int slice_index = GetSliceIndex(dim, split_size, residual); + if (slice_index >= num_values.size()) { + return errors::InvalidArgument("Slice index ", slice_index, + " is larger than num_split."); + } num_values[slice_index]++; } From ce30ca3641cdb2d19e08be13dd86d952b767605d Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Fri, 30 Apr 2021 10:39:05 -0700 Subject: [PATCH 106/256] Fix the CHECK failure in tf.raw_ops.QuantizeAndDequantizeV2. 
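The CHECK fires when `axis` falls outside the valid range for the input rank, because the kernel immediately asks for `input.dim_size(axis)`. A rough sketch (illustrative only, using the current `tf.raw_ops.QuantizeAndDequantizeV2` argument names) of a call that the added bounds checks turn into a regular error:

    import tensorflow as tf

    # axis=2 on a rank-1 input: dim_size(2) does not exist.
    tf.raw_ops.QuantizeAndDequantizeV2(
        input=tf.constant([1.0, 2.0]),
        input_min=tf.constant([0.0]),
        input_max=tf.constant([1.0]),
        axis=2)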
--- tensorflow/core/kernels/quantize_and_dequantize_op.cc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tensorflow/core/kernels/quantize_and_dequantize_op.cc b/tensorflow/core/kernels/quantize_and_dequantize_op.cc index 8f71d09c0832e7..784bc3302e2482 100644 --- a/tensorflow/core/kernels/quantize_and_dequantize_op.cc +++ b/tensorflow/core/kernels/quantize_and_dequantize_op.cc @@ -71,6 +71,13 @@ class QuantizeAndDequantizeV2Op : public OpKernel { void Compute(OpKernelContext* ctx) override { const Tensor& input = ctx->input(0); + OP_REQUIRES( + ctx, axis_ >= -1, + errors::InvalidArgument("Axis must be at least -1. Found ", axis_)); + OP_REQUIRES( + ctx, (axis_ == -1 || axis_ < input.shape().dims()), + errors::InvalidArgument("Shape must be at least rank ", axis_ + 1, + " but is rank ", input.shape().dims())); const int depth = (axis_ == -1) ? 1 : input.dim_size(axis_); Tensor input_min_tensor; Tensor input_max_tensor; From 4f3a1bf4765dde9c35a6ae9797a9aa444752db7e Mon Sep 17 00:00:00 2001 From: Laura Pak Date: Mon, 3 May 2021 09:53:26 -0700 Subject: [PATCH 107/256] Fix heap buffer overflow in tf.raw_ops.UnicodeEncode. PiperOrigin-RevId: 371717714 Change-Id: If33443b28f158e58078f1268f6b92f2728d219e0 --- tensorflow/core/kernels/unicode_ops.cc | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/tensorflow/core/kernels/unicode_ops.cc b/tensorflow/core/kernels/unicode_ops.cc index d3a7ad7b2866f7..e6c8f4dfc42284 100644 --- a/tensorflow/core/kernels/unicode_ops.cc +++ b/tensorflow/core/kernels/unicode_ops.cc @@ -533,6 +533,17 @@ class UnicodeEncodeOp : public OpKernel { const Tensor& input_splits = context->input(1); const auto input_splits_flat = input_splits.flat(); + // Operation will treat first argument in input_splits as if it were zero + // regardless of its actual value since splits should begin with zero and + // end with the length of the input values vector. + OP_REQUIRES( + context, input_splits_flat(0) == 0, + errors::InvalidArgument("First value in input_splits must be zero.")); + OP_REQUIRES(context, + input_splits_flat(input_splits_flat.size() - 1) == + input_tensor_flat.size(), + errors::InvalidArgument("Last value in input_splits must be " + "equal to length of input_tensor.")); // Since we limit to a 2-D input (flat_values of rank 1 and a single splits // tensor), our output dimension will be 1 with it's size equal to the // number of splits (outer dimension or ragged tensor). @@ -548,6 +559,14 @@ class UnicodeEncodeOp : public OpKernel { for (int i = 1; i < input_splits_flat.size(); ++i) { icu::UnicodeString unicode_string; icu::UnicodeStringAppendable appendable_unicode_string(unicode_string); + OP_REQUIRES( + context, input_splits_flat(i - 1) <= input_splits_flat(i), + errors::InvalidArgument( + "Values in input_splits must be equal or in ascending order.")); + OP_REQUIRES( + context, input_splits_flat(i) <= input_tensor_flat.size(), + errors::InvalidArgument("Values in input_splits must be less than or " + "equal to input_tensor length.")); for (; idx < input_splits_flat(i); ++idx) { int32 code_point = input_tensor_flat(idx); // Check for invalid code point From 59bd94b8982f11ce51f1dacc2c8bcd586618ff59 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Thu, 29 Apr 2021 17:58:08 -0700 Subject: [PATCH 108/256] Fix heap-buffer-overflow issue with `tf.raw_ops.SparseSplit`. 
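The overflow happens in `SparseTensor::Split`: an index entry that lies outside the declared dense shape along `split_dim` yields a slice index past `num_split`, and the per-slice counters are then incremented out of bounds. A hedged Python sketch (argument names as in the current `tf.raw_ops.SparseSplit` signature) of the kind of indices/shape mismatch involved:

    import tensorflow as tf

    # The index 100 lies far outside the declared shape [1, 2] along the
    # split dimension, so the computed slice index exceeds num_split.
    tf.raw_ops.SparseSplit(
        split_dim=1,
        indices=tf.constant([[0, 100]], dtype=tf.int64),
        values=tf.constant([1.0]),
        shape=tf.constant([1, 2], dtype=tf.int64),
        num_split=2)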
PiperOrigin-RevId: 371242872 Change-Id: I482bb3d12602c7c3cc9446f97fb9f584bb98e9a4 --- tensorflow/core/util/sparse/sparse_tensor.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/core/util/sparse/sparse_tensor.h b/tensorflow/core/util/sparse/sparse_tensor.h index bc4e2c88f1c99e..dac51aac08b7a3 100644 --- a/tensorflow/core/util/sparse/sparse_tensor.h +++ b/tensorflow/core/util/sparse/sparse_tensor.h @@ -527,6 +527,10 @@ inline Status SparseTensor::Split(const SparseTensor& input_tensor, for (int i = 0; i < input_tensor.indices().dim_size(0); ++i) { const int dim = input_tensor.indices().matrix()(i, split_dim); int slice_index = GetSliceIndex(dim, split_size, residual); + if (slice_index >= num_values.size()) { + return errors::InvalidArgument("Slice index ", slice_index, + " is larger than num_split."); + } num_values[slice_index]++; } From e39f3841424749d224e0712b42bdcd6095fda926 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Tue, 4 May 2021 13:45:57 -0700 Subject: [PATCH 109/256] Fix heap-buffer-overflow issue with `tf.raw_ops.RaggedTensorToTensor`. PiperOrigin-RevId: 371986929 Change-Id: I79ab962a22c5867f36f7f45b780a1ac881b1dbdd --- tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc b/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc index 599f42fa3fe65f..d452c4d19c3711 100644 --- a/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc +++ b/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc @@ -313,6 +313,12 @@ class RaggedTensorToTensorBaseOp : public OpKernel { output_index_multiplier, output_size, result); return tensorflow::Status::OK(); case RowPartitionType::ROW_SPLITS: + if (row_partition_tensor.size() - 1 > parent_output_index.size()) { + return errors::InvalidArgument( + "Row partition size is greater than output size: ", + row_partition_tensor.size() - 1, " > ", + parent_output_index.size()); + } CalculateOutputIndexRowSplit( context, row_partition_tensor, parent_output_index, output_index_multiplier, output_size, result); From fd41d6b24295f05c00eb7affb16a3e53b237d0e8 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 3 May 2021 20:35:51 -0700 Subject: [PATCH 110/256] Fix another Eigen missing validation PiperOrigin-RevId: 371833155 Change-Id: I5a23d451132cb1624ad916ef46ea01d0e88ec82c --- tensorflow/core/kernels/banded_triangular_solve_op.cc | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tensorflow/core/kernels/banded_triangular_solve_op.cc b/tensorflow/core/kernels/banded_triangular_solve_op.cc index 666282e52c8031..cd1b908043abe1 100644 --- a/tensorflow/core/kernels/banded_triangular_solve_op.cc +++ b/tensorflow/core/kernels/banded_triangular_solve_op.cc @@ -275,6 +275,14 @@ class BandedTriangularSolveOpCpu : public OpKernel { OP_REQUIRES( ctx, in1.dims() >= 2, errors::InvalidArgument("In[1] ndims must be >= 2: ", in1.dims())); + + OP_REQUIRES(ctx, in0.NumElements() > 0, + errors::InvalidArgument("In[0] must not be an empty tensor: ", + in0.DebugString())); + + OP_REQUIRES(ctx, in1.NumElements() > 0, + errors::InvalidArgument("In[1] must not be an empty tensor: ", + in1.DebugString())); } bool lower_; bool adjoint_; From ecb3ae3c09f3b17620f574acf0e179a6d2ef4579 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 4 May 2021 15:46:30 -0700 Subject: [PATCH 111/256] Fix a check fail PiperOrigin-RevId: 372011072 Change-Id: I1062cfaed0aa16884e9a16312483794d188db76f --- tensorflow/core/kernels/load_and_remap_matrix_op.cc | 
5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/core/kernels/load_and_remap_matrix_op.cc b/tensorflow/core/kernels/load_and_remap_matrix_op.cc index cb0245a9b61261..5ec28c70358132 100644 --- a/tensorflow/core/kernels/load_and_remap_matrix_op.cc +++ b/tensorflow/core/kernels/load_and_remap_matrix_op.cc @@ -123,6 +123,11 @@ class LoadAndRemapMatrixOp : public OpKernel { // Processes the checkpoint source and the provided Tensor name. const Tensor* ckpt_path_t; OP_REQUIRES_OK(context, context->input("ckpt_path", &ckpt_path_t)); + OP_REQUIRES( + context, ckpt_path_t->NumElements() == 1, + errors::InvalidArgument("The `ckpt_path` tensor must have exactly one " + "element, got tensor of shape ", + ckpt_path_t->shape().DebugString())); const string& ckpt_path = ckpt_path_t->scalar()(); const Tensor* old_tensor_name_t; OP_REQUIRES_OK(context, From d46389df3e8a7fb996789eca6d677c14b18ca97d Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 4 May 2021 17:11:46 -0700 Subject: [PATCH 112/256] Fix a check fail in Fast Fourier implementation PiperOrigin-RevId: 372026629 Change-Id: Id05c3362aa575271bc3e06b16316c9037085fc11 --- tensorflow/core/kernels/fft_ops.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/core/kernels/fft_ops.cc b/tensorflow/core/kernels/fft_ops.cc index 058435948394c5..f396cc0a577ae1 100644 --- a/tensorflow/core/kernels/fft_ops.cc +++ b/tensorflow/core/kernels/fft_ops.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ +#include "tensorflow/core/platform/errors.h" #define EIGEN_USE_THREADS // See docs in ../ops/fft_ops.cc. @@ -261,6 +262,9 @@ class FFTCPU : public FFTBase { i == FFTRank ? 
fft_shape[i - 1] / 2 + 1 : fft_shape[i - 1]; full_fft_shape.AddDim(fft_shape[i - 1]); } + OP_REQUIRES(ctx, full_fft_shape.num_elements() > 0, + errors::InvalidArgument("Obtained a FFT shape of 0 elements: ", + full_fft_shape.DebugString())); Tensor temp; OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum::v(), From f116a606f2bfdb2febb4f2f904429e529137fa03 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 4 May 2021 17:42:54 -0700 Subject: [PATCH 113/256] Prevent check fail in FFT PiperOrigin-RevId: 372031044 Change-Id: I50994e3e8a5d1342d01bde80256f6bf2730ca299 --- tensorflow/core/kernels/fft_ops.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/core/kernels/fft_ops.cc b/tensorflow/core/kernels/fft_ops.cc index 058435948394c5..462e3f28bbadfb 100644 --- a/tensorflow/core/kernels/fft_ops.cc +++ b/tensorflow/core/kernels/fft_ops.cc @@ -221,6 +221,9 @@ class FFTCPU : public FFTBase { input_slice_sizes[i] = fft_shape[i - 1]; temp_shape.AddDim(fft_shape[i - 1]); } + OP_REQUIRES(ctx, temp_shape.num_elements() > 0, + errors::InvalidArgument("Obtained a FFT shape of 0 elements: ", + temp_shape.DebugString())); auto output = out->flat_inner_dims(); const Eigen::DSizes zero_start_indices; From abb64c3b0c21fff3883f6f9f5399552a72cff6b3 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 4 May 2021 18:06:03 -0700 Subject: [PATCH 114/256] Fix multiple issues in EditDistance PiperOrigin-RevId: 372033948 Change-Id: Ieb957c29894af05bdfeb1a0402fced808dfcfd7b --- tensorflow/core/kernels/edit_distance_op.cc | 47 +++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/tensorflow/core/kernels/edit_distance_op.cc b/tensorflow/core/kernels/edit_distance_op.cc index 4aecdc9e414d36..386a1af08409f6 100644 --- a/tensorflow/core/kernels/edit_distance_op.cc +++ b/tensorflow/core/kernels/edit_distance_op.cc @@ -64,6 +64,12 @@ Status ValidateShapes(OpKernelContext* ctx, const Tensor& hypothesis_indices, return errors::InvalidArgument( "truth_shape should be a vector, but got shape: ", truth_shape.shape().DebugString()); + if (hypothesis_values.NumElements() != hypothesis_indices.dim_size(0)) + return errors::InvalidArgument( + "Expected hypothesis_values.NumElements == " + "#rows(hypothesis_indices), their shapes are: ", + hypothesis_values.shape().DebugString(), " and ", + hypothesis_indices.shape().DebugString()); if (hypothesis_shape.NumElements() != hypothesis_indices.dim_size(1)) return errors::InvalidArgument( "Expected hypothesis_shape.NumElements == " @@ -75,6 +81,12 @@ Status ValidateShapes(OpKernelContext* ctx, const Tensor& hypothesis_indices, "Input SparseTensors must have rank at least 2, but truth_shape " "rank is: ", truth_shape.NumElements()); + if (truth_values.NumElements() != truth_indices.dim_size(0)) + return errors::InvalidArgument( + "Expected truth_values.NumElements == " + "#rows(truth_indices), their shapes are: ", + truth_values.shape().DebugString(), " and ", + truth_indices.shape().DebugString()); if (truth_shape.NumElements() != truth_indices.dim_size(1)) return errors::InvalidArgument( "Expected truth_shape.NumElements == " @@ -153,6 +165,11 @@ class EditDistanceOp : public OpKernel { output_shape.AddDim(std::max(hypothesis_st_shape.dim_size(d), truth_st_shape.dim_size(d))); } + const auto output_elements = output_shape.num_elements(); + OP_REQUIRES( + ctx, output_elements > 0, + errors::InvalidArgument("Got output shape ", output_shape.DebugString(), + " which has 0 elements")); Tensor* output = nullptr; OP_REQUIRES_OK(ctx, 
ctx->allocate_output("output", output_shape, &output)); @@ -185,6 +202,12 @@ class EditDistanceOp : public OpKernel { if (g_truth == g_hypothesis) { auto loc = std::inner_product(g_truth.begin(), g_truth.end(), output_strides.begin(), int64{0}); + OP_REQUIRES( + ctx, loc < output_elements, + errors::Internal("Got an inner product ", loc, + " which would require in writing to outside of " + "the buffer for the output tensor (max elements ", + output_elements, ")")); output_t(loc) = gtl::LevenshteinDistance(truth_seq, hypothesis_seq, cmp); if (normalize_) output_t(loc) /= truth_seq.size(); @@ -194,6 +217,12 @@ class EditDistanceOp : public OpKernel { } else if (g_truth > g_hypothesis) { // zero-length truth auto loc = std::inner_product(g_hypothesis.begin(), g_hypothesis.end(), output_strides.begin(), int64{0}); + OP_REQUIRES( + ctx, loc < output_elements, + errors::Internal("Got an inner product ", loc, + " which would require in writing to outside of " + "the buffer for the output tensor (max elements ", + output_elements, ")")); output_t(loc) = hypothesis_seq.size(); if (normalize_ && output_t(loc) != 0.0f) { output_t(loc) = std::numeric_limits::infinity(); @@ -202,6 +231,12 @@ class EditDistanceOp : public OpKernel { } else { // zero-length hypothesis auto loc = std::inner_product(g_truth.begin(), g_truth.end(), output_strides.begin(), int64{0}); + OP_REQUIRES( + ctx, loc < output_elements, + errors::Internal("Got an inner product ", loc, + " which would require in writing to outside of " + "the buffer for the output tensor (max elements ", + output_elements, ")")); output_t(loc) = (normalize_) ? 1.0 : truth_seq.size(); ++truth_iter; } @@ -212,6 +247,12 @@ class EditDistanceOp : public OpKernel { auto hypothesis_seq = hypothesis_j.values(); auto loc = std::inner_product(g_hypothesis.begin(), g_hypothesis.end(), output_strides.begin(), int64{0}); + OP_REQUIRES( + ctx, loc < output_elements, + errors::Internal("Got an inner product ", loc, + " which would require in writing to outside of the " + "buffer for the output tensor (max elements ", + output_elements, ")")); output_t(loc) = hypothesis_seq.size(); if (normalize_ && output_t(loc) != 0.0f) { output_t(loc) = std::numeric_limits::infinity(); @@ -224,6 +265,12 @@ class EditDistanceOp : public OpKernel { auto truth_seq = truth_i.values(); auto loc = std::inner_product(g_truth.begin(), g_truth.end(), output_strides.begin(), int64{0}); + OP_REQUIRES( + ctx, loc < output_elements, + errors::Internal("Got an inner product ", loc, + " which would require in writing to outside of the " + "buffer for the output tensor (max elements ", + output_elements, ")")); output_t(loc) = (normalize_) ? 1.0 : truth_seq.size(); ++truth_iter; } From bce9d7c9545bf09fce34fbf4e7006508fd9260e9 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 4 May 2021 18:33:28 -0700 Subject: [PATCH 115/256] Add missing validations in dillation ops. 
PiperOrigin-RevId: 372037158 Change-Id: I4ee304c84a02550c030288a6534000b934fc1599 --- tensorflow/core/kernels/dilation_ops.cc | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/tensorflow/core/kernels/dilation_ops.cc b/tensorflow/core/kernels/dilation_ops.cc index 738ea31d555d5f..996ddb62bfefeb 100644 --- a/tensorflow/core/kernels/dilation_ops.cc +++ b/tensorflow/core/kernels/dilation_ops.cc @@ -130,6 +130,7 @@ class DilationOp : public OpKernel { ParseSizes(context, strides_, rates_, padding_, &stride_rows, &stride_cols, &rate_rows, &rate_cols, &pad_top, &pad_left, &out_rows, &out_cols); + if (!context->status().ok()) return; // Output tensor is of the following dimensions: // [ batch, out_rows, out_cols, depth ] @@ -229,6 +230,7 @@ class DilationBackpropInputOp : public OpKernel { ParseSizes(context, strides_, rates_, padding_, &stride_rows, &stride_cols, &rate_rows, &rate_cols, &pad_top, &pad_left, &out_rows, &out_cols); + if (!context->status().ok()) return; // Verify that the incoming gradient tensor has the expected size // [ batch, out_rows, out_cols, depth ] @@ -318,8 +320,10 @@ struct DilationBackpropInput { } } } - in_backprop(b, h_in_max, w_in_max, d) += - out_backprop(b, h_out, w_out, d); + if (h_in_max < input_rows && w_in_max < input_cols) { + in_backprop(b, h_in_max, w_in_max, d) += + out_backprop(b, h_out, w_out, d); + } } } } @@ -349,6 +353,7 @@ class DilationBackpropFilterOp : public OpKernel { ParseSizes(context, strides_, rates_, padding_, &stride_rows, &stride_cols, &rate_rows, &rate_cols, &pad_top, &pad_left, &out_rows, &out_cols); + if (!context->status().ok()) return; // Verify that the incoming gradient tensor has the expected size // [ batch, out_rows, out_cols, depth ] @@ -438,8 +443,10 @@ struct DilationBackpropFilter { } } } - filter_backprop(h_max, w_max, d) += - out_backprop(b, h_out, w_out, d); + if (h_max < filter_rows && w_max < filter_cols) { + filter_backprop(h_max, w_max, d) += + out_backprop(b, h_out, w_out, d); + } } } } From 65d8727e10873e9862b0fa9e8097b76804804a52 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 4 May 2021 19:14:24 -0700 Subject: [PATCH 116/256] Fix breakage in parameterized_truncated_normal_op.cc PiperOrigin-RevId: 372041718 Change-Id: Iff79e77a2bb27032423eefcb84211627b27dfe81 --- tensorflow/core/kernels/parameterized_truncated_normal_op.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/core/kernels/parameterized_truncated_normal_op.cc b/tensorflow/core/kernels/parameterized_truncated_normal_op.cc index a63457551ac29b..116df3541d7cf6 100644 --- a/tensorflow/core/kernels/parameterized_truncated_normal_op.cc +++ b/tensorflow/core/kernels/parameterized_truncated_normal_op.cc @@ -627,6 +627,9 @@ class ParameterizedTruncatedNormalOp : public OpKernel { ctx, TensorShapeUtils::IsVector(shape_tensor.shape()), errors::InvalidArgument("Input shape should be a vector, got shape: ", shape_tensor.shape().DebugString())); + OP_REQUIRES(ctx, shape_tensor.NumElements() > 0, + errors::InvalidArgument("Shape tensor must not be empty, got ", + shape_tensor.DebugString())); int32 num_batches = shape_tensor.flat()(0); int32 samples_per_batch = 1; From 971743b211dd23c2e7ae17f498206ed4b245e04e Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Tue, 4 May 2021 21:30:50 -0700 Subject: [PATCH 117/256] Fix heap-buffer-overflow issue with `tf.raw_ops.SparseDenseCwiseMul`. 
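The overflow is triggered by a sparse operand whose `sp_values` and `sp_indices` disagree on the number of entries, which the kernel previously took on trust. An illustrative Python sketch (assuming the current `tf.raw_ops.SparseDenseCwiseMul` argument names) of an inconsistent sparse tensor that is now rejected:

    import tensorflow as tf

    # Two values but only one index row; the mismatch could read past the
    # end of the indices buffer. It now fails with InvalidArgument.
    tf.raw_ops.SparseDenseCwiseMul(
        sp_indices=tf.constant([[0, 0]], dtype=tf.int64),
        sp_values=tf.constant([1.0, 2.0]),
        sp_shape=tf.constant([2, 2], dtype=tf.int64),
        dense=tf.ones([2, 2]))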
PiperOrigin-RevId: 372054410 Change-Id: Ifcce0491e2e3816838c87e73be30a1e61b65174d --- tensorflow/core/kernels/sparse_dense_binary_op_shared.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc b/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc index 3a5e66a0e73ea6..dac4a3d3e6bfcd 100644 --- a/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc +++ b/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc @@ -78,6 +78,11 @@ class SparseDenseBinaryOpShared : public OpKernel { "but received shapes: ", values_t->shape().DebugString(), " and ", shape_t->shape().DebugString())); + OP_REQUIRES( + ctx, values_t->dim_size(0) == indices_t->dim_size(0), + errors::InvalidArgument( + "The first dimension of values and indices should match. (", + values_t->dim_size(0), " vs. ", indices_t->dim_size(0), ")")); const auto indices_mat = indices_t->matrix(); const auto shape_vec = shape_t->vec(); From 8e7f060ae5caf68e72c6d5f5d1350cac7c3f8b53 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 5 May 2021 08:38:03 -0700 Subject: [PATCH 118/256] Fix heap buffer overflow PiperOrigin-RevId: 372132844 Change-Id: Idef9895efaf145f2b1c23d31983601ec980cd5e4 --- tensorflow/core/kernels/maxpooling_op.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/core/kernels/maxpooling_op.cc b/tensorflow/core/kernels/maxpooling_op.cc index 5652addd00a957..f1193f0ef4887c 100644 --- a/tensorflow/core/kernels/maxpooling_op.cc +++ b/tensorflow/core/kernels/maxpooling_op.cc @@ -984,6 +984,9 @@ struct LaunchMaxPoolingGradWithArgmax { const int input_start = start * input_size_per_batch; const int input_end = limit * input_size_per_batch; for (int64 index = input_start; index < input_end; index++) { + if (index >= argmax.NumElements()) { + break; + } int64 grad_out_index = argmax_flat(index); if (!include_batch_in_index) { const int64 cur_batch = index / input_size_per_batch; From 5c60afa8fad3d0903c4938b4a194974120a20a43 Mon Sep 17 00:00:00 2001 From: Laura Pak Date: Wed, 5 May 2021 08:16:13 -0700 Subject: [PATCH 119/256] Fix out of bound read in requantization_range_op.cc PiperOrigin-RevId: 372129031 Change-Id: Ie684ab98a3840c5186ead3eafffc0e0ed0e8030d --- tensorflow/core/kernels/requantization_range_op.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/core/kernels/requantization_range_op.cc b/tensorflow/core/kernels/requantization_range_op.cc index cc6e891a6b352b..f6e217499d1983 100644 --- a/tensorflow/core/kernels/requantization_range_op.cc +++ b/tensorflow/core/kernels/requantization_range_op.cc @@ -46,6 +46,10 @@ class RequantizationRangeOp : public OpKernel { void Compute(OpKernelContext* ctx) override { const Tensor& input = ctx->input(0); + OP_REQUIRES(ctx, ctx->input(1).NumElements() > 0, + errors::InvalidArgument("Input min must not be empty.")); + OP_REQUIRES(ctx, ctx->input(2).NumElements() > 0, + errors::InvalidArgument("Input max must not be empty.")); const float input_min_float = ctx->input(1).flat()(0); const float input_max_float = ctx->input(2).flat()(0); Tensor* output_min = nullptr; From d9d7ac570374661a0a2ce1b985b6bd6ac339f756 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Tue, 4 May 2021 18:05:46 -0700 Subject: [PATCH 120/256] Fix memory corruption issue with `tf.raw_ops.DrawBoundingBoxesV2`. 
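Each box is expected to carry exactly four coordinates (y_min, x_min, y_max, x_max); a narrower inner dimension let the kernel read box coordinates that were never supplied. A rough Python sketch (current `tf.raw_ops.DrawBoundingBoxesV2` argument names assumed) of the malformed shape that is now rejected:

    import tensorflow as tf

    images = tf.zeros([1, 4, 4, 3], dtype=tf.float32)
    boxes = tf.zeros([1, 1, 2], dtype=tf.float32)   # inner dim must be 4
    colors = tf.constant([[1.0, 0.0, 0.0, 1.0]])    # one RGBA color
    tf.raw_ops.DrawBoundingBoxesV2(images=images, boxes=boxes, colors=colors)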
PiperOrigin-RevId: 372033910 Change-Id: I8a9f4efc1c8ddaacbc26ec1fbe4bfdd6791c226d --- tensorflow/core/kernels/draw_bounding_box_op.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tensorflow/core/kernels/draw_bounding_box_op.cc b/tensorflow/core/kernels/draw_bounding_box_op.cc index 73db76333f0862..926ea368a58ba8 100644 --- a/tensorflow/core/kernels/draw_bounding_box_op.cc +++ b/tensorflow/core/kernels/draw_bounding_box_op.cc @@ -73,6 +73,12 @@ class DrawBoundingBoxesOp : public OpKernel { errors::InvalidArgument("Channel depth should be either 1 (GRY), " "3 (RGB), or 4 (RGBA)")); + OP_REQUIRES( + context, boxes.dim_size(2) == 4, + errors::InvalidArgument( + "The size of the third dimension of the box must be 4. Received: ", + boxes.dim_size(2))); + const int64 batch_size = images.dim_size(0); const int64 height = images.dim_size(1); const int64 width = images.dim_size(2); From 271fba6e4af2faaf37162ab372bbe3d3e5de06b0 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 5 May 2021 11:40:50 -0700 Subject: [PATCH 121/256] Add several missing validations in SDCA PiperOrigin-RevId: 372172877 Change-Id: Id366da962432e18dcbfac847d11e98488bebb70a --- tensorflow/core/kernels/sdca_internal.cc | 36 ++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/tensorflow/core/kernels/sdca_internal.cc b/tensorflow/core/kernels/sdca_internal.cc index cbc754af0e9bb1..11a3be8bf46a76 100644 --- a/tensorflow/core/kernels/sdca_internal.cc +++ b/tensorflow/core/kernels/sdca_internal.cc @@ -99,6 +99,10 @@ Status ModelWeights::Initialize(OpKernelContext* const context) { OpInputList sparse_weights_inputs; TF_RETURN_IF_ERROR( context->input_list("sparse_weights", &sparse_weights_inputs)); + if (sparse_indices_inputs.size() != sparse_weights_inputs.size()) + return errors::InvalidArgument( + "sparse_indices and sparse_weights must have the same length, got ", + sparse_indices_inputs.size(), " and ", sparse_weights_inputs.size()); OpInputList dense_weights_inputs; TF_RETURN_IF_ERROR( context->input_list("dense_weights", &dense_weights_inputs)); @@ -106,10 +110,20 @@ Status ModelWeights::Initialize(OpKernelContext* const context) { OpOutputList sparse_weights_outputs; TF_RETURN_IF_ERROR(context->output_list("out_delta_sparse_weights", &sparse_weights_outputs)); + if (sparse_weights_outputs.size() != sparse_weights_inputs.size()) + return errors::InvalidArgument( + "out_delta_sparse_weights and sparse_weights must have the same " + "length, got ", + sparse_weights_outputs.size(), " and ", sparse_weights_inputs.size()); OpOutputList dense_weights_outputs; TF_RETURN_IF_ERROR( context->output_list("out_delta_dense_weights", &dense_weights_outputs)); + if (dense_weights_outputs.size() != dense_weights_inputs.size()) + return errors::InvalidArgument( + "out_delta_dense_weights and dense_weights must have the same length, " + "got ", + dense_weights_outputs.size(), " and ", dense_weights_inputs.size()); for (int i = 0; i < sparse_weights_inputs.size(); ++i) { Tensor* delta_t; @@ -327,13 +341,28 @@ Status Examples::Initialize(OpKernelContext* const context, OpInputList sparse_example_indices_inputs; TF_RETURN_IF_ERROR(context->input_list("sparse_example_indices", &sparse_example_indices_inputs)); + if (sparse_example_indices_inputs.size() != num_sparse_features) + return errors::InvalidArgument( + "Expected ", num_sparse_features, + " tensors in sparse_example_indices but got ", + sparse_example_indices_inputs.size()); OpInputList sparse_feature_indices_inputs; 
TF_RETURN_IF_ERROR(context->input_list("sparse_feature_indices", &sparse_feature_indices_inputs)); + if (sparse_feature_indices_inputs.size() != num_sparse_features) + return errors::InvalidArgument( + "Expected ", num_sparse_features, + " tensors in sparse_feature_indices but got ", + sparse_feature_indices_inputs.size()); OpInputList sparse_feature_values_inputs; if (num_sparse_features_with_values > 0) { TF_RETURN_IF_ERROR(context->input_list("sparse_feature_values", &sparse_feature_values_inputs)); + if (sparse_feature_values_inputs.size() != num_sparse_features_with_values) + return errors::InvalidArgument( + "Expected ", num_sparse_features_with_values, + " tensors in sparse_feature_values but got ", + sparse_feature_values_inputs.size()); } const Tensor* example_weights_t; @@ -400,6 +429,13 @@ Status Examples::CreateSparseFeatureRepresentation( sparse_example_indices_inputs[i].template flat(); auto feature_indices = sparse_feature_indices_inputs[i].template flat(); + if (example_indices.size() != feature_indices.size()) { + mutex_lock l(mu); + result = errors::InvalidArgument( + "Found mismatched example_indices and feature_indices [", + example_indices, "] vs [", feature_indices, "]"); + return; + } // Parse features for each example. Features for a particular example // are at the offsets (start_id, end_id] From 76df3b25de25bcb6f7de1a2ec1aab9fb3299b855 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 5 May 2021 12:07:57 -0700 Subject: [PATCH 122/256] Add missing validations to reverse_sequence_op PiperOrigin-RevId: 372178683 Change-Id: Iac97ebab5b342f1262c77a7d9bcb4267b305ce5b --- tensorflow/core/kernels/reverse_sequence_op.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/core/kernels/reverse_sequence_op.cc b/tensorflow/core/kernels/reverse_sequence_op.cc index b5b62bc76ca524..1282deb26e8cd6 100644 --- a/tensorflow/core/kernels/reverse_sequence_op.cc +++ b/tensorflow/core/kernels/reverse_sequence_op.cc @@ -115,6 +115,10 @@ class ReverseSequenceOp : public OpKernel { : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("batch_dim", &batch_dim_)); OP_REQUIRES_OK(context, context->GetAttr("seq_dim", &seq_dim_)); + OP_REQUIRES(context, batch_dim_ >= 0, + errors::InvalidArgument("Invalid batch_dim ", batch_dim_)); + OP_REQUIRES(context, seq_dim_ >= 0, + errors::InvalidArgument("Invalid seq_dim ", seq_dim_)); } void Compute(OpKernelContext* context) override { From c791f0d8ca2f69e2b91e956f7a0370c9c6cea9bb Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 5 May 2021 14:34:54 -0700 Subject: [PATCH 123/256] Don't do any work if output tensor is null (prevent div by 0) PiperOrigin-RevId: 372208700 Change-Id: Iea6b6293e887ade8538facfdb50fb931e17f511e --- tensorflow/core/kernels/maxpooling_op.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/core/kernels/maxpooling_op.cc b/tensorflow/core/kernels/maxpooling_op.cc index 5652addd00a957..9ac7ac6132c4e1 100644 --- a/tensorflow/core/kernels/maxpooling_op.cc +++ b/tensorflow/core/kernels/maxpooling_op.cc @@ -1049,6 +1049,8 @@ class MaxPoolingGradWithArgmaxOp : public OpKernel { OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 0, out_shape, &grad_out)); + if (out_shape.num_elements() == 0) return; // nothing to be done + LaunchMaxPoolingGradWithArgmax::launch( context, params, grad_in, argmax, grad_out, include_batch_in_index_); } From 53b19e42f63208c5de40bbe509e5befa56f24269 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 5 May 2021 15:20:14 -0700 Subject: 
[PATCH 124/256] Add missing validation to pooling_ops_3d PiperOrigin-RevId: 372218727 Change-Id: I6b9ed4266aa7286c02f1f230d7bea922c1be547e --- tensorflow/core/kernels/pooling_ops_3d.cc | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tensorflow/core/kernels/pooling_ops_3d.cc b/tensorflow/core/kernels/pooling_ops_3d.cc index 532d861e6158e5..bb7a37eac53e11 100644 --- a/tensorflow/core/kernels/pooling_ops_3d.cc +++ b/tensorflow/core/kernels/pooling_ops_3d.cc @@ -704,6 +704,19 @@ class MaxPooling3dGradGradOp : public OpKernel { OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {2}, 0, tensor_out.shape(), &output)); + // Given access patterns in LaunchMaxPooling3dGradGradOp, these tensors must + // have elements. + OP_REQUIRES(context, tensor_in.NumElements() > 0, + errors::InvalidArgument("received empty tensor tensor_in: ", + tensor_in.DebugString())); + OP_REQUIRES(context, tensor_out.NumElements() > 0, + errors::InvalidArgument("received empty tensor tensor_out: ", + tensor_out.DebugString())); + OP_REQUIRES( + context, out_grad_backprop.NumElements() > 0, + errors::InvalidArgument("received empty tensor out_grad_backprop: ", + out_grad_backprop.DebugString())); + LaunchMaxPooling3dGradGradOp::launch( context, params, tensor_in, tensor_out, out_grad_backprop, output); } From 1689bd67b6766afe9604a750d4a2e1934aa3d276 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 5 May 2021 18:07:02 -0700 Subject: [PATCH 125/256] [CherryPick]Add missing validation, prevent heap OOB --- tensorflow/core/kernels/pooling_ops_3d.cc | 25 +++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/tensorflow/core/kernels/pooling_ops_3d.cc b/tensorflow/core/kernels/pooling_ops_3d.cc index 532d861e6158e5..3ae363da22bed4 100644 --- a/tensorflow/core/kernels/pooling_ops_3d.cc +++ b/tensorflow/core/kernels/pooling_ops_3d.cc @@ -699,11 +699,36 @@ class MaxPooling3dGradGradOp : public OpKernel { Pool3dParameters params{context, ksize_, stride_, padding_, data_format_, tensor_in.shape()}; + if (!context->status().ok()) return; // params is invalid Tensor* output = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {2}, 0, tensor_out.shape(), &output)); + // Given access patterns in LaunchMaxPooling3dGradGradOp, these tensors must + // have elements. 
+ OP_REQUIRES(context, tensor_in.NumElements() > 0, + errors::InvalidArgument("received empty tensor tensor_in: ", + tensor_in.DebugString())); + OP_REQUIRES(context, tensor_out.NumElements() > 0, + errors::InvalidArgument("received empty tensor tensor_out: ", + tensor_out.DebugString())); + OP_REQUIRES( + context, out_grad_backprop.NumElements() > 0, + errors::InvalidArgument("received empty tensor out_grad_backprop: ", + out_grad_backprop.DebugString())); + OP_REQUIRES(context, + tensor_in.NumElements() == out_grad_backprop.NumElements(), + errors::InvalidArgument("tensor_in and out_grad_backprop must " + "have same number of elements, got <", + tensor_in.DebugString(), "> and <", + out_grad_backprop.DebugString(), ">")); + OP_REQUIRES( + context, tensor_out.NumElements() == output->NumElements(), + errors::InvalidArgument( + "tensor_out and output must have same number of elements, got <", + tensor_out.DebugString(), "> and <", output->DebugString(), ">")); + LaunchMaxPooling3dGradGradOp::launch( context, params, tensor_in, tensor_out, out_grad_backprop, output); } From 7dd02c934ff7470e8945aef3046baf3d5ae09725 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Wed, 5 May 2021 21:09:21 -0700 Subject: [PATCH 126/256] Fix nullptr deref in `tf.raw_ops.CTCLoss`. PiperOrigin-RevId: 372266334 Change-Id: Ic52c3e9f13a38f54482d670907eda1688450862b --- tensorflow/core/kernels/ctc_loss_op.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/core/kernels/ctc_loss_op.cc b/tensorflow/core/kernels/ctc_loss_op.cc index 6358e82fdda853..def01854c8a34e 100644 --- a/tensorflow/core/kernels/ctc_loss_op.cc +++ b/tensorflow/core/kernels/ctc_loss_op.cc @@ -105,6 +105,9 @@ class CTCLossOp : public OpKernel { const TensorShape& inputs_shape = inputs->shape(); const int64 max_time = inputs_shape.dim_size(0); + OP_REQUIRES(ctx, max_time != 0, + errors::InvalidArgument( + "Max time or first dimension of input cannot be 0.")); const int64 batch_size = inputs_shape.dim_size(1); const int64 num_classes_raw = inputs_shape.dim_size(2); OP_REQUIRES( From 0418c69d856c85f572a86fb51950c9752484c869 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Wed, 5 May 2021 17:33:47 -0700 Subject: [PATCH 127/256] Fix OOB read issue with `tf.raw_ops.CTCLoss`. PiperOrigin-RevId: 372242187 Change-Id: I347228ed8c04e1d2eb9d2479ae52f51d1b512c6e --- tensorflow/core/kernels/ctc_loss_op.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/core/kernels/ctc_loss_op.cc b/tensorflow/core/kernels/ctc_loss_op.cc index 6358e82fdda853..b0e298a0f329f0 100644 --- a/tensorflow/core/kernels/ctc_loss_op.cc +++ b/tensorflow/core/kernels/ctc_loss_op.cc @@ -100,6 +100,10 @@ class CTCLossOp : public OpKernel { errors::InvalidArgument("sequence_length is not a vector")); OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(labels_indices->shape()), errors::InvalidArgument("labels_indices is not a matrix")); + OP_REQUIRES(ctx, labels_indices->dim_size(1) > 1, + errors::InvalidArgument( + "labels_indices second dimension must be >= 1. 
Received ", + labels_indices->dim_size(1))); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(labels_values->shape()), errors::InvalidArgument("labels_values is not a vector")); From a9be6e544b6ac2849caf96ece65c3a17704e63ab Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 6 May 2021 09:51:26 -0700 Subject: [PATCH 128/256] Fix assertion failure in pooling_ops_3d PiperOrigin-RevId: 372364504 Change-Id: Iecde4fe26b47a8fa935d6e2611b5585ed5777781 --- tensorflow/core/kernels/pooling_ops_3d.cc | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tensorflow/core/kernels/pooling_ops_3d.cc b/tensorflow/core/kernels/pooling_ops_3d.cc index 532d861e6158e5..2e44f4a221e757 100644 --- a/tensorflow/core/kernels/pooling_ops_3d.cc +++ b/tensorflow/core/kernels/pooling_ops_3d.cc @@ -389,6 +389,19 @@ struct LaunchAvgPooling3dGradOp { const std::array& output_shape, const std::array& padding, TensorFormat data_format, Tensor* output) { + OP_REQUIRES( + context, tensor_in_shape.dim_size(0) == out_backprop.dim_size(0), + errors::InvalidArgument( + "Expected first dimension of tensor_in_shape and " + "out_backprop to match, got ", + tensor_in_shape.dim_size(0), " and ", out_backprop.dim_size(0))); + OP_REQUIRES( + context, tensor_in_shape.dim_size(4) == out_backprop.dim_size(4), + errors::InvalidArgument( + "Expected last dimension of tensor_in_shape and " + "out_backprop to match, got ", + tensor_in_shape.dim_size(4), " and ", out_backprop.dim_size(4))); + output->flat().setZero(); std::array input_size = {{tensor_in_shape.dim_size(3), tensor_in_shape.dim_size(2), From 61bff3aa1e56b143a4ee15053ceacc205459e39c Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 5 May 2021 22:39:29 -0700 Subject: [PATCH 129/256] Validate arguments of `FractionalMaxPoolGrad` PiperOrigin-RevId: 372274982 Change-Id: If46b0c442efa4eaef635ce6a476717060420122c --- tensorflow/core/kernels/fractional_max_pool_op.cc | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tensorflow/core/kernels/fractional_max_pool_op.cc b/tensorflow/core/kernels/fractional_max_pool_op.cc index 619a3507ce415f..1a2a783d135c54 100644 --- a/tensorflow/core/kernels/fractional_max_pool_op.cc +++ b/tensorflow/core/kernels/fractional_max_pool_op.cc @@ -235,6 +235,20 @@ class FractionalMaxPoolGradOp : public OpKernel { // Just to make it similar to FractionalMaxPoolOp. constexpr int tensor_in_and_out_dims = 4; + OP_REQUIRES( + context, tensor_in.dims() == tensor_in_and_out_dims, + errors::InvalidArgument("orig_input should be a tensor of rank 4, got ", + tensor_in.DebugString())); + OP_REQUIRES(context, tensor_in.NumElements() > 0, + errors::InvalidArgument("orig_input must not be empty, got ", + tensor_in.DebugString())); + OP_REQUIRES(context, tensor_out.dims() == tensor_in_and_out_dims, + errors::InvalidArgument( + "orig_output should be a tensor of rank 4, got ", + tensor_out.DebugString())); + OP_REQUIRES(context, tensor_out.NumElements() > 0, + errors::InvalidArgument("orig_output must not be empty, got ", + tensor_out.DebugString())); std::vector input_size(tensor_in_and_out_dims); std::vector output_size(tensor_in_and_out_dims); for (int i = 0; i < tensor_in_and_out_dims; ++i) { From 4458cb353933bda9e4c60d58b2204269ba82d815 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 6 May 2021 14:02:47 -0700 Subject: [PATCH 130/256] Validate inputs of `FractionalAvgPoolGrad`. 
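The gradient kernel indexes `row_pooling_sequence` and `col_pooling_sequence` by the output rows and columns of `out_backprop`, so each sequence must contain at least one more element than the corresponding output dimension. A hypothetical sketch of an input that the new checks reject (argument values are illustrative only, not from the original report):

    import tensorflow as tf

    # out_backprop claims 2 output rows and columns, but each pooling
    # sequence supplies only 2 boundaries where at least 3 are needed.
    tf.raw_ops.FractionalAvgPoolGrad(
        orig_input_tensor_shape=tf.constant([1, 4, 4, 1], dtype=tf.int64),
        out_backprop=tf.ones([1, 2, 2, 1], dtype=tf.float32),
        row_pooling_sequence=tf.constant([0, 4], dtype=tf.int64),
        col_pooling_sequence=tf.constant([0, 4], dtype=tf.int64),
        overlapping=False)
    # Now raises InvalidArgument instead of reading the sequences out of bounds.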
PiperOrigin-RevId: 372420640 Change-Id: Icc583928e6cdc3062e12498e4d2337a8fe3da016 --- tensorflow/core/kernels/fractional_avg_pool_op.cc | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tensorflow/core/kernels/fractional_avg_pool_op.cc b/tensorflow/core/kernels/fractional_avg_pool_op.cc index b8a5083e5340f1..0452638a066795 100644 --- a/tensorflow/core/kernels/fractional_avg_pool_op.cc +++ b/tensorflow/core/kernels/fractional_avg_pool_op.cc @@ -250,6 +250,19 @@ class FractionalAvgPoolGradOp : public OpKernel { const int64 out_cols = out_backprop.dim_size(2); const int64 out_depth = out_backprop.dim_size(3); + OP_REQUIRES(context, row_seq_tensor.NumElements() > out_rows, + errors::InvalidArgument("Given out_backprop shape ", + out_backprop.shape().DebugString(), + ", row_seq_tensor must have at least ", + out_rows + 1, " elements, but got ", + row_seq_tensor.NumElements())); + OP_REQUIRES(context, col_seq_tensor.NumElements() > out_cols, + errors::InvalidArgument("Given out_backprop shape ", + out_backprop.shape().DebugString(), + ", col_seq_tensor must have at least ", + out_cols + 1, " elements, but got ", + col_seq_tensor.NumElements())); + auto row_seq_tensor_flat = row_seq_tensor.flat(); auto col_seq_tensor_flat = col_seq_tensor.flat(); auto orig_input_tensor_shape_flat = orig_input_tensor_shape.flat(); From 7155a0ba42441ffbd32bb1edc72eff1a32313f2b Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 6 May 2021 14:24:09 -0700 Subject: [PATCH 131/256] Prevent heap OOB error in `MaxPoolGrad` PiperOrigin-RevId: 372424854 Change-Id: Idac0f23867ad8b0601cafbaaa52d5e64269e63a7 --- tensorflow/core/kernels/maxpooling_op.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/maxpooling_op.cc b/tensorflow/core/kernels/maxpooling_op.cc index 5652addd00a957..5b1f084d886516 100644 --- a/tensorflow/core/kernels/maxpooling_op.cc +++ b/tensorflow/core/kernels/maxpooling_op.cc @@ -192,7 +192,9 @@ static void SpatialMaxPoolWithArgMaxHelper( // CHECK(input_backprop_index >= in_start && input_backprop_index < // in_end) FastBoundsCheck(input_backprop_index - in_start, in_end - in_start); - input_backprop_flat(input_backprop_index) += out_backprop_flat(index); + if (index < out_backprop.NumElements()) { + input_backprop_flat(input_backprop_index) += out_backprop_flat(index); + } } } }; From 31ff1f20231bd9f22a9418d32c2e18d82a3fd17f Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 6 May 2021 14:51:41 -0700 Subject: [PATCH 132/256] Fix SEGV in CTC ops PiperOrigin-RevId: 372430279 Change-Id: I7ec2ad9d6f4d0980c33de45d27c6b17df5c6e26f --- tensorflow/core/kernels/ctc_decoder_ops.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/core/kernels/ctc_decoder_ops.cc b/tensorflow/core/kernels/ctc_decoder_ops.cc index 22681f97437f0c..9efdac60e369c2 100644 --- a/tensorflow/core/kernels/ctc_decoder_ops.cc +++ b/tensorflow/core/kernels/ctc_decoder_ops.cc @@ -70,6 +70,9 @@ class CTCDecodeHelper { if (inputs_shape.dims() != 3) { return errors::InvalidArgument("inputs is not a 3-Tensor"); } + if (inputs_shape.num_elements() == 0) { + return errors::InvalidArgument("inputs must not be empty"); + } const int64 max_time = inputs_shape.dim_size(0); const int64 batch_size = inputs_shape.dim_size(1); From 48083ad391775a008d40d7dfee007b98406bb0a2 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 6 May 2021 15:31:05 -0700 Subject: [PATCH 133/256] Fix heap OOB read in dequantize op. 
Also fixes SEGV in same op PiperOrigin-RevId: 372437896 Change-Id: I135e94d360c2a1ce374c10f7e0fed1af603dbc02 --- tensorflow/core/kernels/dequantize_op.cc | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tensorflow/core/kernels/dequantize_op.cc b/tensorflow/core/kernels/dequantize_op.cc index 3b38daf006768b..54c0f9de572f59 100644 --- a/tensorflow/core/kernels/dequantize_op.cc +++ b/tensorflow/core/kernels/dequantize_op.cc @@ -98,6 +98,18 @@ class DequantizeOp : public OpKernel { if (axis_ > -1) { num_slices = input.dim_size(axis_); } + OP_REQUIRES(ctx, input_min_tensor.NumElements() == num_slices, + errors::InvalidArgument( + "input_min_tensor must have as many elements as input on " + "the dequantization axis (", + axis_, "), got ", input_min_tensor.NumElements(), + ", expected ", num_slices)); + OP_REQUIRES(ctx, input_max_tensor.NumElements() == num_slices, + errors::InvalidArgument( + "input_max_tensor must have as many elements as input on " + "the dequantization axis (", + axis_, "), got ", input_max_tensor.NumElements(), + ", expected ", num_slices)); Tensor* output = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, input.shape(), &output)); From 321733ea9e82d4cae65debde449181715d69408e Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 6 May 2021 15:55:00 -0700 Subject: [PATCH 134/256] Prevent overflow in sparse op PiperOrigin-RevId: 372442006 Change-Id: I60fe31cd7e56fb3501e97c63500caf902ddeee96 --- tensorflow/core/kernels/sparse_split_op.cc | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/tensorflow/core/kernels/sparse_split_op.cc b/tensorflow/core/kernels/sparse_split_op.cc index 3d02be47cbbef5..ca3e77f76af7f7 100644 --- a/tensorflow/core/kernels/sparse_split_op.cc +++ b/tensorflow/core/kernels/sparse_split_op.cc @@ -63,11 +63,18 @@ class SparseSplitOp : public OpKernel { input_shape.vec()(split_dim), "), got ", num_split_)); + // Prevent overflow by constructing the dense shape separately + TensorShape dense_shape; + const auto input_shape_flat = input_shape.flat(); + for (int i = 0; i < input_shape.NumElements(); i++) { + OP_REQUIRES_OK(context, + dense_shape.AddDimWithStatus(input_shape_flat(i))); + } + sparse::SparseTensor sparse_tensor; OP_REQUIRES_OK(context, - sparse::SparseTensor::Create( - input_indices, input_values, - TensorShape(input_shape.vec()), &sparse_tensor)); + sparse::SparseTensor::Create(input_indices, input_values, + dense_shape, &sparse_tensor)); std::vector outputs; OP_REQUIRES_OK(context, From 3c40555eda8b239264347d4ca065162786e68191 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 27 Apr 2021 15:37:08 -0700 Subject: [PATCH 135/256] Fix division by zero in TFLite padding. 
PiperOrigin-RevId: 370777494 Change-Id: Ic1331e4a1603b9e4c8aa183012a6c8237410aa0f --- tensorflow/lite/kernels/padding.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/lite/kernels/padding.h b/tensorflow/lite/kernels/padding.h index 1116b1da852cf6..6b4ab7fa58d1aa 100644 --- a/tensorflow/lite/kernels/padding.h +++ b/tensorflow/lite/kernels/padding.h @@ -44,6 +44,11 @@ inline int ComputePaddingWithOffset(int stride, int dilation_rate, int in_size, inline int ComputeOutSize(TfLitePadding padding, int image_size, int filter_size, int stride, int dilation_rate = 1) { int effective_filter_size = (filter_size - 1) * dilation_rate + 1; + + // TODO(b/186448822): This uses 0 since the function has no other way to + // report error case + if (stride == 0) return 0; + switch (padding) { case kTfLitePaddingSame: return (image_size + stride - 1) / stride; From 55e88231144464012b66fd0435e5a1c83a1aca70 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 27 Apr 2021 17:45:43 -0700 Subject: [PATCH 136/256] Prevent another div by 0 in optimized pooling implementations TFLite PiperOrigin-RevId: 370800091 Change-Id: I2119352f57fb5ca4f2051e0e2d749403304a979b --- tensorflow/lite/kernels/pooling.cc | 4 ++++ tensorflow/lite/kernels/pooling_test.cc | 13 +++++++++++++ 2 files changed, 17 insertions(+) diff --git a/tensorflow/lite/kernels/pooling.cc b/tensorflow/lite/kernels/pooling.cc index a1380080a1eb03..6c97824cae6943 100644 --- a/tensorflow/lite/kernels/pooling.cc +++ b/tensorflow/lite/kernels/pooling.cc @@ -85,6 +85,10 @@ TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { auto padding = params->padding; int out_width, out_height; + // Prevent division by 0 in optimized pooling implementations + TF_LITE_ENSURE(context, params->stride_height > 0); + TF_LITE_ENSURE(context, params->stride_width > 0); + data->padding = ComputePaddingHeightWidth( params->stride_height, params->stride_width, 1, 1, height, width, params->filter_height, params->filter_width, padding, &out_height, diff --git a/tensorflow/lite/kernels/pooling_test.cc b/tensorflow/lite/kernels/pooling_test.cc index e614fedccfd500..108195388141df 100644 --- a/tensorflow/lite/kernels/pooling_test.cc +++ b/tensorflow/lite/kernels/pooling_test.cc @@ -1151,5 +1151,18 @@ TEST(FloatPoolingOpTest, L2PoolPaddingValidSlide1) { EXPECT_THAT(m.GetOutput(), ElementsAreArray({3.5, 6.0, 6.5})); } +#ifdef GTEST_HAS_DEATH_TEST +TEST(FloatPoolingOpTest, MaxPoolWithZeroStride) { + EXPECT_DEATH( + FloatPoolingOpModel m(BuiltinOperator_MAX_POOL_2D, + /*input=*/{TensorType_FLOAT32, {1, 2, 4, 1}}, + /*filter_width=*/2, /*filter_height=*/2, + /*output=*/{TensorType_FLOAT32, {}}, + /*padding=*/Padding_VALID, + /*stride_w=*/0, /*stride_h=*/0), + "Cannot allocate tensors"); +} +#endif + } // namespace } // namespace tflite From 46f9926845371ab318e480bd436baab0d936bc8f Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 27 Apr 2021 17:45:57 -0700 Subject: [PATCH 137/256] Prevent one more div by 0 in TFLite PiperOrigin-RevId: 370800114 Change-Id: I6b956aeb8c458cc6f514408d2e89ffacfe249e57 --- tensorflow/lite/kernels/space_to_depth.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/lite/kernels/space_to_depth.cc b/tensorflow/lite/kernels/space_to_depth.cc index ac001d903a466c..e01381466300b6 100644 --- a/tensorflow/lite/kernels/space_to_depth.cc +++ b/tensorflow/lite/kernels/space_to_depth.cc @@ -58,6 +58,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_TYPES_EQ(context, input->type, 
output->type); const int block_size = params->block_size; + TF_LITE_ENSURE(context, block_size > 0); const int input_height = input->dims->data[1]; const int input_width = input->dims->data[2]; int output_height = input_height / block_size; From 6f5f9b021685c414846b3109f46057834270fbbf Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 27 Apr 2021 17:46:10 -0700 Subject: [PATCH 138/256] Handle one more division by 0 in TFLite. PiperOrigin-RevId: 370800140 Change-Id: I9ab42e5aaccf02f226d1282611490a54cf7d273e --- tensorflow/lite/kernels/gather_nd.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/lite/kernels/gather_nd.cc b/tensorflow/lite/kernels/gather_nd.cc index fd31b8c4ddd709..d84621918133fe 100644 --- a/tensorflow/lite/kernels/gather_nd.cc +++ b/tensorflow/lite/kernels/gather_nd.cc @@ -144,6 +144,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* indices = GetInput(context, node, kIndices); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + // Prevent division by 0 in the helper + TF_LITE_ENSURE(context, NumElements(params) > 0); + switch (indices->type) { case kTfLiteInt32: return EvalGatherNd(context, params, indices, output); From bb8bdb05722ced7f074e761bd1ba477520b5b9e1 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 27 Apr 2021 17:46:25 -0700 Subject: [PATCH 139/256] Fix another division by 0 in TFLite PiperOrigin-RevId: 370800181 Change-Id: I924809166a6131f5075e6d45c455106538d755f9 --- tensorflow/lite/kernels/transpose_conv.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/lite/kernels/transpose_conv.cc b/tensorflow/lite/kernels/transpose_conv.cc index 07dc4bbac53452..079d3bd381221f 100644 --- a/tensorflow/lite/kernels/transpose_conv.cc +++ b/tensorflow/lite/kernels/transpose_conv.cc @@ -573,6 +573,10 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const auto* params = reinterpret_cast(node->builtin_data); + // Prevent divisions by 0 + TF_LITE_ENSURE(context, params->stride_height > 0); + TF_LITE_ENSURE(context, params->stride_width > 0); + // Resize any deferred dynamic tensors if (IsDynamicTensor(output)) { TF_LITE_ENSURE_OK(context, ResizeTensor(context, output_shape, output)); From a3619eff1db3f3bcc11497040b37ed70a23f896e Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 27 Apr 2021 17:46:38 -0700 Subject: [PATCH 140/256] Prevent a null pointer exception in TFLite PiperOrigin-RevId: 370800206 Change-Id: Idd437ebce4ff224120d8eefc1c14c062173b71d6 --- tensorflow/lite/kernels/maximum_minimum.cc | 60 +++++++++++----------- 1 file changed, 31 insertions(+), 29 deletions(-) diff --git a/tensorflow/lite/kernels/maximum_minimum.cc b/tensorflow/lite/kernels/maximum_minimum.cc index 777e51442f120e..176e020a5a8e55 100644 --- a/tensorflow/lite/kernels/maximum_minimum.cc +++ b/tensorflow/lite/kernels/maximum_minimum.cc @@ -157,35 +157,37 @@ template TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { OpContext op_context(context, node); - switch (op_context.output->type) { - case kTfLiteFloat32: - TFLiteOperation(context, node, op_context); - break; - case kTfLiteUInt8: - TFLiteOperation(context, node, - op_context); - break; - case kTfLiteInt8: - TFLiteOperation(context, node, op_context); - break; - case kTfLiteInt32: - TFLiteOperation(context, node, - op_context); - break; - case kTfLiteInt64: - TFLiteOperation(context, node, - op_context); - break; - case kTfLiteInt16: - TFLiteOperation(context, node, - op_context); - break; - default: - 
context->ReportError(context, - "Type %d is currently not supported by Maximum.", - op_context.output->type); - return kTfLiteError; - } + // If inputs have no element, shortcircuit. + if (NumElements(op_context.input1) == 0 || + NumElements(op_context.input2) == 0) { + return kTfLiteOk; + } + + switch (op_context.output->type) { + case kTfLiteFloat32: + TFLiteOperation(context, node, op_context); + break; + case kTfLiteUInt8: + TFLiteOperation(context, node, op_context); + break; + case kTfLiteInt8: + TFLiteOperation(context, node, op_context); + break; + case kTfLiteInt32: + TFLiteOperation(context, node, op_context); + break; + case kTfLiteInt64: + TFLiteOperation(context, node, op_context); + break; + case kTfLiteInt16: + TFLiteOperation(context, node, op_context); + break; + default: + context->ReportError(context, + "Type %d is currently not supported by Maximum.", + op_context.output->type); + return kTfLiteError; + } return kTfLiteOk; } From 0687d0bfb858af7c036300ede99b5d8860769984 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 27 Apr 2021 17:47:59 -0700 Subject: [PATCH 141/256] Prevent a null pointer dereference in TFLite. PiperOrigin-RevId: 370800353 Change-Id: Ic9c9712ce5c6e384c954dcd640a5bd9ff05c9a05 --- tensorflow/lite/core/subgraph.cc | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/tensorflow/lite/core/subgraph.cc b/tensorflow/lite/core/subgraph.cc index 00a37815d21a4a..b76be1fcf204e8 100644 --- a/tensorflow/lite/core/subgraph.cc +++ b/tensorflow/lite/core/subgraph.cc @@ -973,10 +973,17 @@ TfLiteStatus Subgraph::Invoke() { TF_LITE_ENSURE_STATUS(EnsureTensorDataIsReadable(tensor_index)); } if (tensor->data.raw == nullptr && tensor->bytes > 0) { - if (registration.builtin_code == kTfLiteBuiltinReshape && i == 1) { + if (registration.builtin_code == kTfLiteBuiltinReshape && i == 1 && + tensor->dims->size != 1) { // In general, having a tensor here with no buffer will be an error. - // However, for the reshape operator, the second input tensor is only - // used for the shape, not for the data. Thus, null buffer is ok. + // However, for the reshape operator, the second input tensor is + // sometimes only used for the shape, not for the data. Thus, null + // buffer is ok in this situation. + // The situation where null buffer is not ok for reshape operator is + // only when there are 2 inputs given to the node and the one + // corresponding to the shape (i == 1) is a vector that contains all + // dimensions. See `GetOutputShape()` function in + // `tensorflow/lite/kernels/reshape.cc` continue; } else { // In all other cases, we need to return an error as otherwise we will From 070271128ad828026ebaa36569a7ec79fa620083 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 27 Apr 2021 17:47:46 -0700 Subject: [PATCH 142/256] Prevent infinite loop/stack overflow in TFLite `while` op. 
PiperOrigin-RevId: 370800333 Change-Id: I6a2e4ff849da339545c449db2af7e11ce6ff02c3 --- tensorflow/lite/kernels/while.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/lite/kernels/while.cc b/tensorflow/lite/kernels/while.cc index b50cdff99741b1..164edfa92809de 100644 --- a/tensorflow/lite/kernels/while.cc +++ b/tensorflow/lite/kernels/while.cc @@ -135,6 +135,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* subgraphs = this_subgraph->GetSubgraphs(); TF_LITE_ENSURE(context, op_data->cond_subgraph_index < subgraphs->size()); TF_LITE_ENSURE(context, op_data->body_subgraph_index < subgraphs->size()); + TF_LITE_ENSURE(context, + op_data->cond_subgraph_index != op_data->body_subgraph_index); Subgraph* cond_subgraph = (*subgraphs)[op_data->cond_subgraph_index].get(); Subgraph* body_subgraph = (*subgraphs)[op_data->body_subgraph_index].get(); From 1194fd66eb6fddbf8a7704645d972c73eaa89c0a Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 28 Apr 2021 12:37:35 -0700 Subject: [PATCH 143/256] CherryPick]:Prevent division by 0. --- tensorflow/lite/kernels/conv.cc | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tensorflow/lite/kernels/conv.cc b/tensorflow/lite/kernels/conv.cc index 81069de1abe890..4326543ac62209 100644 --- a/tensorflow/lite/kernels/conv.cc +++ b/tensorflow/lite/kernels/conv.cc @@ -501,6 +501,7 @@ TfLiteStatus Prepare(KernelType kernel_type, TfLiteContext* context, // Only one scale factor per batch is typically necessary. See optimized // implementation for why we need to allocate for the height of the inputs // flattened to 2D. + TF_LITE_ENSURE(context, channels_in != 0); const int height = NumElements(input) / channels_in; int scaling_dims[1] = {height}; if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { @@ -539,6 +540,7 @@ TfLiteStatus Prepare(KernelType kernel_type, TfLiteContext* context, input_offsets->type = kTfLiteInt32; input_offsets->allocation_type = kTfLiteArenaRw; // See above comment for the need to allocate for height of inputs. 
+ TF_LITE_ENSURE(context, channels_in != 0); const int height = NumElements(input) / channels_in; const int input_offset_dims[1] = {height}; if (!TfLiteIntArrayEqualsArray(input_offsets->dims, 1, @@ -800,8 +802,9 @@ void EvalHybridPerChannel(TfLiteContext* context, TfLiteNode* node, CalculateActivationRange(params->activation, &output_activation_min, &output_activation_max); - const int input_size = NumElements(input) / SizeOfDimension(input, 0); const int batch_size = SizeOfDimension(input, 0); + TF_LITE_ENSURE(context, batch_size != 0); + const int input_size = NumElements(input) / batch_size; int8_t* quantized_input_ptr_batch = GetTensorData( GetTemporary(context, node, data->input_quantized_index)); float* scaling_factors_ptr = GetTensorData( @@ -878,8 +881,9 @@ void EvalHybrid(TfLiteContext* context, TfLiteNode* node, CalculateActivationRange(params->activation, &output_activation_min, &output_activation_max); - const int input_size = NumElements(input) / SizeOfDimension(input, 0); const int batch_size = SizeOfDimension(input, 0); + TF_LITE_ENSURE(context, batch_size != 0); + const int input_size = NumElements(input) / batch_size; const float* input_ptr = GetTensorData(input); int8_t* quantized_input_ptr_batch = GetTensorData( From aed365181e01cd99eaac4ca9b8baaf506df1df71 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 28 Apr 2021 12:57:00 -0700 Subject: [PATCH 144/256] Prevent division by 0 PiperOrigin-RevId: 370966645 Change-Id: I831bfd96c7eb77b02d7ebb744335f59f6e5728cb --- tensorflow/lite/kernels/embedding_lookup.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/lite/kernels/embedding_lookup.cc b/tensorflow/lite/kernels/embedding_lookup.cc index 36e0737c7e2830..ea8fd5431a99cf 100644 --- a/tensorflow/lite/kernels/embedding_lookup.cc +++ b/tensorflow/lite/kernels/embedding_lookup.cc @@ -68,6 +68,10 @@ TfLiteStatus EvalSimple(TfLiteContext* context, TfLiteNode* node, const TfLiteTensor* lookup, const TfLiteTensor* value, TfLiteTensor* output) { const int row_size = SizeOfDimension(value, 0); + if (row_size == 0) { + // Propagate empty tensor if input is empty + return kTfLiteOk; + } const int row_bytes = value->bytes / row_size; char* output_raw = GetTensorData(output); From 9e0ebe4a9ba554313ed49fa8f02c3a197e0b9749 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 28 Apr 2021 13:57:37 -0700 Subject: [PATCH 145/256] Prevent division by 0 PiperOrigin-RevId: 370979352 Change-Id: Ic79191c316d986fc6072ecaebfec9d5f2b924d00 --- tensorflow/lite/kernels/batch_to_space_nd.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/lite/kernels/batch_to_space_nd.cc b/tensorflow/lite/kernels/batch_to_space_nd.cc index 9d6492e0fcbf06..044ac1b3a5ee5d 100644 --- a/tensorflow/lite/kernels/batch_to_space_nd.cc +++ b/tensorflow/lite/kernels/batch_to_space_nd.cc @@ -78,6 +78,7 @@ TfLiteStatus ResizeOutputTensor(TfLiteContext* context, int output_batch_size = input_size->data[0]; for (int dim = 0; dim < spatial_dims_num; ++dim) { // Number of batch must be multiple of (block_shape[dim]). 
+ TF_LITE_ENSURE(context, block_shape[dim] != 0); TF_LITE_ENSURE_EQ(context, output_batch_size % block_shape[dim], 0); output_batch_size = output_batch_size / block_shape[dim]; output_size->data[dim + 1] = input_size->data[dim + 1] * block_shape[dim] - From f05a9be363ec565c18e6e2116d14db19f239d32b Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 28 Apr 2021 14:22:30 -0700 Subject: [PATCH 146/256] Prevent division by 0 PiperOrigin-RevId: 370984990 Change-Id: Ib324955bbeb1cbd97c82fd5d61a00a2697c9a2de --- tensorflow/lite/kernels/space_to_batch_nd.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/lite/kernels/space_to_batch_nd.cc b/tensorflow/lite/kernels/space_to_batch_nd.cc index 0d537e2d1892fe..af7b9d9e914a1e 100644 --- a/tensorflow/lite/kernels/space_to_batch_nd.cc +++ b/tensorflow/lite/kernels/space_to_batch_nd.cc @@ -79,6 +79,7 @@ TfLiteStatus ResizeOutputTensor(TfLiteContext* context, for (int dim = 0; dim < spatial_dims_num; ++dim) { int final_dim_size = (input_size->data[dim + 1] + paddings_data[dim * 2] + paddings_data[dim * 2 + 1]); + TF_LITE_ENSURE(context, block_shape[dim] != 0); TF_LITE_ENSURE_EQ(context, final_dim_size % block_shape[dim], 0); output_size->data[dim + 1] = final_dim_size / block_shape[dim]; output_batch_size *= block_shape[dim]; From 762ce5297f902439a0bb982bdf9a2878286aeaf1 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 28 Apr 2021 14:22:30 -0700 Subject: [PATCH 147/256] Prevent division by 0 PiperOrigin-RevId: 370984990 Change-Id: Ib324955bbeb1cbd97c82fd5d61a00a2697c9a2de --- tensorflow/lite/kernels/space_to_batch_nd.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/lite/kernels/space_to_batch_nd.cc b/tensorflow/lite/kernels/space_to_batch_nd.cc index 0d537e2d1892fe..af7b9d9e914a1e 100644 --- a/tensorflow/lite/kernels/space_to_batch_nd.cc +++ b/tensorflow/lite/kernels/space_to_batch_nd.cc @@ -79,6 +79,7 @@ TfLiteStatus ResizeOutputTensor(TfLiteContext* context, for (int dim = 0; dim < spatial_dims_num; ++dim) { int final_dim_size = (input_size->data[dim + 1] + paddings_data[dim * 2] + paddings_data[dim * 2 + 1]); + TF_LITE_ENSURE(context, block_shape[dim] != 0); TF_LITE_ENSURE_EQ(context, final_dim_size % block_shape[dim], 0); output_size->data[dim + 1] = final_dim_size / block_shape[dim]; output_batch_size *= block_shape[dim]; From 6b023f56f3076a78b685baf62b17f70875d43bc7 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 28 Apr 2021 15:13:03 -0700 Subject: [PATCH 148/256] Prevent division by 0 PiperOrigin-RevId: 370995582 Change-Id: I670ffaf52d1ff8823ec31ea5f438f9125b402223 --- tensorflow/lite/kernels/svdf.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/lite/kernels/svdf.cc b/tensorflow/lite/kernels/svdf.cc index 1b8bf904b8ac31..267f07951f9e76 100644 --- a/tensorflow/lite/kernels/svdf.cc +++ b/tensorflow/lite/kernels/svdf.cc @@ -96,6 +96,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { const int rank = params->rank; const int batch_size = input->dims->data[0]; const int num_filters = weights_feature->dims->data[0]; + TF_LITE_ENSURE(context, rank != 0); TF_LITE_ENSURE_EQ(context, num_filters % rank, 0); const int num_units = num_filters / rank; const int memory_size = weights_time->dims->data[1]; From 888a2a55b40e1e10aea5660c1819679706b4dc6a Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 28 Apr 2021 15:31:26 -0700 Subject: [PATCH 149/256] Prevent division by 0 PiperOrigin-RevId: 370998952 Change-Id: I6b1d49079624ee1447d2d9b53a8976fb356cc8f5 --- 
tensorflow/lite/kernels/split.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/lite/kernels/split.cc b/tensorflow/lite/kernels/split.cc index 3b7781f409e2fe..dbd7384b487c20 100644 --- a/tensorflow/lite/kernels/split.cc +++ b/tensorflow/lite/kernels/split.cc @@ -58,6 +58,7 @@ TfLiteStatus ResizeOutputTensors(TfLiteContext* context, TfLiteNode* node, TF_LITE_ENSURE(context, axis_value < NumDimensions(input)); const int input_size = SizeOfDimension(input, axis_value); + TF_LITE_ENSURE(context, num_splits != 0); TF_LITE_ENSURE_MSG(context, input_size % num_splits == 0, "Not an even split"); const int slice_size = input_size / num_splits; From 8b45bc99c6bcc784731adb9a6d0d59b21a0c56aa Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 28 Apr 2021 12:58:07 -0700 Subject: [PATCH 150/256] Prevent division by 0 in OneHot implementation If input indices is degenerate, the implementation would do a divide by zero. See https://github.com/tensorflow/tensorflow/blob/745d57df6d5e9bc568666a2a48ed8dd629c27241/tensorflow/lite/kernels/one_hot.cc#L68-L72 PiperOrigin-RevId: 370966870 Change-Id: Ie018337811c8016b5a1d3a277d00d5f2e19a2058 --- tensorflow/lite/kernels/one_hot.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/lite/kernels/one_hot.cc b/tensorflow/lite/kernels/one_hot.cc index f7b4e8e7e19d57..75bfb48d6b19c8 100644 --- a/tensorflow/lite/kernels/one_hot.cc +++ b/tensorflow/lite/kernels/one_hot.cc @@ -69,6 +69,11 @@ void OneHotComputeImpl(const OneHotContext& op_context) { for (int i = 0; i < op_context.axis; ++i) { prefix_dim_size *= op_context.indices->dims->data[i]; } + if (prefix_dim_size == 0) { + // If indices tensor is degenerate, return a degenerate tensor, just like + // TensorFlow does. + return; + } const int suffix_dim_size = NumElements(op_context.indices) / prefix_dim_size; const int depth = *op_context.depth->data.i32; From 7448d712c46fc2c3ef3fd13d8187c542cae4f46e Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 28 Apr 2021 15:53:48 -0700 Subject: [PATCH 151/256] Prevent divisions by 0 --- tensorflow/lite/kernels/depthwise_conv.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tensorflow/lite/kernels/depthwise_conv.cc b/tensorflow/lite/kernels/depthwise_conv.cc index 961a987cf028a0..ee0b8eb86ee2ec 100644 --- a/tensorflow/lite/kernels/depthwise_conv.cc +++ b/tensorflow/lite/kernels/depthwise_conv.cc @@ -274,8 +274,8 @@ TfLiteStatus ComputeDepthMultiplier(TfLiteContext* context, int16* depth_multiplier) { int num_filter_channels = SizeOfDimension(filter, 3); int num_input_channels = SizeOfDimension(input, 3); + TF_LITE_ENSURE(context, num_input_channels != 0); TF_LITE_ENSURE_EQ(context, num_filter_channels % num_input_channels, 0); - *depth_multiplier = num_filter_channels / num_input_channels; return kTfLiteOk; } @@ -444,8 +444,9 @@ TfLiteStatus EvalHybridPerChannel(TfLiteContext* context, TfLiteNode* node, float output_activation_min, output_activation_max; CalculateActivationRange(params->activation, &output_activation_min, &output_activation_max); - const int input_size = NumElements(input) / SizeOfDimension(input, 0); const int batch_size = SizeOfDimension(input, 0); + TF_LITE_ENSURE(context, batch_size != 0); + const int input_size = NumElements(input) / batch_size; const TfLiteTensor* input_quantized = GetTemporary(context, node, data->input_quantized_index); int8_t* quantized_input_ptr_batch = input_quantized->data.int8; From ab5709e56ddf43edabdc34d152bdef613350741d Mon Sep 17 00:00:00 2001 From: Mihai Maruseac 
Date: Wed, 28 Apr 2021 16:16:56 -0700 Subject: [PATCH 152/256] Prevent a division by 0 PiperOrigin-RevId: 371007407 Change-Id: Iecf2718de48d6bf5a69b02a9df9deda8ec1b19d3 --- tensorflow/lite/kernels/hashtable_lookup.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/lite/kernels/hashtable_lookup.cc b/tensorflow/lite/kernels/hashtable_lookup.cc index 65e50fe41c2331..9d947107c1bc2c 100644 --- a/tensorflow/lite/kernels/hashtable_lookup.cc +++ b/tensorflow/lite/kernels/hashtable_lookup.cc @@ -101,6 +101,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* value = GetInput(context, node, 2); const int num_rows = SizeOfDimension(value, 0); + TF_LITE_ENSURE(context, num_rows != 0); const int row_bytes = value->bytes / num_rows; void* pointer = nullptr; DynamicBuffer buf; From 41f788cbeae8389d77df557a975fab73f43d29a6 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 28 Apr 2021 16:50:55 -0700 Subject: [PATCH 153/256] Fix integer overflow in TFLite concat PiperOrigin-RevId: 371013841 Change-Id: I6a4782ce7ca753e23ff31e7fb6aeb7f9d412cd29 --- tensorflow/lite/kernels/concatenation.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tensorflow/lite/kernels/concatenation.cc b/tensorflow/lite/kernels/concatenation.cc index 5d5f06ba013645..df01792c182897 100644 --- a/tensorflow/lite/kernels/concatenation.cc +++ b/tensorflow/lite/kernels/concatenation.cc @@ -16,6 +16,8 @@ limitations under the License. #include +#include + #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" @@ -68,6 +70,10 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, t->type, input_type); for (int d = 0; d < t0->dims->size; ++d) { if (d == axis) { + // Avoid integer overflow in sum_axis below + TF_LITE_ENSURE(context, t->dims->data[axis] >= 0); + TF_LITE_ENSURE(context, t->dims->data[axis] <= + std::numeric_limits::max() - sum_axis); sum_axis += t->dims->data[axis]; } else { TF_LITE_ENSURE_EQ(context, t->dims->data[d], t0->dims->data[d]); From 8f42a022a62deeae0fa56191d3c41755c47d264f Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 29 Apr 2021 19:43:09 -0700 Subject: [PATCH 154/256] Fix a dangerous integer overflow and a malloc of negative size. 
PiperOrigin-RevId: 371254154 Change-Id: I250a98a3df26328770167025670235a963a72da0 --- tensorflow/lite/c/common.c | 6 ++++-- tensorflow/lite/kernels/embedding_lookup_sparse.cc | 1 + 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/tensorflow/lite/c/common.c b/tensorflow/lite/c/common.c index e6b47896528a63..9af6d5151b50fa 100644 --- a/tensorflow/lite/c/common.c +++ b/tensorflow/lite/c/common.c @@ -43,8 +43,10 @@ int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size, #ifndef TF_LITE_STATIC_MEMORY TfLiteIntArray* TfLiteIntArrayCreate(int size) { - TfLiteIntArray* ret = - (TfLiteIntArray*)malloc(TfLiteIntArrayGetSizeInBytes(size)); + int alloc_size = TfLiteIntArrayGetSizeInBytes(size); + if (alloc_size <= 0) return NULL; + TfLiteIntArray* ret = (TfLiteIntArray*)malloc(alloc_size); + if (!ret) return ret; ret->size = size; return ret; } diff --git a/tensorflow/lite/kernels/embedding_lookup_sparse.cc b/tensorflow/lite/kernels/embedding_lookup_sparse.cc index 745b5090094687..e798c6a87391ea 100644 --- a/tensorflow/lite/kernels/embedding_lookup_sparse.cc +++ b/tensorflow/lite/kernels/embedding_lookup_sparse.cc @@ -161,6 +161,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { // Resize output tensor. TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_rank); + TF_LITE_ENSURE(context, output_shape != nullptr); int k = 0; int embedding_size = 1; int lookup_size = 1; From 038c7b035dfad1860fdfc5f5a2aec13a1e2e4203 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 28 Apr 2021 17:50:10 -0700 Subject: [PATCH 155/256] Prevent array write out-of-bounds. If user passes an invalid axis, then we copy one too many dimensions to the output in the loop below these checks. Even if we didn't do that, there will be further issues with an invalid axis, so we check for that right now. PiperOrigin-RevId: 371023299 Change-Id: I9eca37ffc2b29e8e48710f500701270ef0790224 --- tensorflow/lite/kernels/arg_min_max.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/lite/kernels/arg_min_max.cc b/tensorflow/lite/kernels/arg_min_max.cc index 4a3902ac57c59c..e8b7201cc4b125 100644 --- a/tensorflow/lite/kernels/arg_min_max.cc +++ b/tensorflow/lite/kernels/arg_min_max.cc @@ -42,6 +42,9 @@ TfLiteStatus ResizeOutput(TfLiteContext* context, const TfLiteTensor* input, axis_value += NumDimensions(input); } + TF_LITE_ENSURE(context, axis_value >= 0); + TF_LITE_ENSURE(context, axis_value < NumDimensions(input)); + // Copy the input dimensions to output except the axis dimension. 
TfLiteIntArray* output_dims = TfLiteIntArrayCreate(NumDimensions(input) - 1); int j = 0; From a342f6d6d6aa4355dadc7c4f789f2859aee1fa02 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 28 Apr 2021 18:12:15 -0700 Subject: [PATCH 156/256] Prevent array OOB read/write PiperOrigin-RevId: 371026165 Change-Id: I26ac6372c87246e03c7eb8c94e84c84d86054b36 --- tensorflow/lite/kernels/split_v.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/lite/kernels/split_v.cc b/tensorflow/lite/kernels/split_v.cc index 7d60086a91ddd2..26bbc0d37ecc06 100644 --- a/tensorflow/lite/kernels/split_v.cc +++ b/tensorflow/lite/kernels/split_v.cc @@ -94,6 +94,8 @@ TfLiteStatus ResizeOutputTensors(TfLiteContext* context, TfLiteNode* node, } } + TF_LITE_ENSURE(context, axis_value >= 0); + TF_LITE_ENSURE(context, axis_value < NumDimensions(input)); const int input_size = SizeOfDimension(input, axis_value); if (minus_one_index != -1) { From 7a46fb2df53973ab5a2ce227496d445b41a629ce Mon Sep 17 00:00:00 2001 From: Geeta Chavan Date: Thu, 27 May 2021 12:10:16 -0700 Subject: [PATCH 157/256] Fix 2 issues with . --- tensorflow/core/kernels/conv_ops_3d.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tensorflow/core/kernels/conv_ops_3d.h b/tensorflow/core/kernels/conv_ops_3d.h index 9dcdea5b18f10b..8073ca5a9dfdce 100644 --- a/tensorflow/core/kernels/conv_ops_3d.h +++ b/tensorflow/core/kernels/conv_ops_3d.h @@ -56,6 +56,11 @@ struct LaunchConvOp { errors::InvalidArgument("CPU implementation of Conv3D " "currently only supports dilated rates " "of 1.")); + OP_REQUIRES(context, filter.dim_size(3) == input.dim_size(input.dims() - 1), + errors::InvalidArgument( + "Number of channels in filter (", filter.dim_size(3), + ") must match last dimension of input (", + input.dim_size(input.dims() - 1), ")")); functor::CuboidConvolution()( context->template eigen_device(), output->tensor(), input.tensor(), filter.tensor(), strides[2], strides[1], @@ -135,6 +140,8 @@ class Conv3DOp : public BinaryOpBase { const int64 filter_depth = filter.dim_size(3); const int64 out_depth = filter.dim_size(4); + OP_REQUIRES(context, filter_depth != 0, + errors::InvalidArgument("filter_depth must be non-zero")); OP_REQUIRES(context, in_depth % filter_depth == 0, errors::InvalidArgument( "Input depth must be evenly divisible by filter depth: ", From 5fc7d4bd5113c70c61d65deae00b6992dd62d6f7 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Tue, 13 Apr 2021 14:49:50 -0700 Subject: [PATCH 158/256] Fix invalid resize. --- tensorflow/core/kernels/ragged_tensor_to_variant_op.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc b/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc index 7a5ae1c6240b55..1457e5e2c73f7d 100644 --- a/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc +++ b/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc @@ -175,6 +175,11 @@ class RaggedTensorToVariantOp : public OpKernel { // Unbatch the Ragged Tensor and encode the components. 
std::vector ragged_components; + auto batched_splits_top_vec = + batched_ragged_input.splits(0).vec(); + int num_components = batched_splits_top_vec.size() - 1; + OP_REQUIRES(context, num_components >= 0, + errors::Internal("Invalid split argument.")); OP_REQUIRES_OK(context, UnbatchRaggedZerothDim( batched_ragged_input, &ragged_components)); std::vector encoded_components(ragged_components.size()); From 47c234ec33bcde8f96b3bd0af26ae957b7e48cd7 Mon Sep 17 00:00:00 2001 From: Geeta Chavan Date: Fri, 28 May 2021 14:25:36 -0700 Subject: [PATCH 159/256] Fix crash with tf.transpose when a is complex and conjugate is True --- tensorflow/core/kernels/transpose_functor.h | 2 +- tensorflow/python/kernel_tests/transpose_op_test.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/transpose_functor.h b/tensorflow/core/kernels/transpose_functor.h index 0c22b11b7c6813..44193ab40273b1 100644 --- a/tensorflow/core/kernels/transpose_functor.h +++ b/tensorflow/core/kernels/transpose_functor.h @@ -19,6 +19,7 @@ limitations under the License. #include #include #include + #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/platform/logging.h" @@ -166,7 +167,6 @@ template Status DoTransposeImpl(const Device& d, const Tensor& in, const gtl::ArraySlice perm, bool conjugate, Tensor* out) { - CHECK_GE(in.dims(), 2); CHECK_EQ(in.dims(), out->dims()); CHECK_EQ(in.dims(), perm.size()); CHECK_EQ(in.dtype(), out->dtype()); diff --git a/tensorflow/python/kernel_tests/transpose_op_test.py b/tensorflow/python/kernel_tests/transpose_op_test.py index 87096211a01494..ed634ae7543b54 100644 --- a/tensorflow/python/kernel_tests/transpose_op_test.py +++ b/tensorflow/python/kernel_tests/transpose_op_test.py @@ -387,6 +387,8 @@ def testDouble(self): @test_util.run_v1_only("b/120545219") def testComplex64(self): + self._testBoth(np.array(np.complex(1, 2)).astype(np.complex64)) + self._testBoth(np.complex(1, 2) * np.arange(0, 21).astype(np.complex64)) self._testBoth( np.complex(1, 2) * np.arange(0, 21).reshape([3, 7]).astype(np.complex64)) @@ -399,6 +401,8 @@ def testComplex64(self): @test_util.run_v1_only("b/120545219") def testComplex128(self): + self._testBoth(np.array(np.complex(1, 2)).astype(np.complex128)) + self._testBoth(np.complex(1, 2) * np.arange(0, 21).astype(np.complex128)) self._testBoth( np.complex(1, 2) * np.arange(0, 21).reshape([3, 7]).astype(np.complex128)) From 3cd06951623a62f756c8ccd1ab97b890483429c1 Mon Sep 17 00:00:00 2001 From: Geeta Chavan Date: Fri, 28 May 2021 15:56:40 -0700 Subject: [PATCH 160/256] PR #48739: Update jsoncpp to 1.9.4 --- tensorflow/workspace.bzl | 8 ++++---- third_party/jsoncpp.BUILD | 1 - 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl index 61ac9f791410de..3e474b3750c5be 100755 --- a/tensorflow/workspace.bzl +++ b/tensorflow/workspace.bzl @@ -743,12 +743,12 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""): tf_http_archive( name = "jsoncpp_git", build_file = clean_dep("//third_party:jsoncpp.BUILD"), - sha256 = "77a402fb577b2e0e5d0bdc1cf9c65278915cdb25171e3452c68b6da8a561f8f0", - strip_prefix = "jsoncpp-1.9.2", + sha256 = "e34a628a8142643b976c7233ef381457efad79468c67cb1ae0b83a33d7493999", + strip_prefix = "jsoncpp-1.9.4", system_build_file = clean_dep("//third_party/systemlibs:jsoncpp.BUILD"), urls = [ - 
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/open-source-parsers/jsoncpp/archive/1.9.2.tar.gz", - "https://github.com/open-source-parsers/jsoncpp/archive/1.9.2.tar.gz", + "https://storage.googleapis.com/mirror.tensorflow.org/github.com/open-source-parsers/jsoncpp/archive/1.9.4.tar.gz", + "https://github.com/open-source-parsers/jsoncpp/archive/1.9.4.tar.gz", ], ) diff --git a/third_party/jsoncpp.BUILD b/third_party/jsoncpp.BUILD index 7bc466c664f71e..3b4642c81098c7 100644 --- a/third_party/jsoncpp.BUILD +++ b/third_party/jsoncpp.BUILD @@ -13,7 +13,6 @@ cc_library( ], hdrs = [ "include/json/allocator.h", - "include/json/autolink.h", "include/json/config.h", "include/json/forwards.h", "include/json/json.h", From 809bd2e2bdb28fb20ee84303d95def152f3d1ea9 Mon Sep 17 00:00:00 2001 From: Laura Pak Date: Fri, 23 Apr 2021 10:33:00 -0700 Subject: [PATCH 161/256] Prevent memory overflow in ParseAttrValue from nested tensors. PiperOrigin-RevId: 370108442 Change-Id: I84d64a5e8895a6aeffbf4749841b4c54d51b5889 --- tensorflow/core/framework/attr_value_util.cc | 58 +++++++++++++++++++- 1 file changed, 57 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/framework/attr_value_util.cc b/tensorflow/core/framework/attr_value_util.cc index a307c8a18c1862..ca1f316409b39b 100644 --- a/tensorflow/core/framework/attr_value_util.cc +++ b/tensorflow/core/framework/attr_value_util.cc @@ -38,6 +38,9 @@ namespace { // Do not construct large tensors to compute their hash or compare for equality. constexpr int kMaxAttrValueTensorByteSize = 32 * 1024 * 1024; // 32mb +// Limit nesting of tensors to 100 deep to prevent memory overflow. +constexpr int kMaxTensorNestDepth = 100; + // Return the size of the tensor represented by this TensorProto. If shape is // not fully defined return -1. 
int64 TensorByteSize(const TensorProto& t) { @@ -224,6 +227,54 @@ string SummarizeFunc(const NameAttrList& func) { return strings::StrCat(func.name(), "[", absl::StrJoin(entries, ", "), "]"); } +bool ParseAttrValueHelper_TensorNestsUnderLimit(int limit, string to_parse) { + int nests = 0; + int maxed_out = to_parse.length(); + int open_curly = to_parse.find('{'); + int open_bracket = to_parse.find('<'); + int close_curly = to_parse.find('}'); + int close_bracket = to_parse.find('>'); + if (open_curly == -1) { + open_curly = maxed_out; + } + if (open_bracket == -1) { + open_bracket = maxed_out; + } + int min = std::min(open_curly, open_bracket); + do { + if (open_curly == maxed_out && open_bracket == maxed_out) { + return true; + } + if (min == open_curly) { + nests += 1; + open_curly = to_parse.find('{', open_curly + 1); + if (open_curly == -1) { + open_curly = maxed_out; + } + } else if (min == open_bracket) { + nests += 1; + open_bracket = to_parse.find('<', open_bracket + 1); + if (open_bracket == -1) { + open_bracket = maxed_out; + } + } else if (min == close_curly) { + nests -= 1; + close_curly = to_parse.find('}', close_curly + 1); + if (close_curly == -1) { + close_curly = maxed_out; + } + } else if (min == close_bracket) { + nests -= 1; + close_bracket = to_parse.find('>', close_bracket + 1); + if (close_bracket == -1) { + close_bracket = maxed_out; + } + } + min = std::min({open_curly, open_bracket, close_curly, close_bracket}); + } while (nests < 100); + return false; +} + } // namespace string SummarizeAttrValue(const AttrValue& attr_value) { @@ -448,7 +499,12 @@ bool ParseAttrValue(StringPiece type, StringPiece text, AttrValue* out) { } else { to_parse = strings::StrCat(field_name, ": ", text); } - + if (field_name == "tensor") { + if (!ParseAttrValueHelper_TensorNestsUnderLimit(kMaxTensorNestDepth, + to_parse)) { + return false; + } + } return ProtoParseFromString(to_parse, out); } From 02de907b32b5653d26f55f206f8b01673a8b8404 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 11 May 2021 15:22:49 -0700 Subject: [PATCH 162/256] Fix heap OOB / undefined behavior in `RaggedTensorToTensor` PiperOrigin-RevId: 373244623 Change-Id: I2d6cbbc8c67b238a8815bf58097f7586d87c54f2 --- .../kernels/ragged_tensor_to_tensor_op.cc | 55 ++++++++++++------- 1 file changed, 35 insertions(+), 20 deletions(-) diff --git a/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc b/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc index d452c4d19c3711..8905647fa6b64f 100644 --- a/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc +++ b/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc @@ -207,8 +207,8 @@ class RaggedTensorToTensorBaseOp : public OpKernel { DCHECK_EQ(result->size(), first_dimension); } - void CalculateOutputIndexRowSplit( - OpKernelContext* context, const RowPartitionTensor& row_split, + Status CalculateOutputIndexRowSplit( + const RowPartitionTensor& row_split, const vector& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector* result) { @@ -232,10 +232,11 @@ class RaggedTensorToTensorBaseOp : public OpKernel { result->push_back(-1); } } - if (row_split_size > 0) { - OP_REQUIRES(context, result->size() == row_split(row_split_size - 1), - errors::InvalidArgument("Invalid row split size.")); + if (row_split_size > 0 && result->size() != row_split(row_split_size - 1)) { + return errors::InvalidArgument("Invalid row split size."); } + + return Status::OK(); } // Calculate the output index of the first element of a list. 
@@ -259,20 +260,26 @@ class RaggedTensorToTensorBaseOp : public OpKernel { // result[6] = -1 because parent_output_index[value_rowids[6]] == -1 // result[7] = -1 because parent_output_index[value_rowids[6]] == -1 // result[8] = parent_output_index[value_rowids[7]] - void CalculateOutputIndexValueRowID( - OpKernelContext* context, const RowPartitionTensor& value_rowids, + Status CalculateOutputIndexValueRowID( + const RowPartitionTensor& value_rowids, const vector& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector* result) { const INDEX_TYPE index_size = value_rowids.size(); result->reserve(index_size); if (index_size == 0) { - return; + return Status::OK(); } INDEX_TYPE current_output_column = 0; INDEX_TYPE current_value_rowid = value_rowids(0); - DCHECK_LT(current_value_rowid, parent_output_index.size()); + + if (current_value_rowid >= parent_output_index.size()) { + return errors::InvalidArgument( + "Got current_value_rowid=", current_value_rowid, + " which is not less than ", parent_output_index.size()); + } + INDEX_TYPE current_output_index = parent_output_index[current_value_rowid]; result->push_back(current_output_index); for (INDEX_TYPE i = 1; i < index_size; ++i) { @@ -289,13 +296,23 @@ class RaggedTensorToTensorBaseOp : public OpKernel { } else { current_output_column = 0; current_value_rowid = next_value_rowid; - DCHECK_LT(next_value_rowid, parent_output_index.size()); + + if (next_value_rowid >= parent_output_index.size()) { + return errors::InvalidArgument( + "Got next_value_rowid=", next_value_rowid, + " which is not less than ", parent_output_index.size()); + } + current_output_index = parent_output_index[next_value_rowid]; } result->push_back(current_output_index); } - OP_REQUIRES(context, result->size() == value_rowids.size(), - errors::InvalidArgument("Invalid row ids.")); + + if (result->size() != value_rowids.size()) { + return errors::InvalidArgument("Invalid row ids."); + } + + return Status::OK(); } Status CalculateOutputIndex(OpKernelContext* context, int dimension, @@ -308,10 +325,9 @@ class RaggedTensorToTensorBaseOp : public OpKernel { auto partition_type = GetRowPartitionTypeByDimension(dimension); switch (partition_type) { case RowPartitionType::VALUE_ROWIDS: - CalculateOutputIndexValueRowID( - context, row_partition_tensor, parent_output_index, - output_index_multiplier, output_size, result); - return tensorflow::Status::OK(); + return CalculateOutputIndexValueRowID( + row_partition_tensor, parent_output_index, output_index_multiplier, + output_size, result); case RowPartitionType::ROW_SPLITS: if (row_partition_tensor.size() - 1 > parent_output_index.size()) { return errors::InvalidArgument( @@ -319,10 +335,9 @@ class RaggedTensorToTensorBaseOp : public OpKernel { row_partition_tensor.size() - 1, " > ", parent_output_index.size()); } - CalculateOutputIndexRowSplit( - context, row_partition_tensor, parent_output_index, - output_index_multiplier, output_size, result); - return tensorflow::Status::OK(); + return CalculateOutputIndexRowSplit( + row_partition_tensor, parent_output_index, output_index_multiplier, + output_size, result); default: return errors::InvalidArgument( "Unsupported partition type:", From 9c0d5a842aa4a8a7985ffdf4f860fd39b504ab80 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 11 May 2021 15:41:51 -0700 Subject: [PATCH 163/256] Validate that a and b are proper sparse tensors PiperOrigin-RevId: 373248068 Change-Id: I0a2041a0747901b3f00387a6a3bce9bca6b0b3b1 --- 
tensorflow/core/kernels/sparse_add_op.cc | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/tensorflow/core/kernels/sparse_add_op.cc b/tensorflow/core/kernels/sparse_add_op.cc index 346206365af8d5..2bd05fa41adc26 100644 --- a/tensorflow/core/kernels/sparse_add_op.cc +++ b/tensorflow/core/kernels/sparse_add_op.cc @@ -44,6 +44,11 @@ class SparseAddOp : public OpKernel { b_indices->shape().DebugString())); const int64 a_nnz = a_indices->dim_size(0); const int64 b_nnz = b_indices->dim_size(0); + const int num_dims = a_indices->dim_size(1); + OP_REQUIRES(ctx, b_indices->dim_size(1) == num_dims, + errors::InvalidArgument( + "Input indices must have the same dimension, got ", + num_dims, " and ", b_indices->dim_size(1))); OP_REQUIRES_OK(ctx, ctx->input("a_values", &a_values_t)); OP_REQUIRES_OK(ctx, ctx->input("b_values", &b_values_t)); @@ -72,6 +77,13 @@ class SparseAddOp : public OpKernel { "Input shapes should be a vector but received shapes ", a_shape->shape().DebugString(), " and ", b_shape->shape().DebugString())); + OP_REQUIRES( + ctx, a_shape->NumElements() == num_dims, + errors::InvalidArgument("Second dimension of a_indices and length of " + "a_shape must match, got ", + num_dims, " and ", a_shape->NumElements())); + OP_REQUIRES(ctx, num_dims > 0, + errors::InvalidArgument("Tensors must not be empty")); OP_REQUIRES( ctx, a_shape->IsSameSize(*b_shape), errors::InvalidArgument( @@ -100,11 +112,6 @@ class SparseAddOp : public OpKernel { std::vector> entries_to_copy; // from_a?, idx entries_to_copy.reserve(a_nnz + b_nnz); std::vector out_values; - const int num_dims = a_shape->dim_size(0); - - OP_REQUIRES(ctx, num_dims > 0, - errors::InvalidArgument("Invalid input_a shape. Received: ", - a_shape->DebugString())); // The input and output sparse tensors are assumed to be ordered along // increasing dimension number. From 38aa2a431056c6c17c212c888e8f8eb18cfd28ed Mon Sep 17 00:00:00 2001 From: Geeta Chavan Date: Tue, 1 Jun 2021 12:26:15 -0700 Subject: [PATCH 164/256] Prevent yet another division by zero --- tensorflow/core/kernels/conv_grad_input_ops.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/core/kernels/conv_grad_input_ops.cc b/tensorflow/core/kernels/conv_grad_input_ops.cc index 2dd63d1f4d05b7..a89e5c7185c0f6 100644 --- a/tensorflow/core/kernels/conv_grad_input_ops.cc +++ b/tensorflow/core/kernels/conv_grad_input_ops.cc @@ -668,6 +668,11 @@ class Conv2DCustomBackpropInputOp : public OpKernel { dims.batch_size == 1 || thread_work_unit_size >= min_thread_work_unit_size; + OP_REQUIRES( + context, work_unit_size > 0, + errors::InvalidArgument("input, filter_sizes and out_backprop tensors " + "must all have at least 1 element")); + const size_t shard_size = use_parallel_contraction ? 1 From 984f2af0024a2a898f3c3df505fd12d14fd9fb33 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 19 Apr 2021 18:32:56 -0700 Subject: [PATCH 165/256] Fix one more FPE.
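The FPE is an integer division by zero in ComputeConv2DDimension: in_depth % patch_depth is evaluated before anything verifies that the filter depth is non-zero. A minimal sketch of the kind of degenerate call the new check guards against (the shapes below are illustrative assumptions, not taken from a reported case):

    import tensorflow as tf

    # Hypothetical repro: a filter whose input-depth dimension is 0.
    x = tf.zeros([1, 1, 1, 1], dtype=tf.float32)
    f = tf.constant([], shape=[1, 1, 0, 1], dtype=tf.float32)
    # Previously this reached in_depth % patch_depth with patch_depth == 0.
    tf.raw_ops.Conv2D(input=x, filter=f, strides=[1, 1, 1, 1], padding="SAME")

With the added TF_REQUIRES check such a call should fail with an InvalidArgument error instead of crashing the process.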
--- tensorflow/core/kernels/conv_ops.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/core/kernels/conv_ops.cc b/tensorflow/core/kernels/conv_ops.cc index ab8e24a311ff68..287cf4a923b31c 100644 --- a/tensorflow/core/kernels/conv_ops.cc +++ b/tensorflow/core/kernels/conv_ops.cc @@ -425,6 +425,9 @@ Status ComputeConv2DDimension(const Conv2DParameters& params, errors::InvalidArgument("Patch depth too large")); const int in_depth = static_cast(in_depth_raw); const int patch_depth = static_cast(patch_depth_raw); + TF_REQUIRES(patch_depth > 0, + errors::InvalidArgument( + "filter depth must be strictly positive, got ", patch_depth)); TF_REQUIRES(in_depth % patch_depth == 0, errors::InvalidArgument( "input depth must be evenly divisible by filter depth: ", From 808ad5c16eca8cae3bcfd6ec9af55e0d64138a6b Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Thu, 29 Apr 2021 15:30:30 -0700 Subject: [PATCH 166/256] Fix heap-buffer-overflow issue with . --- tensorflow/core/kernels/sparse_reshape_op.cc | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tensorflow/core/kernels/sparse_reshape_op.cc b/tensorflow/core/kernels/sparse_reshape_op.cc index 6eb5f0af635c28..c3b1932a1735b0 100644 --- a/tensorflow/core/kernels/sparse_reshape_op.cc +++ b/tensorflow/core/kernels/sparse_reshape_op.cc @@ -26,6 +26,7 @@ limitations under the License. #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/reshape_util.h" #include "tensorflow/core/lib/gtl/inlined_vector.h" +#include "tensorflow/core/platform/errors.h" namespace tensorflow { @@ -34,6 +35,17 @@ class SparseReshapeOp : public OpKernel { explicit SparseReshapeOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { + const Tensor& input_indices_in = context->input(0); + const Tensor& input_shape_in = context->input(1); + + OP_REQUIRES(context, TensorShapeUtils::IsMatrix(input_indices_in.shape()), + errors::InvalidArgument("Input must be a matrix.")); + OP_REQUIRES(context, TensorShapeUtils::IsVector(input_shape_in.shape()), + errors::InvalidArgument("Input shape must be a vector.")); + OP_REQUIRES(context, + input_indices_in.dim_size(1) == input_shape_in.dim_size(0), + errors::InvalidArgument( + "Input tensor rank must match input shape length.")); ReshapeSparseTensor(context, context->input(0), context->input(1), context->input(2), 0 /* output indices index */, 1 /* output shape index */); From 7a7e905c929c77000cd0dd0fe3d7d8fd769113c5 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 2 Jun 2021 13:43:21 -0700 Subject: [PATCH 167/256] Fix r2.2 branch after cherrypicks --- tensorflow/core/kernels/sparse_split_op.cc | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/tensorflow/core/kernels/sparse_split_op.cc b/tensorflow/core/kernels/sparse_split_op.cc index ca3e77f76af7f7..b0c147da8a8344 100644 --- a/tensorflow/core/kernels/sparse_split_op.cc +++ b/tensorflow/core/kernels/sparse_split_op.cc @@ -18,6 +18,7 @@ limitations under the License.
#include #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" +#include "tensorflow/core/util/overflow.h" #include "tensorflow/core/util/sparse/sparse_tensor.h" namespace tensorflow { @@ -64,17 +65,20 @@ class SparseSplitOp : public OpKernel { num_split_)); // Prevent overflow by constructing the dense shape separately - TensorShape dense_shape; + int64 total_elements = 1; const auto input_shape_flat = input_shape.flat(); for (int i = 0; i < input_shape.NumElements(); i++) { - OP_REQUIRES_OK(context, - dense_shape.AddDimWithStatus(input_shape_flat(i))); + total_elements = + MultiplyWithoutOverflow(total_elements, input_shape_flat(i)); + OP_REQUIRES(context, total_elements >= 0, + errors::Internal("Encountered overflow in dense shape")); } sparse::SparseTensor sparse_tensor; OP_REQUIRES_OK(context, - sparse::SparseTensor::Create(input_indices, input_values, - dense_shape, &sparse_tensor)); + sparse::SparseTensor::Create( + input_indices, input_values, + TensorShape(input_shape.vec()), &sparse_tensor)); std::vector outputs; OP_REQUIRES_OK(context, From 3d27d9c4c89f5a5cc6c9d84986a6fa97b5f1ca6a Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 6 May 2021 17:45:51 -0700 Subject: [PATCH 168/256] Cherry pick 2.3 Add missing validation to FusedBatchNorm --- .../core/kernels/fused_batch_norm_op.cc | 29 ++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/fused_batch_norm_op.cc b/tensorflow/core/kernels/fused_batch_norm_op.cc index 59470c8ac6e027..bd1b94d34b96fc 100644 --- a/tensorflow/core/kernels/fused_batch_norm_op.cc +++ b/tensorflow/core/kernels/fused_batch_norm_op.cc @@ -1267,6 +1267,33 @@ class FusedBatchNormOpBase : public OpKernel { context, estimated_variance.dims() == 1, errors::InvalidArgument("estimated_variance must be 1-dimensional", estimated_variance.shape().DebugString())); + + const auto num_channels = GetTensorDim(x, tensor_format_, 'C'); + OP_REQUIRES( + context, scale.NumElements() == num_channels, + errors::InvalidArgument("scale must have the same number of elements " + "as the channels of x, got ", + scale.NumElements(), " and ", num_channels)); + OP_REQUIRES( + context, offset.NumElements() == num_channels, + errors::InvalidArgument("offset must have the same number of elements " + "as the channels of x, got ", + offset.NumElements(), " and ", num_channels)); + if (estimated_mean.NumElements() != 0) { + OP_REQUIRES(context, estimated_mean.NumElements() == num_channels, + errors::InvalidArgument( + "mean must be empty or have the same number of " + "elements as the channels of x, got ", + estimated_mean.NumElements(), " and ", num_channels)); + } + if (estimated_variance.NumElements() != 0) { + OP_REQUIRES(context, estimated_variance.NumElements() == num_channels, + errors::InvalidArgument( + "variance must be empty or have the same number of " + "elements as the channels of x, got ", + estimated_variance.NumElements(), " and ", num_channels)); + } + if (has_side_input_) { OP_REQUIRES(context, side_input->shape() == x.shape(), errors::InvalidArgument( @@ -1279,7 +1306,7 @@ class FusedBatchNormOpBase : public OpKernel { // NOTE(ezhulenev): This requirement is coming from implementation // details of cudnnBatchNormalizationForwardTrainingEx.
OP_REQUIRES( - context, !is_training_ || x.dim_size(3) % 4 == 0, + context, !is_training_ || num_channels % 4 == 0, errors::InvalidArgument("FusedBatchNorm with activation requires " "channel dimension to be a multiple of 4.")); } From 2a38deb6f26b088f22a7eceb6425805ee293bf10 Mon Sep 17 00:00:00 2001 From: Yu-Cheng Ling Date: Mon, 3 May 2021 09:07:42 -0700 Subject: [PATCH 169/256] Cherrypick2.3 TFLite: Error out when the graph has a recursion. --- tensorflow/lite/BUILD | 1 + tensorflow/lite/core/subgraph.cc | 46 ++++++++++++++++++ tensorflow/lite/core/subgraph.h | 5 ++ tensorflow/lite/kernels/while.cc | 2 - tensorflow/lite/model_test.cc | 18 +++++++ .../lite/testdata/unsupported_recursion.bin | Bin 0 -> 600 bytes 6 files changed, 70 insertions(+), 2 deletions(-) create mode 100644 tensorflow/lite/testdata/unsupported_recursion.bin diff --git a/tensorflow/lite/BUILD b/tensorflow/lite/BUILD index 0eae6ad17c0e85..83e57ef4c4848a 100644 --- a/tensorflow/lite/BUILD +++ b/tensorflow/lite/BUILD @@ -478,6 +478,7 @@ cc_test( "testdata/test_min_runtime.bin", "testdata/test_model.bin", "testdata/test_model_broken.bin", + "testdata/unsupported_recursion.bin", ], tags = [ "tflite_not_portable", diff --git a/tensorflow/lite/core/subgraph.cc b/tensorflow/lite/core/subgraph.cc index b76be1fcf204e8..e1ed653ae1b6db 100644 --- a/tensorflow/lite/core/subgraph.cc +++ b/tensorflow/lite/core/subgraph.cc @@ -141,6 +141,42 @@ const char* GetTFLiteOpName(const TfLiteRegistration& op_reg) { return tflite::EnumNamesBuiltinOperator()[op_reg.builtin_code]; } +// A utility test to detect if the subgraph is abused: +// 1. Detects if recursion exists in the graph (recursion is not currently +// supported). +// 2. Detects if the interpreter / subgraph is used in multiple subgraphs. +// Note: It's clearly documented that the interpreter / subgraph are not +// thread-safe. This serves as a check with possible false negatives +// unless we switch to atomic boolean flags. +class SubgraphGuard { + public: + SubgraphGuard(TfLiteContext* context, bool* is_subgraph_in_use) + : is_subgraph_in_use_(is_subgraph_in_use) { + if (*is_subgraph_in_use_) { + TF_LITE_KERNEL_LOG( + context, + "Subgraph is already in use. Using an interpreter or a subgraph in " + "multiple threads is not supported. Recursion in the graph is not " + "supported."); + status_ = kTfLiteError; + } else { + *is_subgraph_in_use_ = true; + } + } + ~SubgraphGuard() { + // If the original status was OK, recover the boolean flag. + if (status_ == kTfLiteOk) { + *is_subgraph_in_use_ = false; + } + } + + TfLiteStatus status() const { return status_; } + + private: + TfLiteStatus status_ = kTfLiteOk; + bool* is_subgraph_in_use_; +}; + } // namespace // A trivial implementation of GraphInfo around the Interpreter. @@ -637,6 +673,7 @@ TfLiteStatus Subgraph::BytesRequired(TfLiteType type, const int* dims, TfLiteStatus Subgraph::AllocateTensors() { TFLITE_SCOPED_TAGGED_DEFAULT_PROFILE(profiler_.get(), "AllocateTensors"); + if (!consistent_) { ReportError("AllocateTensors() called on inconsistent model."); return kTfLiteError; @@ -660,6 +697,12 @@ TfLiteStatus Subgraph::AllocateTensors() { return kTfLiteOk; } + // Note `AllocateTensors` sometimes calls itself recursively above + // for delegates. Therefore only the logic below needs to be guarded + // by `SubgraphGuard`.
+ SubgraphGuard guard(&context_, &is_subgraph_in_use_); + TF_LITE_ENSURE_OK(&context_, guard.status()); + next_execution_plan_index_to_prepare_ = 0; next_execution_plan_index_to_plan_allocation_ = 0; if (memory_planner_) { @@ -917,6 +960,9 @@ TfLiteStatus Subgraph::PrepareOpsAndTensors() { } TfLiteStatus Subgraph::Invoke() { + SubgraphGuard guard(&context_, &is_subgraph_in_use_); + TF_LITE_ENSURE_OK(&context_, guard.status()); + if (!consistent_) { ReportError("Invoke called on model that is not consistent."); return kTfLiteError; diff --git a/tensorflow/lite/core/subgraph.h b/tensorflow/lite/core/subgraph.h index 979c709614c90a..2c40f36b4cc615 100644 --- a/tensorflow/lite/core/subgraph.h +++ b/tensorflow/lite/core/subgraph.h @@ -721,5 +721,10 @@ using Subgraph = tflrt::Subgraph; using Subgraph = impl::Subgraph; #endif + // Whether the subgraph is currently in use (e.g. running the `Invoke` + // or `AllocateTensors` functions). + bool is_subgraph_in_use_ = false; +}; + } // namespace tflite #endif // TENSORFLOW_LITE_CORE_SUBGRAPH_H_ diff --git a/tensorflow/lite/kernels/while.cc b/tensorflow/lite/kernels/while.cc index 164edfa92809de..b50cdff99741b1 100644 --- a/tensorflow/lite/kernels/while.cc +++ b/tensorflow/lite/kernels/while.cc @@ -135,8 +135,6 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* subgraphs = this_subgraph->GetSubgraphs(); TF_LITE_ENSURE(context, op_data->cond_subgraph_index < subgraphs->size()); TF_LITE_ENSURE(context, op_data->body_subgraph_index < subgraphs->size()); - TF_LITE_ENSURE(context, - op_data->cond_subgraph_index != op_data->body_subgraph_index); Subgraph* cond_subgraph = (*subgraphs)[op_data->cond_subgraph_index].get(); Subgraph* body_subgraph = (*subgraphs)[op_data->body_subgraph_index].get(); diff --git a/tensorflow/lite/model_test.cc b/tensorflow/lite/model_test.cc index ba96494225ccc2..a51e030e849dd0 100644 --- a/tensorflow/lite/model_test.cc +++ b/tensorflow/lite/model_test.cc @@ -438,6 +438,24 @@ TEST(BasicFlatBufferModel, TestParseModelWithSparseTensor) { } // TODO(b/150072943): Add malformed model with sparse tensor tests. +// Recursion & reentrant are not supported in TFLite. +// The test ensures it fails gracefullly instead of crashing with +// a stack overflow. +TEST(BasicFlatBufferModel, TestUnsupportedRecursion) { + const auto model_path = + "tensorflow/lite/testdata/unsupported_recursion.bin"; + + std::unique_ptr model = + FlatBufferModel::BuildFromFile(model_path); + ASSERT_NE(model, nullptr); + + tflite::ops::builtin::BuiltinOpResolver resolver; + InterpreterBuilder builder(*model, resolver); + std::unique_ptr interpreter; + ASSERT_EQ(builder(&interpreter), kTfLiteOk); + ASSERT_NE(interpreter, nullptr); + ASSERT_NE(interpreter->AllocateTensors(), kTfLiteOk); +} // TODO(aselle): Add tests for serialization of builtin op data types. 
// These tests will occur with the evaluation tests of individual operators, diff --git a/tensorflow/lite/testdata/unsupported_recursion.bin b/tensorflow/lite/testdata/unsupported_recursion.bin new file mode 100644 index 0000000000000000000000000000000000000000..525c5383ab4ef6283d687aeb4004b38a8981773a GIT binary patch literal 600 zcmZ9Ky-Nc@5XE0KBoSjwAt)rp6_yhG#1vKvijYXufSnM$!=S-~Aeu6$NGcVy43ffD zv=J10KN>shH2xi)@7-ZXCm}8pCn_Msmv&~WA#tZJ0cCz&tN{cxMIz@J9|!b*ReAQF{b^11cvA(nOQj# zCZ44>NsC+&i(EIS4_B9;x1Szw9@@qB(roJXt*H| z)K!b}Cy_?#d+(@U2g3_29YX)Bc3Md1?cfLgjkUf;)Q?N{Qaur!JZFqWInI}A1=p=W z{yUMC1Hqutn9*~iuSWZfs%Pq$ZsT{)bY7h?O~U>M)(#XZr46U-R&1w=9*%=M1n%g| fP6NS#(KVwVMlX!&zGdTq_ Date: Tue, 27 Apr 2021 17:47:36 -0700 Subject: [PATCH 170/256] CherryPick2.3:Add depth_to_space TFLite op --- tensorflow/lite/kernels/depth_to_space.cc | 1 + tensorflow/lite/kernels/depth_to_space_test.cc | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/tensorflow/lite/kernels/depth_to_space.cc b/tensorflow/lite/kernels/depth_to_space.cc index 1637ad4350f889..c2047f1062f493 100644 --- a/tensorflow/lite/kernels/depth_to_space.cc +++ b/tensorflow/lite/kernels/depth_to_space.cc @@ -58,6 +58,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); const int block_size = params->block_size; + TF_LITE_ENSURE(context, block_size > 0); const int input_height = input->dims->data[1]; const int input_width = input->dims->data[2]; const int input_channels = input->dims->data[3]; diff --git a/tensorflow/lite/kernels/depth_to_space_test.cc b/tensorflow/lite/kernels/depth_to_space_test.cc index 4429faf9909178..c03512dd710ad7 100644 --- a/tensorflow/lite/kernels/depth_to_space_test.cc +++ b/tensorflow/lite/kernels/depth_to_space_test.cc @@ -60,6 +60,11 @@ TEST(DepthToSpaceOpModel, BadBlockSize) { EXPECT_DEATH(DepthToSpaceOpModel({TensorType_FLOAT32, {1, 1, 1, 4}}, 4), "Cannot allocate tensors"); } + +TEST(DepthToSpaceOpModel, NoBlockSize) { + EXPECT_DEATH(DepthToSpaceOpModel({TensorType_FLOAT32, {1, 1, 1, 4}}, 0), + "Cannot allocate tensors"); +} #endif TEST(DepthToSpaceOpModel, Float32) { From 3f158d721a47d6ba03713afe6650c9acc5f9fc26 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Tue, 4 May 2021 15:35:39 -0700 Subject: [PATCH 171/256] CherryPick2.3:Fix heap-buffer-overflow issue with tf.raw_ops.SparseFillEmptyRows. --- tensorflow/core/kernels/sparse_fill_empty_rows_op.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc b/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc index 542069ccd88e18..496413fd4f8074 100644 --- a/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc +++ b/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc @@ -69,7 +69,10 @@ class SparseFillEmptyRowsOp : public OpKernel { default_value_t.shape().DebugString())); // TODO(ebrevdo): add shape checks between values, indices, // dense_shape. Also add check that dense rank > 0. - + // Also add check that dense rank > 0. 
+ OP_REQUIRES(context, dense_shape_t.NumElements() != 0, + errors::InvalidArgument("Dense shape cannot be empty."), + done); const T& default_value = default_value_t.scalar()(); const auto indices = indices_t.matrix(); const auto values = values_t.vec(); From 8bdd6a6a7979f95958043dc6082dac57c41ad3d0 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 30 Apr 2021 06:36:59 -0700 Subject: [PATCH 172/256] CherryPick:2.3 Fix tf.io.decode_raw bugs --- .../core/kernels/decode_padded_raw_op.cc | 21 +++++++++++-------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/tensorflow/core/kernels/decode_padded_raw_op.cc b/tensorflow/core/kernels/decode_padded_raw_op.cc index 12e8ec6aff0d41..d3e830c06f209c 100644 --- a/tensorflow/core/kernels/decode_padded_raw_op.cc +++ b/tensorflow/core/kernels/decode_padded_raw_op.cc @@ -19,6 +19,7 @@ limitations under the License. #include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/shape_inference.h" namespace tensorflow { @@ -83,14 +84,13 @@ class DecodePaddedRawOp : public OpKernel { // can copy the memory directly. if (!convert_data_endianness_ || sizeof(T) == 1) { for (int64 i = 0; i < flat_in.size(); ++i) { - const T* in_data = reinterpret_cast(flat_in(i).data()); - - if (flat_in(i).size() > fixed_length) { - memcpy(out_data, in_data, fixed_length); - } else { - memcpy(out_data, in_data, flat_in(i).size()); - } - out_data += fixed_length; + const auto to_copy = + std::min(flat_in(i).size(), static_cast(fixed_length)); + memcpy(out_data, flat_in(i).data(), to_copy); + // Note: increase out_data by width since it's already of type T* so + // each shift amount is implicitly multiplied by sizeof(T) according to + // pointer arithmetic rules. + out_data += width; } } else { // Otherwise, the data is not in the host's byte order, and rather than a @@ -105,7 +105,10 @@ class DecodePaddedRawOp : public OpKernel { p_in += sizeof(T), p_out += sizeof(T)) { std::reverse_copy(p_in, p_in + sizeof(T), p_out); } - out_data += fixed_length; + // Note: increase out_data by width since it's already of type T* so + // each shift amount is implicitly multiplied by sizeof(T) according to + // pointer arithmetic rules. + out_data += width; } } } From 25a9b85fa513b7e88d8c97f9a441cd94c8e2c034 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 2 Jun 2021 19:07:56 -0700 Subject: [PATCH 173/256] Update tensorflow/core/kernels/decode_padded_raw_op.cc --- tensorflow/core/kernels/decode_padded_raw_op.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/tensorflow/core/kernels/decode_padded_raw_op.cc b/tensorflow/core/kernels/decode_padded_raw_op.cc index d3e830c06f209c..ca7c7104b442d2 100644 --- a/tensorflow/core/kernels/decode_padded_raw_op.cc +++ b/tensorflow/core/kernels/decode_padded_raw_op.cc @@ -19,7 +19,6 @@ limitations under the License. 
#include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/shape_inference.h" namespace tensorflow { From 0edd559a021aec0c037b508f904489cf731c7700 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 11 May 2021 18:32:03 -0700 Subject: [PATCH 174/256] Validate that a and b are proper sparse tensors PiperOrigin-RevId: 373274848 Change-Id: I3a665ac3a29dee9fb69bdf408a939330cb93ea75 --- .../kernels/sparse_sparse_binary_op_shared.cc | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/tensorflow/core/kernels/sparse_sparse_binary_op_shared.cc b/tensorflow/core/kernels/sparse_sparse_binary_op_shared.cc index 9fe42e05d879ee..eb993a5965043b 100644 --- a/tensorflow/core/kernels/sparse_sparse_binary_op_shared.cc +++ b/tensorflow/core/kernels/sparse_sparse_binary_op_shared.cc @@ -150,6 +150,7 @@ class SparseSparseBinaryOpShared : public OpKernel { const int64 a_nnz = a_indices_t->dim_size(0); const int64 b_nnz = b_indices_t->dim_size(0); + const auto a_values = a_values_t->vec(); const auto b_values = b_values_t->vec(); @@ -166,6 +167,14 @@ class SparseSparseBinaryOpShared : public OpKernel { "Input shapes should be a vector but received shapes ", a_shape_t->shape().DebugString(), " and ", b_shape_t->shape().DebugString())); + const int num_dims = a_indices_t->dim_size(1); + OP_REQUIRES( + ctx, a_shape_t->NumElements() == num_dims, + errors::InvalidArgument("Second dimension of a_indices and length of " + "a_shape must match, got ", + num_dims, " and ", a_shape_t->NumElements())); + OP_REQUIRES(ctx, num_dims > 0, + errors::InvalidArgument("Tensors must not be empty")); OP_REQUIRES(ctx, a_shape_t->IsSameSize(*b_shape_t), errors::InvalidArgument( "Operands do not have the same ranks; got shapes: ", @@ -180,12 +189,6 @@ class SparseSparseBinaryOpShared : public OpKernel { " for dimension ", i)); } - OP_REQUIRES( - ctx, a_indices_t->dim_size(1) == b_indices_t->dim_size(1), - errors::InvalidArgument( - "Indices' dimensions do not match: got ", a_indices_t->dim_size(1), - " and ", b_indices_t->dim_size(1), " for the second dimension.")); - const int num_dims = a_indices_t->dim_size(1); const auto a_indices_mat = a_indices_t->matrix(); const auto b_indices_mat = b_indices_t->matrix(); std::vector a_augmented_values, b_augmented_values; From dd82d8406781c9732fa5853e9af725885b8a68f1 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 11 May 2021 18:36:43 -0700 Subject: [PATCH 175/256] Ensure validation sticks in banded_triangular_solve_op PiperOrigin-RevId: 373275480 Change-Id: Id7717cf275b2d6fdb9441fbbe166d555182d2e79 --- tensorflow/core/kernels/banded_triangular_solve_op.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/core/kernels/banded_triangular_solve_op.cc b/tensorflow/core/kernels/banded_triangular_solve_op.cc index cd1b908043abe1..ad2467ebefaf1a 100644 --- a/tensorflow/core/kernels/banded_triangular_solve_op.cc +++ b/tensorflow/core/kernels/banded_triangular_solve_op.cc @@ -217,6 +217,7 @@ class BandedTriangularSolveOpCpu : public OpKernel { const Tensor& in1 = ctx->input(1); ValidateInputTensors(ctx, in0, in1); + if (!ctx->status().ok()) return; MatMulBCast bcast(in0.shape().dim_sizes(), in1.shape().dim_sizes()); OP_REQUIRES( From 00a11472673d3ca7a6491f7342cad889936d056e Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 3 Jun 2021 08:04:04 -0700 Subject: [PATCH 
176/256] Bump curl --- tensorflow/workspace.bzl | 8 +-- third_party/curl.BUILD | 151 ++++++++++++++++++++++++++++++--------- 2 files changed, 122 insertions(+), 37 deletions(-) diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl index 3e474b3750c5be..f2139a795670cb 100755 --- a/tensorflow/workspace.bzl +++ b/tensorflow/workspace.bzl @@ -672,12 +672,12 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""): tf_http_archive( name = "curl", build_file = clean_dep("//third_party:curl.BUILD"), - sha256 = "01ae0c123dee45b01bbaef94c0bc00ed2aec89cb2ee0fd598e0d302a6b5e0a98", - strip_prefix = "curl-7.69.1", + sha256 = "3b4378156ba09e224008e81dcce854b7ce4d182b1f9cfb97fe5ed9e9c18c6bd3", + strip_prefix = "curl-7.76.0", system_build_file = clean_dep("//third_party/systemlibs:curl.BUILD"), urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/curl.haxx.se/download/curl-7.69.1.tar.gz", - "https://curl.haxx.se/download/curl-7.69.1.tar.gz", + "https://storage.googleapis.com/mirror.tensorflow.org/curl.haxx.se/download/curl-7.76.0.tar.gz", + "https://curl.haxx.se/download/curl-7.76.0.tar.gz", ], ) diff --git a/third_party/curl.BUILD b/third_party/curl.BUILD index 62fc946956da85..3b73ac7d29fa8f 100644 --- a/third_party/curl.BUILD +++ b/third_party/curl.BUILD @@ -25,22 +25,33 @@ CURL_WIN_SRCS = [ "lib/asyn-thread.c", "lib/inet_ntop.c", "lib/system_win32.c", - "lib/x509asn1.c", - "lib/vtls/schannel.c", - "lib/vtls/schannel_verify.c", - "lib/idn_win32.c", + "lib/setup-win32.h", ] cc_library( name = "curl", srcs = [ "include/curl_config.h", + "lib/altsvc.c", + "lib/altsvc.h", + "lib/amigaos.c", "lib/amigaos.h", "lib/arpa_telnet.h", - "lib/asyn.h", "lib/asyn-ares.c", + "lib/asyn.h", "lib/base64.c", + "lib/c-hyper.c", + "lib/c-hyper.h", + "lib/config-amigaos.h", + "lib/config-dos.h", + "lib/config-mac.h", + "lib/config-os400.h", + "lib/config-plan9.h", + "lib/config-riscos.h", + "lib/config-tpf.h", + "lib/config-vxworks.h", "lib/config-win32.h", + "lib/config-win32ce.h", "lib/conncache.c", "lib/conncache.h", "lib/connect.c", @@ -54,14 +65,20 @@ cc_library( "lib/curl_base64.h", "lib/curl_ctype.c", "lib/curl_ctype.h", + "lib/curl_des.c", "lib/curl_des.h", + "lib/curl_endian.c", "lib/curl_endian.h", "lib/curl_fnmatch.c", "lib/curl_fnmatch.h", + "lib/curl_get_line.c", + "lib/curl_get_line.h", "lib/curl_gethostname.c", "lib/curl_gethostname.h", + "lib/curl_gssapi.c", "lib/curl_gssapi.h", "lib/curl_hmac.h", + "lib/curl_krb5.h", "lib/curl_ldap.h", "lib/curl_md4.h", "lib/curl_md5.h", @@ -70,14 +87,19 @@ cc_library( "lib/curl_memrchr.h", "lib/curl_multibyte.c", "lib/curl_multibyte.h", + "lib/curl_ntlm_core.c", "lib/curl_ntlm_core.h", + "lib/curl_ntlm_wb.c", "lib/curl_ntlm_wb.h", + "lib/curl_path.c", + "lib/curl_path.h", "lib/curl_printf.h", + "lib/curl_range.c", + "lib/curl_range.h", "lib/curl_rtmp.c", "lib/curl_rtmp.h", "lib/curl_sasl.c", "lib/curl_sasl.h", - "lib/curl_sec.h", "lib/curl_setup.h", "lib/curl_setup_once.h", "lib/curl_sha256.h", @@ -86,23 +108,35 @@ cc_library( "lib/curl_threads.c", "lib/curl_threads.h", "lib/curlx.h", + "lib/dict.c", "lib/dict.h", + "lib/doh.c", + "lib/doh.h", "lib/dotdot.c", "lib/dotdot.h", + "lib/dynbuf.c", + "lib/dynbuf.h", "lib/easy.c", + "lib/easygetopt.c", "lib/easyif.h", + "lib/easyoptions.c", + "lib/easyoptions.h", "lib/escape.c", "lib/escape.h", + "lib/file.c", "lib/file.h", "lib/fileinfo.c", "lib/fileinfo.h", "lib/formdata.c", "lib/formdata.h", + "lib/ftp.c", "lib/ftp.h", + "lib/ftplistparser.c", "lib/ftplistparser.h", "lib/getenv.c", "lib/getinfo.c", 
"lib/getinfo.h", + "lib/gopher.c", "lib/gopher.h", "lib/hash.c", "lib/hash.h", @@ -115,6 +149,8 @@ cc_library( "lib/hostip4.c", "lib/hostip6.c", "lib/hostsyn.c", + "lib/hsts.c", + "lib/hsts.h", "lib/http.c", "lib/http.h", "lib/http2.c", @@ -123,17 +159,24 @@ cc_library( "lib/http_chunks.h", "lib/http_digest.c", "lib/http_digest.h", + "lib/http_negotiate.c", "lib/http_negotiate.h", + "lib/http_ntlm.c", "lib/http_ntlm.h", "lib/http_proxy.c", "lib/http_proxy.h", + "lib/http_aws_sigv4.c", + "lib/http_aws_sigv4.h", + "lib/idn_win32.c", "lib/if2ip.c", "lib/if2ip.h", + "lib/imap.c", "lib/imap.h", "lib/inet_ntop.h", "lib/inet_pton.c", "lib/inet_pton.h", "lib/krb5.c", + "lib/ldap.c", "lib/llist.c", "lib/llist.h", "lib/md4.c", @@ -143,38 +186,43 @@ cc_library( "lib/mime.c", "lib/mime.h", "lib/mprintf.c", + "lib/mqtt.c", + "lib/mqtt.h", "lib/multi.c", "lib/multihandle.h", "lib/multiif.h", "lib/netrc.c", "lib/netrc.h", + "lib/non-ascii.c", "lib/non-ascii.h", "lib/nonblock.c", "lib/nonblock.h", - "lib/nwlib.c", - "lib/nwos.c", + #"lib/nwlib.c", + #"lib/nwos.c", + "lib/openldap.c", "lib/parsedate.c", "lib/parsedate.h", - "lib/pingpong.h", "lib/pingpong.c", + "lib/pingpong.h", + "lib/pop3.c", "lib/pop3.h", "lib/progress.c", "lib/progress.h", + "lib/psl.c", + "lib/psl.h", "lib/quic.h", "lib/rand.c", "lib/rand.h", - "lib/rename.h", "lib/rename.c", + "lib/rename.h", "lib/rtsp.c", "lib/rtsp.h", - "lib/security.c", "lib/select.c", "lib/select.h", "lib/sendf.c", "lib/sendf.h", "lib/setopt.c", "lib/setopt.h", - "lib/setup-os400.h", "lib/setup-vms.h", "lib/sha256.c", "lib/share.c", @@ -182,13 +230,17 @@ cc_library( "lib/sigpipe.h", "lib/slist.c", "lib/slist.h", + "lib/smb.c", "lib/smb.h", + "lib/smtp.c", "lib/smtp.h", "lib/sockaddr.h", - "lib/socketpair.h", "lib/socketpair.c", + "lib/socketpair.h", "lib/socks.c", "lib/socks.h", + "lib/socks_gssapi.c", + "lib/socks_sspi.c", "lib/speedcheck.c", "lib/speedcheck.h", "lib/splay.c", @@ -204,7 +256,9 @@ cc_library( "lib/strtoofft.c", "lib/strtoofft.h", "lib/system_win32.h", + "lib/telnet.c", "lib/telnet.h", + "lib/tftp.c", "lib/tftp.h", "lib/timeval.c", "lib/timeval.h", @@ -213,44 +267,69 @@ cc_library( "lib/url.c", "lib/url.h", "lib/urldata.h", + "lib/urlapi-int.h", + "lib/urlapi.c", + "lib/version.c", + "lib/version_win32.c", + "lib/version_win32.h", + "lib/warnless.c", + "lib/warnless.h", + "lib/wildcard.c", + "lib/wildcard.h", + "lib/x509asn1.c", + "lib/x509asn1.h", "lib/vauth/cleartext.c", "lib/vauth/cram.c", "lib/vauth/digest.c", "lib/vauth/digest.h", + "lib/vauth/digest_sspi.c", + "lib/vauth/krb5_gssapi.c", + "lib/vauth/krb5_sspi.c", + "lib/vauth/ntlm.c", "lib/vauth/ntlm.h", + "lib/vauth/ntlm_sspi.c", "lib/vauth/oauth2.c", + "lib/vauth/spnego_sspi.c", "lib/vauth/vauth.c", "lib/vauth/vauth.h", - "lib/version.c", + "lib/vquic/ngtcp2.c", + "lib/vquic/ngtcp2.h", + "lib/vquic/quiche.c", + "lib/vquic/quiche.h", + "lib/vquic/vquic.c", + "lib/vquic/vquic.h", + "lib/vssh/libssh.c", + "lib/vssh/libssh2.c", "lib/vssh/ssh.h", + "lib/vssh/wolfssh.c", + "lib/vtls/bearssl.c", "lib/vtls/bearssl.h", + "lib/vtls/gskit.c", "lib/vtls/gskit.h", + "lib/vtls/gtls.c", "lib/vtls/gtls.h", + "lib/vtls/keylog.c", + "lib/vtls/keylog.h", + "lib/vtls/mbedtls.c", "lib/vtls/mbedtls.h", + "lib/vtls/mbedtls_threadlock.c", + "lib/vtls/mbedtls_threadlock.h", + "lib/vtls/mesalink.c", + "lib/vtls/mesalink.h", + "lib/vtls/nss.c", "lib/vtls/nssg.h", + "lib/vtls/openssl.c", "lib/vtls/openssl.h", + "lib/vtls/rustls.c", + "lib/vtls/rustls.h", + "lib/vtls/schannel.c", "lib/vtls/schannel.h", + 
"lib/vtls/schannel_verify.c", + "lib/vtls/sectransp.h", "lib/vtls/vtls.c", "lib/vtls/vtls.h", + "lib/vtls/wolfssl.c", "lib/vtls/wolfssl.h", - "lib/warnless.c", - "lib/warnless.h", - "lib/wildcard.c", - "lib/wildcard.h", - "lib/x509asn1.h", - "lib/psl.h", - "lib/psl.c", - "lib/vtls/sectransp.h", - "lib/vtls/mesalink.h", - "lib/vtls/mesalink.c", - "lib/curl_get_line.h", - "lib/curl_get_line.c", - "lib/urlapi-int.h", - "lib/urlapi.c", - "lib/altsvc.h", - "lib/altsvc.c", - "lib/doh.h", - "lib/doh.c", ] + select({ "@org_tensorflow//tensorflow:macos": [ "lib/vtls/sectransp.c", @@ -260,7 +339,6 @@ cc_library( ], "@org_tensorflow//tensorflow:windows": CURL_WIN_SRCS, "//conditions:default": [ - "lib/vtls/openssl.c", ], }), hdrs = [ @@ -269,6 +347,7 @@ cc_library( "include/curl/easy.h", "include/curl/mprintf.h", "include/curl/multi.h", + "include/curl/options.h", "include/curl/stdcheaders.h", "include/curl/system.h", "include/curl/typecheck-gcc.h", @@ -372,6 +451,8 @@ cc_binary( "src/tool_doswin.h", "src/tool_easysrc.c", "src/tool_easysrc.h", + "src/tool_filetime.c", + "src/tool_filetime.h", "src/tool_formparse.c", "src/tool_formparse.h", "src/tool_getparam.c", @@ -406,6 +487,8 @@ cc_binary( "src/tool_paramhlp.h", "src/tool_parsecfg.c", "src/tool_parsecfg.h", + "src/tool_progress.c", + "src/tool_progress.h", "src/tool_sdecls.h", "src/tool_setopt.c", "src/tool_setopt.h", @@ -425,6 +508,8 @@ cc_binary( "src/tool_writeenv.h", "src/tool_writeout.c", "src/tool_writeout.h", + "src/tool_writeout_json.c", + "src/tool_writeout_json.h", "src/tool_xattr.c", "src/tool_xattr.h", ], From c52c239faa529f61310a27faa7d6a006df9c9849 Mon Sep 17 00:00:00 2001 From: TensorFlow Release Automation Date: Thu, 3 Jun 2021 11:31:16 -0700 Subject: [PATCH 177/256] Insert release notes place-fill --- RELEASE.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/RELEASE.md b/RELEASE.md index 0e47a5b6601a96..4fef8ce64ad3f0 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,3 +1,7 @@ +# Release 2.3.3 + + + # Release 2.3.2 ## Bug Fixes and Other Changes From 7360dcbd3751ddd72b2ec72ac9a2639d2e0c5937 Mon Sep 17 00:00:00 2001 From: TensorFlow Release Automation Date: Thu, 3 Jun 2021 11:53:40 -0700 Subject: [PATCH 178/256] Update version numbers to 2.3.3 --- tensorflow/core/public/version.h | 2 +- tensorflow/tensorflow.bzl | 2 +- tensorflow/tools/pip_package/setup.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tensorflow/core/public/version.h b/tensorflow/core/public/version.h index 7c34667b2ef9a4..f9f48582e3c958 100644 --- a/tensorflow/core/public/version.h +++ b/tensorflow/core/public/version.h @@ -22,7 +22,7 @@ limitations under the License. // tensorflow/tools/pip_package/setup.py #define TF_MAJOR_VERSION 2 #define TF_MINOR_VERSION 3 -#define TF_PATCH_VERSION 2 +#define TF_PATCH_VERSION 3 // TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1", // "-beta", "-rc", "-rc.1") diff --git a/tensorflow/tensorflow.bzl b/tensorflow/tensorflow.bzl index ac839f3cbe0e3b..da0ef93f8cef33 100644 --- a/tensorflow/tensorflow.bzl +++ b/tensorflow/tensorflow.bzl @@ -59,7 +59,7 @@ load( # not contain rc or alpha, only numbers. 
# Also update tensorflow/core/public/version.h # and tensorflow/tools/pip_package/setup.py -VERSION = "2.3.2" +VERSION = "2.3.3" VERSION_MAJOR = VERSION.split(".")[0] # Sanitize a dependency so that it works correctly from code that includes diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py index b7cde30d2e1f0d..888d81d1f80458 100644 --- a/tensorflow/tools/pip_package/setup.py +++ b/tensorflow/tools/pip_package/setup.py @@ -49,7 +49,7 @@ # result for pip. # Also update tensorflow/tensorflow.bzl and # tensorflow/core/public/version.h -_VERSION = '2.3.2' +_VERSION = '2.3.3' REQUIRED_PACKAGES = [ 'absl-py >= 0.7.0', From 21ee5a2abf60f48ea48b384d05acdbbe4b15ced2 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 3 Jun 2021 12:10:43 -0700 Subject: [PATCH 179/256] Update RELEASE.md --- RELEASE.md | 112 ++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 111 insertions(+), 1 deletion(-) diff --git a/RELEASE.md b/RELEASE.md index 4fef8ce64ad3f0..74291aa01364e5 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,6 +1,116 @@ # Release 2.3.3 - +This release introduces several vulnerability fixes: + + * Fixes a heap buffer overflow in `RaggedBinCount` ([CVE-2021-29512](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29512)) + * Fixes a heap out of bounds write in `RaggedBinCount` ([CVE-2021-29514](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29514)) + * Fixes a type confusion during tensor casts which leads to dereferencing null pointers ([CVE-2021-29513](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29513)) + * Fixes a reference binding to null pointer in `MatrixDiag*` ops ([CVE-2021-29515](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29515)) + * Fixes a null pointer dereference via invalid Ragged Tensors ([CVE-2021-29516](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29516)) + * Fixes a division by zero in `Conv3D` ([CVE-2021-29517](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29517)) + * Fixes vulnerabilities where session operations in eager mode lead to null pointer dereferences ([CVE-2021-29518](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29518)) + * Fixes a `CHECK`-fail in `SparseCross` caused by type confusion ([CVE-2021-29519](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29519)) + * Fixes a segfault in `SparseCountSparseOutput` ([CVE-2021-29521](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29521)) + * Fixes a heap buffer overflow in `Conv3DBackprop*` ([CVE-2021-29520](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29520)) + * Fixes a division by 0 in `Conv3DBackprop*` ([CVE-2021-29522](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29522)) + * Fixes a `CHECK`-fail in `AddManySparseToTensorsMap` ([CVE-2021-29523](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29523)) + * Fixes a division by 0 in `Conv2DBackpropFilter` ([CVE-2021-29524](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29524)) + * Fixes a division by 0 in `Conv2DBackpropInput` ([CVE-2021-29525](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29525)) + * Fixes a division by 0 in `Conv2D` ([CVE-2021-29526](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29526)) + * Fixes a division by 0 in `QuantizedConv2D` ([CVE-2021-29527](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29527)) + * Fixes a division by 0 in `QuantizedMul` ([CVE-2021-29528](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29528)) + * Fixes 
vulnerabilities caused by invalid validation in `SparseMatrixSparseCholesky` ([CVE-2021-29530](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29530)) + * Fixes a heap buffer overflow caused by rounding ([CVE-2021-29529](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29529)) + * Fixes a `CHECK`-fail in `tf.raw_ops.EncodePng` ([CVE-2021-29531](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29531)) + * Fixes a heap out of bounds read in `RaggedCross` ([CVE-2021-29532](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29532)) + * Fixes a `CHECK`-fail in `DrawBoundingBoxes` ([CVE-2021-29533](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29533)) + * Fixes a heap buffer overflow in `QuantizedMul` ([CVE-2021-29535](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29535)) + * Fixes a `CHECK`-fail in `SparseConcat` ([CVE-2021-29534](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29534)) + * Fixes a heap buffer overflow in `QuantizedResizeBilinear` ([CVE-2021-29537](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29537)) + * Fixes a heap buffer overflow in `QuantizedReshape` ([CVE-2021-29536](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29536)) + * Fixes a division by zero in `Conv2DBackpropFilter` ([CVE-2021-29538](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29538)) + * Fixes a heap buffer overflow in `Conv2DBackpropFilter` ([CVE-2021-29540](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29540)) + * Fixes a heap buffer overflow in `StringNGrams` ([CVE-2021-29542](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29542)) + * Fixes a null pointer dereference in `StringNGrams` ([CVE-2021-29541](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29541)) + * Fixes a `CHECK`-fail in `QuantizeAndDequantizeV4Grad` ([CVE-2021-29544](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29544)) + * Fixes a `CHECK`-fail in `CTCGreedyDecoder` ([CVE-2021-29543](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29543)) + * Fixes a heap buffer overflow in `SparseTensorToCSRSparseMatrix` ([CVE-2021-29545](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29545)) + * Fixes a division by 0 in `QuantizedBiasAdd` ([CVE-2021-29546](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29546)) + * Fixes a heap out of bounds in `QuantizedBatchNormWithGlobalNormalization` ([CVE-2021-29547](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29547)) + * Fixes a division by 0 in `QuantizedBatchNormWithGlobalNormalization` ([CVE-2021-29548](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29548)) + * Fixes a division by 0 in `QuantizedAdd` ([CVE-2021-29549](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29549)) + * Fixes a division by 0 in `FractionalAvgPool` ([CVE-2021-29550](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29550)) + * Fixes an OOB read in `MatrixTriangularSolve` ([CVE-2021-29551](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29551)) + * Fixes a heap OOB in `QuantizeAndDequantizeV3` ([CVE-2021-29553](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29553)) + * Fixes a `CHECK`-failure in `UnsortedSegmentJoin` ([CVE-2021-29552](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29552)) + * Fixes a division by 0 in `DenseCountSparseOutput` ([CVE-2021-29554](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29554)) + * Fixes a division by 0 in `FusedBatchNorm` 
([CVE-2021-29555](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29555)) + * Fixes a division by 0 in `SparseMatMul` ([CVE-2021-29557](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29557)) + * Fixes a division by 0 in `Reverse` ([CVE-2021-29556](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29556)) + * Fixes a heap buffer overflow in `SparseSplit` ([CVE-2021-29558](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29558)) + * Fixes a heap OOB access in unicode ops ([CVE-2021-29559](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29559)) + * Fixes a heap buffer overflow in `RaggedTensorToTensor` ([CVE-2021-29560](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29560)) + * Fixes a `CHECK`-fail in `LoadAndRemapMatrix` ([CVE-2021-29561](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29561)) + * Fixes a `CHECK`-fail in `tf.raw_ops.IRFFT` ([CVE-2021-29562](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29562)) + * Fixes a `CHECK`-fail in `tf.raw_ops.RFFT` ([CVE-2021-29563](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29563)) + * Fixes a null pointer dereference in `EditDistance` ([CVE-2021-29564](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29564)) + * Fixes a null pointer dereference in `SparseFillEmptyRows` ([CVE-2021-29565](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29565)) + * Fixes a heap OOB access in `Dilation2DBackpropInput` ([CVE-2021-29566](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29566)) + * Fixes a reference binding to null in `ParameterizedTruncatedNormal` ([CVE-2021-29568](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29568)) + * Fixes a set of vulnerabilities caused by lack of validation in `SparseDenseCwiseMul` ([CVE-2021-29567](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29567)) + * Fixes a heap out of bounds read in `MaxPoolGradWithArgmax` ([CVE-2021-29570](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29570)) + * Fixes a heap out of bounds read in `RequantizationRange` ([CVE-2021-29569](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29569)) + * Fixes a memory corruption in `DrawBoundingBoxesV2` ([CVE-2021-29571](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29571)) + * Fixes a reference binding to nullptr in `SdcaOptimizer` ([CVE-2021-29572](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29572)) + * Fixes an overflow and a denial of service in `tf.raw_ops.ReverseSequence` ([CVE-2021-29575](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29575)) + * Fixes a division by 0 in `MaxPoolGradWithArgmax` ([CVE-2021-29573](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29573)) + * Fixes an undefined behavior in `MaxPool3DGradGrad` ([CVE-2021-29574](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29574)) + * Fixes a heap buffer overflow in `MaxPool3DGradGrad` ([CVE-2021-29576](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29576)) + * Fixes a heap buffer overflow in `AvgPool3DGrad` ([CVE-2021-29577](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29577)) + * Fixes an undefined behavior and a `CHECK`-fail in `FractionalMaxPoolGrad` ([CVE-2021-29580](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29580)) + * Fixes a heap buffer overflow in `FractionalAvgPoolGrad` ([CVE-2021-29578](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29578)) + * Fixes a heap buffer overflow in `MaxPoolGrad` 
([CVE-2021-29579](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29579)) + * Fixes a segfault in `CTCBeamSearchDecoder` ([CVE-2021-29581](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29581)) + * Fixes a heap OOB read in `tf.raw_ops.Dequantize` ([CVE-2021-29582](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29582)) + * Fixes a `CHECK`-fail due to integer overflow ([CVE-2021-29584](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29584)) + * Fixes a heap buffer overflow and undefined behavior in `FusedBatchNorm` ([CVE-2021-29583](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29583)) + * Fixes a division by zero in padding computation in TFLite ([CVE-2021-29585](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29585)) + * Fixes a division by zero in optimized pooling implementations in TFLite ([CVE-2021-29586](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29586)) + * Fixes a division by zero in TFLite's implementation of `SpaceToDepth` ([CVE-2021-29587](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29587)) + * Fixes a division by zero in TFLite's implementation of `GatherNd` ([CVE-2021-29589](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29589)) + * Fixes a division by zero in TFLite's implementation of `TransposeConv` ([CVE-2021-29588](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29588)) + * Fixes a heap OOB read in TFLite's implementation of `Minimum` or `Maximum` ([CVE-2021-29590](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29590)) + * Fixes a null pointer dereference in TFLite's `Reshape` operator ([CVE-2021-29592](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29592)) + * Fixes a stack overflow due to looping TFLite subgraph ([CVE-2021-29591](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29591)) + * Fixes a division by zero in TFLite's implementation of `DepthToSpace` ([CVE-2021-29595](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29595)) + * Fixes a division by zero in TFLite's convolution code ([CVE-2021-29594](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29594)) + * Fixes a division by zero in TFLite's implementation of `EmbeddingLookup` ([CVE-2021-29596](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29596)) + * Fixes a division by zero in TFLite's implementation of `BatchToSpaceNd` ([CVE-2021-29593](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29593)) + * Fixes a division by zero in TFLite's implementation of `SpaceToBatchNd` ([CVE-2021-29597](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29597)) + * Fixes a division by zero in TFLite's implementation of `SVDF` ([CVE-2021-29598](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29598)) + * Fixes a division by zero in TFLite's implementation of `Split` ([CVE-2021-29599](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29599)) + * Fixes a division by zero in TFLite's implementation of `OneHot` ([CVE-2021-29600](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29600)) + * Fixes a division by zero in TFLite's implementation of `DepthwiseConv` ([CVE-2021-29602](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29602)) + * Fixes a division by zero in TFLite's implementation of hashtable lookup ([CVE-2021-29604](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29604)) + * Fixes a integer overflow in TFLite concatentation ([CVE-2021-29601](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29601)) + * Fixes a integer overflow in 
TFLite memory allocation ([CVE-2021-29605](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29605)) + * Fixes a heap OOB write in TFLite ([CVE-2021-29603](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29603)) + * Fixes a heap OOB read in TFLite ([CVE-2021-29606](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29606)) + * Fixes a heap OOB and null pointer dereference in `RaggedTensorToTensor` ([CVE-2021-29608](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29608)) + * Fixes vulnerabilities caused by incomplete validation in `SparseAdd` ([CVE-2021-29609](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29609)) + * Fixes vulnerabilities caused by incomplete validation in `SparseSparseMinimum` ([CVE-2021-29607](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29607)) + * Fixes vulnerabilities caused by incomplete validation in `SparseReshape` ([CVE-2021-29611](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29611)) + * Fixes vulnerabilities caused by invalid validation in `QuantizeAndDequantizeV2` ([CVE-2021-29610](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29610)) + * Fixes a heap buffer overflow in `BandedTriangularSolve` ([CVE-2021-29612](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29612)) + * Fixes vulnerabilities caused by incomplete validation in `tf.raw_ops.CTCLoss` ([CVE-2021-29613](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29613)) + * Fixes an interpreter crash from vulnerabilities in `tf.io.decode_raw` ([CVE-2021-29614](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29614)) + * Fixes a stack overflow in `ParseAttrValue` with nested tensors ([CVE-2021-29615](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29615)) + * Fixes a null dereference in Grappler's `TrySimplify` ([CVE-2021-29616](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29616)) + * Fixes a crash in `tf.transpose` with complex inputs ([CVE-2021-29618](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29618)) + * Fixes a crash in `tf.strings.substr` due to `CHECK`-fail ([CVE-2021-29617](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29617)) + * Fixes a segfault in `tf.raw_ops.SparseCountSparseOutput` ([CVE-2021-29619](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29619)) + * Fixes a segfault in `tf.raw_ops.ImmutableConst` ([CVE-2021-29539](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29539)) + * Updates `curl` to `7.76.0` to handle [CVE-2020-8169](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8169), [CVE-2020-8177](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8177), [CVE-2020-8231](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8231), [CVE-2020-8284](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8284), [CVE-2020-8285](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8285) and [CVE-2020-8286](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8286). # Release 2.3.2 From 8badcc59dda555244899b71cdee039b58a370c01 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 3 Jun 2021 13:59:56 -0700 Subject: [PATCH 180/256] Fix broken code after cherrypick --- tensorflow/lite/core/subgraph.h | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/tensorflow/lite/core/subgraph.h b/tensorflow/lite/core/subgraph.h index 2c40f36b4cc615..d66d3205cf101d 100644 --- a/tensorflow/lite/core/subgraph.h +++ b/tensorflow/lite/core/subgraph.h @@ -711,6 +711,10 @@ class Subgraph { // A map of resources. 
Owned by interpreter and shared by multiple subgraphs. resource::ResourceMap* resources_ = nullptr; + + // Whether the subgraph is currently in use (e.g. running the `Invoke` + // or `AllocateTensors` functions). + bool is_subgraph_in_use_ = false; }; } // namespace impl @@ -721,10 +725,5 @@ using Subgraph = tflrt::Subgraph; using Subgraph = impl::Subgraph; #endif - // Whether the subgraph is currently in use (e.g. running the `Invoke` - // or `AllocateTensors` functions). - bool is_subgraph_in_use_ = false; -}; - } // namespace tflite #endif // TENSORFLOW_LITE_CORE_SUBGRAPH_H_ From b889faaa99a24ae27e7c88cdb6c8c7d9b41e39dd Mon Sep 17 00:00:00 2001 From: geetachavan1 <53313357+geetachavan1@users.noreply.github.com> Date: Thu, 3 Jun 2021 16:31:31 -0700 Subject: [PATCH 181/256] Update sparse_fill_empty_rows_op.cc --- tensorflow/core/kernels/sparse_fill_empty_rows_op.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc b/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc index 496413fd4f8074..2b80903a9210b1 100644 --- a/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc +++ b/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc @@ -71,8 +71,7 @@ class SparseFillEmptyRowsOp : public OpKernel { // dense_shape. Also add check that dense rank > 0. // Also add check that dense rank > 0. OP_REQUIRES(context, dense_shape_t.NumElements() != 0, - errors::InvalidArgument("Dense shape cannot be empty."), - done); + errors::InvalidArgument("Dense shape cannot be empty.")); const T& default_value = default_value_t.scalar()(); const auto indices = indices_t.matrix(); const auto values = values_t.vec(); From 2d6dc973ce66da10cd23d3a3a0a3a43a2b27ceab Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 4 Jun 2021 12:36:40 -0700 Subject: [PATCH 182/256] Update conv.cc --- tensorflow/lite/kernels/conv.cc | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/tensorflow/lite/kernels/conv.cc b/tensorflow/lite/kernels/conv.cc index 4326543ac62209..08ade1e53c8e40 100644 --- a/tensorflow/lite/kernels/conv.cc +++ b/tensorflow/lite/kernels/conv.cc @@ -793,11 +793,12 @@ void EvalFloat(TfLiteContext* context, TfLiteNode* node, } template -void EvalHybridPerChannel(TfLiteContext* context, TfLiteNode* node, - TfLiteConvParams* params, OpData* data, - const TfLiteTensor* input, const TfLiteTensor* filter, - const TfLiteTensor* bias, TfLiteTensor* im2col, - TfLiteTensor* output) { +TfLiteStatus EvalHybridPerChannel(TfLiteContext* context, TfLiteNode* node, + TfLiteConvParams* params, OpData* data, + const TfLiteTensor* input, + const TfLiteTensor* filter, + const TfLiteTensor* bias, + TfLiteTensor* im2col, TfLiteTensor* output) { float output_activation_min, output_activation_max; CalculateActivationRange(params->activation, &output_activation_min, &output_activation_max); @@ -961,8 +962,9 @@ TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node) { case kTfLiteFloat32: if (filter->type == kTfLiteUInt8 || filter->type == kTfLiteInt8) { if (data->is_hybrid_per_channel) { - EvalHybridPerChannel(context, node, params, data, input, - filter, bias, im2col, output); + TF_LITE_ENSURE_OK(context, EvalHybridPerChannel( + context, node, params, data, input, + filter, bias, im2col, output)); } else { TfLiteTensor* accum_scratch = &context->tensors[node->temporaries From a5635944d7429b9afac5a3b14d24ab05e88147f3 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 4 Jun 2021 13:10:24 -0700 Subject: [PATCH 183/256] Update 
conv.cc --- tensorflow/lite/kernels/conv.cc | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/tensorflow/lite/kernels/conv.cc b/tensorflow/lite/kernels/conv.cc index 08ade1e53c8e40..1218c065dc7b11 100644 --- a/tensorflow/lite/kernels/conv.cc +++ b/tensorflow/lite/kernels/conv.cc @@ -873,11 +873,11 @@ TfLiteStatus EvalHybridPerChannel(TfLiteContext* context, TfLiteNode* node, } template -void EvalHybrid(TfLiteContext* context, TfLiteNode* node, - TfLiteConvParams* params, OpData* data, - const TfLiteTensor* input, const TfLiteTensor* filter, - const TfLiteTensor* bias, TfLiteTensor* im2col, - TfLiteTensor* accum_scratch, TfLiteTensor* output) { +TfLiteStatus EvalHybrid(TfLiteContext* context, TfLiteNode* node, + TfLiteConvParams* params, OpData* data, + const TfLiteTensor* input, const TfLiteTensor* filter, + const TfLiteTensor* bias, TfLiteTensor* im2col, + TfLiteTensor* accum_scratch, TfLiteTensor* output) { float output_activation_min, output_activation_max; CalculateActivationRange(params->activation, &output_activation_min, &output_activation_max); @@ -969,8 +969,10 @@ TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node) { TfLiteTensor* accum_scratch = &context->tensors[node->temporaries ->data[data->accum_scratch_index]]; - EvalHybrid(context, node, params, data, input, filter, - bias, im2col, accum_scratch, output); + TF_LITE_ENSURE_OK(context, + EvalHybrid(context, node, params, data, + input, filter, bias, im2col, + accum_scratch, output); } } else { EvalFloat(context, node, params, data, input, filter, bias, From 12e2157d138ee532828e37c5f86c7ecb89b065dc Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 4 Jun 2021 13:21:56 -0700 Subject: [PATCH 184/256] Update conv.cc --- tensorflow/lite/kernels/conv.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/lite/kernels/conv.cc b/tensorflow/lite/kernels/conv.cc index 1218c065dc7b11..390565f6e79b7b 100644 --- a/tensorflow/lite/kernels/conv.cc +++ b/tensorflow/lite/kernels/conv.cc @@ -972,7 +972,7 @@ TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_OK(context, EvalHybrid(context, node, params, data, input, filter, bias, im2col, - accum_scratch, output); + accum_scratch, output)); } } else { EvalFloat(context, node, params, data, input, filter, bias, From 6a0306054333a559addf320b4c84f63059434c04 Mon Sep 17 00:00:00 2001 From: geetachavan1 <53313357+geetachavan1@users.noreply.github.com> Date: Fri, 4 Jun 2021 15:30:10 -0700 Subject: [PATCH 185/256] Update ragged_tensor_to_variant_op.cc --- tensorflow/core/kernels/ragged_tensor_to_variant_op.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc b/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc index 1457e5e2c73f7d..49c4a2411b8c3c 100644 --- a/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc +++ b/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc @@ -176,7 +176,7 @@ class RaggedTensorToVariantOp : public OpKernel { // Unbatch the Ragged Tensor and encode the components. 
std::vector ragged_components; auto batched_splits_top_vec = - batched_ragged_input.splits(0).vec(); + batched_ragged_input.nested_splits[0].vec(); int num_components = batched_splits_top_vec.size() - 1; OP_REQUIRES(context, num_components >= 0, errors::Internal("Invalid split argument.")); From b97e43b07260cc054cd2c3dce68bc1f44df6e875 Mon Sep 17 00:00:00 2001 From: Laura Pak Date: Fri, 9 Jul 2021 18:27:22 -0700 Subject: [PATCH 186/256] Disallow division by zero FPE in tf.raw_ops.SparseDenseCwiseDiv PiperOrigin-RevId: 383959809 Change-Id: Ibe88458bdf66a686c93e354b8255dec94285c560 --- .../core/kernels/sparse_dense_binary_op_shared.cc | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc b/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc index dac4a3d3e6bfcd..dda05dbc3b8cb2 100644 --- a/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc +++ b/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc @@ -114,7 +114,10 @@ class SparseDenseBinaryOpShared : public OpKernel { OP_REQUIRES_OK( ctx, ctx->allocate_temp(DataTypeToEnum::value, TensorShape({nnz}), &dense_gathered)); - + bool op_is_div = false; + if (absl::StrContains(ctx->op_kernel().type_string_view(), "Div")) { + op_is_div = true; + } // Pulls relevant entries from the dense side, with reshape and broadcasting // *of the dense side* taken into account. Use a TensorRef to avoid blowing // up memory. @@ -143,6 +146,12 @@ class SparseDenseBinaryOpShared : public OpKernel { errors::InvalidArgument("Provided indices are out-of-bounds w.r.t. " \ "dense side with broadcasted shape")); \ dense_gathered_flat(i) = rhs_ref.coeff(idx); \ + if (op_is_div) { \ + OP_REQUIRES(ctx, dense_gathered_flat(i) != 0, \ + errors::InvalidArgument( \ + "SparseDenseCwiseDiv cannot divide by zero," \ + "but input dense tensor contains zero ")); \ + } \ } \ break; \ } From b34966976aee96bb1721692d05fb4f6b7dc5b590 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Fri, 14 May 2021 22:07:07 -0700 Subject: [PATCH 187/256] Fix accessing possible nullptr in tensorflow::data::CompressElement and UncompressElement which are used in tf.data.service. PiperOrigin-RevId: 373920841 Change-Id: Ia88d78aee09fa19bb53a0f163fd19620d0c68743 --- tensorflow/core/data/compression_utils.cc | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/tensorflow/core/data/compression_utils.cc b/tensorflow/core/data/compression_utils.cc index d132bdca8dabfc..f550b150ce945d 100644 --- a/tensorflow/core/data/compression_utils.cc +++ b/tensorflow/core/data/compression_utils.cc @@ -29,9 +29,10 @@ Status CompressElement(const std::vector& element, int64 total_size = 0; for (auto& component : element) { if (DataTypeCanUseMemcpy(component.dtype())) { - // Some datatypes can be memcopied, allowing us to save two copies - // (AsProtoTensorContent and SerializeToArray). 
- total_size += DMAHelper::buffer(&component)->size(); + const TensorBuffer* buffer = DMAHelper::buffer(&component); + if (buffer) { + total_size += buffer->size(); + } } else { non_memcpy_components.emplace_back(); component.AsProtoTensorContent(&non_memcpy_components.back()); @@ -53,8 +54,10 @@ Status CompressElement(const std::vector& element, component.shape().AsProto(metadata->mutable_tensor_shape()); if (DataTypeCanUseMemcpy(component.dtype())) { const TensorBuffer* buffer = DMAHelper::buffer(&component); - memcpy(position, buffer->data(), buffer->size()); - metadata->set_tensor_size_bytes(buffer->size()); + if (buffer) { + memcpy(position, buffer->data(), buffer->size()); + metadata->set_tensor_size_bytes(buffer->size()); + } } else { TensorProto& proto = non_memcpy_components[non_memcpy_component_index++]; proto.SerializeToArray(position, proto.ByteSizeLong()); @@ -94,8 +97,13 @@ Status UncompressElement(const CompressedElement& compressed, if (DataTypeCanUseMemcpy(metadata.dtype())) { out->emplace_back(metadata.dtype(), metadata.tensor_shape()); TensorBuffer* buffer = DMAHelper::buffer(&out->back()); - iov[i].iov_base = buffer->data(); - iov[i].iov_len = buffer->size(); + if (buffer) { + iov[i].iov_base = buffer->data(); + iov[i].iov_len = buffer->size(); + } else { + iov[i].iov_base = nullptr; + iov[i].iov_len = 0; + } } else { // Allocate an empty Tensor. We will fill it out later after // uncompressing into the tensor_proto_str. From 716a669358b643e0bba3589ca9af6b718bdba3b2 Mon Sep 17 00:00:00 2001 From: Laura Pak Date: Mon, 12 Jul 2021 09:59:54 -0700 Subject: [PATCH 188/256] Fix null ptr deref in tf.raw_ops.RaggedTensorToTensor PiperOrigin-RevId: 384257511 Change-Id: I0484ad285039d132d6c41b284a7fcdd2b774a38e --- tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc b/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc index 8905647fa6b64f..bffa35875e553d 100644 --- a/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc +++ b/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc @@ -348,6 +348,9 @@ class RaggedTensorToTensorBaseOp : public OpKernel { Status GetFirstDimensionSize(OpKernelContext* context, INDEX_TYPE* result) { const Tensor first_partition_tensor = context->input(kFirstPartitionInputIndex); + if (row_partition_types_.empty()) { + return errors::InvalidArgument("No row_partition_types given."); + } const RowPartitionType first_partition_type = row_partition_types_[0]; switch (first_partition_type) { case RowPartitionType::FIRST_DIM_SIZE: From 31febcdb7eca32eb2c27bf790d6190a2b8a81835 Mon Sep 17 00:00:00 2001 From: Laura Pak Date: Fri, 9 Jul 2021 17:32:55 -0700 Subject: [PATCH 189/256] Validate num_elements input in tf.raw_ops.TensorListReserve PiperOrigin-RevId: 383954564 Change-Id: I454bd78eff85bc4f16ddb7e608596971cca47f8f --- tensorflow/core/kernels/list_kernels.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/core/kernels/list_kernels.cc b/tensorflow/core/kernels/list_kernels.cc index 9a2f373f5ce0cf..488e02337f707b 100644 --- a/tensorflow/core/kernels/list_kernels.cc +++ b/tensorflow/core/kernels/list_kernels.cc @@ -302,6 +302,10 @@ class TensorListReserve : public OpKernel { PartialTensorShape element_shape; OP_REQUIRES_OK(c, TensorShapeFromTensor(c->input(0), &element_shape)); int32 num_elements = c->input(1).scalar()(); + OP_REQUIRES(c, num_elements >= 0, + errors::InvalidArgument("The num_elements to reserve must be a " + "non 
negative number, but got ", + num_elements)); TensorList output; output.element_shape = element_shape; output.element_dtype = element_dtype_; From 39ee9a577bb4bc697500bfc678b0c42b3e6a2ec4 Mon Sep 17 00:00:00 2001 From: Laura Pak Date: Fri, 9 Jul 2021 17:45:15 -0700 Subject: [PATCH 190/256] Ensure non-empty compressed input in tf.raw_ops.UncompressElement PiperOrigin-RevId: 383955815 Change-Id: I072a84fd02738dd2f51b3f42836ed80067dba4a8 --- tensorflow/core/kernels/data/experimental/compression_ops.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/core/kernels/data/experimental/compression_ops.cc b/tensorflow/core/kernels/data/experimental/compression_ops.cc index efa7018acb6293..8cc214671bd742 100644 --- a/tensorflow/core/kernels/data/experimental/compression_ops.cc +++ b/tensorflow/core/kernels/data/experimental/compression_ops.cc @@ -48,6 +48,11 @@ void UncompressElementOp::Compute(OpKernelContext* ctx) { Tensor tensor = ctx->input(0); const Variant& variant = tensor.scalar()(); const CompressedElement* compressed = variant.get(); + OP_REQUIRES( + ctx, compressed != nullptr, + errors::InvalidArgument( + "Input does not contain a compressed element. Instead got tensor ", + tensor.DebugString())); std::vector components; OP_REQUIRES_OK(ctx, UncompressElement(*compressed, &components)); From 528ebc948a58ac6353305f8345a50ab35717dbdb Mon Sep 17 00:00:00 2001 From: Daniel Ellis Date: Wed, 14 Jul 2021 12:43:17 -0700 Subject: [PATCH 191/256] Fix segmentation fault in shape inference logic. When running shape functions, some functions (such as `MutableHashTableShape`) produce extra output information in the form of a `ShapeAndType` struct. The shapes embedded in this struct are owned by an inference context that is cleaned up almost immediately; if the upstream code attempts to access this shape information, it can trigger a segfault. `ShapeRefiner` is mitigating this for normal output shapes by cloning them (and thus putting the newly created shape under ownership of an inference context that will not die), but we were not doing the same for shapes and types. This commit fixes that by doing similar logic on output shapes and types. PiperOrigin-RevId: 384761124 Change-Id: I07c0c42d29dfbb55bfa13ec1f09ef825fb0a1a1d --- .../core/common_runtime/shape_refiner.cc | 21 +++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/tensorflow/core/common_runtime/shape_refiner.cc b/tensorflow/core/common_runtime/shape_refiner.cc index a968aaf09b6ad3..6a7d1eadfb66b6 100644 --- a/tensorflow/core/common_runtime/shape_refiner.cc +++ b/tensorflow/core/common_runtime/shape_refiner.cc @@ -117,9 +117,26 @@ Status InferShapesForFunctionSubNode(const Node* node, ShapeRefiner* refiner, TF_RETURN_IF_ERROR(outer_context->MakeShapeFromShapeProto(proto, &handle)); outer_context->set_output(index, handle); - auto* resource = node_context->input_handle_shapes_and_types(0); + const std::vector* resource = + node_context->input_handle_shapes_and_types(0); if (resource) { - outer_context->set_output_handle_shapes_and_types(index, *resource); + // `ShapesAndType`s contain `ShapeHandle`s. These `ShapeHandle`s point + // to `Shape`s that are owned by a different inference context too. We + // need to copy them to the outer context to prevent them from being + // destroyed before they are used. 
+ std::vector copied_shapes_and_types; + for (auto& shape_and_type : *resource) { + ShapeHandle handle; + TensorShapeProto proto; + node_context->ShapeHandleToProto(shape_and_type.shape, &proto); + TF_RETURN_IF_ERROR( + outer_context->MakeShapeFromShapeProto(proto, &handle)); + copied_shapes_and_types.push_back( + ShapeAndType(handle, shape_and_type.dtype, shape_and_type.type)); + } + + outer_context->set_output_handle_shapes_and_types( + index, copied_shapes_and_types); } } From d8e07ff51f9e709399b8c553290836fb308e45ed Mon Sep 17 00:00:00 2001 From: geetachavan1 <53313357+geetachavan1@users.noreply.github.com> Date: Tue, 27 Jul 2021 16:08:12 -0700 Subject: [PATCH 192/256] Update shape_refiner.cc --- tensorflow/core/common_runtime/shape_refiner.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/core/common_runtime/shape_refiner.cc b/tensorflow/core/common_runtime/shape_refiner.cc index 6a7d1eadfb66b6..906bd14f96c847 100644 --- a/tensorflow/core/common_runtime/shape_refiner.cc +++ b/tensorflow/core/common_runtime/shape_refiner.cc @@ -132,7 +132,7 @@ Status InferShapesForFunctionSubNode(const Node* node, ShapeRefiner* refiner, TF_RETURN_IF_ERROR( outer_context->MakeShapeFromShapeProto(proto, &handle)); copied_shapes_and_types.push_back( - ShapeAndType(handle, shape_and_type.dtype, shape_and_type.type)); + ShapeAndType(handle, shape_and_type.dtype, shape_and_type.specialized_type)); } outer_context->set_output_handle_shapes_and_types( From 3abfbf613ab0571657d14b704f5497429abd7cdc Mon Sep 17 00:00:00 2001 From: Laura Pak Date: Tue, 27 Jul 2021 12:11:33 -0700 Subject: [PATCH 193/256] [Cherrypick2.3] Disallow empty node_id_range in tf.raw_ops.BoostedTreesCalculateBestFeatureSplitV2 and tf.raw_ops.BoostedTreesCalculateBestGainsPerFeature --- .../core/kernels/boosted_trees/stats_ops.cc | 24 +++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/tensorflow/core/kernels/boosted_trees/stats_ops.cc b/tensorflow/core/kernels/boosted_trees/stats_ops.cc index 851e5b78e847b7..4b4b78356e2bd1 100644 --- a/tensorflow/core/kernels/boosted_trees/stats_ops.cc +++ b/tensorflow/core/kernels/boosted_trees/stats_ops.cc @@ -51,6 +51,16 @@ class BoostedTreesCalculateBestGainsPerFeatureOp : public OpKernel { // node_id_range const Tensor* node_id_range_t; OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t)); + OP_REQUIRES( + context, node_id_range_t->dims() == 1, + errors::InvalidArgument("node_id_range must be a rank 1 tensor, but " + "given node_id_range has dims of ", + node_id_range_t->dims())); + OP_REQUIRES(context, node_id_range_t->dim_size(0) == 2, + errors::InvalidArgument( + "node_id_range must be a rank 1 tensor with shape=[2], but " + "given node_id_range has shape ", + node_id_range_t->dim_size(0), " on its first dim")); const auto node_id_range = node_id_range_t->vec(); const int32 node_id_first = node_id_range(0); // inclusive const int32 node_id_last = node_id_range(1); // exclusive @@ -567,9 +577,19 @@ class BoostedTreesCalculateBestFeatureSplitV2 : public OpKernel { // node_id_range const Tensor* node_id_range_t; OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t)); + OP_REQUIRES( + context, node_id_range_t->dims() == 1, + errors::InvalidArgument("node_id_range must be a rank 1 tensor, but " + "given node_id_range has dims of ", + node_id_range_t->dims())); + OP_REQUIRES(context, node_id_range_t->dim_size(0) == 2, + errors::InvalidArgument( + "node_id_range must be a rank 1 tensor with shape=[2], but " 
+ "given node_id_range has shape ", + node_id_range_t->dim_size(0), " on its first dim")); const auto node_id_range = node_id_range_t->vec(); - const int32 node_id_first = node_id_range(0); // Inclusive. - const int32 node_id_last = node_id_range(1); // Exclusive. + const int32_t node_id_first = node_id_range(0); // Inclusive. + const int32_t node_id_last = node_id_range(1); // Exclusive. // Get stats_summaries_list. OpInputList stats_summaries_list; From 84acf423e427320ae6fae44c08d2c52205b2dec0 Mon Sep 17 00:00:00 2001 From: geetachavan1 <53313357+geetachavan1@users.noreply.github.com> Date: Wed, 28 Jul 2021 12:59:32 -0700 Subject: [PATCH 194/256] Update stats_ops.cc --- tensorflow/core/kernels/boosted_trees/stats_ops.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tensorflow/core/kernels/boosted_trees/stats_ops.cc b/tensorflow/core/kernels/boosted_trees/stats_ops.cc index 4b4b78356e2bd1..2e386106ac9fde 100644 --- a/tensorflow/core/kernels/boosted_trees/stats_ops.cc +++ b/tensorflow/core/kernels/boosted_trees/stats_ops.cc @@ -588,8 +588,8 @@ class BoostedTreesCalculateBestFeatureSplitV2 : public OpKernel { "given node_id_range has shape ", node_id_range_t->dim_size(0), " on its first dim")); const auto node_id_range = node_id_range_t->vec(); - const int32_t node_id_first = node_id_range(0); // Inclusive. - const int32_t node_id_last = node_id_range(1); // Exclusive. + const int32 node_id_first = node_id_range(0); // Inclusive. + const int32 node_id_last = node_id_range(1); // Exclusive. // Get stats_summaries_list. OpInputList stats_summaries_list; From 3366e13671d43a248d9bdbea151407587f1b3f05 Mon Sep 17 00:00:00 2001 From: Laura Pak Date: Tue, 27 Jul 2021 12:35:03 -0700 Subject: [PATCH 195/256] [Cherrypick2.3] In tf.raw_ops.BoostedTreesSparseCalculateBestFeatureSplit, limit stat_dim in stats_summary_indices to under stats_dims in stats_summary_shape --- tensorflow/core/kernels/boosted_trees/stats_ops.cc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tensorflow/core/kernels/boosted_trees/stats_ops.cc b/tensorflow/core/kernels/boosted_trees/stats_ops.cc index 851e5b78e847b7..41fce815e70be3 100644 --- a/tensorflow/core/kernels/boosted_trees/stats_ops.cc +++ b/tensorflow/core/kernels/boosted_trees/stats_ops.cc @@ -1025,6 +1025,13 @@ class BoostedTreesSparseCalculateBestFeatureSplitOp : public OpKernel { const int32 feature_dim = stats_summary_indices(idx, 1); const int32 bucket_id = stats_summary_indices(idx, 2); const int32 stat_dim = stats_summary_indices(idx, 3); + OP_REQUIRES(context, stat_dim < stats_dims, + errors::InvalidArgument( + "Stat dim, the sum of logits dim and hessian dim in " + "stats_summary_indices, cannot be greater than stats " + "dims, the last value in stats_summary_shape, which was ", + stats_dims, ". 
At index (", idx, + ", 4), stats_summary_indices contains value ", stat_dim)); std::pair const& f_insert_result = f_map.insert( FeatureMapIterator::value_type(feature_dim, BucketMap())); auto& b_map = f_insert_result.first->second; From e4cb9e7a7de8a55b4e3867d7d7e73f71612bbb24 Mon Sep 17 00:00:00 2001 From: Laura Pak Date: Tue, 27 Jul 2021 17:19:57 -0700 Subject: [PATCH 196/256] Secure tf.raw_ops.QuantizeV2 Validate size and shape of min_range and max_range Ensure axis is within input dims limits PiperOrigin-RevId: 387232799 Change-Id: I36975281f7b5758e9e31a8dcc73fe610ef456318 --- tensorflow/core/kernels/quantize_op.cc | 43 ++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/tensorflow/core/kernels/quantize_op.cc b/tensorflow/core/kernels/quantize_op.cc index a523c4b9cd0249..098991e4f436d8 100644 --- a/tensorflow/core/kernels/quantize_op.cc +++ b/tensorflow/core/kernels/quantize_op.cc @@ -113,7 +113,50 @@ class QuantizeV2Op : public OpKernel { int num_slices = 1; if (axis_ > -1) { + OP_REQUIRES( + ctx, input.dims() > axis_, + errors::InvalidArgument( + "Axis is on a zero-based index, so its value must always be less " + "than number of input's dims, but given axis value was ", + axis_, " and input's dims was ", input.dims())); num_slices = input.dim_size(axis_); + OP_REQUIRES(ctx, input_min_range.dims() == 1, + errors::InvalidArgument( + "If axis is specified, min_range must be a 1-D tensor " + "whose size matches the axis dimension of the input and " + "output tensors, but min_range dims are ", + input_min_range.dims())); + OP_REQUIRES(ctx, input_min_range.dim_size(0) == num_slices, + errors::InvalidArgument( + "If axis is specified, min_range must be a 1-D tensor " + "whose size matches the axis dimension of the input and " + "output tensors, but min_range is a 1-D tensor of size ", + input_min_range.dim_size(0), + " and input's axis dimension is of size ", num_slices)); + OP_REQUIRES(ctx, input_max_range.dims() == 1, + errors::InvalidArgument( + "If axis is specified, max_range must be a 1-D tensor " + "whose size matches the axis dimension of the input and " + "output tensors, but max_range dims are ", + input_max_range.dims())); + OP_REQUIRES(ctx, input_max_range.dim_size(0) == num_slices, + errors::InvalidArgument( + "If axis is specified, max_range must be a 1-D tensor " + "whose size matches the axis dimension of the input and " + "output tensors, but max_range is a 1-D tensor of size ", + input_max_range.dim_size(0), + " and input's axis dimension is of size ", num_slices)); + } else { + OP_REQUIRES(ctx, input_min_range.NumElements() == 1, + errors::InvalidArgument( + "If axis is not specified, min_range must contain a " + "single float element, but it contains ", + input_min_range.NumElements(), " elements")); + OP_REQUIRES(ctx, input_max_range.NumElements() == 1, + errors::InvalidArgument( + "If axis is not specified, max_range must contain a " + "single float element, but it contains ", + input_max_range.NumElements(), " elements")); } const TensorShape& minmax_shape = ctx->input(1).shape(); From 1ed1a632a2a525cd5bf05ea1793ef320079f16b5 Mon Sep 17 00:00:00 2001 From: Laura Pak Date: Tue, 27 Jul 2021 12:30:33 -0700 Subject: [PATCH 197/256] Ensure non-empty input_splits in tf.raw_ops.UnicodeEncode PiperOrigin-RevId: 387170080 Change-Id: I3b489acc51c5cb4124c535b9df7cc6e62ef21766 --- tensorflow/core/kernels/unicode_ops.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/core/kernels/unicode_ops.cc b/tensorflow/core/kernels/unicode_ops.cc index 
e6c8f4dfc42284..ab09dbe1d54293 100644 --- a/tensorflow/core/kernels/unicode_ops.cc +++ b/tensorflow/core/kernels/unicode_ops.cc @@ -533,6 +533,10 @@ class UnicodeEncodeOp : public OpKernel { const Tensor& input_splits = context->input(1); const auto input_splits_flat = input_splits.flat(); + OP_REQUIRES( + context, input_splits.NumElements() > 0, + errors::InvalidArgument("Input_splits should contain elements, but " + "given input_values has 0 elements")); // Operation will treat first argument in input_splits as if it were zero // regardless of its actual value since splits should begin with zero and // end with the length of the input values vector. From 295d88b8872fe48adeae26db1b4f16ed6f8f6a06 Mon Sep 17 00:00:00 2001 From: Laura Pak Date: Tue, 27 Jul 2021 10:55:35 -0700 Subject: [PATCH 198/256] Disallow negative ngram_widths values in tf.raw_ops.StringNGrams PiperOrigin-RevId: 387148179 Change-Id: I641395a09a208be72ef9b3ceb128cf8a83a0775b --- tensorflow/core/kernels/string_ngrams_op.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tensorflow/core/kernels/string_ngrams_op.cc b/tensorflow/core/kernels/string_ngrams_op.cc index 7008a1d766af25..97af9abc4454ac 100644 --- a/tensorflow/core/kernels/string_ngrams_op.cc +++ b/tensorflow/core/kernels/string_ngrams_op.cc @@ -53,6 +53,12 @@ class StringNGramsOp : public tensorflow::OpKernel { } void Compute(tensorflow::OpKernelContext* context) override { + for (int ngram_width : ngram_widths_) { + OP_REQUIRES( + context, ngram_width > 0, + errors::InvalidArgument("ngram_widths must contain positive values")); + } + const tensorflow::Tensor* data; OP_REQUIRES_OK(context, context->input("data", &data)); const auto& input_data = data->flat().data(); From 8880ea8e6da9264aa7d6d32ee6ffe29f0fc4283d Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 30 Jul 2021 21:11:18 -0700 Subject: [PATCH 199/256] Prevent heap OOB in sparse reduction ops. PiperOrigin-RevId: 387934524 Change-Id: I894aa30f1e454f09b471d565b4a325da49322c1a --- tensorflow/core/kernels/sparse_reduce_op.cc | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tensorflow/core/kernels/sparse_reduce_op.cc b/tensorflow/core/kernels/sparse_reduce_op.cc index b65f31e5eb174e..eb56b7390b0f9a 100644 --- a/tensorflow/core/kernels/sparse_reduce_op.cc +++ b/tensorflow/core/kernels/sparse_reduce_op.cc @@ -219,7 +219,20 @@ class SparseReduceOp : public OpKernel { sp.Reorder(reduction.reorder_dims); for (const auto &g : sp.group(reduction.group_by_dims)) { Op::template Run(ctx, reduced_val, g.template values()); + OP_REQUIRES(ctx, + output_strides.empty() || + (g.group().size() == output_strides.size()), + errors::Internal( + "Expected group size and output_strides size to match", + ", but got ", g.group().size(), " and ", + output_strides.size())); const int64 idx = CoordinatesToFlatIndex(g.group(), output_strides); + OP_REQUIRES(ctx, + idx >= 0 && idx < out_flat.size(), + errors::Internal( + "Obtained a write index of ", idx, + " which is outside of bounds of [0, ", + out_flat.size(), ")")); out_flat(idx) = reduced_val(); VLOG(2) << "coords: " << absl::StrJoin(g.group(), ",") << "; idx: " << idx << "; group " << Op::Name() << ": " From 575ad42ad406994f2c1df60d79795a96d7091305 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 2 Aug 2021 14:21:41 -0700 Subject: [PATCH 200/256] Fix NPE in restoring code. 
PiperOrigin-RevId: 388303253 Change-Id: Ia8c68568cb854bca538909a182b31a618d68ce55 --- tensorflow/core/kernels/save_restore_tensor.cc | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/save_restore_tensor.cc b/tensorflow/core/kernels/save_restore_tensor.cc index 1a5b6b92bd6a0a..6b91ccf26c9320 100644 --- a/tensorflow/core/kernels/save_restore_tensor.cc +++ b/tensorflow/core/kernels/save_restore_tensor.cc @@ -151,11 +151,18 @@ void RestoreTensor(OpKernelContext* context, context, size == 1, errors::InvalidArgument( "Input 0 (file_pattern) must be a string scalar; got a tensor of ", - size, "elements")); + size, " elements")); } const string& file_pattern = file_pattern_t.flat()(0); const Tensor& tensor_name_t = context->input(1); + { + const int64_t size = tensor_name_t.NumElements(); + OP_REQUIRES(context, size > restore_index, + errors::InvalidArgument( + "Input 1 (file_pattern) must be a have at least ", + restore_index + 1, " elements")); + } const string& tensor_name = tensor_name_t.flat()(restore_index); // If we cannot find a cached reader we will allocate our own. From 7abc3c9e7d7e8f073f145cd5e1ff2fda1aebe27b Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 3 Aug 2021 12:28:58 -0700 Subject: [PATCH 201/256] Disallow division by zero FPE in `tf.raw_ops.ResourceScatterDiv` Had to update a test that was broken. PiperOrigin-RevId: 388516976 Change-Id: Ic358e6bf0559e011539974d453fc7aa18b427e9c --- .../core/kernels/resource_variable_ops.cc | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/tensorflow/core/kernels/resource_variable_ops.cc b/tensorflow/core/kernels/resource_variable_ops.cc index b9c883c7e2ff30..e056d9cb0f9d4a 100644 --- a/tensorflow/core/kernels/resource_variable_ops.cc +++ b/tensorflow/core/kernels/resource_variable_ops.cc @@ -844,6 +844,35 @@ TF_CALL_GPU_NUMBER_TYPES(REGISTER_GATHER_ND_GPU); #undef REGISTER_GATHER_ND_ALL_INDICES #undef REGISTER_GATHER_ND_FULL +namespace { + +template +bool isCPUDevice() { + return false; +} + +template <> +bool isCPUDevice() { + return true; +} + +template +bool ValidateInput(const Tensor& updates) { + const auto updates_flat = updates.flat(); + const T zero(0); + for (int i = 0; i < updates.NumElements(); i++) { + if (updates_flat(i) == zero) return false; + } + return true; +} + +template <> +bool ValidateInput(const Tensor& updates) { + return true; +} + +} // namespace + template class ResourceScatterUpdateOp : public OpKernel { public: @@ -910,6 +939,12 @@ class ResourceScatterUpdateOp : public OpKernel { " indexing: ", params->dim_size(0), " > ", std::numeric_limits::max())); + // Prevent division by 0 + if (isCPUDevice() && op == tensorflow::scatter_op::UpdateOp::DIV) { + OP_REQUIRES(c, ValidateInput(updates), + errors::InvalidArgument("updates must not contain 0")); + } + if (N > 0) { auto indices_flat = indices.flat(); auto params_flat = params->flat_outer_dims(); From 79151cda53f4372bbfc2381bd6948820d8bafa9b Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 2 Aug 2021 19:05:27 -0700 Subject: [PATCH 202/256] Fix heap OOB in `tf.raw_ops.RaggedGather` PiperOrigin-RevId: 388355464 Change-Id: If14d96231d1cd7aad7c4d1c22c1bab1576b75717 --- tensorflow/core/kernels/ragged_gather_op.cc | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/ragged_gather_op.cc b/tensorflow/core/kernels/ragged_gather_op.cc index 88c0d1ebd6959b..5939b4120beeca 100644 --- a/tensorflow/core/kernels/ragged_gather_op.cc +++ 
b/tensorflow/core/kernels/ragged_gather_op.cc @@ -58,15 +58,21 @@ class RaggedGatherOpBase : public OpKernel { void Compute(OpKernelContext* context) override { // Get the input Tensors. + OpInputList params_nested_splits_in; OP_REQUIRES_OK(context, context->input_list("params_nested_splits", ¶ms_nested_splits_in)); + OP_REQUIRES( + context, params_nested_splits_in.size() > 0, + errors::InvalidArgument("params_nested_splits must be non empty")); + const Tensor& params_dense_values_in = context->input(params_nested_splits_in.size()); const Tensor& indices_in = context->input(params_nested_splits_in.size() + 1); - DCHECK_GT(params_nested_splits_in.size(), 0); // Enforced by REGISTER_OP. + OP_REQUIRES(context, params_nested_splits_in[0].dims() > 0, + errors::InvalidArgument("Split tensors must not be scalars")); SPLITS_TYPE num_params = params_nested_splits_in[0].dim_size(0) - 1; OP_REQUIRES_OK(context, ValidateIndices(indices_in, num_params)); From 9f56c6ae0c3c9d8cb3be02d9dd6273897b3efa73 Mon Sep 17 00:00:00 2001 From: Laura Pak Date: Mon, 2 Aug 2021 15:07:31 -0700 Subject: [PATCH 203/256] Ensure non-empty padding_value input to tf.raw_ops.MatrixDiagPartV2, if a padding_value is input PiperOrigin-RevId: 388314614 Change-Id: If0b51ad58d5d8543a6be6ce8f42ae4755c80d55f --- tensorflow/core/kernels/matrix_diag_op.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/matrix_diag_op.cc b/tensorflow/core/kernels/matrix_diag_op.cc index 016356dfa5336f..16a9e1817badd8 100644 --- a/tensorflow/core/kernels/matrix_diag_op.cc +++ b/tensorflow/core/kernels/matrix_diag_op.cc @@ -86,7 +86,10 @@ class MatrixDiagPartOp : public OpKernel { upper_diag_index = diag_index.flat()(1); } } - padding_value = context->input(2).flat()(0); + const Tensor& padding_in = context->input(2); + OP_REQUIRES(context, padding_in.NumElements() == 1, + errors::InvalidArgument("Padding must be scalar.")); + padding_value = padding_in.flat()(0); } const TensorShape& input_shape = input.shape(); From f43fc155da74e8b74e905f5f190c0d5d440b1631 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 3 Aug 2021 15:51:47 -0700 Subject: [PATCH 204/256] Prevent nullptr deref in SparseTensorSliceDataset The arguments must determine a valid sparse tensor. This means that when indices are empty then the values must be empty too (and the reverse). Also added test, by modifying existing test with empty sparse tensor to now run with an invalid sparse tensor input. PiperOrigin-RevId: 388562757 Change-Id: Id8b54cd7c2316025b4f9a77292c8fb5344d17609 --- .../data/sparse_tensor_slice_dataset_op.cc | 11 ++++++++++ .../from_sparse_tensor_slices_test.py | 20 +++++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc b/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc index 1e3ed53d6c6d6e..212c2b4e96715f 100644 --- a/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc +++ b/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc @@ -237,6 +237,17 @@ class SparseTensorSliceDatasetOp : public DatasetOpKernel { errors::InvalidArgument( "Input indices should be a matrix but received shape ", indices->shape().DebugString())); + + const auto num_indices = indices->NumElements(); + const auto num_values = values->NumElements(); + if (num_indices == 0 || num_values == 0) { + OP_REQUIRES(ctx, num_indices == num_values, + errors::InvalidArgument( + "If indices or values are empty, the other one must also " + "be. 
Got indices of shape ", + indices->shape().DebugString(), " and values of shape ", + values->shape().DebugString())); + } OP_REQUIRES(ctx, TensorShapeUtils::IsVector(values->shape()), errors::InvalidArgument( "Input values should be a vector but received shape ", diff --git a/tensorflow/python/data/kernel_tests/from_sparse_tensor_slices_test.py b/tensorflow/python/data/kernel_tests/from_sparse_tensor_slices_test.py index d7a2c158de9a13..444b83cb72e5d4 100644 --- a/tensorflow/python/data/kernel_tests/from_sparse_tensor_slices_test.py +++ b/tensorflow/python/data/kernel_tests/from_sparse_tensor_slices_test.py @@ -84,6 +84,26 @@ def testFromSparseTensorSlices(self): with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) + @combinations.generate(combinations.combine(tf_api_version=1, mode=["graph"])) + def testEmptySparseTensorSlicesInvalid(self): + """Test a dataset based on invalid `tf.sparse.SparseTensor`.""" + st = array_ops.sparse_placeholder(dtypes.float64) + iterator = dataset_ops.make_initializable_iterator( + dataset_ops.Dataset.from_sparse_tensor_slices(st)) + init_op = iterator.initializer + + with self.cached_session() as sess: + # Test with an empty sparse tensor but with non empty values. + empty_indices = np.empty((0, 4), dtype=np.int64) + non_empty_values = [1, 2, 3, 4] + empty_dense_shape = [0, 4, 37, 9] + sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, + non_empty_values, + empty_dense_shape) + # Here, we expect the test to fail when running the feed. + with self.assertRaises(errors.InvalidArgumentError): + sess.run(init_op, feed_dict={st: sparse_feed}) + @combinations.generate(combinations.combine(tf_api_version=2, mode=["eager"])) def testFromSparseTensorSlicesError(self): with self.assertRaises(AttributeError): From 190548f9c08530e493933d217b071e5b3d909ea2 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 30 Jul 2021 19:05:41 -0700 Subject: [PATCH 205/256] Ensure validation sticks in `save_restore_v2_ops.cc` PiperOrigin-RevId: 387924206 Change-Id: I6156842eb3230076b5812c0815f3e66bd5241454 --- tensorflow/core/kernels/save_restore_v2_ops.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/core/kernels/save_restore_v2_ops.cc b/tensorflow/core/kernels/save_restore_v2_ops.cc index 07e120e042cf3f..97e94cbd02ac03 100644 --- a/tensorflow/core/kernels/save_restore_v2_ops.cc +++ b/tensorflow/core/kernels/save_restore_v2_ops.cc @@ -98,6 +98,7 @@ class SaveV2 : public OpKernel { const Tensor& shape_and_slices = context->input(2); ValidateInputs(true /* is save op */, context, prefix, tensor_names, shape_and_slices); + if (!context->status().ok()) return; const int kFixedInputs = 3; // Prefix, tensor names, shape_and_slices. const int num_tensors = static_cast(tensor_names.NumElements()); @@ -156,6 +157,7 @@ class RestoreV2 : public OpKernel { " expected dtypes.")); ValidateInputs(false /* not save op */, context, prefix, tensor_names, shape_and_slices); + if (!context->status().ok()) return; const string& prefix_string = prefix.scalar()(); From 647e265b65d192b60ac35935dfebadf9caafa222 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 29 Jul 2021 14:58:43 -0700 Subject: [PATCH 206/256] Fix segfault/heap buffer overflow in `{Experimental,}DatasetToTFRecord` where dataset is numeric. Code assumes only strings inputs and then interprets numbers as valid `tstring`s. Then, when trying to compute the CRC of the record this results in heap buffer overflow. 
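For reference, a minimal reproduction sketch for the case this change rejects (an assumption sketched against the public `tf.raw_ops.DatasetToTFRecord` endpoint and `tf.data.experimental.to_variant`, neither of which appears in this patch): a dataset whose single component is not `DT_STRING` should now fail with `InvalidArgument` instead of overflowing the heap.

```python
import tensorflow as tf

# Hypothetical repro sketch, not part of the patch: DatasetToTFRecord only
# makes sense for a dataset with a single DT_STRING component. An int64
# dataset previously reached the CRC computation and corrupted the heap;
# with the added validation it should be rejected up front.
dataset = tf.data.Dataset.range(5)  # single int64 component, not tf.string
try:
  tf.raw_ops.DatasetToTFRecord(
      input_dataset=tf.data.experimental.to_variant(dataset),
      filename="/tmp/example.tfrecord",
      compression_type="")
except tf.errors.InvalidArgumentError as e:
  print("rejected:", e)
```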
PiperOrigin-RevId: 387675909 Change-Id: I7396b9b8afc1ac744112af7c0b1cd7bb41e0f556 --- .../kernels/data/experimental/to_tf_record_op.cc | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/data/experimental/to_tf_record_op.cc b/tensorflow/core/kernels/data/experimental/to_tf_record_op.cc index bfa894cd473b40..56401bb91f5753 100644 --- a/tensorflow/core/kernels/data/experimental/to_tf_record_op.cc +++ b/tensorflow/core/kernels/data/experimental/to_tf_record_op.cc @@ -16,6 +16,7 @@ limitations under the License. #include "tensorflow/core/framework/function_handle_cache.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/resource_mgr.h" +#include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/data/dataset_utils.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/threadpool.h" @@ -87,8 +88,20 @@ class ToTFRecordOp : public AsyncOpKernel { TF_RETURN_IF_ERROR(dataset->MakeIterator( &iter_ctx, /*parent=*/nullptr, "ToTFRecordOpIterator", &iterator)); + const int num_output_dtypes = dataset->output_dtypes().size(); + if (num_output_dtypes != 1) { + return errors::InvalidArgument( + "ToTFRecordOp currently only support datasets of 1 single column, ", + "but got ", num_output_dtypes); + } + const DataType dt = dataset->output_dtypes()[0]; + if (dt != DT_STRING) { + return errors::InvalidArgument( + "ToTFRecordOp currently only supports DT_STRING dataypes, but got ", + DataTypeString(dt)); + } std::vector components; - components.reserve(dataset->output_dtypes().size()); + components.reserve(num_output_dtypes); bool end_of_sequence; do { TF_RETURN_IF_ERROR( From c91ba1ce78af68d1918cb1709b973d2ebcff1b50 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 2 Aug 2021 13:03:44 -0700 Subject: [PATCH 207/256] Validate dimensions of input tensor in `FractionalAvgPoolGrad` PiperOrigin-RevId: 388286227 Change-Id: Ieb7566155e92acc8993a2212c76deacadc0edc8a --- tensorflow/core/kernels/fractional_avg_pool_op.cc | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tensorflow/core/kernels/fractional_avg_pool_op.cc b/tensorflow/core/kernels/fractional_avg_pool_op.cc index 0452638a066795..7c396126427473 100644 --- a/tensorflow/core/kernels/fractional_avg_pool_op.cc +++ b/tensorflow/core/kernels/fractional_avg_pool_op.cc @@ -271,6 +271,18 @@ class FractionalAvgPoolGradOp : public OpKernel { const int64 in_rows = orig_input_tensor_shape_flat(1); const int64 in_cols = orig_input_tensor_shape_flat(2); const int64 in_depth = orig_input_tensor_shape_flat(3); + OP_REQUIRES( + context, in_batch != 0, + errors::InvalidArgument("Batch dimension of input must not be 0")); + OP_REQUIRES( + context, in_rows != 0, + errors::InvalidArgument("Rows dimension of input must not be 0")); + OP_REQUIRES( + context, in_cols != 0, + errors::InvalidArgument("Columns dimension of input must not be 0")); + OP_REQUIRES( + context, in_depth != 0, + errors::InvalidArgument("Depth dimension of input must not be 0")); constexpr int tensor_in_and_out_dims = 4; // Transform orig_input_tensor_shape into TensorShape From f9043fa6b373bea315bf09eddfefa1356525f856 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 30 Jul 2021 19:13:19 -0700 Subject: [PATCH 208/256] Prevent use after free. A very old version of the code used `result` as a simple pointer to a resource. Two years later, the pointer got changed to a `unique_ptr` but author forgot to remove the call to `Unref`. 
Three years after that, we finally uncover the UAF. PiperOrigin-RevId: 387924872 Change-Id: I70fb6f199164de49fac20c168132a07b84903f9b --- tensorflow/core/kernels/boosted_trees/resource_ops.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/core/kernels/boosted_trees/resource_ops.cc b/tensorflow/core/kernels/boosted_trees/resource_ops.cc index ac1fb5652da5f9..8036f2b20f36bb 100644 --- a/tensorflow/core/kernels/boosted_trees/resource_ops.cc +++ b/tensorflow/core/kernels/boosted_trees/resource_ops.cc @@ -53,6 +53,7 @@ class BoostedTreesCreateEnsembleOp : public OpKernel { if (!result->InitFromSerialized( tree_ensemble_serialized_t->scalar()(), stamp_token)) { result->Unref(); + result.release(); // Needed due to the `->Unref` above, to prevent UAF OP_REQUIRES( context, false, errors::InvalidArgument("Unable to parse tree ensemble proto.")); From 42828224443dac005f03aa593087f80990869417 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 30 Jul 2021 22:23:28 -0700 Subject: [PATCH 209/256] Prevent division by 0 in `resource_variable_ops.cc` PiperOrigin-RevId: 387939939 Change-Id: Ib04902d63756633999959a70613f2eaa30c2c151 --- tensorflow/core/kernels/resource_variable_ops.cc | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tensorflow/core/kernels/resource_variable_ops.cc b/tensorflow/core/kernels/resource_variable_ops.cc index e056d9cb0f9d4a..5621e7bf7435c1 100644 --- a/tensorflow/core/kernels/resource_variable_ops.cc +++ b/tensorflow/core/kernels/resource_variable_ops.cc @@ -688,7 +688,8 @@ class ResourceGatherOp : public OpKernel { copy_functor(c->eigen_device(), tmp_indices.flat(), indices.flat()); - AddBatchOffsets(&tmp_indices, params); + AddBatchOffsets(c, &tmp_indices, params); + if (!c->status().ok()) return; op_indices = &tmp_indices; } @@ -720,11 +721,17 @@ class ResourceGatherOp : public OpKernel { // Example: batch_dims = 1, indices = [[0, 1, 2], [0, 1, 2]] // If indexing into a params dimension of size 4, then the indices will become // [0, 1, 2, 4, 5, 6] - void AddBatchOffsets(Tensor* indices, const Tensor& params) { + void AddBatchOffsets(OpKernelContext* ctx, Tensor* indices, + const Tensor& params) { int64 batch_size = 1; // The size of all batch dimensions. for (int idx = 0; idx < batch_dims_; ++idx) { batch_size *= params.dim_size(idx); } + OP_REQUIRES( + ctx, batch_size != 0, + errors::InvalidArgument( + "Inner size of indices would result in batch_size of 0 and a ", + "division by 0 in the implementation. 
This is illegal")); auto indices_flat = indices->flat(); int64 const index_inner_size = indices->NumElements() / batch_size; From f3a61584f09cda7c4788e13dd918467c95312115 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 30 Jul 2021 21:37:59 -0700 Subject: [PATCH 210/256] Prevent heap oob access in `resource_variable_ops.cc` PiperOrigin-RevId: 387936433 Change-Id: I9e71ddaa8dbd51ec6afbf163a6b3b591f193b4f6 --- tensorflow/core/kernels/resource_variable_ops.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/core/kernels/resource_variable_ops.cc b/tensorflow/core/kernels/resource_variable_ops.cc index e056d9cb0f9d4a..e4c2dd79195419 100644 --- a/tensorflow/core/kernels/resource_variable_ops.cc +++ b/tensorflow/core/kernels/resource_variable_ops.cc @@ -643,6 +643,11 @@ class ResourceGatherOp : public OpKernel { OP_REQUIRES( c, TensorShapeUtils::IsVectorOrHigher(params.shape()), errors::InvalidArgument("params must be at least 1 dimensional")); + OP_REQUIRES( + c, params.shape().dims() >= batch_dims_, + errors::InvalidArgument("params must have at least ", batch_dims_, + " (batch_dims) dimensions but it has shape ", + params.shape().DebugString())); // Check that we have enough index space const int64 N = indices.NumElements(); From ceaa513cd6b10ac4e50d32cc2c96877563683817 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 2 Aug 2021 13:33:05 -0700 Subject: [PATCH 211/256] Fix heap OOB due to dimension mismatch in `ResourceScatterUpdate` PiperOrigin-RevId: 388292801 Change-Id: Id9bd7244d98d41b1517d4771850b32782c0cc949 --- tensorflow/core/kernels/resource_variable_ops.cc | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tensorflow/core/kernels/resource_variable_ops.cc b/tensorflow/core/kernels/resource_variable_ops.cc index 56cf9b1bf0370e..f75cc50a8df752 100644 --- a/tensorflow/core/kernels/resource_variable_ops.cc +++ b/tensorflow/core/kernels/resource_variable_ops.cc @@ -973,11 +973,12 @@ class ResourceScatterUpdateOp : public OpKernel { params->dim_size(0), ")")); } else { int64 num_updates = updates.NumElements(); - OP_REQUIRES(c, num_updates % N == 0, - errors::InvalidArgument( - "shape of indices (", indices.shape().DebugString(), - ") is not compatible with the shape of updates (", - updates.shape().DebugString(), ")")); + OP_REQUIRES( + c, TensorShapeUtils::StartsWith(updates.shape(), indices.shape()), + errors::InvalidArgument( + "The shape of indices (", indices.shape().DebugString(), + ") must be a prefix of the shape of updates (", + updates.shape().DebugString(), ")")); auto updates_flat = updates.shaped({N, num_updates / N}); functor::ScatterFunctor functor; From b58bfcc5139907a254b55c856f602ad57de50361 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 29 Jul 2021 18:23:29 -0700 Subject: [PATCH 212/256] Add missing validation to `RaggedTensorToSparse`. There needs to be a check that the splits allow for valid ragged tensors. PiperOrigin-RevId: 387712169 Change-Id: I2499175324b82b65d159a260c7f83b98ceb5cc7d --- .../core/kernels/ragged_tensor_to_sparse_kernel.cc | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/ragged_tensor_to_sparse_kernel.cc b/tensorflow/core/kernels/ragged_tensor_to_sparse_kernel.cc index 39b530f4a15ead..336a38fa58fc8b 100644 --- a/tensorflow/core/kernels/ragged_tensor_to_sparse_kernel.cc +++ b/tensorflow/core/kernels/ragged_tensor_to_sparse_kernel.cc @@ -21,6 +21,7 @@ limitations under the License. 
#include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" +#include "tensorflow/core/platform/errors.h" namespace tensorflow { @@ -38,7 +39,8 @@ class RaggedTensorToSparseOp : public OpKernel { OP_REQUIRES_OK( context, context->input_list("rt_nested_splits", &rt_nested_splits_in)); const int rt_nested_splits_len = rt_nested_splits_in.size(); - DCHECK_GT(rt_nested_splits_len, 0); // Enforced by REGISTER_OP. + OP_REQUIRES(context, rt_nested_splits_len > 0, + errors::InvalidArgument("rt_nested_splits must be non empty")); std::vector rt_nested_splits; rt_nested_splits.reserve(rt_nested_splits_len); for (int i = 0; i < rt_nested_splits_len; ++i) { @@ -161,6 +163,14 @@ class RaggedTensorToSparseOp : public OpKernel { if (rt_nested_splits[i](0) != 0) { return InvalidArgument("First value of ragged splits must be 0."); } + for (int j = 1; j < rt_nested_splits[i].size(); ++j) { + if (rt_nested_splits[i](j) < rt_nested_splits[i](j - 1)) { + return InvalidArgument( + "Ragged splits should be non decreasing, but we got ", + rt_nested_splits[i](j - 1), " followed by ", + rt_nested_splits[i](j)); + } + } if (i > 0) { SPLITS_TYPE last_split = rt_nested_splits[i - 1](rt_nested_splits[i - 1].size() - 1); From ec3ba7777545d2af9e1179350bfd8de3c846bbec Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 30 Jul 2021 19:00:00 -0700 Subject: [PATCH 213/256] Add missing validation to `matrix_diag_op.cc` PiperOrigin-RevId: 387923533 Change-Id: Idfffeb328d5f9c6748d992d28a56d6e9e45103a0 --- tensorflow/core/kernels/matrix_diag_op.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tensorflow/core/kernels/matrix_diag_op.cc b/tensorflow/core/kernels/matrix_diag_op.cc index 16a9e1817badd8..bb5b27c2cbe928 100644 --- a/tensorflow/core/kernels/matrix_diag_op.cc +++ b/tensorflow/core/kernels/matrix_diag_op.cc @@ -73,6 +73,9 @@ class MatrixDiagPartOp : public OpKernel { errors::InvalidArgument( "diag_index must be a scalar or vector, received shape: ", diag_index.shape().DebugString())); + OP_REQUIRES(context, diag_index.NumElements() > 0, + errors::InvalidArgument( + "Expected diag_index to have at least 1 element")); lower_diag_index = diag_index.flat()(0); upper_diag_index = lower_diag_index; if (TensorShapeUtils::IsVector(diag_index.shape())) { @@ -182,6 +185,9 @@ class MatrixDiagOp : public OpKernel { errors::InvalidArgument( "diag_index must be a scalar or vector, received shape: ", diag_index.shape().DebugString())); + OP_REQUIRES(context, diag_index.NumElements() > 0, + errors::InvalidArgument( + "Expected diag_index to have at least 1 element")); lower_diag_index = diag_index.flat()(0); upper_diag_index = lower_diag_index; if (TensorShapeUtils::IsVector(diag_index.shape())) { From fbe12b2f9a9280ed55e9c3c9760b85cf45449283 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 30 Jul 2021 18:58:29 -0700 Subject: [PATCH 214/256] Add one missing valdiation to `matrix_set_diag_op.cc` PiperOrigin-RevId: 387923408 Change-Id: If6a97b9098c13879400f56c22f91555cdf0ce5d7 --- tensorflow/core/kernels/matrix_set_diag_op.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/core/kernels/matrix_set_diag_op.cc b/tensorflow/core/kernels/matrix_set_diag_op.cc index bf98fd0d47d65b..e1c47d814050e1 100644 --- a/tensorflow/core/kernels/matrix_set_diag_op.cc +++ b/tensorflow/core/kernels/matrix_set_diag_op.cc @@ -70,6 +70,9 @@ class MatrixSetDiagOp : public OpKernel { errors::InvalidArgument( "diag_index must be a scalar 
or vector, received shape: ", diag_index.shape().DebugString())); + OP_REQUIRES( + context, diag_index.NumElements() > 0, + errors::InvalidArgument("diag_index must have at least one element")); lower_diag_index = diag_index.flat()(0); upper_diag_index = lower_diag_index; if (TensorShapeUtils::IsVector(diag_index.shape())) { From 72eb0b01ac3e40f7af90454a15fd59a8e8be4e92 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 30 Jul 2021 21:42:36 -0700 Subject: [PATCH 215/256] Fix nullptr deref and heap OOB access in binary cwise ops. PiperOrigin-RevId: 387936777 Change-Id: I608b8074cec36a982cca622b7144cb2c43e6e19f --- tensorflow/core/kernels/cwise_ops_common.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/core/kernels/cwise_ops_common.h b/tensorflow/core/kernels/cwise_ops_common.h index c0aee43d26800a..45efaf34892135 100644 --- a/tensorflow/core/kernels/cwise_ops_common.h +++ b/tensorflow/core/kernels/cwise_ops_common.h @@ -271,6 +271,11 @@ class SimpleBinaryOp : public OpKernel { void Compute(OpKernelContext* ctx) override { const Tensor& in0 = ctx->input(0); const Tensor& in1 = ctx->input(1); + OP_REQUIRES( + ctx, in0.NumElements() == in1.NumElements(), + errors::InvalidArgument("The two arguments to a cwise op must have " + "same number of elements, got ", + in0.NumElements(), " and ", in1.NumElements())); auto in0_flat = in0.flat(); auto in1_flat = in1.flat(); const Device& eigen_device = ctx->eigen_device(); From f135f0397755b5ee75e0816921283a1d257359a5 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 2 Aug 2021 14:21:27 -0700 Subject: [PATCH 216/256] Fix FPE in inpace update ops. PiperOrigin-RevId: 388303197 Change-Id: Ib48309b6213ffe53eba81004b00e889d653e4b83 --- tensorflow/core/kernels/inplace_ops.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/inplace_ops.cc b/tensorflow/core/kernels/inplace_ops.cc index b5191b9989f328..008e9732b55768 100644 --- a/tensorflow/core/kernels/inplace_ops.cc +++ b/tensorflow/core/kernels/inplace_ops.cc @@ -280,7 +280,7 @@ class InplaceOpBase : public OpKernel { Tensor y = x; // This creates an alias intentionally. // Skip processing if tensors are empty. 
- if (x.NumElements() > 0 || v.NumElements() > 0) { + if (x.NumElements() > 0 && v.NumElements() > 0) { OP_REQUIRES_OK(ctx, DoCompute(ctx, i, v, &y)); } ctx->set_output(0, y); From b65eb1055d2ed056e730711146d3cb3a9c90c9d7 Mon Sep 17 00:00:00 2001 From: Laura Pak Date: Wed, 28 Jul 2021 15:34:04 -0700 Subject: [PATCH 217/256] Ensure num_streams >= 0 in tf.raw_ops.BoostedTreesCreateQuantileStreamResource PiperOrigin-RevId: 387452765 Change-Id: I9990c760e177fabca6a3b9b4612ceeaeeba51495 --- tensorflow/core/kernels/boosted_trees/quantile_ops.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/core/kernels/boosted_trees/quantile_ops.cc b/tensorflow/core/kernels/boosted_trees/quantile_ops.cc index 0065bdd66aa708..916db1f436148b 100644 --- a/tensorflow/core/kernels/boosted_trees/quantile_ops.cc +++ b/tensorflow/core/kernels/boosted_trees/quantile_ops.cc @@ -116,6 +116,9 @@ class BoostedTreesCreateQuantileStreamResourceOp : public OpKernel { const Tensor* num_streams_t; OP_REQUIRES_OK(context, context->input(kNumStreamsName, &num_streams_t)); int64 num_streams = num_streams_t->scalar()(); + OP_REQUIRES(context, num_streams >= 0, + errors::InvalidArgument( + "Num_streams input cannot be a negative integer")); auto result = new QuantileStreamResource(epsilon, max_elements_, num_streams); From a4117ba799fb7c509465dacd27ea8467fbc92be6 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 28 Jul 2021 13:25:18 -0700 Subject: [PATCH 218/256] Add remaining missing validation to `BoostedTreesCalculateBestFeatureSplit` PiperOrigin-RevId: 387423006 Change-Id: I8eaf30efb223011519e60707bfa751b275d3a443 --- .../core/kernels/boosted_trees/stats_ops.cc | 20 ++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/boosted_trees/stats_ops.cc b/tensorflow/core/kernels/boosted_trees/stats_ops.cc index 529a6283cb708b..dc8c4110b47259 100644 --- a/tensorflow/core/kernels/boosted_trees/stats_ops.cc +++ b/tensorflow/core/kernels/boosted_trees/stats_ops.cc @@ -14,6 +14,7 @@ limitations under the License. ==============================================================================*/ #include +#include #include #include "third_party/eigen3/Eigen/Core" @@ -22,6 +23,7 @@ limitations under the License. 
#include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/kernels/boosted_trees/boosted_trees.pb.h" #include "tensorflow/core/kernels/boosted_trees/tree_helper.h" +#include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/logging.h" namespace tensorflow { @@ -254,12 +256,18 @@ class BoostedTreesCalculateBestFeatureSplitOp : public OpKernel { // node_id_range const Tensor* node_id_range_t; OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t)); + OP_REQUIRES( + context, node_id_range_t->NumElements() == 2, + errors::InvalidArgument("node_id_range argument must have shape [2]")); const auto node_id_range = node_id_range_t->vec(); const int32 node_id_first = node_id_range(0); // inclusive const int32 node_id_last = node_id_range(1); // exclusive const Tensor* stats_summary_t; OP_REQUIRES_OK(context, context->input("stats_summary", &stats_summary_t)); + OP_REQUIRES( + context, stats_summary_t->shape().dims() == 4, + errors::InvalidArgument("stats_summary argument must have rank 4")); TTypes::ConstTensor stats_summary = stats_summary_t->tensor(); const int32 feature_dims = stats_summary_t->dim_size(1); @@ -272,6 +280,8 @@ class BoostedTreesCalculateBestFeatureSplitOp : public OpKernel { const Tensor* l1_t; OP_REQUIRES_OK(context, context->input("l1", &l1_t)); + OP_REQUIRES(context, l1_t->NumElements() == 1, + errors::InvalidArgument("l1 argument must be a scalar")); const auto l1 = l1_t->scalar()(); DCHECK_GE(l1, 0); if (logits_dim_ > 1) { @@ -281,17 +291,25 @@ class BoostedTreesCalculateBestFeatureSplitOp : public OpKernel { const Tensor* l2_t; OP_REQUIRES_OK(context, context->input("l2", &l2_t)); + OP_REQUIRES(context, l2_t->NumElements() == 1, + errors::InvalidArgument("l2 argument must be a scalar")); const auto l2 = l2_t->scalar()(); DCHECK_GE(l2, 0); const Tensor* tree_complexity_t; OP_REQUIRES_OK(context, context->input("tree_complexity", &tree_complexity_t)); + OP_REQUIRES( + context, tree_complexity_t->NumElements() == 1, + errors::InvalidArgument("tree_complexity argument must be a scalar")); const auto tree_complexity = tree_complexity_t->scalar()(); const Tensor* min_node_weight_t; OP_REQUIRES_OK(context, context->input("min_node_weight", &min_node_weight_t)); + OP_REQUIRES( + context, min_node_weight_t->NumElements() == 1, + errors::InvalidArgument("min_node_weight argument must be a scalar")); const auto min_node_weight = min_node_weight_t->scalar()(); std::vector output_node_ids; @@ -300,7 +318,7 @@ class BoostedTreesCalculateBestFeatureSplitOp : public OpKernel { std::vector output_thresholds; std::vector output_left_node_contribs; std::vector output_right_node_contribs; - std::vector output_split_types; + std::vector output_split_types; // TODO(tanzheny) parallelize the computation. // Iterate each node and find the best gain per node. From 5dbd4ef661059a28f717822448dfef63b2d75f45 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 29 Jul 2021 16:29:20 -0700 Subject: [PATCH 219/256] Add more validation to `RequantizationRangePerChannel`. 
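Before the next patch, one more illustration of the checks just added to `BoostedTreesCalculateBestFeatureSplit`: `node_id_range` is read at positions 0 and 1, so its length has to be exactly 2 first. A standalone sketch with hypothetical names, assuming the input arrives as a plain vector:

#include <cstdint>
#include <iostream>
#include <optional>
#include <utility>
#include <vector>

// Reject anything that is not a length-2 vector before indexing it.
std::optional<std::pair<int32_t, int32_t>> GetNodeIdRangeSketch(
    const std::vector<int32_t>& node_id_range) {
  if (node_id_range.size() != 2) {
    std::cerr << "InvalidArgument: node_id_range must have shape [2]\n";
    return std::nullopt;
  }
  // Safe: both reads are in bounds.
  return std::make_pair(node_id_range[0], node_id_range[1]);
}

int main() {
  auto ok = GetNodeIdRangeSketch({3, 7});  // accepted
  auto bad = GetNodeIdRangeSketch({3});    // rejected, no out-of-bounds read
  std::cout << ok.has_value() << " " << bad.has_value() << "\n";  // 1 0
}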
PiperOrigin-RevId: 387693946 Change-Id: Ife8dcbdb021bec4787eef6a4361dd08f17c14bd6 --- .../mkl_requantization_range_per_channel_op.cc | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tensorflow/core/kernels/mkl_requantization_range_per_channel_op.cc b/tensorflow/core/kernels/mkl_requantization_range_per_channel_op.cc index 0cd4843c0d8659..bb1045c7f513d5 100644 --- a/tensorflow/core/kernels/mkl_requantization_range_per_channel_op.cc +++ b/tensorflow/core/kernels/mkl_requantization_range_per_channel_op.cc @@ -57,6 +57,20 @@ class MklRequantizationRangePerChannelOp : public OpKernel { ctx, input_max.dim_size(0) == depth, errors::InvalidArgument("input_max has incorrect size, expected ", depth, " was ", input_max.dim_size(0))); + OP_REQUIRES( + ctx, input_min.NumElements() == depth, + errors::InvalidArgument("input_min must have the same number of " + "elements as input_max, got ", + input_min.NumElements(), " and ", depth)); + OP_REQUIRES(ctx, input.NumElements() > 0, + errors::InvalidArgument("input must not be empty")); + OP_REQUIRES(ctx, input.dims() == 4, + errors::InvalidArgument("input must be in NHWC format")); + OP_REQUIRES( + ctx, input.dim_size(3) == depth, + errors::InvalidArgument( + "input must have same number of channels as length of input_min: ", + input.dim_size(3), " vs ", depth)); const float* input_min_data = input_min.flat().data(); const float* input_max_data = input_max.flat().data(); From 55ed6adce39e5423f377a6744ae470684acd9eb0 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 30 Jul 2021 16:06:23 -0700 Subject: [PATCH 220/256] Reorganize and add more validation to MKL requantization PiperOrigin-RevId: 387901341 Change-Id: I2515b9034c64e113db0bcec8337d30643ab0a0f1 --- .../kernels/mkl_requantize_per_channel_op.cc | 40 ++++++++++++------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/tensorflow/core/kernels/mkl_requantize_per_channel_op.cc b/tensorflow/core/kernels/mkl_requantize_per_channel_op.cc index 0a0464f648b95b..4cac0e0ecdc76e 100644 --- a/tensorflow/core/kernels/mkl_requantize_per_channel_op.cc +++ b/tensorflow/core/kernels/mkl_requantize_per_channel_op.cc @@ -49,35 +49,45 @@ class MklRequantizePerChannelOp : public OpKernel { void Compute(OpKernelContext* ctx) override { try { const Tensor& input = ctx->input(kInputTensorIndex); + OP_REQUIRES( + ctx, input.dims() == 4, + errors::InvalidArgument("Current RequantizePerChannel operator" + "supports 4D tensors only.")); + const Tensor& input_min_vec = ctx->input(kInputMinVecIndex); + size_t depth = input_min_vec.NumElements(); float* input_min_vec_data = (float*)const_cast( static_cast(input_min_vec.flat().data())); + const Tensor& input_max_vec = ctx->input(kInputMaxVecIndex); + OP_REQUIRES( + ctx, input_max_vec.NumElements() == depth, + errors::InvalidArgument("input_max has incorrect size, expected ", + depth, " was ", input_max_vec.NumElements())); float* input_max_vec_data = (float*)const_cast( static_cast(input_max_vec.flat().data())); const Tensor& input_requested_min = ctx->input(this->kRequestMinIndex); + OP_REQUIRES( + ctx, input_requested_min.NumElements() == 1, + errors::InvalidArgument("requested_output_min must be a scalar")); const float input_requested_min_float = input_requested_min.flat()(0); + const Tensor& input_requested_max = ctx->input(this->kRequestMaxIndex); + OP_REQUIRES( + ctx, input_requested_min.NumElements() == 1, + errors::InvalidArgument("requested_output_max must be a scalar")); const float input_requested_max_float = 
input_requested_max.flat()(0); - size_t depth = input_min_vec.NumElements(); - OP_REQUIRES( - ctx, input.dims() == 4, - errors::InvalidArgument("Current RequantizePerChannel operator" - "supports 4D tensors only.")); - OP_REQUIRES( - ctx, input_min_vec.dim_size(0) == depth, - errors::InvalidArgument("input_min has incorrect size, expected ", - depth, " was ", input_min_vec.dim_size(0))); - OP_REQUIRES( - ctx, input_max_vec.dim_size(0) == depth, - errors::InvalidArgument("input_max has incorrect size, expected ", - depth, " was ", input_max_vec.dim_size(0))); - - if (out_type_ == DT_QINT8) DCHECK(input_requested_min_float < 0.0f); + if (out_type_ == DT_QINT8) { + OP_REQUIRES(ctx, input_requested_min_float < 0.0f, + errors::InvalidArgument( + "If out_type is QINT8, requested_output_max must be " + "non negative, got ", + input_requested_min_float)); + } const float factor = (out_type_ == DT_QINT8) ? 127.0f : 255.0f; const float requested_min_max = From c3ef7bc42723d3349ba599c694457918bea7c848 Mon Sep 17 00:00:00 2001 From: Laura Pak Date: Thu, 29 Jul 2021 14:05:34 -0700 Subject: [PATCH 221/256] Ensure non-empty rt_nested_splits in tf.raw_ops.RaggedTensorToVariant PiperOrigin-RevId: 387664237 Change-Id: Ia1700c34b5610873d63561abc86e23b46ead93b3 --- tensorflow/core/kernels/ragged_tensor_to_variant_op.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc b/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc index 49c4a2411b8c3c..a3ecc80ce41f9a 100644 --- a/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc +++ b/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc @@ -173,6 +173,12 @@ class RaggedTensorToVariantOp : public OpKernel { return; } + // Checked here instead of at input in case batched_input_ is false + OP_REQUIRES(context, ragged_nested_splits_len > 0, + errors::InvalidArgument( + "rt_nested_splits must be a list of one or more, but " + "received rt_nested_splits of length 0.")); + // Unbatch the Ragged Tensor and encode the components. 
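A compact standalone sketch of the `rt_nested_splits` guard just added (hypothetical names, plain vectors instead of tensors): the batched path immediately reads the first splits tensor, so an empty list must be rejected before that read.

#include <cstdint>
#include <iostream>
#include <vector>

// Refuse an empty splits list instead of dereferencing its first element.
bool EncodeRaggedBatchSketch(
    const std::vector<std::vector<int64_t>>& nested_splits) {
  if (nested_splits.empty()) {
    std::cerr << "InvalidArgument: rt_nested_splits must contain at least "
                 "one splits tensor\n";
    return false;
  }
  const std::vector<int64_t>& top_splits = nested_splits.front();  // now safe
  std::cout << "outermost dimension has " << top_splits.size() - 1
            << " rows\n";
  return true;
}

int main() {
  EncodeRaggedBatchSketch({{0, 2, 5}});  // ok: two rows
  EncodeRaggedBatchSketch({});           // rejected up front
}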
std::vector ragged_components; auto batched_splits_top_vec = From 253c59515771c8314ca23b25c855ecf069bcc0a3 Mon Sep 17 00:00:00 2001 From: Laura Pak Date: Mon, 12 Jul 2021 11:55:27 -0700 Subject: [PATCH 222/256] Disallow dims input of 0 in tf.raw_ops.UnravelIndex PiperOrigin-RevId: 384284198 Change-Id: Ia1804ef1aec57b4d857ea507e6891bcccde18e9b --- tensorflow/core/kernels/unravel_index_op.cc | 8 ++++++++ tensorflow/python/kernel_tests/array_ops_test.py | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/unravel_index_op.cc b/tensorflow/core/kernels/unravel_index_op.cc index b45ff5e5b85f04..5b895799bbf4c5 100644 --- a/tensorflow/core/kernels/unravel_index_op.cc +++ b/tensorflow/core/kernels/unravel_index_op.cc @@ -53,6 +53,14 @@ class UnravelIndexOp : public OpKernel { dims_tensor.shape().DebugString(), "\"")); auto dims = dims_tensor.vec(); + // Make sure dims does not contain a zero + for (int i = 0; i < dims.size(); i++) { + OP_REQUIRES( + ctx, dims(i) != 0, + errors::InvalidArgument("Input dims cannot contain a dim of zero, " + "but dims contains zero at index ", + i)); + } // Chek to make sure indices is not out of boundary Eigen::Tensor dims_prod_eigen = dims.prod(); diff --git a/tensorflow/python/kernel_tests/array_ops_test.py b/tensorflow/python/kernel_tests/array_ops_test.py index dbff3a1b2f728f..31c5164d922eb4 100644 --- a/tensorflow/python/kernel_tests/array_ops_test.py +++ b/tensorflow/python/kernel_tests/array_ops_test.py @@ -1441,7 +1441,7 @@ def testUnravelIndexZeroDim(self): with self.cached_session(): for dtype in [dtypes.int32, dtypes.int64]: with self.assertRaisesRegexp(errors.InvalidArgumentError, - "index is out of bound as with dims"): + "dims cannot contain a dim of zero"): indices = constant_op.constant([2, 5, 7], dtype=dtype) dims = constant_op.constant([3, 0], dtype=dtype) self.evaluate(array_ops.unravel_index(indices=indices, dims=dims)) From 871c9e715e5d8de665236728e042003a8a6c3bd3 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 30 Jul 2021 22:02:22 -0700 Subject: [PATCH 223/256] Prevent crash/heap OOB due to integer conversion to unsigned in NMS kernels PiperOrigin-RevId: 387938262 Change-Id: Id361a715307e7179977cf5c64391c199a966f2ad --- tensorflow/core/kernels/non_max_suppression_op.cc | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tensorflow/core/kernels/non_max_suppression_op.cc b/tensorflow/core/kernels/non_max_suppression_op.cc index 20ae3a2e0d07f6..1919da16c38d3b 100644 --- a/tensorflow/core/kernels/non_max_suppression_op.cc +++ b/tensorflow/core/kernels/non_max_suppression_op.cc @@ -161,6 +161,8 @@ void DoNonMaxSuppressionOp(OpKernelContext* context, const Tensor& scores, bool pad_to_max_output_size = false, int* ptr_num_valid_outputs = nullptr) { const int output_size = max_output_size.scalar()(); + OP_REQUIRES(context, output_size >= 0, + errors::InvalidArgument("output size must be non-negative")); std::vector scores_data(num_boxes); std::copy_n(scores.flat().data(), num_boxes, scores_data.begin()); @@ -759,6 +761,9 @@ class NonMaxSuppressionV4Op : public OpKernel { context, scores, num_boxes, max_output_size, iou_threshold_val, score_threshold_val, dummy_soft_nms_sigma, similarity_fn, return_scores_tensor_, pad_to_max_output_size_, &num_valid_outputs); + if (!context->status().ok()) { + return; + } // Allocate scalar output tensor for number of indices computed. 
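To make the "integer conversion to unsigned" hazard behind these NMS checks concrete, a small self-contained example with hypothetical values: once a negative size reaches an unsigned length parameter it turns into a huge number, so the sign check has to happen while the value is still signed.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  int max_output_size = -1;  // attacker-controlled scalar
  // Implicit conversion to an unsigned size wraps around to a huge value.
  std::cout << "as size_t this would be "
            << static_cast<size_t>(max_output_size) << " elements\n";

  // The added validation rejects the value before any such conversion.
  if (max_output_size < 0) {
    std::cout << "InvalidArgument: output size must be non-negative\n";
    return 1;
  }
  std::vector<int> selected_indices(static_cast<size_t>(max_output_size));
  return 0;
}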
Tensor* num_outputs_t = nullptr; @@ -836,6 +841,9 @@ class NonMaxSuppressionV5Op : public OpKernel { context, scores, num_boxes, max_output_size, iou_threshold_val, score_threshold_val, soft_nms_sigma_val, similarity_fn, return_scores_tensor_, pad_to_max_output_size_, &num_valid_outputs); + if (!context->status().ok()) { + return; + } // Allocate scalar output tensor for number of indices computed. Tensor* num_outputs_t = nullptr; From 368f287936c1ffe815de945fcde81b7077d11e9e Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 29 Jul 2021 22:25:05 -0700 Subject: [PATCH 224/256] Prevent CHECK-fail/heap OOB in UpperBound and LowerBound PiperOrigin-RevId: 387738073 Change-Id: Iee74de95ddad18440d052a75a5a1cb67544f490a --- tensorflow/core/kernels/searchsorted_op.cc | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tensorflow/core/kernels/searchsorted_op.cc b/tensorflow/core/kernels/searchsorted_op.cc index 01e221dc471c4d..5f075a6a540e9f 100644 --- a/tensorflow/core/kernels/searchsorted_op.cc +++ b/tensorflow/core/kernels/searchsorted_op.cc @@ -86,6 +86,10 @@ class UpperBoundOp : public OpKernel { const Tensor& sorted_inputs_t = ctx->input(0); const Tensor& values_t = ctx->input(1); + // inputs must be at least a matrix + OP_REQUIRES( + ctx, sorted_inputs_t.shape().dims() >= 2, + errors::InvalidArgument("sorted input argument must be a matrix")); // must have same batch dim_size for both OP_REQUIRES(ctx, sorted_inputs_t.dim_size(0) == values_t.dim_size(0), Status(error::INVALID_ARGUMENT, @@ -127,6 +131,10 @@ class LowerBoundOp : public OpKernel { const Tensor& sorted_inputs_t = ctx->input(0); const Tensor& values_t = ctx->input(1); + // inputs must be at least a matrix + OP_REQUIRES( + ctx, sorted_inputs_t.shape().dims() >= 2, + errors::InvalidArgument("sorted input argument must be a matrix")); // must have same batch dim_size for both OP_REQUIRES(ctx, sorted_inputs_t.dim_size(0) == values_t.dim_size(0), Status(error::INVALID_ARGUMENT, From 8bac83f84bcb6413d84c5e40757dbf6375ab4a97 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 29 Jul 2021 22:24:52 -0700 Subject: [PATCH 225/256] Prevent overflow due to integer conversion to unsigned. PiperOrigin-RevId: 387738045 Change-Id: Id7e95bc07e02df1c66b72bd09f389608c87bdebe --- tensorflow/core/kernels/non_max_suppression_op.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/core/kernels/non_max_suppression_op.cc b/tensorflow/core/kernels/non_max_suppression_op.cc index 20ae3a2e0d07f6..913209dfd326ec 100644 --- a/tensorflow/core/kernels/non_max_suppression_op.cc +++ b/tensorflow/core/kernels/non_max_suppression_op.cc @@ -921,6 +921,8 @@ class CombinedNonMaxSuppressionOp : public OpKernel { errors::InvalidArgument("max_size_per_class must be 0-D, got shape ", max_output_size.shape().DebugString())); const int max_size_per_class = max_output_size.scalar()(); + OP_REQUIRES(context, max_size_per_class > 0, + errors::InvalidArgument("max_size_per_class must be positive")); // max_total_size: scalar const Tensor& max_total_size = context->input(3); OP_REQUIRES( From c32bc472471cdd8f4d85c91cf840db7e13359728 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 29 Jul 2021 22:24:39 -0700 Subject: [PATCH 226/256] Prevent nullptr deref in validation of indexes in map ops. 
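A standalone sketch of the index validation this patch performs (the diff follows below); the types and messages here are simplified stand-ins. The strict-ordering loop assumes at least one index, so the empty case is rejected explicitly rather than being fed into the loop-bound arithmetic.

#include <cstdint>
#include <iostream>
#include <vector>

// Indices must be non-empty and strictly increasing.
bool CheckIndexOrderingSketch(const std::vector<int64_t>& indices) {
  if (indices.empty()) {
    std::cerr << "InvalidArgument: indices are empty\n";
    return false;
  }
  for (size_t i = 0; i + 1 < indices.size(); ++i) {
    if (indices[i + 1] <= indices[i]) {
      std::cerr << "InvalidArgument: indices are not strictly ordered\n";
      return false;
    }
  }
  return true;
}

int main() {
  std::cout << CheckIndexOrderingSketch({0, 1, 3}) << "\n";  // 1
  std::cout << CheckIndexOrderingSketch({}) << "\n";         // 0
}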
PiperOrigin-RevId: 387738023 Change-Id: I83d18d36a7b82ffd2a40b5124a4e5b4c72238f27 --- tensorflow/core/kernels/map_stage_op.cc | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/tensorflow/core/kernels/map_stage_op.cc b/tensorflow/core/kernels/map_stage_op.cc index 6c01e42ff8c9fd..2d396437d7186b 100644 --- a/tensorflow/core/kernels/map_stage_op.cc +++ b/tensorflow/core/kernels/map_stage_op.cc @@ -210,9 +210,9 @@ class StagingMap : public ResourceBase { const OptionalTuple& tuple) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { if (tuple[index].has_value()) { - return Status(errors::InvalidArgument( + return errors::InvalidArgument( "The tensor for index '", index, "' for key '", key.scalar()(), - "' was already initialized '", dtypes_.size(), "'.")); + "' was already initialized '", dtypes_.size(), "'."); } return Status::OK(); @@ -220,6 +220,10 @@ class StagingMap : public ResourceBase { // Check that the indices are strictly ordered Status check_index_ordering(const Tensor& indices) { + if (indices.NumElements() == 0) { + return errors::InvalidArgument("Indices are empty"); + } + auto findices = indices.flat(); for (std::size_t i = 0; i < findices.dimension(0) - 1; ++i) { @@ -227,8 +231,7 @@ class StagingMap : public ResourceBase { continue; } - return Status( - errors::InvalidArgument("Indices are not strictly ordered")); + return errors::InvalidArgument("Indices are not strictly ordered"); } return Status::OK(); @@ -238,10 +241,10 @@ class StagingMap : public ResourceBase { Status check_memory_limit(std::size_t bytes) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { if (has_memory_limit() && bytes > memory_limit_) { - return Status(errors::ResourceExhausted( + return errors::ResourceExhausted( "Attempted to insert tensors with combined size of '", bytes, "' bytes into Staging Area with a memory limit of '", memory_limit_, - "'.")); + "'."); } return Status::OK(); From 2099cab57e8d065d26f66fe359b5c9e9d218a816 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 29 Jul 2021 22:24:27 -0700 Subject: [PATCH 227/256] Add remaining validation to `sdca_internal.cc` PiperOrigin-RevId: 387738010 Change-Id: I28eedcfd87a53aaf34deb075acea1f8c95470808 --- tensorflow/core/kernels/sdca_internal.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/core/kernels/sdca_internal.cc b/tensorflow/core/kernels/sdca_internal.cc index 11a3be8bf46a76..ed7149bf8365d8 100644 --- a/tensorflow/core/kernels/sdca_internal.cc +++ b/tensorflow/core/kernels/sdca_internal.cc @@ -380,6 +380,11 @@ Status Examples::Initialize(OpKernelContext* const context, const Tensor* example_labels_t; TF_RETURN_IF_ERROR(context->input("example_labels", &example_labels_t)); auto example_labels = example_labels_t->flat(); + if (example_labels.size() != num_examples) { + return errors::InvalidArgument("Expected ", num_examples, + " example labels but got ", + example_labels.size()); + } OpInputList dense_features_inputs; TF_RETURN_IF_ERROR( From cc96b0010c576e76c08248c2fe9f498f8a478f0a Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 29 Jul 2021 22:23:46 -0700 Subject: [PATCH 228/256] Prevent a CHECK-fail due to empty tensor input in `map_stage_op.cc` PiperOrigin-RevId: 387737906 Change-Id: Idc52df0c71c7ed6e2dd633b651a581932f277c8a --- tensorflow/core/kernels/map_stage_op.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/core/kernels/map_stage_op.cc b/tensorflow/core/kernels/map_stage_op.cc index 6c01e42ff8c9fd..85e9be3a2d0491 100644 --- a/tensorflow/core/kernels/map_stage_op.cc +++ 
b/tensorflow/core/kernels/map_stage_op.cc @@ -527,6 +527,8 @@ class MapStageOp : public OpKernel { OP_REQUIRES_OK(ctx, ctx->input("key", &key_tensor)); OP_REQUIRES_OK(ctx, ctx->input("indices", &indices_tensor)); OP_REQUIRES_OK(ctx, ctx->input_list("values", &values_tensor)); + OP_REQUIRES(ctx, key_tensor->NumElements() > 0, + errors::InvalidArgument("key must not be empty")); // Create copy for insertion into Staging Area Tensor key(*key_tensor); From 0290e0ffe98601df15b9e0a33105ff8d73ebe0f0 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 30 Jul 2021 20:50:00 -0700 Subject: [PATCH 229/256] Add missing validation to `maxpooling_op.cc` PiperOrigin-RevId: 387932441 Change-Id: I43a0b24e6a12cc965611144ba035accd384594b9 --- tensorflow/core/kernels/maxpooling_op.cc | 5 +++++ tensorflow/core/kernels/pooling_ops_common.cc | 2 ++ 2 files changed, 7 insertions(+) diff --git a/tensorflow/core/kernels/maxpooling_op.cc b/tensorflow/core/kernels/maxpooling_op.cc index 7accd1a820b0e3..adfde788097deb 100644 --- a/tensorflow/core/kernels/maxpooling_op.cc +++ b/tensorflow/core/kernels/maxpooling_op.cc @@ -68,6 +68,7 @@ static void SpatialMaxPoolWithArgMaxHelper( "SpatialMaxPoolWithArgMaxHelper requires include_batch_in_index " "to be True when when input_backprop != nullptr")); } + if (tensor_in.NumElements() == 0 || output->NumElements() == 0) return; typedef Eigen::Map> ConstEigenMatrixMap; @@ -924,6 +925,10 @@ class MaxPoolingWithArgmaxOp : public OpKernel { void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); + OP_REQUIRES(context, tensor_in.dims() == 4, + errors::InvalidArgument("tensor_in must be 4-dimensional (2)")); + OP_REQUIRES(context, tensor_in.NumElements() > 0, + errors::InvalidArgument("tensor_in must not be empty (2)")); PoolParameters params{context, ksize_, stride_, padding_, FORMAT_NHWC, tensor_in.shape()}; diff --git a/tensorflow/core/kernels/pooling_ops_common.cc b/tensorflow/core/kernels/pooling_ops_common.cc index 4bd710546fec26..59fbe883642a1a 100644 --- a/tensorflow/core/kernels/pooling_ops_common.cc +++ b/tensorflow/core/kernels/pooling_ops_common.cc @@ -96,6 +96,8 @@ PoolParameters::PoolParameters(OpKernelContext* context, pad_depth = 0; out_depth = depth; } else { + OP_REQUIRES(context, depth_window > 0, + errors::InvalidArgument("depth_window must not be 0")); // Our current version of depthwise max pooling does not support // any padding, and expects the depth_window to equal the // depth_stride (no overlapping). From 5d1f5615f1d613f7257f4293decb2a674affb32f Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 29 Jul 2021 22:24:08 -0700 Subject: [PATCH 230/256] Prevent a segfault in shape inference due to bad inputs. PiperOrigin-RevId: 387737970 Change-Id: Ibd1cf3dbdce1dd2ab47fd633d5c5a57f7d8fb6e9 --- tensorflow/core/ops/sparse_ops.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/core/ops/sparse_ops.cc b/tensorflow/core/ops/sparse_ops.cc index 906cef1f5ecafe..b1e40e66af8929 100644 --- a/tensorflow/core/ops/sparse_ops.cc +++ b/tensorflow/core/ops/sparse_ops.cc @@ -16,6 +16,7 @@ limitations under the License. 
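The `depth_window` check added to the pooling code above exists because depthwise pooling divides the input depth by that window, so a zero value is an integer division by zero. A compact standalone sketch of the guard (hypothetical function, simplified shapes):

#include <iostream>

// Validate the window before it is used as a divisor.
bool DepthwiseOutputDepthSketch(int depth, int depth_window, int* out_depth) {
  if (depth_window <= 0) {
    std::cerr << "InvalidArgument: depth_window must not be 0\n";
    return false;
  }
  if (depth % depth_window != 0) {
    std::cerr << "InvalidArgument: depth must be a multiple of depth_window\n";
    return false;
  }
  *out_depth = depth / depth_window;  // safe: depth_window > 0
  return true;
}

int main() {
  int out = 0;
  DepthwiseOutputDepthSketch(16, 4, &out);  // ok, out == 4
  DepthwiseOutputDepthSketch(16, 0, &out);  // rejected instead of SIGFPE
  std::cout << out << "\n";
}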
#include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference.h" +#include "tensorflow/core/platform/errors.h" namespace tensorflow { @@ -619,6 +620,8 @@ REGISTER_OP("SparseFillEmptyRows") DimensionHandle unused_dim; TF_RETURN_IF_ERROR(c->Merge(c->Dim(input_indices, 1), c->Dim(input_shape, 0), &unused_dim)); + if (c->Value(c->NumElements(input_shape)) == 0) + return errors::InvalidArgument("dense_shape must not be empty"); ShapeHandle output_indices = c->Matrix(InferenceContext::kUnknownDim, c->NumElements(input_shape)); ShapeHandle output_values = c->Vector(InferenceContext::kUnknownDim); From f639a0e28816580460c5254b6d25004fcd3ebd86 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 29 Jul 2021 18:23:45 -0700 Subject: [PATCH 231/256] Prevent division by 0 in common shape functions. PiperOrigin-RevId: 387712197 Change-Id: Id25c7460e35b68aeeeac23b9a88e455b443ee149 --- tensorflow/core/framework/common_shape_fns.cc | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tensorflow/core/framework/common_shape_fns.cc b/tensorflow/core/framework/common_shape_fns.cc index b9efddf4cdbc99..a81f7400389843 100644 --- a/tensorflow/core/framework/common_shape_fns.cc +++ b/tensorflow/core/framework/common_shape_fns.cc @@ -659,6 +659,8 @@ Status Conv2DShapeImpl(shape_inference::InferenceContext* c, if (c->ValueKnown(input_depth_dim) && c->ValueKnown(filter_input_depth_dim)) { int64 input_depth_value = c->Value(input_depth_dim), filter_input_depth_value = c->Value(filter_input_depth_dim); + if (filter_input_depth_value == 0) + return errors::InvalidArgument("Depth of filter must not be 0"); if (input_depth_value % filter_input_depth_value != 0) return errors::InvalidArgument( "Depth of input (", input_depth_value, @@ -668,6 +670,8 @@ Status Conv2DShapeImpl(shape_inference::InferenceContext* c, int64 num_groups = input_depth_value / filter_input_depth_value; if (c->ValueKnown(output_depth_dim)) { int64 output_depth_value = c->Value(output_depth_dim); + if (num_groups == 0) + return errors::InvalidArgument("Number of groups must not be 0"); if (output_depth_value % num_groups != 0) return errors::InvalidArgument( "Depth of output (", output_depth_value, @@ -798,6 +802,8 @@ Status Conv3DShape(shape_inference::InferenceContext* c) { if (c->ValueKnown(input_depth_dim) && c->ValueKnown(filter_input_depth_dim)) { int64 input_depth_value = c->Value(input_depth_dim), filter_input_depth_value = c->Value(filter_input_depth_dim); + if (filter_input_depth_value == 0) + return errors::InvalidArgument("Depth of filter must not be 0"); if (input_depth_value % filter_input_depth_value != 0) return errors::InvalidArgument( "Depth of input (", input_depth_value, @@ -807,6 +813,8 @@ Status Conv3DShape(shape_inference::InferenceContext* c) { int64 num_groups = input_depth_value / filter_input_depth_value; if (c->ValueKnown(output_depth_dim)) { int64 output_depth_value = c->Value(output_depth_dim); + if (num_groups == 0) + return errors::InvalidArgument("Number of groups must not be 0"); if (output_depth_value % num_groups != 0) return errors::InvalidArgument( "Depth of output (", output_depth_value, @@ -2364,6 +2372,9 @@ Status SparseReduceShapeFn(InferenceContext* c) { int64 ndims = shape_vec.size(); absl::flat_hash_set axes; + if (ndims == 0) + return errors::InvalidArgument( + "Number of dims in shape tensor must not be 0"); for (int i = 0; i < axes_vec.size(); i++) { axes.insert((axes_vec(i) + ndims) % ndims); } From 
ad9bb3e9d78d2f659963d0ccaaef52f91a210706 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 29 Jul 2021 18:24:18 -0700 Subject: [PATCH 232/256] Fix a shape inference issue leading to nullptr deref. PiperOrigin-RevId: 387712259 Change-Id: I7e670772b259c068a501a187cd89f18773bb95a1 --- tensorflow/core/ops/array_ops.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/core/ops/array_ops.cc b/tensorflow/core/ops/array_ops.cc index 11bfb9a3346d04..ad11e0b7d64639 100644 --- a/tensorflow/core/ops/array_ops.cc +++ b/tensorflow/core/ops/array_ops.cc @@ -2887,6 +2887,10 @@ REGISTER_OP("Dequantize") if (!s.ok() && s.code() != error::NOT_FOUND) { return s; } + if (axis < -1) { + return errors::InvalidArgument("axis should be at least -1, got ", + axis); + } const int minmax_rank = (axis == -1) ? 0 : 1; TF_RETURN_IF_ERROR(shape_inference::UnchangedShape(c)); ShapeHandle minmax; From 1df5a69e9f1a18a937e7907223066e606bf466b9 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 3 Aug 2021 11:20:20 -0700 Subject: [PATCH 233/256] Use the safer `safe_load` function instead of `unsafe_load` when possible There is no need to open ourselves up to arbitrary code execution, especially since this is not in a performance critical loop, so we can take the slowdown due to safety. PiperOrigin-RevId: 388501098 Change-Id: I3434318a5e07a798490533b554f46752397837e5 --- tensorflow/python/keras/engine/functional.py | 2 +- .../python/keras/engine/functional_test.py | 13 ------- tensorflow/python/keras/engine/training.py | 18 ++++----- .../python/keras/saving/model_config.py | 38 ++++--------------- 4 files changed, 17 insertions(+), 54 deletions(-) diff --git a/tensorflow/python/keras/engine/functional.py b/tensorflow/python/keras/engine/functional.py index fd80e7f8bb4ef5..b0cf778a895bed 100644 --- a/tensorflow/python/keras/engine/functional.py +++ b/tensorflow/python/keras/engine/functional.py @@ -58,7 +58,7 @@ class Functional(training_lib.Model): than with subclassed `Model`s, specifically: - Model cloning (`keras.models.clone`) - - Serialization (`model.get_config()/from_config`, `model.to_json()/to_yaml()` + - Serialization (`model.get_config()/from_config`, `model.to_json()` - Whole-model saving (`model.save()`) A `Functional` model can be instantiated by passing two arguments to diff --git a/tensorflow/python/keras/engine/functional_test.py b/tensorflow/python/keras/engine/functional_test.py index b60373e8c9bd71..c91026a6ee3c8e 100644 --- a/tensorflow/python/keras/engine/functional_test.py +++ b/tensorflow/python/keras/engine/functional_test.py @@ -52,11 +52,6 @@ from tensorflow.python.platform import test from tensorflow.python.training.tracking.util import Checkpoint -try: - import yaml # pylint:disable=g-import-not-at-top -except ImportError: - yaml = None - class NetworkConstructionTest(keras_parameterized.TestCase): @@ -620,10 +615,6 @@ def test_multi_input_multi_output_recursion(self): json_str = model.to_json() models.model_from_json(json_str) - if yaml is not None: - yaml_str = model.to_yaml() - models.model_from_yaml(yaml_str) - @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_invalid_graphs(self): a = layers.Input(shape=(32,), name='input_a') @@ -1261,10 +1252,6 @@ def test_constant_initializer_with_numpy(self): json_str = model.to_json() models.model_from_json(json_str) - if yaml is not None: - yaml_str = model.to_yaml() - models.model_from_yaml(yaml_str) - def test_subclassed_error_if_init_not_called(self): class MyNetwork(training_lib.Model): diff 
--git a/tensorflow/python/keras/engine/training.py b/tensorflow/python/keras/engine/training.py index a0ebec4f95e25c..e000e62f5da42f 100644 --- a/tensorflow/python/keras/engine/training.py +++ b/tensorflow/python/keras/engine/training.py @@ -88,11 +88,6 @@ import h5py except ImportError: h5py = None - -try: - import yaml -except ImportError: - yaml = None # pylint: enable=g-import-not-at-top @@ -2258,6 +2253,9 @@ def to_json(self, **kwargs): def to_yaml(self, **kwargs): """Returns a yaml string containing the network configuration. + Note: Since TF 2.6, this method is no longer supported and will raise a + RuntimeError. + To load a network from a yaml save file, use `keras.models.model_from_yaml(yaml_string, custom_objects={})`. @@ -2273,12 +2271,12 @@ def to_yaml(self, **kwargs): A YAML string. Raises: - ImportError: if yaml module is not found. + RuntimeError: announces that the method poses a security risk """ - if yaml is None: - raise ImportError( - 'Requires yaml module installed (`pip install pyyaml`).') - return yaml.dump(self._updated_config(), **kwargs) + raise RuntimeError( + 'Method `model.to_yaml()` has been removed due to security risk of ' + 'arbitrary code execution. Please use `model.to_json()` instead.' + ) def reset_states(self): for layer in self.layers: diff --git a/tensorflow/python/keras/saving/model_config.py b/tensorflow/python/keras/saving/model_config.py index 63f82b404a4c1c..344e543f9930a6 100644 --- a/tensorflow/python/keras/saving/model_config.py +++ b/tensorflow/python/keras/saving/model_config.py @@ -23,13 +23,6 @@ from tensorflow.python.util.tf_export import keras_export -# pylint: disable=g-import-not-at-top -try: - import yaml -except ImportError: - yaml = None -# pylint: enable=g-import-not-at-top - @keras_export('keras.models.model_from_config') def model_from_config(config, custom_objects=None): @@ -59,17 +52,8 @@ def model_from_config(config, custom_objects=None): def model_from_yaml(yaml_string, custom_objects=None): """Parses a yaml model configuration file and returns a model instance. - Usage: - - >>> model = tf.keras.Sequential([ - ... tf.keras.layers.Dense(5, input_shape=(3,)), - ... tf.keras.layers.Softmax()]) - >>> try: - ... import yaml - ... config = model.to_yaml() - ... loaded_model = tf.keras.models.model_from_yaml(config) - ... except ImportError: - ... pass + Note: Since TF 2.6, this method is no longer supported and will raise a + RuntimeError. Arguments: yaml_string: YAML string or open file encoding a model configuration. @@ -81,19 +65,13 @@ def model_from_yaml(yaml_string, custom_objects=None): A Keras model instance (uncompiled). Raises: - ImportError: if yaml module is not found. + RuntimeError: announces that the method poses a security risk """ - if yaml is None: - raise ImportError('Requires yaml module installed (`pip install pyyaml`).') - # The method unsafe_load only exists in PyYAML 5.x+, so which branch of the - # try block is covered by tests depends on the installed version of PyYAML. - try: - # PyYAML 5.x+ - config = yaml.unsafe_load(yaml_string) - except AttributeError: - config = yaml.load(yaml_string) - from tensorflow.python.keras.layers import deserialize # pylint: disable=g-import-not-at-top - return deserialize(config, custom_objects=custom_objects) + raise RuntimeError( + 'Method `model_from_yaml()` has been removed due to security risk of ' + 'arbitrary code execution. Please use `Model.to_json()` and ' + '`model_from_json()` instead.' 
+ ) @keras_export('keras.models.model_from_json') From 88e31ad134083c01b00f329b867d2973c0871625 Mon Sep 17 00:00:00 2001 From: Edward Loper Date: Thu, 29 Jul 2021 09:50:01 -0700 Subject: [PATCH 234/256] Fix bug that could cause map_fn to produce incorrect results (rather than an error) when mapping over a ragged tensor with an inappropriate fn_output_signature. (Note: there are cases where the default value for fn_output_signature is not appropriate, so the user needs to explicitly specify the correct output signature.) PiperOrigin-RevId: 387606546 Change-Id: Ib4ea27b9634e6ab413f211cfe809a69a90f0e2cd --- .../kernels/ragged_tensor_from_variant_op.cc | 16 +++++++++++++ .../ops/ragged/ragged_map_fn_op_test.py | 23 +++++++++++++++++++ 2 files changed, 39 insertions(+) diff --git a/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc b/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc index f83bcb38c6c336..17dbc1d117f24c 100644 --- a/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc +++ b/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc @@ -194,7 +194,23 @@ Status NestedStackRaggedTensors( auto output_values_flat = output_ragged->values.flat_outer_dims(); int values_index = 0; + + TensorShape expected_value_shape = component_values_shape; + expected_value_shape.RemoveDim(0); + for (int i = 0; i < ragged_components.size(); i++) { + // Check that the flat_values tensor shape is compatible. + TensorShape value_shape = ragged_components[i].values().shape(); + value_shape.RemoveDim(0); + if (value_shape != expected_value_shape) { + return errors::InvalidArgument( + "All flat_values must have compatible shapes. Shape at index 0: ", + expected_value_shape, ". Shape at index ", i, ": ", value_shape, + ". If you are using tf.map_fn, then you may need to specify an " + "explicit fn_output_signature with appropriate ragged_rank, and/or " + "convert output tensors to RaggedTensors."); + } + auto component_values_flat = ragged_components[i].values.flat_outer_dims(); int num_inner_elements = ragged_components[i].values.NumElements(); diff --git a/tensorflow/python/ops/ragged/ragged_map_fn_op_test.py b/tensorflow/python/ops/ragged/ragged_map_fn_op_test.py index 9e74de4bc358dc..4808a10f86e8c1 100644 --- a/tensorflow/python/ops/ragged/ragged_map_fn_op_test.py +++ b/tensorflow/python/ops/ragged/ragged_map_fn_op_test.py @@ -21,9 +21,11 @@ import numpy as np from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops +from tensorflow.python.ops import map_fn as map_fn_lib from tensorflow.python.ops import math_ops as mo from tensorflow.python.ops import string_ops from tensorflow.python.ops.ragged import ragged_factory_ops @@ -294,6 +296,27 @@ def testMapOnSparseTensor(self): ) self.assertAllEqual(id_t2, [[0, 5], [0, 4]]) + def testRaggedMapWithIncorrectFnOutputSignature(self): + x = ragged_factory_ops.constant([[1, 2, 3, 4], [1]]) + with self.assertRaisesRegex(errors.InvalidArgumentError, + 'All flat_values must have compatible shapes'): + y = map_fn_lib.map_fn(lambda r: map_fn_lib.map_fn(lambda y: r, r), x) + self.evaluate(y) + + def testNestedRaggedMapWithFnOutputSignature(self): + ragged1d = ragged_tensor.RaggedTensorSpec([None], dtypes.int32) + ragged2d = ragged_tensor.RaggedTensorSpec([None, None], dtypes.int32) + + x = ragged_factory_ops.constant([[1, 2, 3, 4], [1]]) + # pylint: disable=g-long-lambda + 
y = map_fn_lib.map_fn( + lambda r: map_fn_lib.map_fn( + lambda y: r, r, fn_output_signature=ragged1d), + x, + fn_output_signature=ragged2d) + expected = [[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]], [[1]]] + self.assertAllEqual(y, expected) + if __name__ == '__main__': googletest.main() From 1bfd716a2fcdfd7516671c0e5fb45c164ec0b230 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 16 Jul 2021 06:49:45 -0700 Subject: [PATCH 235/256] Prevent division by 0 in `fully_connected.cc` PiperOrigin-RevId: 385137282 Change-Id: If201e69b6e0048f0be001330b4b977e2b46db2cb --- tensorflow/lite/kernels/fully_connected.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/lite/kernels/fully_connected.cc b/tensorflow/lite/kernels/fully_connected.cc index 9cbbcae9c51291..7fbbf9983ac675 100644 --- a/tensorflow/lite/kernels/fully_connected.cc +++ b/tensorflow/lite/kernels/fully_connected.cc @@ -175,6 +175,7 @@ TfLiteStatus PrepareImpl(TfLiteContext* context, TfLiteNode* node) { } TF_LITE_ENSURE_EQ(context, NumDimensions(filter), 2); + TF_LITE_ENSURE(context, filter->dims->data[1] != 0); const int batch_size = input_size / filter->dims->data[1]; const int num_units = filter->dims->data[0]; From 781d109778fc31bfc00352175ce4e2779fe6cde4 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 16 Jul 2021 09:14:31 -0700 Subject: [PATCH 236/256] Fix a null pointer exception in SVDF This is due to not checking that `GetVariableInput` returns non-null tensor. Also fix a potential null pointer exception in `GetVariableInput`. PiperOrigin-RevId: 385160147 Change-Id: Iadf3f0705b036a9014d27caa5a8bbd91f4c4c401 --- tensorflow/lite/kernels/svdf.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/lite/kernels/svdf.cc b/tensorflow/lite/kernels/svdf.cc index 267f07951f9e76..ef546072d5a538 100644 --- a/tensorflow/lite/kernels/svdf.cc +++ b/tensorflow/lite/kernels/svdf.cc @@ -274,6 +274,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteTensor* scratch = GetTemporary(context, node, /*index=*/0); TfLiteTensor* state = GetVariableInput(context, node, kStateTensor); + TF_LITE_ENSURE(context, state != nullptr); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); switch (weights_feature->type) { From 22386ec2ab59e62226037e4e2f81ed49b1c3b86a Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 16 Jul 2021 09:35:48 -0700 Subject: [PATCH 237/256] Fix a null pointer exception caused by branching on uninitialized data. This is due to not checking that the params for the quantization exists. If there is no quantization, we should not access the `.params` field. PiperOrigin-RevId: 385163909 Change-Id: I2beb8d50649b6542db224c163033fbcbaa49314f --- tensorflow/lite/kernels/svdf.cc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tensorflow/lite/kernels/svdf.cc b/tensorflow/lite/kernels/svdf.cc index 267f07951f9e76..ec19fb9256ccb8 100644 --- a/tensorflow/lite/kernels/svdf.cc +++ b/tensorflow/lite/kernels/svdf.cc @@ -236,14 +236,21 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { output_temp_size_array)); // Calculate effective scales. 
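The checks that follow in this hunk, and in the depthwise-conv patch after it, share one idea: confirm that a tensor actually carries quantization parameters (and that optional inputs are non-null) before the params pointer is reinterpreted and dereferenced. A self-contained sketch with hypothetical stand-in types, not TFLite's real structs:

#include <iostream>

struct FakeQuantParams { float scale; };
struct FakeTensor {
  bool has_quantization;    // stands in for quantization.type
  FakeQuantParams* params;  // stands in for quantization.params
};

// Dereference the params pointer only after both conditions are verified.
bool EffectiveScaleSketch(const FakeTensor& t, float* out) {
  if (!t.has_quantization || t.params == nullptr) {
    std::cerr << "Error: tensor is not quantized\n";
    return false;
  }
  *out = t.params->scale;  // safe: checked above
  return true;
}

int main() {
  FakeQuantParams q{0.5f};
  FakeTensor quantized{true, &q};
  FakeTensor plain{false, nullptr};
  float scale = 0.f;
  bool ok = EffectiveScaleSketch(quantized, &scale);
  std::cout << ok << " " << scale << "\n";                   // 1 0.5
  std::cout << EffectiveScaleSketch(plain, &scale) << "\n";  // 0
}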
+ TF_LITE_ENSURE(context, input->quantization.type != kTfLiteNoQuantization); auto* input_params = reinterpret_cast(input->quantization.params); + TF_LITE_ENSURE(context, + weights_feature->quantization.type != kTfLiteNoQuantization); auto* weights_feature_params = reinterpret_cast( weights_feature->quantization.params); + TF_LITE_ENSURE(context, state->quantization.type != kTfLiteNoQuantization); auto* state_params = reinterpret_cast(state->quantization.params); + TF_LITE_ENSURE(context, + weights_time->quantization.type != kTfLiteNoQuantization); auto* weight_time_params = reinterpret_cast( weights_time->quantization.params); + TF_LITE_ENSURE(context, output->quantization.type != kTfLiteNoQuantization); auto* output_params = reinterpret_cast( output->quantization.params); const double effective_scale_1 = input_params->scale->data[0] * From da2e2d8af295990fe036e65902de79729b4678f7 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 16 Jul 2021 10:22:37 -0700 Subject: [PATCH 238/256] Fix a null pointer exception caused by branching on uninitialized data. This is due to not checking that the params for the quantization exists. If there is no quantization, we should not access the `.params` field. PiperOrigin-RevId: 385173491 Change-Id: I8fc476c4b274fdb21ba741caa0fbc6d1b8840663 --- tensorflow/lite/kernels/depthwise_conv.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/lite/kernels/depthwise_conv.cc b/tensorflow/lite/kernels/depthwise_conv.cc index ee0b8eb86ee2ec..d37c2700755b1b 100644 --- a/tensorflow/lite/kernels/depthwise_conv.cc +++ b/tensorflow/lite/kernels/depthwise_conv.cc @@ -171,6 +171,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { if (data_type != kTfLiteFloat32) { TF_LITE_ENSURE_EQ(context, filter->quantization.type, kTfLiteAffineQuantization); + TF_LITE_ENSURE(context, filter->quantization.type != kTfLiteNoQuantization); const auto* affine_quantization = reinterpret_cast( filter->quantization.params); @@ -190,6 +191,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { } if (is_hybrid) { + TF_LITE_ENSURE(context, filter->quantization.type != kTfLiteNoQuantization); const auto* affine_quantization = reinterpret_cast( filter->quantization.params); @@ -476,6 +478,7 @@ TfLiteStatus EvalHybridPerChannel(TfLiteContext* context, TfLiteNode* node, op_params.weights_offset = 0; op_params.float_activation_min = output_activation_min; op_params.float_activation_max = output_activation_max; + TF_LITE_ENSURE(context, filter->quantization.type != kTfLiteNoQuantization); const auto* affine_quantization = reinterpret_cast(filter->quantization.params); if (kernel_type == kReference) { From d5c59fbfbb7707014386bf585d99b0c7477e4a4b Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 16 Jul 2021 14:23:21 -0700 Subject: [PATCH 239/256] Prevent a division by 0 in division ops. 
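A reduced standalone version of the divisor check this patch adds below (hypothetical template name): float division by zero is tolerated because it produces inf/nan, but an integer divisor tensor is scanned for zeros before the kernel runs, since integer division by zero typically raises SIGFPE.

#include <cstdint>
#include <iostream>
#include <vector>

// Return false if any element of an integer divisor is zero.
template <typename T>
bool DivisorIsSafeSketch(const std::vector<T>& divisor) {
  for (const T& v : divisor) {
    if (v == 0) return false;
  }
  return true;
}

int main() {
  std::vector<int32_t> ok{1, 2, 3};
  std::vector<int32_t> bad{1, 0, 3};
  std::cout << DivisorIsSafeSketch(ok) << " " << DivisorIsSafeSketch(bad)
            << "\n";  // 1 0
}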
PiperOrigin-RevId: 385223169 Change-Id: Ia4228960b5d2aa44480385f74bdd70d21a3613c3 --- tensorflow/lite/kernels/div.cc | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/tensorflow/lite/kernels/div.cc b/tensorflow/lite/kernels/div.cc index c9eb1db531a647..aafe00f0d0cbe9 100644 --- a/tensorflow/lite/kernels/div.cc +++ b/tensorflow/lite/kernels/div.cc @@ -204,9 +204,23 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) { + // TODO(b/193904910): This can written with C++ templates +#define TF_LITE_CHECK_DIV_NON_ZERO(data_type) \ + const auto* input2_data = GetTensorData(input2); \ + const size_t input2_elements = input2->bytes / sizeof(data_type); \ + for (size_t i = 0; i < input2_elements; i++) { \ + TF_LITE_ENSURE(context, input2_data[i] != 0); \ + } + + if (output->type == kTfLiteFloat32) { + // Div by zero seems ok in this case, just like in TF case infinities are + // returned. So we don't do a check at this point. + EvalDiv(context, node, params, data, input1, input2, output); + } else if (output->type == kTfLiteInt32) { + TF_LITE_CHECK_DIV_NON_ZERO(int32_t); EvalDiv(context, node, params, data, input1, input2, output); } else if (output->type == kTfLiteUInt8) { + TF_LITE_CHECK_DIV_NON_ZERO(uint8_t); TF_LITE_ENSURE_OK( context, EvalQuantized(context, node, params, data, input1, input2, output)); @@ -217,6 +231,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { output->type); return kTfLiteError; } +#undef TF_LITE_CHECK_DIV_NON_ZERO return kTfLiteOk; } From afede8ba593670931834b29fae7ad66c4f240ca6 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 16 Jul 2021 11:09:58 -0700 Subject: [PATCH 240/256] Prevent a division by 0 in average ops. 
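For the average-pool changes that follow, a small standalone illustration (hypothetical function, a raw array instead of a tensor): a pooling window that covers zero input cells would make the final division a divide-by-zero, so the helper reports failure instead.

#include <iostream>

// Refuse to average an empty window rather than dividing by zero.
bool AverageOfWindowSketch(const float* values, int filter_count, float* out) {
  if (filter_count == 0) return false;  // the added guard
  float total = 0.f;
  for (int i = 0; i < filter_count; ++i) total += values[i];
  *out = total / filter_count;  // safe: filter_count > 0
  return true;
}

int main() {
  const float window[4] = {1.f, 2.f, 3.f, 4.f};
  float avg = 0.f;
  bool ok = AverageOfWindowSketch(window, 4, &avg);
  std::cout << ok << " " << avg << "\n";                        // 1 2.5
  std::cout << AverageOfWindowSketch(window, 0, &avg) << "\n";  // 0
}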
PiperOrigin-RevId: 385184660 Change-Id: I7affd4554f9b336fca29ac68f633232c094d0bd3 --- .../internal/averagepool_quantized_test.cc | 14 +- .../internal/optimized/integer_ops/pooling.h | 4 +- .../internal/optimized/legacy_optimized_ops.h | 46 ++--- .../internal/optimized/optimized_ops.h | 14 +- .../internal/reference/integer_ops/pooling.h | 8 +- .../internal/reference/legacy_reference_ops.h | 46 ++--- .../lite/kernels/internal/reference/pooling.h | 8 +- tensorflow/lite/kernels/pooling.cc | 160 +++++++++--------- 8 files changed, 167 insertions(+), 133 deletions(-) diff --git a/tensorflow/lite/kernels/internal/averagepool_quantized_test.cc b/tensorflow/lite/kernels/internal/averagepool_quantized_test.cc index cbc863645b74b9..fea343ae6b8824 100644 --- a/tensorflow/lite/kernels/internal/averagepool_quantized_test.cc +++ b/tensorflow/lite/kernels/internal/averagepool_quantized_test.cc @@ -40,12 +40,14 @@ void RunOneAveragePoolTest(const PoolParams& params, std::vector optimized_averagePool_output(buffer_size); std::vector reference_averagePool_output(buffer_size); - reference_integer_ops::AveragePool(params, input_shape, input_data, - output_shape, - reference_averagePool_output.data()); - optimized_integer_ops::AveragePool(params, input_shape, input_data, - output_shape, - optimized_averagePool_output.data()); + bool reference_success = reference_integer_ops::AveragePool( + params, input_shape, input_data, output_shape, + reference_averagePool_output.data()); + bool optimized_success = optimized_integer_ops::AveragePool( + params, input_shape, input_data, output_shape, + optimized_averagePool_output.data()); + EXPECT_TRUE(reference_success); + EXPECT_TRUE(optimized_success); for (int i = 0; i < buffer_size; i++) { EXPECT_TRUE(reference_averagePool_output[i] == diff --git a/tensorflow/lite/kernels/internal/optimized/integer_ops/pooling.h b/tensorflow/lite/kernels/internal/optimized/integer_ops/pooling.h index f2696500ab9874..ca94869f564d48 100644 --- a/tensorflow/lite/kernels/internal/optimized/integer_ops/pooling.h +++ b/tensorflow/lite/kernels/internal/optimized/integer_ops/pooling.h @@ -145,7 +145,7 @@ inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape, } } -inline void AveragePool16(const PoolParams& params, +inline bool AveragePool16(const PoolParams& params, const RuntimeShape& input_shape, const int8* input_data, const RuntimeShape& output_shape, int8* output_data) { @@ -194,6 +194,7 @@ inline void AveragePool16(const PoolParams& params, std::min(params.filter_height, input_height - in_y_origin); const int filter_count = (filter_x_end - filter_x_start) * (filter_y_end - filter_y_start); + if (filter_count == 0) return false; memset(acc, 0, tranche_depth * sizeof(acc[0])); const int8* input_ptr = input_data + depth_base + @@ -281,6 +282,7 @@ inline void AveragePool16(const PoolParams& params, } } } + return true; } inline void AveragePool(const PoolParams& params, diff --git a/tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h b/tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h index f206dfa9235428..0f1c50329c733d 100644 --- a/tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h +++ b/tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h @@ -3763,7 +3763,7 @@ inline void BroadcastMul(const uint8* input1_data, const Dims<4>& input1_dims, output_data, output_dims); } -inline void AveragePool(const float* input_data, const Dims<4>& input_dims, +inline bool AveragePool(const float* input_data, const Dims<4>& 
input_dims, int stride_width, int stride_height, int pad_width, int pad_height, int kwidth, int kheight, float output_activation_min, @@ -3778,35 +3778,37 @@ inline void AveragePool(const float* input_data, const Dims<4>& input_dims, params.padding_values.width = pad_width; params.float_activation_min = output_activation_min; params.float_activation_max = output_activation_max; - AveragePool(params, DimsToShape(input_dims), input_data, - DimsToShape(output_dims), output_data); + return AveragePool(params, DimsToShape(input_dims), input_data, + DimsToShape(output_dims), output_data); } // legacy, for compatibility with old checked-in code template -void AveragePool(const float* input_data, const Dims<4>& input_dims, +bool AveragePool(const float* input_data, const Dims<4>& input_dims, int stride_width, int stride_height, int pad_width, int pad_height, int kwidth, int kheight, float* output_data, const Dims<4>& output_dims) { float output_activation_min, output_activation_max; GetActivationMinMax(Ac, &output_activation_min, &output_activation_max); - AveragePool(input_data, input_dims, stride_width, stride_height, pad_width, - pad_height, kwidth, kheight, output_activation_min, - output_activation_max, output_data, output_dims); + return AveragePool(input_data, input_dims, stride_width, stride_height, + pad_width, pad_height, kwidth, kheight, + output_activation_min, output_activation_max, output_data, + output_dims); } // legacy, for compatibility with old checked-in code template -void AveragePool(const float* input_data, const Dims<4>& input_dims, int stride, +bool AveragePool(const float* input_data, const Dims<4>& input_dims, int stride, int pad_width, int pad_height, int filter_width, int filter_height, float* output_data, const Dims<4>& output_dims) { - AveragePool(input_data, input_dims, stride, stride, pad_width, pad_height, - filter_width, filter_height, output_data, output_dims); + return AveragePool(input_data, input_dims, stride, stride, pad_width, + pad_height, filter_width, filter_height, output_data, + output_dims); } -inline void AveragePool(const uint8* input_data, const Dims<4>& input_dims, +inline bool AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride_width, int stride_height, int pad_width, int pad_height, int filter_width, int filter_height, int32 output_activation_min, @@ -3821,13 +3823,13 @@ inline void AveragePool(const uint8* input_data, const Dims<4>& input_dims, params.padding_values.width = pad_width; params.quantized_activation_min = output_activation_min; params.quantized_activation_max = output_activation_max; - AveragePool(params, DimsToShape(input_dims), input_data, - DimsToShape(output_dims), output_data); + return AveragePool(params, DimsToShape(input_dims), input_data, + DimsToShape(output_dims), output_data); } // legacy, for compatibility with old checked-in code template -void AveragePool(const uint8* input_data, const Dims<4>& input_dims, +bool AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride_width, int stride_height, int pad_width, int pad_height, int filter_width, int filter_height, int32 output_activation_min, int32 output_activation_max, @@ -3841,21 +3843,23 @@ void AveragePool(const uint8* input_data, const Dims<4>& input_dims, TFLITE_DCHECK_EQ(output_activation_min, 0); TFLITE_DCHECK_EQ(output_activation_max, 255); } - AveragePool(input_data, input_dims, stride_width, stride_height, pad_width, - pad_height, filter_width, filter_height, output_activation_min, - output_activation_max, output_data, 
output_dims); + return AveragePool(input_data, input_dims, stride_width, stride_height, + pad_width, pad_height, filter_width, filter_height, + output_activation_min, output_activation_max, output_data, + output_dims); } // legacy, for compatibility with old checked-in code template -void AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride, +bool AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride, int pad_width, int pad_height, int filter_width, int filter_height, int32 output_activation_min, int32 output_activation_max, uint8* output_data, const Dims<4>& output_dims) { - AveragePool(input_data, input_dims, stride, stride, pad_width, pad_height, - filter_width, filter_height, output_activation_min, - output_activation_max, output_data, output_dims); + return AveragePool(input_data, input_dims, stride, stride, pad_width, + pad_height, filter_width, filter_height, + output_activation_min, output_activation_max, + output_data, output_dims); } inline void MaxPool(const float* input_data, const Dims<4>& input_dims, diff --git a/tensorflow/lite/kernels/internal/optimized/optimized_ops.h b/tensorflow/lite/kernels/internal/optimized/optimized_ops.h index 528eea3d698678..93bf924190dce7 100644 --- a/tensorflow/lite/kernels/internal/optimized/optimized_ops.h +++ b/tensorflow/lite/kernels/internal/optimized/optimized_ops.h @@ -3223,7 +3223,7 @@ inline int NodeOffset(int b, int h, int w, int height, int width) { return (b * height + h) * width + w; } -inline void AveragePool(const PoolParams& params, +inline bool AveragePool(const PoolParams& params, const RuntimeShape& input_shape, const float* input_data, const RuntimeShape& output_shape, float* output_data) { @@ -3238,6 +3238,9 @@ inline void AveragePool(const PoolParams& params, const int stride_height = params.stride_height; const int stride_width = params.stride_width; + if (stride_height == 0) return false; + if (stride_width == 0) return false; + // TODO(benoitjacob) make this a proper reference impl without Eigen! 
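The void-to-bool signature changes running through these pooling helpers exist so that a failed precondition can travel up to the kernel, where it becomes a TF_LITE_ENSURE error instead of a crash. A toy standalone version of that plumbing, with hypothetical names:

#include <iostream>

// Inner helper: report failure instead of dividing by a zero stride later.
bool InnerPoolSketch(int stride) {
  if (stride == 0) return false;
  return true;
}

// Caller: convert the helper's bool into a surfaced kernel error, in the
// spirit of TF_LITE_ENSURE(context, ...).
bool PoolingKernelSketch(int stride) {
  if (!InnerPoolSketch(stride)) {
    std::cerr << "Kernel error: invalid pooling parameters\n";
    return false;
  }
  return true;
}

int main() {
  std::cout << PoolingKernelSketch(2) << " " << PoolingKernelSketch(0)
            << "\n";  // 1 0
}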
const auto in_mat = MapAsMatrixWithLastDimAsRows(input_data, input_shape); auto out_mat = MapAsMatrixWithLastDimAsRows(output_data, output_shape); @@ -3283,9 +3286,11 @@ inline void AveragePool(const PoolParams& params, params.float_activation_min, params.float_activation_max); } + + return true; } -inline void AveragePool16(const PoolParams& params, +inline bool AveragePool16(const PoolParams& params, const RuntimeShape& input_shape, const uint8* input_data, const RuntimeShape& output_shape, @@ -3335,6 +3340,7 @@ inline void AveragePool16(const PoolParams& params, std::min(params.filter_height, input_height - in_y_origin); const int filter_count = (filter_x_end - filter_x_start) * (filter_y_end - filter_y_start); + if (filter_count == 0) return false; memset(acc, 0, tranche_depth * sizeof(acc[0])); const uint8* input_ptr = input_data + depth_base + @@ -3417,7 +3423,7 @@ inline void AveragePool16(const PoolParams& params, } } -inline void AveragePool32(const PoolParams& params, +inline bool AveragePool32(const PoolParams& params, const RuntimeShape& input_shape, const uint8* input_data, const RuntimeShape& output_shape, @@ -3467,6 +3473,7 @@ inline void AveragePool32(const PoolParams& params, std::min(params.filter_height, input_height - in_y_origin); const int filter_count = (filter_x_end - filter_x_start) * (filter_y_end - filter_y_start); + if (filter_count == 0) return false; memset(acc, 0, tranche_depth * sizeof(acc[0])); const uint8* input_ptr = input_data + depth_base + @@ -3553,6 +3560,7 @@ inline void AveragePool32(const PoolParams& params, } } } + return true; } inline void AveragePool(const PoolParams& params, diff --git a/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h b/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h index 6b49d2b150bf46..f0ef31269a34f7 100644 --- a/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +++ b/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h @@ -21,7 +21,7 @@ limitations under the License. namespace tflite { namespace reference_integer_ops { -inline void AveragePool(const PoolParams& params, +inline bool AveragePool(const PoolParams& params, const RuntimeShape& input_shape, const int8* input_data, const RuntimeShape& output_shape, int8* output_data) { TFLITE_DCHECK_LE(params.quantized_activation_min, @@ -65,6 +65,7 @@ inline void AveragePool(const PoolParams& params, filter_count++; } } + if (filter_count == 0) return false; // Round to the closest integer value. acc = acc > 0 ? (acc + filter_count / 2) / filter_count : (acc - filter_count / 2) / filter_count; @@ -76,6 +77,7 @@ inline void AveragePool(const PoolParams& params, } } } + return true; } inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape, @@ -135,7 +137,7 @@ inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape, } } -inline void AveragePool(const PoolParams& params, +inline bool AveragePool(const PoolParams& params, const RuntimeShape& input_shape, const int16* input_data, const RuntimeShape& output_shape, int16* output_data) { @@ -180,6 +182,7 @@ inline void AveragePool(const PoolParams& params, filter_count++; } } + if (filter_count == 0) return false; // Round to the closest integer value. acc = acc > 0 ? 
(acc + filter_count / 2) / filter_count : (acc - filter_count / 2) / filter_count; @@ -191,6 +194,7 @@ inline void AveragePool(const PoolParams& params, } } } + return true; } inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape, diff --git a/tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h b/tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h index f62c9bd197c876..c204b3946b522f 100644 --- a/tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h +++ b/tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h @@ -1528,7 +1528,7 @@ void Sub(const T* input1_data, const Dims<4>& input1_dims, const T* input2_data, output_data); } -inline void AveragePool(const float* input_data, const Dims<4>& input_dims, +inline bool AveragePool(const float* input_data, const Dims<4>& input_dims, int stride_width, int stride_height, int pad_width, int pad_height, int kwidth, int kheight, float output_activation_min, @@ -1543,8 +1543,8 @@ inline void AveragePool(const float* input_data, const Dims<4>& input_dims, params.padding_values.width = pad_width; params.float_activation_min = output_activation_min; params.float_activation_max = output_activation_max; - AveragePool(params, DimsToShape(input_dims), input_data, - DimsToShape(output_dims), output_data); + return AveragePool(params, DimsToShape(input_dims), input_data, + DimsToShape(output_dims), output_data); } // Transitional version that will be moved shortly to legacy_reference_ops, as @@ -1603,29 +1603,31 @@ inline void BroadcastMul(const uint8* input1_data, const Dims<4>& input1_dims, // legacy, for compatibility with old checked-in code template -void AveragePool(const float* input_data, const Dims<4>& input_dims, +bool AveragePool(const float* input_data, const Dims<4>& input_dims, int stride_width, int stride_height, int pad_width, int pad_height, int kwidth, int kheight, float* output_data, const Dims<4>& output_dims) { float output_activation_min, output_activation_max; GetActivationMinMax(Ac, &output_activation_min, &output_activation_max); - AveragePool(input_data, input_dims, stride_width, stride_height, pad_width, - pad_height, kwidth, kheight, output_activation_min, - output_activation_max, output_data, output_dims); + return AveragePool(input_data, input_dims, stride_width, stride_height, + pad_width, pad_height, kwidth, kheight, + output_activation_min, output_activation_max, output_data, + output_dims); } // legacy, for compatibility with old checked-in code template -void AveragePool(const float* input_data, const Dims<4>& input_dims, int stride, +bool AveragePool(const float* input_data, const Dims<4>& input_dims, int stride, int pad_width, int pad_height, int filter_width, int filter_height, float* output_data, const Dims<4>& output_dims) { - AveragePool(input_data, input_dims, stride, stride, pad_width, pad_height, - filter_width, filter_height, output_data, output_dims); + return AveragePool(input_data, input_dims, stride, stride, pad_width, + pad_height, filter_width, filter_height, output_data, + output_dims); } -inline void AveragePool(const uint8* input_data, const Dims<4>& input_dims, +inline bool AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride_width, int stride_height, int pad_width, int pad_height, int filter_width, int filter_height, int32 output_activation_min, @@ -1640,13 +1642,13 @@ inline void AveragePool(const uint8* input_data, const Dims<4>& input_dims, params.padding_values.width = pad_width; 
params.quantized_activation_min = output_activation_min; params.quantized_activation_max = output_activation_max; - AveragePool(params, DimsToShape(input_dims), input_data, - DimsToShape(output_dims), output_data); + return AveragePool(params, DimsToShape(input_dims), input_data, + DimsToShape(output_dims), output_data); } // legacy, for compatibility with old checked-in code template -void AveragePool(const uint8* input_data, const Dims<4>& input_dims, +bool AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride_width, int stride_height, int pad_width, int pad_height, int filter_width, int filter_height, int32 output_activation_min, int32 output_activation_max, @@ -1660,21 +1662,23 @@ void AveragePool(const uint8* input_data, const Dims<4>& input_dims, TFLITE_DCHECK_EQ(output_activation_min, 0); TFLITE_DCHECK_EQ(output_activation_max, 255); } - AveragePool(input_data, input_dims, stride_width, stride_height, pad_width, - pad_height, filter_width, filter_height, output_activation_min, - output_activation_max, output_data, output_dims); + return AveragePool(input_data, input_dims, stride_width, stride_height, + pad_width, pad_height, filter_width, filter_height, + output_activation_min, output_activation_max, output_data, + output_dims); } // legacy, for compatibility with old checked-in code template -void AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride, +bool AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride, int pad_width, int pad_height, int filter_width, int filter_height, int32 output_activation_min, int32 output_activation_max, uint8* output_data, const Dims<4>& output_dims) { - AveragePool(input_data, input_dims, stride, stride, pad_width, pad_height, - filter_width, filter_height, output_activation_min, - output_activation_max, output_data, output_dims); + return AveragePool(input_data, input_dims, stride, stride, pad_width, + pad_height, filter_width, filter_height, + output_activation_min, output_activation_max, + output_data, output_dims); } inline void MaxPool(const float* input_data, const Dims<4>& input_dims, diff --git a/tensorflow/lite/kernels/internal/reference/pooling.h b/tensorflow/lite/kernels/internal/reference/pooling.h index a03359cda8217b..685e1f50a550f0 100644 --- a/tensorflow/lite/kernels/internal/reference/pooling.h +++ b/tensorflow/lite/kernels/internal/reference/pooling.h @@ -23,7 +23,7 @@ limitations under the License. 
namespace tflite { namespace reference_ops { -inline void AveragePool(const PoolParams& params, +inline bool AveragePool(const PoolParams& params, const RuntimeShape& input_shape, const float* input_data, const RuntimeShape& output_shape, float* output_data) { @@ -66,6 +66,7 @@ inline void AveragePool(const PoolParams& params, filter_count++; } } + if (filter_count == 0) return false; const float average = total / filter_count; output_data[Offset(output_shape, batch, out_y, out_x, channel)] = ActivationFunctionWithMinMax(average, params.float_activation_min, @@ -74,9 +75,10 @@ inline void AveragePool(const PoolParams& params, } } } + return true; } -inline void AveragePool(const PoolParams& params, +inline bool AveragePool(const PoolParams& params, const RuntimeShape& input_shape, const uint8* input_data, const RuntimeShape& output_shape, uint8* output_data) { @@ -121,6 +123,7 @@ inline void AveragePool(const PoolParams& params, filter_count++; } } + if (filter_count == 0) return false; acc = (acc + filter_count / 2) / filter_count; acc = std::max(acc, params.quantized_activation_min); acc = std::min(acc, params.quantized_activation_max); @@ -130,6 +133,7 @@ inline void AveragePool(const PoolParams& params, } } } + return true; } inline void L2Pool(const PoolParams& params, const RuntimeShape& input_shape, diff --git a/tensorflow/lite/kernels/pooling.cc b/tensorflow/lite/kernels/pooling.cc index 6c97824cae6943..afce4ad77d8ca9 100644 --- a/tensorflow/lite/kernels/pooling.cc +++ b/tensorflow/lite/kernels/pooling.cc @@ -115,117 +115,126 @@ TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { } template -void AverageEvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLitePoolParams* params, OpData* data, - const TfLiteTensor* input, TfLiteTensor* output) { +TfLiteStatus AverageEvalFloat(TfLiteContext* context, TfLiteNode* node, + TfLitePoolParams* params, OpData* data, + const TfLiteTensor* input, TfLiteTensor* output) { float activation_min, activation_max; CalculateActivationRange(params->activation, &activation_min, &activation_max); -#define TF_LITE_AVERAGE_POOL(type) \ - tflite::PoolParams op_params; \ - op_params.stride_height = params->stride_height; \ - op_params.stride_width = params->stride_width; \ - op_params.filter_height = params->filter_height; \ - op_params.filter_width = params->filter_width; \ - op_params.padding_values.height = data->padding.height; \ - op_params.padding_values.width = data->padding.width; \ - op_params.float_activation_min = activation_min; \ - op_params.float_activation_max = activation_max; \ - type::AveragePool(op_params, GetTensorShape(input), \ - GetTensorData(input), GetTensorShape(output), \ - GetTensorData(output)) +#define TF_LITE_AVERAGE_POOL(type) \ + tflite::PoolParams op_params; \ + op_params.stride_height = params->stride_height; \ + op_params.stride_width = params->stride_width; \ + op_params.filter_height = params->filter_height; \ + op_params.filter_width = params->filter_width; \ + op_params.padding_values.height = data->padding.height; \ + op_params.padding_values.width = data->padding.width; \ + op_params.float_activation_min = activation_min; \ + op_params.float_activation_max = activation_max; \ + TF_LITE_ENSURE(context, type::AveragePool(op_params, GetTensorShape(input), \ + GetTensorData(input), \ + GetTensorShape(output), \ + GetTensorData(output))) if (kernel_type == kReference) { TF_LITE_AVERAGE_POOL(reference_ops); } else { TF_LITE_AVERAGE_POOL(optimized_ops); } #undef TF_LITE_AVERAGE_POOL + return 
kTfLiteOk; } template -void AverageEvalQuantizedUint8(TfLiteContext* context, TfLiteNode* node, - TfLitePoolParams* params, OpData* data, - const TfLiteTensor* input, - TfLiteTensor* output) { +TfLiteStatus AverageEvalQuantizedUint8(TfLiteContext* context, TfLiteNode* node, + TfLitePoolParams* params, OpData* data, + const TfLiteTensor* input, + TfLiteTensor* output) { int32_t activation_min; int32_t activation_max; (void)CalculateActivationRangeQuantized(context, params->activation, output, &activation_min, &activation_max); -#define TF_LITE_AVERAGE_POOL(type) \ - tflite::PoolParams op_params; \ - op_params.stride_height = params->stride_height; \ - op_params.stride_width = params->stride_width; \ - op_params.filter_height = params->filter_height; \ - op_params.filter_width = params->filter_width; \ - op_params.padding_values.height = data->padding.height; \ - op_params.padding_values.width = data->padding.width; \ - op_params.quantized_activation_min = activation_min; \ - op_params.quantized_activation_max = activation_max; \ - type::AveragePool(op_params, GetTensorShape(input), \ - GetTensorData(input), GetTensorShape(output), \ - GetTensorData(output)) +#define TF_LITE_AVERAGE_POOL(type) \ + tflite::PoolParams op_params; \ + op_params.stride_height = params->stride_height; \ + op_params.stride_width = params->stride_width; \ + op_params.filter_height = params->filter_height; \ + op_params.filter_width = params->filter_width; \ + op_params.padding_values.height = data->padding.height; \ + op_params.padding_values.width = data->padding.width; \ + op_params.quantized_activation_min = activation_min; \ + op_params.quantized_activation_max = activation_max; \ + TF_LITE_ENSURE(context, type::AveragePool(op_params, GetTensorShape(input), \ + GetTensorData(input), \ + GetTensorShape(output), \ + GetTensorData(output))) if (kernel_type == kReference) { TF_LITE_AVERAGE_POOL(reference_ops); } else { TF_LITE_AVERAGE_POOL(optimized_ops); } #undef TF_LITE_AVERAGE_POOL + return kTfLiteOk; } template -void AverageEvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node, - TfLitePoolParams* params, OpData* data, - const TfLiteTensor* input, TfLiteTensor* output) { +TfLiteStatus AverageEvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node, + TfLitePoolParams* params, OpData* data, + const TfLiteTensor* input, + TfLiteTensor* output) { int32_t activation_min; int32_t activation_max; (void)CalculateActivationRangeQuantized(context, params->activation, output, &activation_min, &activation_max); -#define TF_LITE_AVERAGE_POOL(type) \ - tflite::PoolParams op_params; \ - op_params.stride_height = params->stride_height; \ - op_params.stride_width = params->stride_width; \ - op_params.filter_height = params->filter_height; \ - op_params.filter_width = params->filter_width; \ - op_params.padding_values.height = data->padding.height; \ - op_params.padding_values.width = data->padding.width; \ - op_params.quantized_activation_min = activation_min; \ - op_params.quantized_activation_max = activation_max; \ - type::AveragePool(op_params, GetTensorShape(input), \ - GetTensorData(input), GetTensorShape(output), \ - GetTensorData(output)) +#define TF_LITE_AVERAGE_POOL(type) \ + tflite::PoolParams op_params; \ + op_params.stride_height = params->stride_height; \ + op_params.stride_width = params->stride_width; \ + op_params.filter_height = params->filter_height; \ + op_params.filter_width = params->filter_width; \ + op_params.padding_values.height = data->padding.height; \ + op_params.padding_values.width = 
data->padding.width; \ + op_params.quantized_activation_min = activation_min; \ + op_params.quantized_activation_max = activation_max; \ + TF_LITE_ENSURE(context, type::AveragePool(op_params, GetTensorShape(input), \ + GetTensorData(input), \ + GetTensorShape(output), \ + GetTensorData(output))) if (kernel_type == kReference) { TF_LITE_AVERAGE_POOL(reference_integer_ops); } else { TF_LITE_AVERAGE_POOL(optimized_integer_ops); } #undef TF_LITE_AVERAGE_POOL + return kTfLiteOk; } template -void AverageEvalQuantizedInt16(TfLiteContext* context, TfLiteNode* node, - TfLitePoolParams* params, OpData* data, - const TfLiteTensor* input, - TfLiteTensor* output) { +TfLiteStatus AverageEvalQuantizedInt16(TfLiteContext* context, TfLiteNode* node, + TfLitePoolParams* params, OpData* data, + const TfLiteTensor* input, + TfLiteTensor* output) { int32_t activation_min; int32_t activation_max; CalculateActivationRangeQuantized(context, params->activation, output, &activation_min, &activation_max); -#define TF_LITE_AVERAGE_POOL(type) \ - tflite::PoolParams op_params; \ - op_params.stride_height = params->stride_height; \ - op_params.stride_width = params->stride_width; \ - op_params.filter_height = params->filter_height; \ - op_params.filter_width = params->filter_width; \ - op_params.padding_values.height = data->padding.height; \ - op_params.padding_values.width = data->padding.width; \ - op_params.quantized_activation_min = activation_min; \ - op_params.quantized_activation_max = activation_max; \ - type::AveragePool(op_params, GetTensorShape(input), \ - GetTensorData(input), GetTensorShape(output), \ - GetTensorData(output)) +#define TF_LITE_AVERAGE_POOL(type) \ + tflite::PoolParams op_params; \ + op_params.stride_height = params->stride_height; \ + op_params.stride_width = params->stride_width; \ + op_params.filter_height = params->filter_height; \ + op_params.filter_width = params->filter_width; \ + op_params.padding_values.height = data->padding.height; \ + op_params.padding_values.width = data->padding.width; \ + op_params.quantized_activation_min = activation_min; \ + op_params.quantized_activation_max = activation_max; \ + TF_LITE_ENSURE(context, type::AveragePool(op_params, GetTensorShape(input), \ + GetTensorData(input), \ + GetTensorShape(output), \ + GetTensorData(output))) TF_LITE_AVERAGE_POOL(reference_integer_ops); #undef TF_LITE_AVERAGE_POOL + return kTfLiteOk; } template @@ -376,20 +385,17 @@ TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); switch (input->type) { // Already know in/out types are same. 
case kTfLiteFloat32: - AverageEvalFloat(context, node, params, data, input, output); - break; + return AverageEvalFloat(context, node, params, data, input, + output); case kTfLiteUInt8: - AverageEvalQuantizedUint8(context, node, params, data, input, - output); - break; + return AverageEvalQuantizedUint8(context, node, params, data, + input, output); case kTfLiteInt8: - AverageEvalQuantizedInt8(context, node, params, data, input, - output); - break; + return AverageEvalQuantizedInt8(context, node, params, data, + input, output); case kTfLiteInt16: - AverageEvalQuantizedInt16(context, node, params, data, input, - output); - break; + return AverageEvalQuantizedInt16(context, node, params, data, + input, output); default: TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", TfLiteTypeGetName(input->type)); From 7ee8e39cde1395f94fc0010e43ece6f282df3a35 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 27 Jul 2021 14:42:54 -0700 Subject: [PATCH 241/256] Prevent an OOB read in `expand_dims.cc` The for loop that follows this check assumes that `axis` is between `0` and `input_dims.size`. If user supplied `axis` is negative, the if code before this check is supposed to bring it back to positive (similar to how in Python one can do `l[-3]` to mean `l[-3 + len(l)]`). PiperOrigin-RevId: 387200206 Change-Id: I162f4feba12d547c3a4340833ae682016a2ebfab --- tensorflow/lite/kernels/expand_dims.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/lite/kernels/expand_dims.cc b/tensorflow/lite/kernels/expand_dims.cc index 721ab3d510a526..6db2830ea12e78 100644 --- a/tensorflow/lite/kernels/expand_dims.cc +++ b/tensorflow/lite/kernels/expand_dims.cc @@ -38,6 +38,7 @@ TfLiteStatus ExpandTensorDim(TfLiteContext* context, const TfLiteTensor& input, axis = input_dims.size + 1 + axis; } TF_LITE_ENSURE(context, axis <= input_dims.size); + TF_LITE_ENSURE(context, axis >= 0); TfLiteIntArray* output_dims = TfLiteIntArrayCreate(input_dims.size + 1); for (int i = 0; i < output_dims->size; ++i) { From 10c76c7a377586fcadf1e3d710425d940d57878a Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 27 Jul 2021 15:20:26 -0700 Subject: [PATCH 242/256] Prevent heap OOB read in TFLite's `gather_nd.cc`. Passing negative indices is illegal but there was a missing check so that resulted in OOB accesses. 
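The hardened kernel scans the raw index buffer once and rejects the operation if any entry is negative, before any gather offsets are computed. For illustration only, a minimal standalone C++ sketch of that validation pattern (names and types here are hypothetical, not the actual TFLite kernel API):

    // Sketch: return false when any index is negative, so the caller can
    // surface an error instead of reading before the start of the buffer.
    #include <cstddef>
    #include <cstdint>

    bool IndicesAreNonNegative(const int32_t* indices, size_t count) {
      for (size_t i = 0; i < count; ++i) {
        if (indices[i] < 0) return false;
      }
      return true;
    }

In the diff below the equivalent loop runs inside EvalGatherNd and its result is checked with TF_LITE_ENSURE, so a negative index aborts evaluation with an error status instead of causing an out-of-bounds read.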
PiperOrigin-RevId: 387208551 Change-Id: I6b7a8a62d3e7c13a16d81619e5bc23ae2cdbc7fd --- tensorflow/lite/kernels/gather_nd.cc | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tensorflow/lite/kernels/gather_nd.cc b/tensorflow/lite/kernels/gather_nd.cc index d84621918133fe..18108b00cdb1e3 100644 --- a/tensorflow/lite/kernels/gather_nd.cc +++ b/tensorflow/lite/kernels/gather_nd.cc @@ -118,6 +118,17 @@ TfLiteStatus GatherNdString(const TfLiteTensor* params, template TfLiteStatus EvalGatherNd(TfLiteContext* context, const TfLiteTensor* params, const TfLiteTensor* indices, TfLiteTensor* output) { + bool indices_has_only_positive_elements = true; + const auto* indices_values = GetTensorData(indices); + const size_t num_indices = indices->bytes / sizeof(IndicesT); + for (size_t i = 0; i < num_indices; i++) { + if (indices_values[i] < 0) { + indices_has_only_positive_elements = false; + break; + } + } + TF_LITE_ENSURE(context, indices_has_only_positive_elements); + switch (params->type) { case kTfLiteFloat32: return GatherNd(params, indices, output); From 14f52ca726de725b7e6b15f1783ae9ea3b197227 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 27 Jul 2021 18:40:39 -0700 Subject: [PATCH 243/256] Prevent dereferencing of null pointers in TFLite's `add.cc`. PiperOrigin-RevId: 387244946 Change-Id: I56094233327fbd8439b92e1dbb1262176e00eeb9 --- tensorflow/lite/kernels/internal/optimized/optimized_ops.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/lite/kernels/internal/optimized/optimized_ops.h b/tensorflow/lite/kernels/internal/optimized/optimized_ops.h index 528eea3d698678..964bd840d612b0 100644 --- a/tensorflow/lite/kernels/internal/optimized/optimized_ops.h +++ b/tensorflow/lite/kernels/internal/optimized/optimized_ops.h @@ -304,7 +304,7 @@ inline void BinaryBroadcastFiveFold(const ArithmeticParams& unswitched_params, // We have broadcast y2*y3*y4 of input2 data y1 times, and now move on. input2_data_reset = input2_data_ptr; } - } else { + } else if (input1_data_ptr != nullptr) { // Special case of y4 == 1, in which the innermost loop is a single // element and can be combined with the next (y3) as an inner broadcast. // From e05490e73face64921faecbfcd09a22bd707fa59 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 27 Jul 2021 16:20:45 -0700 Subject: [PATCH 244/256] Prevent nullptr dereference in MLIR TFLite dialect/optimizer. PiperOrigin-RevId: 387220762 Change-Id: Id136ef04bb3d36123b4685d316ae81a9ec924d6b --- tensorflow/compiler/mlir/lite/transforms/optimize.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/compiler/mlir/lite/transforms/optimize.cc b/tensorflow/compiler/mlir/lite/transforms/optimize.cc index 30ae4b81f4f324..0be9d2d109ff82 100644 --- a/tensorflow/compiler/mlir/lite/transforms/optimize.cc +++ b/tensorflow/compiler/mlir/lite/transforms/optimize.cc @@ -56,6 +56,9 @@ constexpr char kRelu6[] = "RELU6"; constexpr char kRelu1[] = "RELU_N1_TO_1"; bool L2NormalizeReduceAxis(Value sq_op, DenseElementsAttr axis) { + if (axis.getNumElements() == 0) { + return false; + } if (sq_op.getType().cast().getRank() - 1 == *axis.getValues().begin() || *axis.getValues().begin() == -1) { From f20119917f86b9c582fc19efc3b9c80700b09a75 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 27 Jul 2021 16:45:20 -0700 Subject: [PATCH 245/256] Prevent division by 0 in LSH projection. 
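The check added below requires the relevant input dimension to be at least 1 at prepare time, so a dimension that is later used as a divisor can never be zero. As a rough sketch of that general pattern (plain C++ with hypothetical names, not the actual LSH projection kernel), validate the divisor once up front rather than at every use:

    // Sketch: reject an empty dimension before it is ever used as a divisor.
    #include <cstdint>

    bool AverageRows(const int32_t* data, int rows, int cols, float* out) {
      if (rows < 1) return false;  // caller reports the error; no divide by zero
      for (int c = 0; c < cols; ++c) {
        int64_t sum = 0;
        for (int r = 0; r < rows; ++r) sum += data[r * cols + c];
        out[c] = static_cast<float>(sum) / rows;
      }
      return true;
    }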
PiperOrigin-RevId: 387225857 Change-Id: Iaeb572a763618c64f503e0026f6dd9fd769bf50c --- tensorflow/lite/kernels/lsh_projection.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tensorflow/lite/kernels/lsh_projection.cc b/tensorflow/lite/kernels/lsh_projection.cc index b809748c59ca82..34cbf6e5c3aa78 100644 --- a/tensorflow/lite/kernels/lsh_projection.cc +++ b/tensorflow/lite/kernels/lsh_projection.cc @@ -28,7 +28,7 @@ limitations under the License. // // Input: // Tensor[0]: Hash functions. Dim.size == 2, DataType: Float. -// Tensor[0].Dim[0]: Num of hash functions. +// Tensor[0].Dim[0]: Num of hash functions. Must be at least 1. // Tensor[0].Dim[1]: Num of projected output bits generated by // each hash function. // In sparse case, Tensor[0].Dim[1] + ceil( log2(Tensor[0].Dim[0] )) <= 32. @@ -80,6 +80,7 @@ TfLiteStatus Resize(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 1); TF_LITE_ENSURE(context, NumDimensions(input) >= 1); + TF_LITE_ENSURE(context, SizeOfDimension(input, 0) >= 1); if (NumInputs(node) == 3) { const TfLiteTensor* weight = GetInput(context, node, 2); From 115ab14da6d342897b6bc290618dedb0ce341455 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 27 Jul 2021 17:11:14 -0700 Subject: [PATCH 246/256] Prevent heap OOB read in TFLite's `gather.cc`. Passing negative indices is illegal but there was a missing check so that resulted in OOB accesses. PiperOrigin-RevId: 387231300 Change-Id: I3111b54b2f232638d795be17efc46abe4ede6bf8 --- tensorflow/lite/kernels/gather.cc | 63 ++++++++++++++++++++++++------- 1 file changed, 49 insertions(+), 14 deletions(-) diff --git a/tensorflow/lite/kernels/gather.cc b/tensorflow/lite/kernels/gather.cc index 1de49f7c486c44..63e7cf8e2161f3 100644 --- a/tensorflow/lite/kernels/gather.cc +++ b/tensorflow/lite/kernels/gather.cc @@ -98,8 +98,20 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { } template -TfLiteStatus Gather(const TfLiteGatherParams& params, const TfLiteTensor* input, - const TfLiteTensor* positions, TfLiteTensor* output) { +TfLiteStatus Gather(TfLiteContext* context, const TfLiteGatherParams& params, + const TfLiteTensor* input, const TfLiteTensor* positions, + TfLiteTensor* output) { + const PositionsT* indexes = GetTensorData(positions); + bool indices_has_only_positive_elements = true; + const size_t num_indices = positions->bytes / sizeof(PositionsT); + for (size_t i = 0; i < num_indices; i++) { + if (indexes[i] < 0) { + indices_has_only_positive_elements = false; + break; + } + } + TF_LITE_ENSURE(context, indices_has_only_positive_elements); + tflite::GatherParams op_params; op_params.axis = params.axis; optimized_ops::Gather(op_params, GetTensorShape(input), @@ -114,7 +126,18 @@ TfLiteStatus GatherStrings(TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* positions, TfLiteTensor* output) { DynamicBuffer buffer; + const PositionT* indexes = GetTensorData(positions); + bool indices_has_only_positive_elements = true; + const size_t num_indices = positions->bytes / sizeof(PositionT); + for (size_t i = 0; i < num_indices; i++) { + if (indexes[i] < 0) { + indices_has_only_positive_elements = false; + break; + } + } + TF_LITE_ENSURE(context, indices_has_only_positive_elements); + const PositionT num_strings = GetStringCount(input); const int num_indexes = NumElements(positions); @@ -138,17 +161,23 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { if (positions->type == kTfLiteInt32) { switch (input->type) { case 
kTfLiteFloat32: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteUInt8: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteInt8: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteInt32: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteInt64: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteBool: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteString: return GatherStrings(context, input, positions, output); default: @@ -160,17 +189,23 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { if (positions->type == kTfLiteInt64) { switch (input->type) { case kTfLiteFloat32: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteUInt8: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteInt8: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteInt32: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteInt64: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteBool: - return Gather(*params, input, positions, output); + return Gather(context, *params, input, positions, + output); case kTfLiteString: return GatherStrings(context, input, positions, output); default: From e4fcb3f4381876ff99ad92891b13c3f4066b54ca Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 13 Jul 2021 16:30:44 -0700 Subject: [PATCH 247/256] Bump curl dependency to 7.77.0 Handles the following CVEs: * [CVE-2021-22901](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22901) * [CVE-2021-22898](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22898) * [CVE-2021-22876](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22876) * [CVE-2021-22897](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22897) PiperOrigin-RevId: 384576784 Change-Id: Iaf4f499736039ea957efb0af596d1a46f3062797 --- tensorflow/workspace.bzl | 12 ++++++------ third_party/curl.BUILD | 2 ++ 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl index f2139a795670cb..87af1667fd1a4b 100755 --- a/tensorflow/workspace.bzl +++ b/tensorflow/workspace.bzl @@ -671,13 +671,13 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""): tf_http_archive( name = "curl", - build_file = clean_dep("//third_party:curl.BUILD"), - sha256 = "3b4378156ba09e224008e81dcce854b7ce4d182b1f9cfb97fe5ed9e9c18c6bd3", - strip_prefix = "curl-7.76.0", - system_build_file = clean_dep("//third_party/systemlibs:curl.BUILD"), + build_file = "//third_party:curl.BUILD", + sha256 = "b0a3428acb60fa59044c4d0baae4e4fc09ae9af1d8a3aa84b2e3fbcd99841f77", + strip_prefix = "curl-7.77.0", + system_build_file = "//third_party/systemlibs:curl.BUILD", urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/curl.haxx.se/download/curl-7.76.0.tar.gz", - 
"https://curl.haxx.se/download/curl-7.76.0.tar.gz", + "https://storage.googleapis.com/mirror.tensorflow.org/curl.haxx.se/download/curl-7.77.0.tar.gz", + "https://curl.haxx.se/download/curl-7.77.0.tar.gz", ], ) diff --git a/third_party/curl.BUILD b/third_party/curl.BUILD index 3b73ac7d29fa8f..85b09cf1bf4c72 100644 --- a/third_party/curl.BUILD +++ b/third_party/curl.BUILD @@ -40,6 +40,8 @@ cc_library( "lib/asyn-ares.c", "lib/asyn.h", "lib/base64.c", + "lib/bufref.c", + "lib/bufref.h", "lib/c-hyper.c", "lib/c-hyper.h", "lib/config-amigaos.h", From 6cb503c3e5832395e2f464ad99435f37c41fb477 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sat, 7 Aug 2021 14:06:12 -0700 Subject: [PATCH 248/256] Update release notes for the new patch release. --- RELEASE.md | 282 ++++++++++++++++++++++++++++++++--------------------- 1 file changed, 173 insertions(+), 109 deletions(-) diff --git a/RELEASE.md b/RELEASE.md index 74291aa01364e5..c6060ffdbc21ae 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,116 +1,180 @@ +# Release 2.3.4 + +This release introduces several vulnerability fixes: + +* Fixes a heap out of bounds access in sparse reduction operations [CVE-2021-37635](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37635) +* Fixes a floating point exception in `SparseDenseCwiseDiv` [CVE-2021-37636](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37636) +* Fixes a null pointer dereference in `CompressElement` [CVE-2021-37637](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37637) +* Fixes a null pointer dereference in `RaggedTensorToTensor` [CVE-2021-37638](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37638) +* Fixes a null pointer dereference and a heap OOB read arising from operations restoring tensors [CVE-2021-37639](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37639) +* Fixes an integer division by 0 in sparse reshaping [CVE-2021-37640](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37640) +* Fixes a division by 0 in `ResourceScatterDiv` [CVE-2021-37642](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37642) +* Fixes a heap OOB in `RaggedGather` [CVE-2021-37641](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37641) +* Fixes a `std::abort` raised from `TensorListReserve` [CVE-2021-37644](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37644) +* Fixes a null pointer dereference in `MatrixDiagPartOp` [CVE-2021-37643](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37643) +* Fixes an integer overflow due to conversion to unsigned [CVE-2021-37645](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37645) +* Fixes a bad allocation error in `StringNGrams` caused by integer conversion [CVE-2021-37646](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37646) +* Fixes a null pointer dereference in `SparseTensorSliceDataset` [CVE-2021-37647](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37647) +* Fixes an incorrect validation of `SaveV2` inputs [CVE-2021-37648](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37648) +* Fixes a null pointer dereference in `UncompressElement` [CVE-2021-37649](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37649) +* Fixes a segfault and a heap buffer overflow in `{Experimental,}DatasetToTFRecord` [CVE-2021-37650](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37650) +* Fixes a heap buffer overflow in `FractionalAvgPoolGrad` [CVE-2021-37651](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37651) +* Fixes a use after free in boosted trees 
creation [CVE-2021-37652](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37652) +* Fixes a division by 0 in `ResourceGather` [CVE-2021-37653](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37653) +* Fixes a heap OOB and a `CHECK` fail in `ResourceGather` [CVE-2021-37654](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37654) +* Fixes a heap OOB in `ResourceScatterUpdate` [CVE-2021-37655](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37655) +* Fixes an undefined behavior arising from reference binding to nullptr in `RaggedTensorToSparse` [CVE-2021-37656](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37656) +* Fixes an undefined behavior arising from reference binding to nullptr in `MatrixDiagV*` ops [CVE-2021-37657](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37657) +* Fixes an undefined behavior arising from reference binding to nullptr in `MatrixSetDiagV*` ops [CVE-2021-37658](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37658) +* Fixes an undefined behavior arising from reference binding to nullptr and heap OOB in binary cwise ops [CVE-2021-37659](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37659) +* Fixes a division by 0 in inplace operations [CVE-2021-37660](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37660) +* Fixes a crash caused by integer conversion to unsigned [CVE-2021-37661](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37661) +* Fixes an undefined behavior arising from reference binding to nullptr in boosted trees [CVE-2021-37662](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37662) +* Fixes a heap OOB in boosted trees [CVE-2021-37664](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37664) +* Fixes vulnerabilities arising from incomplete validation in `QuantizeV2` [CVE-2021-37663](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37663) +* Fixes vulnerabilities arising from incomplete validation in MKL requantization [CVE-2021-37665](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37665) +* Fixes an undefined behavior arising from reference binding to nullptr in `RaggedTensorToVariant` [CVE-2021-37666](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37666) +* Fixes an undefined behavior arising from reference binding to nullptr in unicode encoding [CVE-2021-37667](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37667) +* Fixes an FPE in `tf.raw_ops.UnravelIndex` [CVE-2021-37668](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37668) +* Fixes a crash in NMS ops caused by integer conversion to unsigned [CVE-2021-37669](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37669) +* Fixes a heap OOB in `UpperBound` and `LowerBound` [CVE-2021-37670](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37670) +* Fixes an undefined behavior arising from reference binding to nullptr in map operations [CVE-2021-37671](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37671) +* Fixes a heap OOB in `SdcaOptimizerV2` [CVE-2021-37672](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37672) +* Fixes a `CHECK`-fail in `MapStage` [CVE-2021-37673](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37673) +* Fixes a vulnerability arising from incomplete validation in `MaxPoolGrad` [CVE-2021-37674](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37674) +* Fixes an undefined behavior arising from reference binding to nullptr in shape inference 
[CVE-2021-37676](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37676) +* Fixes a division by 0 in most convolution operators [CVE-2021-37675](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37675) +* Fixes vulnerabilities arising from missing validation in shape inference for `Dequantize` [CVE-2021-37677](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37677) +* Fixes an arbitrary code execution due to YAML deserialization [CVE-2021-37678](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37678) +* Fixes a heap OOB in nested `tf.map_fn` with `RaggedTensor`s [CVE-2021-37679](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37679) +* Fixes a division by zero in TFLite [CVE-2021-37680](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37680) +* Fixes an NPE in TFLite [CVE-2021-37681](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37681) +* Fixes a vulnerability arising from use of uninitialized value in TFLite [CVE-2021-37682](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37682) +* Fixes an FPE in TFLite division operations [CVE-2021-37683](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37683) +* Fixes an FPE in TFLite pooling operations [CVE-2021-37684](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37684) +* Fixes an infinite loop in TFLite [CVE-2021-37686](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37686) +* Fixes a heap OOB in TFLite [CVE-2021-37685](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37685) +* Fixes a heap OOB in TFLite's `Gather*` implementations [CVE-2021-37687](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37687) +* Fixes an undefined behavior arising from null pointer dereference in TFLite [CVE-2021-37688](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37688) +* Fixes an undefined behavior arising from null pointer dereference in TFLite MLIR optimizations [CVE-2021-37689](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37689) +* Fixes an FPE in LSH in TFLite [CVE-2021-37691](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37691) +* Fixes a segfault on strings tensors with mismatched dimensions, arising in Go code [CVE-2021-37692](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37692) +* Fixes a use after free and a potential segfault in shape inference functions [CVE-2021-37690](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37690) +* Updates `curl` to `7.77.0` to handle [CVE-2021-22876](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22876), [CVE-2021-22897](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22897), [CVE-2021-22898](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22898), and [CVE-2021-22901](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22901).
+ # Release 2.3.3 This release introduces several vulnerability fixes: - * Fixes a heap buffer overflow in `RaggedBinCount` ([CVE-2021-29512](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29512)) - * Fixes a heap out of bounds write in `RaggedBinCount` ([CVE-2021-29514](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29514)) - * Fixes a type confusion during tensor casts which leads to dereferencing null pointers ([CVE-2021-29513](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29513)) - * Fixes a reference binding to null pointer in `MatrixDiag*` ops ([CVE-2021-29515](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29515)) - * Fixes a null pointer dereference via invalid Ragged Tensors ([CVE-2021-29516](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29516)) - * Fixes a division by zero in `Conv3D` ([CVE-2021-29517](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29517)) - * Fixes vulnerabilities where session operations in eager mode lead to null pointer dereferences ([CVE-2021-29518](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29518)) - * Fixes a `CHECK`-fail in `SparseCross` caused by type confusion ([CVE-2021-29519](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29519)) - * Fixes a segfault in `SparseCountSparseOutput` ([CVE-2021-29521](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29521)) - * Fixes a heap buffer overflow in `Conv3DBackprop*` ([CVE-2021-29520](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29520)) - * Fixes a division by 0 in `Conv3DBackprop*` ([CVE-2021-29522](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29522)) - * Fixes a `CHECK`-fail in `AddManySparseToTensorsMap` ([CVE-2021-29523](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29523)) - * Fixes a division by 0 in `Conv2DBackpropFilter` ([CVE-2021-29524](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29524)) - * Fixes a division by 0 in `Conv2DBackpropInput` ([CVE-2021-29525](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29525)) - * Fixes a division by 0 in `Conv2D` ([CVE-2021-29526](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29526)) - * Fixes a division by 0 in `QuantizedConv2D` ([CVE-2021-29527](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29527)) - * Fixes a division by 0 in `QuantizedMul` ([CVE-2021-29528](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29528)) - * Fixes vulnerabilities caused by invalid validation in `SparseMatrixSparseCholesky` ([CVE-2021-29530](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29530)) - * Fixes a heap buffer overflow caused by rounding ([CVE-2021-29529](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29529)) - * Fixes a `CHECK`-fail in `tf.raw_ops.EncodePng` ([CVE-2021-29531](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29531)) - * Fixes a heap out of bounds read in `RaggedCross` ([CVE-2021-29532](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29532)) - * Fixes a `CHECK`-fail in `DrawBoundingBoxes` ([CVE-2021-29533](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29533)) - * Fixes a heap buffer overflow in `QuantizedMul` ([CVE-2021-29535](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29535)) - * Fixes a `CHECK`-fail in `SparseConcat` ([CVE-2021-29534](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29534)) - * Fixes a heap buffer overflow in `QuantizedResizeBilinear` ([CVE-2021-29537](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29537)) - * Fixes 
a heap buffer overflow in `QuantizedReshape` ([CVE-2021-29536](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29536)) - * Fixes a division by zero in `Conv2DBackpropFilter` ([CVE-2021-29538](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29538)) - * Fixes a heap buffer overflow in `Conv2DBackpropFilter` ([CVE-2021-29540](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29540)) - * Fixes a heap buffer overflow in `StringNGrams` ([CVE-2021-29542](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29542)) - * Fixes a null pointer dereference in `StringNGrams` ([CVE-2021-29541](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29541)) - * Fixes a `CHECK`-fail in `QuantizeAndDequantizeV4Grad` ([CVE-2021-29544](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29544)) - * Fixes a `CHECK`-fail in `CTCGreedyDecoder` ([CVE-2021-29543](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29543)) - * Fixes a heap buffer overflow in `SparseTensorToCSRSparseMatrix` ([CVE-2021-29545](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29545)) - * Fixes a division by 0 in `QuantizedBiasAdd` ([CVE-2021-29546](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29546)) - * Fixes a heap out of bounds in `QuantizedBatchNormWithGlobalNormalization` ([CVE-2021-29547](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29547)) - * Fixes a division by 0 in `QuantizedBatchNormWithGlobalNormalization` ([CVE-2021-29548](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29548)) - * Fixes a division by 0 in `QuantizedAdd` ([CVE-2021-29549](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29549)) - * Fixes a division by 0 in `FractionalAvgPool` ([CVE-2021-29550](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29550)) - * Fixes an OOB read in `MatrixTriangularSolve` ([CVE-2021-29551](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29551)) - * Fixes a heap OOB in `QuantizeAndDequantizeV3` ([CVE-2021-29553](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29553)) - * Fixes a `CHECK`-failure in `UnsortedSegmentJoin` ([CVE-2021-29552](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29552)) - * Fixes a division by 0 in `DenseCountSparseOutput` ([CVE-2021-29554](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29554)) - * Fixes a division by 0 in `FusedBatchNorm` ([CVE-2021-29555](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29555)) - * Fixes a division by 0 in `SparseMatMul` ([CVE-2021-29557](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29557)) - * Fixes a division by 0 in `Reverse` ([CVE-2021-29556](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29556)) - * Fixes a heap buffer overflow in `SparseSplit` ([CVE-2021-29558](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29558)) - * Fixes a heap OOB access in unicode ops ([CVE-2021-29559](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29559)) - * Fixes a heap buffer overflow in `RaggedTensorToTensor` ([CVE-2021-29560](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29560)) - * Fixes a `CHECK`-fail in `LoadAndRemapMatrix` ([CVE-2021-29561](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29561)) - * Fixes a `CHECK`-fail in `tf.raw_ops.IRFFT` ([CVE-2021-29562](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29562)) - * Fixes a `CHECK`-fail in `tf.raw_ops.RFFT` ([CVE-2021-29563](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29563)) - * Fixes a null pointer dereference in `EditDistance` 
([CVE-2021-29564](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29564)) - * Fixes a null pointer dereference in `SparseFillEmptyRows` ([CVE-2021-29565](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29565)) - * Fixes a heap OOB access in `Dilation2DBackpropInput` ([CVE-2021-29566](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29566)) - * Fixes a reference binding to null in `ParameterizedTruncatedNormal` ([CVE-2021-29568](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29568)) - * Fixes a set of vulnerabilities caused by lack of validation in `SparseDenseCwiseMul` ([CVE-2021-29567](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29567)) - * Fixes a heap out of bounds read in `MaxPoolGradWithArgmax` ([CVE-2021-29570](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29570)) - * Fixes a heap out of bounds read in `RequantizationRange` ([CVE-2021-29569](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29569)) - * Fixes a memory corruption in `DrawBoundingBoxesV2` ([CVE-2021-29571](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29571)) - * Fixes a reference binding to nullptr in `SdcaOptimizer` ([CVE-2021-29572](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29572)) - * Fixes an overflow and a denial of service in `tf.raw_ops.ReverseSequence` ([CVE-2021-29575](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29575)) - * Fixes a division by 0 in `MaxPoolGradWithArgmax` ([CVE-2021-29573](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29573)) - * Fixes an undefined behavior in `MaxPool3DGradGrad` ([CVE-2021-29574](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29574)) - * Fixes a heap buffer overflow in `MaxPool3DGradGrad` ([CVE-2021-29576](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29576)) - * Fixes a heap buffer overflow in `AvgPool3DGrad` ([CVE-2021-29577](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29577)) - * Fixes an undefined behavior and a `CHECK`-fail in `FractionalMaxPoolGrad` ([CVE-2021-29580](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29580)) - * Fixes a heap buffer overflow in `FractionalAvgPoolGrad` ([CVE-2021-29578](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29578)) - * Fixes a heap buffer overflow in `MaxPoolGrad` ([CVE-2021-29579](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29579)) - * Fixes a segfault in `CTCBeamSearchDecoder` ([CVE-2021-29581](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29581)) - * Fixes a heap OOB read in `tf.raw_ops.Dequantize` ([CVE-2021-29582](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29582)) - * Fixes a `CHECK`-fail due to integer overflow ([CVE-2021-29584](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29584)) - * Fixes a heap buffer overflow and undefined behavior in `FusedBatchNorm` ([CVE-2021-29583](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29583)) - * Fixes a division by zero in padding computation in TFLite ([CVE-2021-29585](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29585)) - * Fixes a division by zero in optimized pooling implementations in TFLite ([CVE-2021-29586](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29586)) - * Fixes a division by zero in TFLite's implementation of `SpaceToDepth` ([CVE-2021-29587](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29587)) - * Fixes a division by zero in TFLite's implementation of `GatherNd` 
([CVE-2021-29589](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29589)) - * Fixes a division by zero in TFLite's implementation of `TransposeConv` ([CVE-2021-29588](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29588)) - * Fixes a heap OOB read in TFLite's implementation of `Minimum` or `Maximum` ([CVE-2021-29590](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29590)) - * Fixes a null pointer dereference in TFLite's `Reshape` operator ([CVE-2021-29592](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29592)) - * Fixes a stack overflow due to looping TFLite subgraph ([CVE-2021-29591](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29591)) - * Fixes a division by zero in TFLite's implementation of `DepthToSpace` ([CVE-2021-29595](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29595)) - * Fixes a division by zero in TFLite's convolution code ([CVE-2021-29594](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29594)) - * Fixes a division by zero in TFLite's implementation of `EmbeddingLookup` ([CVE-2021-29596](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29596)) - * Fixes a division by zero in TFLite's implementation of `BatchToSpaceNd` ([CVE-2021-29593](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29593)) - * Fixes a division by zero in TFLite's implementation of `SpaceToBatchNd` ([CVE-2021-29597](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29597)) - * Fixes a division by zero in TFLite's implementation of `SVDF` ([CVE-2021-29598](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29598)) - * Fixes a division by zero in TFLite's implementation of `Split` ([CVE-2021-29599](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29599)) - * Fixes a division by zero in TFLite's implementation of `OneHot` ([CVE-2021-29600](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29600)) - * Fixes a division by zero in TFLite's implementation of `DepthwiseConv` ([CVE-2021-29602](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29602)) - * Fixes a division by zero in TFLite's implementation of hashtable lookup ([CVE-2021-29604](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29604)) - * Fixes a integer overflow in TFLite concatentation ([CVE-2021-29601](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29601)) - * Fixes a integer overflow in TFLite memory allocation ([CVE-2021-29605](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29605)) - * Fixes a heap OOB write in TFLite ([CVE-2021-29603](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29603)) - * Fixes a heap OOB read in TFLite ([CVE-2021-29606](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29606)) - * Fixes a heap OOB and null pointer dereference in `RaggedTensorToTensor` ([CVE-2021-29608](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29608)) - * Fixes vulnerabilities caused by incomplete validation in `SparseAdd` ([CVE-2021-29609](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29609)) - * Fixes vulnerabilities caused by incomplete validation in `SparseSparseMinimum` ([CVE-2021-29607](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29607)) - * Fixes vulnerabilities caused by incomplete validation in `SparseReshape` ([CVE-2021-29611](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29611)) - * Fixes vulnerabilities caused by invalid validation in `QuantizeAndDequantizeV2` ([CVE-2021-29610](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29610)) - * Fixes a heap 
buffer overflow in `BandedTriangularSolve` ([CVE-2021-29612](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29612)) - * Fixes vulnerabilities caused by incomplete validation in `tf.raw_ops.CTCLoss` ([CVE-2021-29613](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29613)) - * Fixes an interpreter crash from vulnerabilities in `tf.io.decode_raw` ([CVE-2021-29614](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29614)) - * Fixes a stack overflow in `ParseAttrValue` with nested tensors ([CVE-2021-29615](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29615)) - * Fixes a null dereference in Grappler's `TrySimplify` ([CVE-2021-29616](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29616)) - * Fixes a crash in `tf.transpose` with complex inputs ([CVE-2021-29618](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29618)) - * Fixes a crash in `tf.strings.substr` due to `CHECK`-fail ([CVE-2021-29617](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29617)) - * Fixes a segfault in `tf.raw_ops.SparseCountSparseOutput` ([CVE-2021-29619](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29619)) - * Fixes a segfault in `tf.raw_ops.ImmutableConst` ([CVE-2021-29539](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29539)) - * Updates `curl` to `7.76.0` to handle [CVE-2020-8169](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8169), [CVE-2020-8177](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8177), [CVE-2020-8231](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8231), [CVE-2020-8284](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8284), [CVE-2020-8285](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8285) and [CVE-2020-8286](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8286). 
+* Fixes a heap buffer overflow in `RaggedBinCount` ([CVE-2021-29512](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29512)) +* Fixes a heap out of bounds write in `RaggedBinCount` ([CVE-2021-29514](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29514)) +* Fixes a type confusion during tensor casts which leads to dereferencing null pointers ([CVE-2021-29513](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29513)) +* Fixes a reference binding to null pointer in `MatrixDiag*` ops ([CVE-2021-29515](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29515)) +* Fixes a null pointer dereference via invalid Ragged Tensors ([CVE-2021-29516](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29516)) +* Fixes a division by zero in `Conv3D` ([CVE-2021-29517](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29517)) +* Fixes vulnerabilities where session operations in eager mode lead to null pointer dereferences ([CVE-2021-29518](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29518)) +* Fixes a `CHECK`-fail in `SparseCross` caused by type confusion ([CVE-2021-29519](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29519)) +* Fixes a segfault in `SparseCountSparseOutput` ([CVE-2021-29521](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29521)) +* Fixes a heap buffer overflow in `Conv3DBackprop*` ([CVE-2021-29520](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29520)) +* Fixes a division by 0 in `Conv3DBackprop*` ([CVE-2021-29522](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29522)) +* Fixes a `CHECK`-fail in `AddManySparseToTensorsMap` ([CVE-2021-29523](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29523)) +* Fixes a division by 0 in `Conv2DBackpropFilter` ([CVE-2021-29524](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29524)) +* Fixes a division by 0 in `Conv2DBackpropInput` ([CVE-2021-29525](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29525)) +* Fixes a division by 0 in `Conv2D` ([CVE-2021-29526](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29526)) +* Fixes a division by 0 in `QuantizedConv2D` ([CVE-2021-29527](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29527)) +* Fixes a division by 0 in `QuantizedMul` ([CVE-2021-29528](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29528)) +* Fixes vulnerabilities caused by invalid validation in `SparseMatrixSparseCholesky` ([CVE-2021-29530](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29530)) +* Fixes a heap buffer overflow caused by rounding ([CVE-2021-29529](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29529)) +* Fixes a `CHECK`-fail in `tf.raw_ops.EncodePng` ([CVE-2021-29531](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29531)) +* Fixes a heap out of bounds read in `RaggedCross` ([CVE-2021-29532](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29532)) +* Fixes a `CHECK`-fail in `DrawBoundingBoxes` ([CVE-2021-29533](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29533)) +* Fixes a heap buffer overflow in `QuantizedMul` ([CVE-2021-29535](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29535)) +* Fixes a `CHECK`-fail in `SparseConcat` ([CVE-2021-29534](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29534)) +* Fixes a heap buffer overflow in `QuantizedResizeBilinear` ([CVE-2021-29537](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29537)) +* Fixes a heap buffer overflow in `QuantizedReshape` 
([CVE-2021-29536](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29536)) +* Fixes a division by zero in `Conv2DBackpropFilter` ([CVE-2021-29538](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29538)) +* Fixes a heap buffer overflow in `Conv2DBackpropFilter` ([CVE-2021-29540](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29540)) +* Fixes a heap buffer overflow in `StringNGrams` ([CVE-2021-29542](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29542)) +* Fixes a null pointer dereference in `StringNGrams` ([CVE-2021-29541](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29541)) +* Fixes a `CHECK`-fail in `QuantizeAndDequantizeV4Grad` ([CVE-2021-29544](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29544)) +* Fixes a `CHECK`-fail in `CTCGreedyDecoder` ([CVE-2021-29543](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29543)) +* Fixes a heap buffer overflow in `SparseTensorToCSRSparseMatrix` ([CVE-2021-29545](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29545)) +* Fixes a division by 0 in `QuantizedBiasAdd` ([CVE-2021-29546](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29546)) +* Fixes a heap out of bounds in `QuantizedBatchNormWithGlobalNormalization` ([CVE-2021-29547](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29547)) +* Fixes a division by 0 in `QuantizedBatchNormWithGlobalNormalization` ([CVE-2021-29548](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29548)) +* Fixes a division by 0 in `QuantizedAdd` ([CVE-2021-29549](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29549)) +* Fixes a division by 0 in `FractionalAvgPool` ([CVE-2021-29550](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29550)) +* Fixes an OOB read in `MatrixTriangularSolve` ([CVE-2021-29551](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29551)) +* Fixes a heap OOB in `QuantizeAndDequantizeV3` ([CVE-2021-29553](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29553)) +* Fixes a `CHECK`-failure in `UnsortedSegmentJoin` ([CVE-2021-29552](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29552)) +* Fixes a division by 0 in `DenseCountSparseOutput` ([CVE-2021-29554](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29554)) +* Fixes a division by 0 in `FusedBatchNorm` ([CVE-2021-29555](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29555)) +* Fixes a division by 0 in `SparseMatMul` ([CVE-2021-29557](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29557)) +* Fixes a division by 0 in `Reverse` ([CVE-2021-29556](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29556)) +* Fixes a heap buffer overflow in `SparseSplit` ([CVE-2021-29558](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29558)) +* Fixes a heap OOB access in unicode ops ([CVE-2021-29559](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29559)) +* Fixes a heap buffer overflow in `RaggedTensorToTensor` ([CVE-2021-29560](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29560)) +* Fixes a `CHECK`-fail in `LoadAndRemapMatrix` ([CVE-2021-29561](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29561)) +* Fixes a `CHECK`-fail in `tf.raw_ops.IRFFT` ([CVE-2021-29562](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29562)) +* Fixes a `CHECK`-fail in `tf.raw_ops.RFFT` ([CVE-2021-29563](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29563)) +* Fixes a null pointer dereference in `EditDistance` 
([CVE-2021-29564](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29564)) +* Fixes a null pointer dereference in `SparseFillEmptyRows` ([CVE-2021-29565](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29565)) +* Fixes a heap OOB access in `Dilation2DBackpropInput` ([CVE-2021-29566](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29566)) +* Fixes a reference binding to null in `ParameterizedTruncatedNormal` ([CVE-2021-29568](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29568)) +* Fixes a set of vulnerabilities caused by lack of validation in `SparseDenseCwiseMul` ([CVE-2021-29567](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29567)) +* Fixes a heap out of bounds read in `MaxPoolGradWithArgmax` ([CVE-2021-29570](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29570)) +* Fixes a heap out of bounds read in `RequantizationRange` ([CVE-2021-29569](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29569)) +* Fixes a memory corruption in `DrawBoundingBoxesV2` ([CVE-2021-29571](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29571)) +* Fixes a reference binding to nullptr in `SdcaOptimizer` ([CVE-2021-29572](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29572)) +* Fixes an overflow and a denial of service in `tf.raw_ops.ReverseSequence` ([CVE-2021-29575](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29575)) +* Fixes a division by 0 in `MaxPoolGradWithArgmax` ([CVE-2021-29573](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29573)) +* Fixes an undefined behavior in `MaxPool3DGradGrad` ([CVE-2021-29574](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29574)) +* Fixes a heap buffer overflow in `MaxPool3DGradGrad` ([CVE-2021-29576](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29576)) +* Fixes a heap buffer overflow in `AvgPool3DGrad` ([CVE-2021-29577](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29577)) +* Fixes an undefined behavior and a `CHECK`-fail in `FractionalMaxPoolGrad` ([CVE-2021-29580](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29580)) +* Fixes a heap buffer overflow in `FractionalAvgPoolGrad` ([CVE-2021-29578](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29578)) +* Fixes a heap buffer overflow in `MaxPoolGrad` ([CVE-2021-29579](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29579)) +* Fixes a segfault in `CTCBeamSearchDecoder` ([CVE-2021-29581](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29581)) +* Fixes a heap OOB read in `tf.raw_ops.Dequantize` ([CVE-2021-29582](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29582)) +* Fixes a `CHECK`-fail due to integer overflow ([CVE-2021-29584](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29584)) +* Fixes a heap buffer overflow and undefined behavior in `FusedBatchNorm` ([CVE-2021-29583](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29583)) +* Fixes a division by zero in padding computation in TFLite ([CVE-2021-29585](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29585)) +* Fixes a division by zero in optimized pooling implementations in TFLite ([CVE-2021-29586](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29586)) +* Fixes a division by zero in TFLite's implementation of `SpaceToDepth` ([CVE-2021-29587](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29587)) +* Fixes a division by zero in TFLite's implementation of `GatherNd` ([CVE-2021-29589](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29589)) +* Fixes a division 
by zero in TFLite's implementation of `TransposeConv` ([CVE-2021-29588](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29588)) +* Fixes a heap OOB read in TFLite's implementation of `Minimum` or `Maximum` ([CVE-2021-29590](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29590)) +* Fixes a null pointer dereference in TFLite's `Reshape` operator ([CVE-2021-29592](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29592)) +* Fixes a stack overflow due to looping TFLite subgraph ([CVE-2021-29591](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29591)) +* Fixes a division by zero in TFLite's implementation of `DepthToSpace` ([CVE-2021-29595](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29595)) +* Fixes a division by zero in TFLite's convolution code ([CVE-2021-29594](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29594)) +* Fixes a division by zero in TFLite's implementation of `EmbeddingLookup` ([CVE-2021-29596](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29596)) +* Fixes a division by zero in TFLite's implementation of `BatchToSpaceNd` ([CVE-2021-29593](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29593)) +* Fixes a division by zero in TFLite's implementation of `SpaceToBatchNd` ([CVE-2021-29597](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29597)) +* Fixes a division by zero in TFLite's implementation of `SVDF` ([CVE-2021-29598](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29598)) +* Fixes a division by zero in TFLite's implementation of `Split` ([CVE-2021-29599](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29599)) +* Fixes a division by zero in TFLite's implementation of `OneHot` ([CVE-2021-29600](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29600)) +* Fixes a division by zero in TFLite's implementation of `DepthwiseConv` ([CVE-2021-29602](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29602)) +* Fixes a division by zero in TFLite's implementation of hashtable lookup ([CVE-2021-29604](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29604)) +* Fixes an integer overflow in TFLite concatenation ([CVE-2021-29601](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29601)) +* Fixes an integer overflow in TFLite memory allocation ([CVE-2021-29605](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29605)) +* Fixes a heap OOB write in TFLite ([CVE-2021-29603](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29603)) +* Fixes a heap OOB read in TFLite ([CVE-2021-29606](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29606)) +* Fixes a heap OOB and null pointer dereference in `RaggedTensorToTensor` ([CVE-2021-29608](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29608)) +* Fixes vulnerabilities caused by incomplete validation in `SparseAdd` ([CVE-2021-29609](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29609)) +* Fixes vulnerabilities caused by incomplete validation in `SparseSparseMinimum` ([CVE-2021-29607](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29607)) +* Fixes vulnerabilities caused by incomplete validation in `SparseReshape` ([CVE-2021-29611](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29611)) +* Fixes vulnerabilities caused by invalid validation in `QuantizeAndDequantizeV2` ([CVE-2021-29610](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29610)) +* Fixes a heap buffer overflow in `BandedTriangularSolve` ([CVE-2021-29612](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29612)) 
+* Fixes vulnerabilities caused by incomplete validation in `tf.raw_ops.CTCLoss` ([CVE-2021-29613](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29613)) +* Fixes an interpreter crash from vulnerabilities in `tf.io.decode_raw` ([CVE-2021-29614](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29614)) +* Fixes a stack overflow in `ParseAttrValue` with nested tensors ([CVE-2021-29615](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29615)) +* Fixes a null dereference in Grappler's `TrySimplify` ([CVE-2021-29616](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29616)) +* Fixes a crash in `tf.transpose` with complex inputs ([CVE-2021-29618](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29618)) +* Fixes a crash in `tf.strings.substr` due to `CHECK`-fail ([CVE-2021-29617](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29617)) +* Fixes a segfault in `tf.raw_ops.SparseCountSparseOutput` ([CVE-2021-29619](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29619)) +* Fixes a segfault in `tf.raw_ops.ImmutableConst` ([CVE-2021-29539](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29539)) +* Updates `curl` to `7.76.0` to handle [CVE-2020-8169](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8169), [CVE-2020-8177](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8177), [CVE-2020-8231](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8231), [CVE-2020-8284](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8284), [CVE-2020-8285](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8285) and [CVE-2020-8286](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8286). # Release 2.3.2 From ef604cc1c87e82a9e10bb9c7b600e85395fac095 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sat, 7 Aug 2021 14:14:29 -0700 Subject: [PATCH 249/256] Put CVE numbers for fixes in parentheses --- RELEASE.md | 116 ++++++++++++++++++++++++++--------------------------- 1 file changed, 58 insertions(+), 58 deletions(-) diff --git a/RELEASE.md b/RELEASE.md index c6060ffdbc21ae..39db9362064462 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -2,64 +2,64 @@ This release introduces several vulnerability fixes: -* Fixes a heap out of bounds access in sparse reduction operations [CVE-2021-37635](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37635) -* Fixes a floating point exception in `SparseDenseCwiseDiv` [CVE-2021-37636](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37636) -* Fixes a null pointer dereference in `CompressElement` [CVE-2021-37637](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37637) -* Fixes a null pointer dereference in `RaggedTensorToTensor` [CVE-2021-37638](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37638) -* Fixes a null pointer dereference and a heap OOB read arising from operations restoring tensors [CVE-2021-37639](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37639) -* Fixes an integer division by 0 in sparse reshaping [CVE-2021-37640](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37640) -* Fixes a division by 0 in `ResourceScatterDiv` [CVE-2021-37642](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37642) -* Fixes a heap OOB in `RaggedGather` [CVE-2021-37641](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37641) -* Fixes a `std::abort` raised from `TensorListReserve` [CVE-2021-37644](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37644) -* Fixes a null pointer dereference in `MatrixDiagPartOp` 
[CVE-2021-37643](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37643) -* Fixes an integer overflow due to conversion to unsigned [CVE-2021-37645](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37645) -* Fixes a bad allocation error in `StringNGrams` caused by integer conversion [CVE-2021-37646](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37646) -* Fixes a null pointer dereference in `SparseTensorSliceDataset` [CVE-2021-37647](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37647) -* Fixes an incorrect validation of `SaveV2` inputs [CVE-2021-37648](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37648) -* Fixes a null pointer dereference in `UncompressElement` [CVE-2021-37649](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37649) -* Fixes a segfault and a heap buffer overflow in `{Experimental,}DatasetToTFRecord` [CVE-2021-37650](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37650) -* Fixes a heap buffer overflow in `FractionalAvgPoolGrad` [CVE-2021-37651](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37651) -* Fixes a use after free in boosted trees creation [CVE-2021-37652](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37652) -* Fixes a division by 0 in `ResourceGather` [CVE-2021-37653](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37653) -* Fixes a heap OOB and a `CHECK` fail in `ResourceGather` [CVE-2021-37654](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37654) -* Fixes a heap OOB in `ResourceScatterUpdate` [CVE-2021-37655](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37655) -* Fixes an undefined behavior arising from reference binding to nullptr in `RaggedTensorToSparse` [CVE-2021-37656](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37656) -* Fixes an undefined behavior arising from reference binding to nullptr in `MatrixDiagV*` ops [CVE-2021-37657](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37657) -* Fixes an undefined behavior arising from reference binding to nullptr in `MatrixSetDiagV*` ops [CVE-2021-37658](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37658) -* Fixes an undefined behavior arising from reference binding to nullptr and heap OOB in binary cwise ops [CVE-2021-37659](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37659) -* Fixes a division by 0 in inplace operations [CVE-2021-37660](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37660) -* Fixes a crash caused by integer conversion to unsigned [CVE-2021-37661](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37661) -* Fixes an undefined behavior arising from reference binding to nullptr in boosted trees [CVE-2021-37662](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37662) -* Fixes a heap OOB in boosted trees [CVE-2021-37664](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37664) -* Fixes vulnerabilities arising from incomplete validation in `QuantizeV2` [CVE-2021-37663](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37663) -* Fixes vulnerabilities arising from incomplete validation in MKL requantization [CVE-2021-37665](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37665) -* Fixes an undefined behavior arising from reference binding to nullptr in `RaggedTensorToVariant` [CVE-2021-37666](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37666) -* Fixes an undefined behavior arising from reference binding to nullptr in unicode encoding 
[CVE-2021-37667](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37667) -* Fixes an FPE in `tf.raw_ops.UnravelIndex` [CVE-2021-37668](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37668) -* Fixes a crash in NMS ops caused by integer conversion to unsigned [CVE-2021-37669](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37669) -* Fixes a heap OOB in `UpperBound` and `LowerBound` [CVE-2021-37670](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37670) -* Fixes an undefined behavior arising from reference binding to nullptr in map operations [CVE-2021-37671](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37671) -* Fixes a heap OOB in `SdcaOptimizerV2` [CVE-2021-37672](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37672) -* Fixes a `CHECK`-fail in `MapStage` [CVE-2021-37673](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37673) -* Fixes a vulnerability arising from incomplete validation in `MaxPoolGrad` [CVE-2021-37674](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37674) -* Fixes an undefined behavior arising from reference binding to nullptr in shape inference [CVE-2021-37676](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37676) -* Fixes a division by 0 in most convolution operators [CVE-2021-37675](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37675) -* Fixes vulnerabilities arising from missing validation in shape inference for `Dequantize` [CVE-2021-37677](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37677) -* Fixes an arbitrary code execution due to YAML deserialization [CVE-2021-37678](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37678) -* Fixes a heap OOB in nested `tf.map_fn` with `RaggedTensor`s [CVE-2021-37679](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37679) -* Fixes a division by zero in TFLite [CVE-2021-37680](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37680) -* Fixes an NPE in TFLite [CVE-2021-37681](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37681) -* Fixes a vulnerability arising from use of unitialized value in TFLite [CVE-2021-37682](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37682) -* Fixes an FPE in TFLite division operations [CVE-2021-37683](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37683) -* Fixes an FPE in TFLite pooling operations [CVE-2021-37684](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37684) -* Fixes an infinite loop in TFLite [CVE-2021-37686](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37686) -* Fixes a heap OOB in TFLite [CVE-2021-37685](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37685) -* Fixes a heap OOB in TFLite's `Gather*` implementations [CVE-2021-37687](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37687) -* Fixes an undefined behavior arising from null pointer dereference in TFLite [CVE-2021-37688](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37688) -* Fixes an undefined behavior arising from null pointer dereference in TFLite MLIR optimizations [CVE-2021-37689](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37689) -* Fixes a FPE in LSH in TFLite [CVE-2021-37691](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37691) -* Fixes a segfault on strings tensors with mismatched dimensions, arising in Go code [CVE-2021-37692](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37692) -* Fixes a use after free and a potential segfault in shape inference functions 
[CVE-2021-37690](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37690) +* Fixes a heap out of bounds access in sparse reduction operations ([CVE-2021-37635](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37635)) +* Fixes a floating point exception in `SparseDenseCwiseDiv` ([CVE-2021-37636](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37636)) +* Fixes a null pointer dereference in `CompressElement` ([CVE-2021-37637](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37637)) +* Fixes a null pointer dereference in `RaggedTensorToTensor` ([CVE-2021-37638](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37638)) +* Fixes a null pointer dereference and a heap OOB read arising from operations restoring tensors ([CVE-2021-37639](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37639)) +* Fixes an integer division by 0 in sparse reshaping ([CVE-2021-37640](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37640)) +* Fixes a division by 0 in `ResourceScatterDiv` ([CVE-2021-37642](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37642)) +* Fixes a heap OOB in `RaggedGather` ([CVE-2021-37641](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37641)) +* Fixes a `std::abort` raised from `TensorListReserve` ([CVE-2021-37644](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37644)) +* Fixes a null pointer dereference in `MatrixDiagPartOp` ([CVE-2021-37643](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37643)) +* Fixes an integer overflow due to conversion to unsigned ([CVE-2021-37645](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37645)) +* Fixes a bad allocation error in `StringNGrams` caused by integer conversion ([CVE-2021-37646](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37646)) +* Fixes a null pointer dereference in `SparseTensorSliceDataset` ([CVE-2021-37647](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37647)) +* Fixes an incorrect validation of `SaveV2` inputs ([CVE-2021-37648](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37648)) +* Fixes a null pointer dereference in `UncompressElement` ([CVE-2021-37649](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37649)) +* Fixes a segfault and a heap buffer overflow in `{Experimental,}DatasetToTFRecord` ([CVE-2021-37650](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37650)) +* Fixes a heap buffer overflow in `FractionalAvgPoolGrad` ([CVE-2021-37651](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37651)) +* Fixes a use after free in boosted trees creation ([CVE-2021-37652](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37652)) +* Fixes a division by 0 in `ResourceGather` ([CVE-2021-37653](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37653)) +* Fixes a heap OOB and a `CHECK` fail in `ResourceGather` ([CVE-2021-37654](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37654)) +* Fixes a heap OOB in `ResourceScatterUpdate` ([CVE-2021-37655](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37655)) +* Fixes an undefined behavior arising from reference binding to nullptr in `RaggedTensorToSparse` ([CVE-2021-37656](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37656)) +* Fixes an undefined behavior arising from reference binding to nullptr in `MatrixDiagV*` ops ([CVE-2021-37657](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37657)) +* Fixes an undefined behavior arising from reference binding to nullptr in `MatrixSetDiagV*` ops 
([CVE-2021-37658](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37658)) +* Fixes an undefined behavior arising from reference binding to nullptr and heap OOB in binary cwise ops ([CVE-2021-37659](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37659)) +* Fixes a division by 0 in inplace operations ([CVE-2021-37660](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37660)) +* Fixes a crash caused by integer conversion to unsigned ([CVE-2021-37661](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37661)) +* Fixes an undefined behavior arising from reference binding to nullptr in boosted trees ([CVE-2021-37662](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37662)) +* Fixes a heap OOB in boosted trees ([CVE-2021-37664](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37664)) +* Fixes vulnerabilities arising from incomplete validation in `QuantizeV2` ([CVE-2021-37663](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37663)) +* Fixes vulnerabilities arising from incomplete validation in MKL requantization ([CVE-2021-37665](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37665)) +* Fixes an undefined behavior arising from reference binding to nullptr in `RaggedTensorToVariant` ([CVE-2021-37666](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37666)) +* Fixes an undefined behavior arising from reference binding to nullptr in unicode encoding ([CVE-2021-37667](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37667)) +* Fixes an FPE in `tf.raw_ops.UnravelIndex` ([CVE-2021-37668](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37668)) +* Fixes a crash in NMS ops caused by integer conversion to unsigned ([CVE-2021-37669](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37669)) +* Fixes a heap OOB in `UpperBound` and `LowerBound` ([CVE-2021-37670](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37670)) +* Fixes an undefined behavior arising from reference binding to nullptr in map operations ([CVE-2021-37671](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37671)) +* Fixes a heap OOB in `SdcaOptimizerV2` ([CVE-2021-37672](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37672)) +* Fixes a `CHECK`-fail in `MapStage` ([CVE-2021-37673](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37673)) +* Fixes a vulnerability arising from incomplete validation in `MaxPoolGrad` ([CVE-2021-37674](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37674)) +* Fixes an undefined behavior arising from reference binding to nullptr in shape inference ([CVE-2021-37676](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37676)) +* Fixes a division by 0 in most convolution operators ([CVE-2021-37675](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37675)) +* Fixes vulnerabilities arising from missing validation in shape inference for `Dequantize` ([CVE-2021-37677](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37677)) +* Fixes an arbitrary code execution due to YAML deserialization ([CVE-2021-37678](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37678)) +* Fixes a heap OOB in nested `tf.map_fn` with `RaggedTensor`s ([CVE-2021-37679](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37679)) +* Fixes a division by zero in TFLite ([CVE-2021-37680](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37680)) +* Fixes an NPE in TFLite ([CVE-2021-37681](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37681)) +* Fixes a vulnerability arising from use of 
uninitialized value in TFLite ([CVE-2021-37682](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37682)) +* Fixes an FPE in TFLite division operations ([CVE-2021-37683](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37683)) +* Fixes an FPE in TFLite pooling operations ([CVE-2021-37684](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37684)) +* Fixes an infinite loop in TFLite ([CVE-2021-37686](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37686)) +* Fixes a heap OOB in TFLite ([CVE-2021-37685](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37685)) +* Fixes a heap OOB in TFLite's `Gather*` implementations ([CVE-2021-37687](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37687)) +* Fixes an undefined behavior arising from null pointer dereference in TFLite ([CVE-2021-37688](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37688)) +* Fixes an undefined behavior arising from null pointer dereference in TFLite MLIR optimizations ([CVE-2021-37689](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37689)) +* Fixes an FPE in LSH in TFLite ([CVE-2021-37691](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37691)) +* Fixes a segfault on strings tensors with mismatched dimensions, arising in Go code ([CVE-2021-37692](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37692)) +* Fixes a use after free and a potential segfault in shape inference functions ([CVE-2021-37690](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37690)) * Updates `curl` to `7.77.0` to handle [CVE-2021-22876](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22876), [CVE-2021-22897](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22897), [CVE-2021-22898](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22898), and [CVE-2021-22901](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-22901). # Release 2.3.3 From 429654f208d1c4070398f1dd73d4ca823ab4eba2 Mon Sep 17 00:00:00 2001 From: TensorFlow Release Automation Date: Sat, 7 Aug 2021 14:59:02 -0700 Subject: [PATCH 250/256] Update version numbers to 2.3.4 --- tensorflow/core/public/version.h | 2 +- tensorflow/tensorflow.bzl | 2 +- tensorflow/tools/pip_package/setup.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tensorflow/core/public/version.h b/tensorflow/core/public/version.h index f9f48582e3c958..efd1256f3f1d79 100644 --- a/tensorflow/core/public/version.h +++ b/tensorflow/core/public/version.h @@ -22,7 +22,7 @@ limitations under the License. // tensorflow/tools/pip_package/setup.py #define TF_MAJOR_VERSION 2 #define TF_MINOR_VERSION 3 -#define TF_PATCH_VERSION 3 +#define TF_PATCH_VERSION 4 // TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1", // "-beta", "-rc", "-rc.1") diff --git a/tensorflow/tensorflow.bzl b/tensorflow/tensorflow.bzl index da0ef93f8cef33..1e20a6713051d9 100644 --- a/tensorflow/tensorflow.bzl +++ b/tensorflow/tensorflow.bzl @@ -59,7 +59,7 @@ load( # not contain rc or alpha, only numbers. # Also update tensorflow/core/public/version.h # and tensorflow/tools/pip_package/setup.py -VERSION = "2.3.3" +VERSION = "2.3.4" VERSION_MAJOR = VERSION.split(".")[0] # Sanitize a dependency so that it works correctly from code that includes diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py index 888d81d1f80458..d944993c48fa28 100644 --- a/tensorflow/tools/pip_package/setup.py +++ b/tensorflow/tools/pip_package/setup.py @@ -49,7 +49,7 @@ # result for pip. 
# Also update tensorflow/tensorflow.bzl and # tensorflow/core/public/version.h -_VERSION = '2.3.3' +_VERSION = '2.3.4' REQUIRED_PACKAGES = [ 'absl-py >= 0.7.0', From 55a6511d4b6433b125a4e1adeada10d7c891e290 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sat, 7 Aug 2021 16:46:09 -0700 Subject: [PATCH 251/256] Fix build --- .../kernels/internal/optimized/integer_ops/pooling.h | 9 +++++---- .../lite/kernels/internal/optimized/optimized_ops.h | 8 +++++--- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/tensorflow/lite/kernels/internal/optimized/integer_ops/pooling.h b/tensorflow/lite/kernels/internal/optimized/integer_ops/pooling.h index ca94869f564d48..dfe8bd9b545fc6 100644 --- a/tensorflow/lite/kernels/internal/optimized/integer_ops/pooling.h +++ b/tensorflow/lite/kernels/internal/optimized/integer_ops/pooling.h @@ -285,14 +285,15 @@ inline bool AveragePool16(const PoolParams& params, return true; } -inline void AveragePool(const PoolParams& params, +inline bool AveragePool(const PoolParams& params, const RuntimeShape& input_shape, const int8* input_data, const RuntimeShape& output_shape, int8* output_data) { if (params.filter_height * params.filter_width > 16 * 16) { - reference_integer_ops::AveragePool(params, input_shape, input_data, - output_shape, output_data); + return reference_integer_ops::AveragePool(params, input_shape, input_data, + output_shape, output_data); } else { - AveragePool16(params, input_shape, input_data, output_shape, output_data); + return AveragePool16(params, input_shape, input_data, output_shape, + output_data); } } diff --git a/tensorflow/lite/kernels/internal/optimized/optimized_ops.h b/tensorflow/lite/kernels/internal/optimized/optimized_ops.h index b0398903512f2f..bfb98ab937262e 100644 --- a/tensorflow/lite/kernels/internal/optimized/optimized_ops.h +++ b/tensorflow/lite/kernels/internal/optimized/optimized_ops.h @@ -3563,14 +3563,16 @@ inline bool AveragePool32(const PoolParams& params, return true; } -inline void AveragePool(const PoolParams& params, +inline bool AveragePool(const PoolParams& params, const RuntimeShape& input_shape, const uint8* input_data, const RuntimeShape& output_shape, uint8* output_data) { if (params.filter_height * params.filter_width > 16 * 16) { - AveragePool32(params, input_shape, input_data, output_shape, output_data); + return AveragePool32(params, input_shape, input_data, output_shape, + output_data); } else { - AveragePool16(params, input_shape, input_data, output_shape, output_data); + return AveragePool16(params, input_shape, input_data, output_shape, + output_data); } } From 106316a9077cfabca5d54721650c9a65fef4dc6a Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sat, 7 Aug 2021 17:18:11 -0700 Subject: [PATCH 252/256] Fix build --- tensorflow/core/common_runtime/shape_refiner.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/core/common_runtime/shape_refiner.cc b/tensorflow/core/common_runtime/shape_refiner.cc index 906bd14f96c847..3c5421a9507076 100644 --- a/tensorflow/core/common_runtime/shape_refiner.cc +++ b/tensorflow/core/common_runtime/shape_refiner.cc @@ -132,7 +132,7 @@ Status InferShapesForFunctionSubNode(const Node* node, ShapeRefiner* refiner, TF_RETURN_IF_ERROR( outer_context->MakeShapeFromShapeProto(proto, &handle)); copied_shapes_and_types.push_back( - ShapeAndType(handle, shape_and_type.dtype, shape_and_type.specialized_type)); + ShapeAndType(handle, shape_and_type.dtype)); } outer_context->set_output_handle_shapes_and_types( From 
21391b712693b538254a4605fabc863747296b29 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sun, 8 Aug 2021 13:55:19 -0700 Subject: [PATCH 253/256] Fix compile error, missing implicit conversion --- tensorflow/core/kernels/sparse_dense_binary_op_shared.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc b/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc index dda05dbc3b8cb2..edc238faa98db9 100644 --- a/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc +++ b/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc @@ -147,7 +147,7 @@ class SparseDenseBinaryOpShared : public OpKernel { "dense side with broadcasted shape")); \ dense_gathered_flat(i) = rhs_ref.coeff(idx); \ if (op_is_div) { \ - OP_REQUIRES(ctx, dense_gathered_flat(i) != 0, \ + OP_REQUIRES(ctx, dense_gathered_flat(i) != T(0), \ errors::InvalidArgument( \ "SparseDenseCwiseDiv cannot divide by zero," \ "but input dense tensor contains zero ")); \ From 935b8e88a682d1aa53395aae29d27372f25a99a2 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sun, 8 Aug 2021 14:03:44 -0700 Subject: [PATCH 254/256] Fix compile error, no function call needed --- tensorflow/core/kernels/ragged_tensor_from_variant_op.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc b/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc index 17dbc1d117f24c..1d9ddfefdae6e5 100644 --- a/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc +++ b/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc @@ -200,7 +200,7 @@ Status NestedStackRaggedTensors( for (int i = 0; i < ragged_components.size(); i++) { // Check that the flat_values tensor shape is compatible. 
- TensorShape value_shape = ragged_components[i].values().shape(); + TensorShape value_shape = ragged_components[i].values.shape(); value_shape.RemoveDim(0); if (value_shape != expected_value_shape) { return errors::InvalidArgument( From 18ae67a49533523b70f201e6c6aee70b113631aa Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sun, 8 Aug 2021 13:48:17 -0700 Subject: [PATCH 255/256] Disable broken/flaky test --- tensorflow/python/kernel_tests/distributions/BUILD | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/python/kernel_tests/distributions/BUILD b/tensorflow/python/kernel_tests/distributions/BUILD index 549d7b4c98ece1..3325c853cf23fd 100644 --- a/tensorflow/python/kernel_tests/distributions/BUILD +++ b/tensorflow/python/kernel_tests/distributions/BUILD @@ -60,6 +60,9 @@ cuda_py_test( name = "beta_test", size = "small", srcs = ["beta_test.py"], + tags = [ + "no_oss", + ], deps = [ "//tensorflow/python:client", "//tensorflow/python:client_testlib", From 7462dcaae1e8cfe1dfd0c62dd6083f9749a9d827 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 11 Aug 2021 15:06:44 -0700 Subject: [PATCH 256/256] Add cleandep --- tensorflow/workspace.bzl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl index 87af1667fd1a4b..c38c80cc30c93f 100755 --- a/tensorflow/workspace.bzl +++ b/tensorflow/workspace.bzl @@ -671,10 +671,10 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""): tf_http_archive( name = "curl", - build_file = "//third_party:curl.BUILD", + build_file = clean_dep("//third_party:curl.BUILD"), sha256 = "b0a3428acb60fa59044c4d0baae4e4fc09ae9af1d8a3aa84b2e3fbcd99841f77", strip_prefix = "curl-7.77.0", - system_build_file = "//third_party/systemlibs:curl.BUILD", + system_build_file = clean_dep("//third_party/systemlibs:curl.BUILD"), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/curl.haxx.se/download/curl-7.77.0.tar.gz", "https://curl.haxx.se/download/curl-7.77.0.tar.gz",