From 608883d2186f6bfba2f85da838f8030fe71434f8 Mon Sep 17 00:00:00 2001 From: Austin Anderson Date: Mon, 27 Jan 2020 12:48:36 -0800 Subject: [PATCH 001/243] Remove 2.1.x Python 2 docker images from spec --- .../dockerfiles/dockerfiles/cpu-jupyter.Dockerfile | 12 ++++++------ .../dockerfiles/devel-cpu-jupyter.Dockerfile | 12 ++++++------ .../dockerfiles/dockerfiles/devel-cpu.Dockerfile | 1 + .../dockerfiles/devel-gpu-jupyter.Dockerfile | 12 ++++++------ .../dockerfiles/dockerfiles/gpu-jupyter.Dockerfile | 12 ++++++------ .../ppc64le/cpu-ppc64le-jupyter.Dockerfile | 12 ++++++------ .../ppc64le/devel-cpu-ppc64le-jupyter.Dockerfile | 12 ++++++------ .../ppc64le/devel-gpu-ppc64le-jupyter.Dockerfile | 12 ++++++------ .../ppc64le/gpu-ppc64le-jupyter.Dockerfile | 12 ++++++------ .../dockerfiles/partials/jupyter.partial.Dockerfile | 12 ++++++------ tensorflow/tools/dockerfiles/spec.yml | 2 +- 11 files changed, 56 insertions(+), 55 deletions(-) diff --git a/tensorflow/tools/dockerfiles/dockerfiles/cpu-jupyter.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/cpu-jupyter.Dockerfile index 46443bb6946fbe..12987d1a8cfcbf 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/cpu-jupyter.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/cpu-jupyter.Dockerfile @@ -66,12 +66,12 @@ RUN mkdir -p /tf/tensorflow-tutorials && chmod -R a+rwx /tf/ RUN mkdir /.local && chmod a+rwx /.local RUN apt-get install -y --no-install-recommends wget WORKDIR /tf/tensorflow-tutorials -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/classification.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/overfit_and_underfit.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/regression.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/save_and_load.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/text_classification.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/text_classification_with_hub.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/classification.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/overfit_and_underfit.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/regression.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/save_and_load.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/text_classification.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/text_classification_with_hub.ipynb COPY readme-for-jupyter.md README.md RUN apt-get autoremove -y && apt-get remove -y wget WORKDIR /tf diff --git a/tensorflow/tools/dockerfiles/dockerfiles/devel-cpu-jupyter.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/devel-cpu-jupyter.Dockerfile index 23e4458689a477..7eddaccfa97c23 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/devel-cpu-jupyter.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/devel-cpu-jupyter.Dockerfile @@ -119,12 +119,12 @@ RUN mkdir -p /tf/tensorflow-tutorials && chmod -R a+rwx /tf/ RUN mkdir /.local && chmod a+rwx /.local RUN apt-get install -y --no-install-recommends wget WORKDIR /tf/tensorflow-tutorials -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/classification.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/overfit_and_underfit.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/regression.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/save_and_load.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/text_classification.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/text_classification_with_hub.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/classification.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/overfit_and_underfit.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/regression.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/save_and_load.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/text_classification.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/text_classification_with_hub.ipynb COPY readme-for-jupyter.md README.md RUN apt-get autoremove -y && apt-get remove -y wget WORKDIR /tf diff --git a/tensorflow/tools/dockerfiles/dockerfiles/devel-cpu.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/devel-cpu.Dockerfile index ec75054edc8925..4bc42af7ad09f2 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/devel-cpu.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/devel-cpu.Dockerfile @@ -109,3 +109,4 @@ RUN mkdir /bazel && \ rm -f /bazel/installer.sh COPY bashrc /etc/bash.bashrc +RUN chmod a+rwx /etc/bash.bashrc diff --git a/tensorflow/tools/dockerfiles/dockerfiles/devel-gpu-jupyter.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/devel-gpu-jupyter.Dockerfile index ddb10a08a57a22..25537ad963a6b0 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/devel-gpu-jupyter.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/devel-gpu-jupyter.Dockerfile @@ -152,12 +152,12 @@ RUN mkdir -p /tf/tensorflow-tutorials && chmod -R a+rwx /tf/ RUN mkdir /.local && chmod a+rwx /.local RUN apt-get install -y --no-install-recommends wget WORKDIR /tf/tensorflow-tutorials -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/classification.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/overfit_and_underfit.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/regression.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/save_and_load.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/text_classification.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/text_classification_with_hub.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/classification.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/overfit_and_underfit.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/regression.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/save_and_load.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/text_classification.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/text_classification_with_hub.ipynb COPY readme-for-jupyter.md README.md RUN apt-get autoremove -y && apt-get remove -y wget WORKDIR /tf diff --git a/tensorflow/tools/dockerfiles/dockerfiles/gpu-jupyter.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/gpu-jupyter.Dockerfile index fe2045bf1934f2..19409e27f99614 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/gpu-jupyter.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/gpu-jupyter.Dockerfile @@ -105,12 +105,12 @@ RUN mkdir -p /tf/tensorflow-tutorials && chmod -R a+rwx /tf/ RUN mkdir /.local && chmod a+rwx /.local RUN apt-get install -y --no-install-recommends wget WORKDIR /tf/tensorflow-tutorials -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/classification.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/overfit_and_underfit.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/regression.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/save_and_load.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/text_classification.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/text_classification_with_hub.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/classification.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/overfit_and_underfit.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/regression.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/save_and_load.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/text_classification.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/text_classification_with_hub.ipynb COPY readme-for-jupyter.md README.md RUN apt-get autoremove -y && apt-get remove -y wget WORKDIR /tf diff --git a/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/cpu-ppc64le-jupyter.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/cpu-ppc64le-jupyter.Dockerfile index 907d6af7b3c42b..2eec5b14b6b523 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/cpu-ppc64le-jupyter.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/cpu-ppc64le-jupyter.Dockerfile @@ -84,12 +84,12 @@ RUN mkdir -p /tf/tensorflow-tutorials && chmod -R a+rwx /tf/ RUN mkdir /.local && chmod a+rwx /.local RUN apt-get install -y --no-install-recommends wget WORKDIR /tf/tensorflow-tutorials -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/classification.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/overfit_and_underfit.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/regression.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/save_and_load.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/text_classification.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/text_classification_with_hub.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/classification.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/overfit_and_underfit.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/regression.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/save_and_load.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/text_classification.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/text_classification_with_hub.ipynb COPY readme-for-jupyter.md README.md RUN apt-get autoremove -y && apt-get remove -y wget WORKDIR /tf diff --git a/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/devel-cpu-ppc64le-jupyter.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/devel-cpu-ppc64le-jupyter.Dockerfile index b85f157e5e4783..f07d15bcf9da7d 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/devel-cpu-ppc64le-jupyter.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/devel-cpu-ppc64le-jupyter.Dockerfile @@ -120,12 +120,12 @@ RUN mkdir -p /tf/tensorflow-tutorials && chmod -R a+rwx /tf/ RUN mkdir /.local && chmod a+rwx /.local RUN apt-get install -y --no-install-recommends wget WORKDIR /tf/tensorflow-tutorials -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/classification.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/overfit_and_underfit.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/regression.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/save_and_load.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/text_classification.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/text_classification_with_hub.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/classification.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/overfit_and_underfit.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/regression.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/save_and_load.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/text_classification.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/text_classification_with_hub.ipynb COPY readme-for-jupyter.md README.md RUN apt-get autoremove -y && apt-get remove -y wget WORKDIR /tf diff --git a/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/devel-gpu-ppc64le-jupyter.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/devel-gpu-ppc64le-jupyter.Dockerfile index 49110036a1a67e..164f20c7b84dc2 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/devel-gpu-ppc64le-jupyter.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/devel-gpu-ppc64le-jupyter.Dockerfile @@ -153,12 +153,12 @@ RUN mkdir -p /tf/tensorflow-tutorials && chmod -R a+rwx /tf/ RUN mkdir /.local && chmod a+rwx /.local RUN apt-get install -y --no-install-recommends wget WORKDIR /tf/tensorflow-tutorials -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/classification.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/overfit_and_underfit.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/regression.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/save_and_load.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/text_classification.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/text_classification_with_hub.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/classification.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/overfit_and_underfit.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/regression.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/save_and_load.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/text_classification.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/text_classification_with_hub.ipynb COPY readme-for-jupyter.md README.md RUN apt-get autoremove -y && apt-get remove -y wget WORKDIR /tf diff --git a/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/gpu-ppc64le-jupyter.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/gpu-ppc64le-jupyter.Dockerfile index 71a1b79a3db70a..cc69d18a810bb6 100644 --- a/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/gpu-ppc64le-jupyter.Dockerfile +++ b/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/gpu-ppc64le-jupyter.Dockerfile @@ -123,12 +123,12 @@ RUN mkdir -p /tf/tensorflow-tutorials && chmod -R a+rwx /tf/ RUN mkdir /.local && chmod a+rwx /.local RUN apt-get install -y --no-install-recommends wget WORKDIR /tf/tensorflow-tutorials -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/classification.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/overfit_and_underfit.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/regression.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/save_and_load.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/text_classification.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/text_classification_with_hub.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/classification.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/overfit_and_underfit.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/regression.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/save_and_load.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/text_classification.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/text_classification_with_hub.ipynb COPY readme-for-jupyter.md README.md RUN apt-get autoremove -y && apt-get remove -y wget WORKDIR /tf diff --git a/tensorflow/tools/dockerfiles/partials/jupyter.partial.Dockerfile b/tensorflow/tools/dockerfiles/partials/jupyter.partial.Dockerfile index 8290021a1ac345..ee1d276d97133d 100644 --- a/tensorflow/tools/dockerfiles/partials/jupyter.partial.Dockerfile +++ b/tensorflow/tools/dockerfiles/partials/jupyter.partial.Dockerfile @@ -6,12 +6,12 @@ RUN mkdir -p /tf/tensorflow-tutorials && chmod -R a+rwx /tf/ RUN mkdir /.local && chmod a+rwx /.local RUN apt-get install -y --no-install-recommends wget WORKDIR /tf/tensorflow-tutorials -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/classification.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/overfit_and_underfit.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/regression.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/save_and_load.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/text_classification.ipynb -RUN wget https://raw.githubusercontent.com/tensorflow/docs/master/site/en/tutorials/keras/text_classification_with_hub.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/classification.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/overfit_and_underfit.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/regression.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/save_and_load.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/text_classification.ipynb +RUN wget https://raw.githubusercontent.com/tensorflow/docs/r2.1/site/en/tutorials/keras/text_classification_with_hub.ipynb COPY readme-for-jupyter.md README.md RUN apt-get autoremove -y && apt-get remove -y wget WORKDIR /tf diff --git a/tensorflow/tools/dockerfiles/spec.yml b/tensorflow/tools/dockerfiles/spec.yml index 79fb7785d8f7a4..d83b0f8f7de74e 100644 --- a/tensorflow/tools/dockerfiles/spec.yml +++ b/tensorflow/tools/dockerfiles/spec.yml @@ -63,7 +63,7 @@ slice_sets: py: - add_to_name: "" args: - - USE_PYTHON_3_NOT_2= + - USE_PYTHON_3_NOT_2=1 - add_to_name: "-py3" args: - USE_PYTHON_3_NOT_2=1 From dcef4400b77b10e78f24f8cd62495fccd363c230 Mon Sep 17 00:00:00 2001 From: YoungSeok Yoon Date: Wed, 4 Dec 2019 21:13:39 -0800 Subject: [PATCH 002/243] Fix the eigen archive download path PiperOrigin-RevId: 283899224 Change-Id: Ibd2ca25f9339de143e17569d9296c7f23ae4135c --- tensorflow/lite/tools/make/download_dependencies.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/lite/tools/make/download_dependencies.sh b/tensorflow/lite/tools/make/download_dependencies.sh index 4b4df1e9f9d1b1..cea13f8d9dda70 100755 --- a/tensorflow/lite/tools/make/download_dependencies.sh +++ b/tensorflow/lite/tools/make/download_dependencies.sh @@ -29,7 +29,7 @@ if [ ! -f $BZL_FILE_PATH ]; then exit 1; fi -EIGEN_URL="$(grep -o 'http.*bitbucket.org/eigen/eigen/get/.*tar\.gz' "${BZL_FILE_PATH}" | grep -v mirror.tensorflow | head -n1)" +EIGEN_URL="$(grep -o 'http.*github.com/eigenteam/eigen-git-mirror/archive/.*tar\.gz' "${BZL_FILE_PATH}" | grep -v mirror.tensorflow | head -n1)" GEMMLOWP_URL="$(grep -o 'https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/gemmlowp/.*zip' "${BZL_FILE_PATH}" | head -n1)" GOOGLETEST_URL="https://github.com/google/googletest/archive/release-1.8.0.tar.gz" ABSL_URL="$(grep -o 'https://github.com/abseil/abseil-cpp/.*tar.gz' "${BZL_FILE_PATH}" | head -n1)" From f270180a6caa8693f2b2888ac7e6b8e69c4feaa8 Mon Sep 17 00:00:00 2001 From: YoungSeok Yoon Date: Thu, 5 Dec 2019 17:08:49 -0800 Subject: [PATCH 003/243] Fix the eigen archive download path PiperOrigin-RevId: 284086540 Change-Id: Id0b6a9d71119fc6487bc94defbf4e8f4ccbda94b --- tensorflow/lite/tools/make/download_dependencies.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/lite/tools/make/download_dependencies.sh b/tensorflow/lite/tools/make/download_dependencies.sh index cea13f8d9dda70..25e7d6b7894993 100755 --- a/tensorflow/lite/tools/make/download_dependencies.sh +++ b/tensorflow/lite/tools/make/download_dependencies.sh @@ -29,7 +29,7 @@ if [ ! -f $BZL_FILE_PATH ]; then exit 1; fi -EIGEN_URL="$(grep -o 'http.*github.com/eigenteam/eigen-git-mirror/archive/.*tar\.gz' "${BZL_FILE_PATH}" | grep -v mirror.tensorflow | head -n1)" +EIGEN_URL="$(grep -o 'https.*gitlab.com/libeigen/eigen/-/archive/.*tar\.gz' "${BZL_FILE_PATH}" | grep -v mirror.tensorflow | head -n1)" GEMMLOWP_URL="$(grep -o 'https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/gemmlowp/.*zip' "${BZL_FILE_PATH}" | head -n1)" GOOGLETEST_URL="https://github.com/google/googletest/archive/release-1.8.0.tar.gz" ABSL_URL="$(grep -o 'https://github.com/abseil/abseil-cpp/.*tar.gz' "${BZL_FILE_PATH}" | head -n1)" From ee59a415adbfa544905e5809404425ba7b346a44 Mon Sep 17 00:00:00 2001 From: Clayne Robison Date: Fri, 21 Feb 2020 08:15:14 -0800 Subject: [PATCH 004/243] [Intel Mkl] Upgrade Sqlite3 to fix CVE-2019-19880 CVE-2019-19244 and CVE-2019-19645 --- tensorflow/workspace.bzl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl index 77e605fe76a6aa..fa467fc23ca128 100755 --- a/tensorflow/workspace.bzl +++ b/tensorflow/workspace.bzl @@ -275,12 +275,12 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""): tf_http_archive( name = "org_sqlite", build_file = clean_dep("//third_party:sqlite.BUILD"), - sha256 = "adf051d4c10781ea5cfabbbc4a2577b6ceca68590d23b58b8260a8e24cc5f081", - strip_prefix = "sqlite-amalgamation-3300100", + sha256 = "f3c79bc9f4162d0b06fa9fe09ee6ccd23bb99ce310b792c5145f87fbcc30efca", + strip_prefix = "sqlite-amalgamation-3310100", system_build_file = clean_dep("//third_party/systemlibs:sqlite.BUILD"), urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/www.sqlite.org/2019/sqlite-amalgamation-3300100.zip", - "https://www.sqlite.org/2019/sqlite-amalgamation-3300100.zip", + "https://storage.googleapis.com/mirror.tensorflow.org/www.sqlite.org/2020/sqlite-amalgamation-3310100.zip", + "https://www.sqlite.org/2020/sqlite-amalgamation-3310100.zip", ], ) From 35ae676a1e7d3b47231d356e361a39ebfdf03572 Mon Sep 17 00:00:00 2001 From: hudsonrong Date: Sat, 21 Mar 2020 15:57:04 +0800 Subject: [PATCH 005/243] fix a compile error of r2.1 caused by skylib address invalid. --- WORKSPACE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/WORKSPACE b/WORKSPACE index babb14b509e5c0..bcdd4f46e2f527 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -67,7 +67,7 @@ http_archive( http_archive( name = "bazel_skylib", sha256 = "1dde365491125a3db70731e25658dfdd3bc5dbdfd11b840b3e987ecf043c7ca0", - urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/0.9.0/bazel-skylib.0.9.0.tar.gz"], + urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/0.9.0/bazel_skylib-0.9.0.tar.gz"], ) # https://github.com/bazelbuild/bazel-skylib/releases http_archive( name = "com_github_apple_swift_swift_protobuf", From c902d625a7cb2ee5ab191374674e8c0ca6021a54 Mon Sep 17 00:00:00 2001 From: TensorFlower Gardener Date: Sun, 5 Apr 2020 18:40:56 -0700 Subject: [PATCH 006/243] Merge pull request #38200 from Intel-tensorflow:chuanqiw/curl_upgrade PiperOrigin-RevId: 304938718 Change-Id: I408e3b1d9ce1badfb08666ddac6400bae2c97936 --- tensorflow/workspace.bzl | 8 ++++---- third_party/curl.BUILD | 8 +++++--- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl index 77e605fe76a6aa..27cdc142324028 100755 --- a/tensorflow/workspace.bzl +++ b/tensorflow/workspace.bzl @@ -502,12 +502,12 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""): tf_http_archive( name = "curl", build_file = clean_dep("//third_party:curl.BUILD"), - sha256 = "d0393da38ac74ffac67313072d7fe75b1fa1010eb5987f63f349b024a36b7ffb", - strip_prefix = "curl-7.66.0", + sha256 = "01ae0c123dee45b01bbaef94c0bc00ed2aec89cb2ee0fd598e0d302a6b5e0a98", + strip_prefix = "curl-7.69.1", system_build_file = clean_dep("//third_party/systemlibs:curl.BUILD"), urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/curl.haxx.se/download/curl-7.66.0.tar.gz", - "https://curl.haxx.se/download/curl-7.66.0.tar.gz", + "https://storage.googleapis.com/mirror.tensorflow.org/curl.haxx.se/download/curl-7.69.1.tar.gz", + "https://curl.haxx.se/download/curl-7.69.1.tar.gz", ], ) diff --git a/third_party/curl.BUILD b/third_party/curl.BUILD index c28dd15461624e..e07450fddc53ed 100644 --- a/third_party/curl.BUILD +++ b/third_party/curl.BUILD @@ -161,6 +161,8 @@ cc_library( "lib/quic.h", "lib/rand.c", "lib/rand.h", + "lib/rename.h", + "lib/rename.c", "lib/rtsp.c", "lib/rtsp.h", "lib/security.c", @@ -181,13 +183,13 @@ cc_library( "lib/smb.h", "lib/smtp.h", "lib/sockaddr.h", + "lib/socketpair.h", "lib/socks.c", "lib/socks.h", "lib/speedcheck.c", "lib/speedcheck.h", "lib/splay.c", "lib/splay.h", - "lib/ssh.h", "lib/strcase.c", "lib/strcase.h", "lib/strdup.c", @@ -217,13 +219,13 @@ cc_library( "lib/vauth/vauth.c", "lib/vauth/vauth.h", "lib/version.c", + "lib/vssh/ssh.h", + "lib/vtls/bearssl.h", "lib/vtls/gskit.h", "lib/vtls/gtls.h", "lib/vtls/mbedtls.h", "lib/vtls/nssg.h", "lib/vtls/openssl.h", - "lib/vtls/polarssl.h", - "lib/vtls/polarssl_threadlock.h", "lib/vtls/schannel.h", "lib/vtls/vtls.c", "lib/vtls/vtls.h", From c4cfba938d7a7decd9e8ddaaf5d7de5a8cca641c Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 7 Apr 2020 15:23:47 -0700 Subject: [PATCH 007/243] Add `socketpair.c` to curl buildable files to fix Windows builds. Follow-up from bfb0e49d5844d835ab757a1709a1bcfc216d78f8 PiperOrigin-RevId: 305351839 Change-Id: Ic7a8b4942394d6d030e93b3ad9179e0bffdc434c --- third_party/curl.BUILD | 1 + 1 file changed, 1 insertion(+) diff --git a/third_party/curl.BUILD b/third_party/curl.BUILD index e07450fddc53ed..2813b980d47ca4 100644 --- a/third_party/curl.BUILD +++ b/third_party/curl.BUILD @@ -184,6 +184,7 @@ cc_library( "lib/smtp.h", "lib/sockaddr.h", "lib/socketpair.h", + "lib/socketpair.c", "lib/socks.c", "lib/socks.h", "lib/speedcheck.c", From ce0af3e0688af895b6c92521a22676105fdf4ed7 Mon Sep 17 00:00:00 2001 From: TensorFlower Gardener Date: Thu, 9 Apr 2020 13:01:15 -0700 Subject: [PATCH 008/243] Merge pull request #38401 from yongtang:libjpeg-turbo PiperOrigin-RevId: 305742301 Change-Id: I50968c0868f70a5009f018d3132cba77c0900158 --- third_party/jpeg/BUILD.bazel | 30 +++++++++++++++--------------- third_party/jpeg/workspace.bzl | 8 ++++---- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/third_party/jpeg/BUILD.bazel b/third_party/jpeg/BUILD.bazel index 90e45237c7d488..269e5254c86c96 100644 --- a/third_party/jpeg/BUILD.bazel +++ b/third_party/jpeg/BUILD.bazel @@ -516,30 +516,30 @@ JCONFIG_NOWIN_COMMON_SUBSTITUTIONS = { "@JPEG_LIB_VERSION@": "62", "@VERSION@": "2.0.0", "@LIBJPEG_TURBO_VERSION_NUMBER@": "2000000", - "#cmakedefine C_ARITH_CODING_SUPPORTED": "#define C_ARITH_CODING_SUPPORTED", - "#cmakedefine D_ARITH_CODING_SUPPORTED": "#define D_ARITH_CODING_SUPPORTED", - "#cmakedefine MEM_SRCDST_SUPPORTED": "#define MEM_SRCDST_SUPPORTED", + "#cmakedefine C_ARITH_CODING_SUPPORTED 1": "#define C_ARITH_CODING_SUPPORTED 1", + "#cmakedefine D_ARITH_CODING_SUPPORTED 1": "#define D_ARITH_CODING_SUPPORTED 1", + "#cmakedefine MEM_SRCDST_SUPPORTED 1": "#define MEM_SRCDST_SUPPORTED 1", "@BITS_IN_JSAMPLE@": "8", - "#cmakedefine HAVE_LOCALE_H": "#define HAVE_LOCALE_H 1", - "#cmakedefine HAVE_STDDEF_H": "#define HAVE_STDDEF_H 1", - "#cmakedefine HAVE_STDLIB_H": "#define HAVE_STDLIB_H 1", - "#cmakedefine NEED_SYS_TYPES_H": "#define NEED_SYS_TYPES_H", - "#cmakedefine NEED_BSD_STRINGS": "", - "#cmakedefine HAVE_UNSIGNED_CHAR": "#define HAVE_UNSIGNED_CHAR 1", - "#cmakedefine HAVE_UNSIGNED_SHORT": "#define HAVE_UNSIGNED_SHORT 1", - "#cmakedefine INCOMPLETE_TYPES_BROKEN": "", - "#cmakedefine RIGHT_SHIFT_IS_UNSIGNED": "", - "#cmakedefine __CHAR_UNSIGNED__": "", + "#cmakedefine HAVE_LOCALE_H 1": "#define HAVE_LOCALE_H 1", + "#cmakedefine HAVE_STDDEF_H 1": "#define HAVE_STDDEF_H 1", + "#cmakedefine HAVE_STDLIB_H 1": "#define HAVE_STDLIB_H 1", + "#cmakedefine NEED_SYS_TYPES_H 1": "#define NEED_SYS_TYPES_H 1", + "#cmakedefine NEED_BSD_STRINGS 1": "", + "#cmakedefine HAVE_UNSIGNED_CHAR 1": "#define HAVE_UNSIGNED_CHAR 1", + "#cmakedefine HAVE_UNSIGNED_SHORT 1": "#define HAVE_UNSIGNED_SHORT 1", + "#cmakedefine INCOMPLETE_TYPES_BROKEN 1": "", + "#cmakedefine RIGHT_SHIFT_IS_UNSIGNED 1": "", + "#cmakedefine __CHAR_UNSIGNED__ 1": "", "#undef const": "", "#undef size_t": "", } JCONFIG_NOWIN_SIMD_SUBSTITUTIONS = { - "#cmakedefine WITH_SIMD": "#define WITH_SIMD", + "#cmakedefine WITH_SIMD 1": "#define WITH_SIMD 1", } JCONFIG_NOWIN_NOSIMD_SUBSTITUTIONS = { - "#cmakedefine WITH_SIMD": "", + "#cmakedefine WITH_SIMD 1": "", } JCONFIG_NOWIN_SIMD_SUBSTITUTIONS.update(JCONFIG_NOWIN_COMMON_SUBSTITUTIONS) diff --git a/third_party/jpeg/workspace.bzl b/third_party/jpeg/workspace.bzl index e2137ba949feba..c458ff12ba8248 100644 --- a/third_party/jpeg/workspace.bzl +++ b/third_party/jpeg/workspace.bzl @@ -6,11 +6,11 @@ def repo(): third_party_http_archive( name = "libjpeg_turbo", urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/github.com/libjpeg-turbo/libjpeg-turbo/archive/2.0.0.tar.gz", - "https://github.com/libjpeg-turbo/libjpeg-turbo/archive/2.0.0.tar.gz", + "https://storage.googleapis.com/mirror.tensorflow.org/github.com/libjpeg-turbo/libjpeg-turbo/archive/2.0.4.tar.gz", + "https://github.com/libjpeg-turbo/libjpeg-turbo/archive/2.0.4.tar.gz", ], - sha256 = "f892fff427ab3adffc289363eac26d197ce3ccacefe5f5822377348a8166069b", - strip_prefix = "libjpeg-turbo-2.0.0", + sha256 = "7777c3c19762940cff42b3ba4d7cd5c52d1671b39a79532050c85efb99079064", + strip_prefix = "libjpeg-turbo-2.0.4", build_file = "//third_party/jpeg:BUILD.bazel", system_build_file = "//third_party/jpeg:BUILD.system", ) From fb3f4f1313c472ee73732b4254c17ab1b4567540 Mon Sep 17 00:00:00 2001 From: sunway513 Date: Fri, 10 Apr 2020 18:20:37 +0000 Subject: [PATCH 009/243] Update tensorflow 2.1 release branch ROCm path to build with hipclang --- tensorflow/core/kernels/conv_2d_gpu.h | 2 +- tensorflow/core/lib/bfloat16/bfloat16.h | 4 ++-- tensorflow/tools/ci_build/Dockerfile.rocm | 3 +-- .../crosstool/clang/bin/crosstool_wrapper_driver_rocm.tpl | 7 +++++++ third_party/gpus/rocm_configure.bzl | 7 ++++++- 5 files changed, 17 insertions(+), 6 deletions(-) diff --git a/tensorflow/core/kernels/conv_2d_gpu.h b/tensorflow/core/kernels/conv_2d_gpu.h index 22d7f939686a1b..31abe9dfead8ec 100644 --- a/tensorflow/core/kernels/conv_2d_gpu.h +++ b/tensorflow/core/kernels/conv_2d_gpu.h @@ -236,7 +236,7 @@ __global__ void SwapDimension1And2InTensor3UsingTiles( // One extra line in the inner dimension to avoid share memory bank conflict. // This is to mimic the following, but no constructor of T can be invoked. // __shared__ T shared_memory_tile[TileSizeI][TileSizeJ + 1]; -#if GOOGLE_CUDA +#if GOOGLE_CUDA || TENSORFLOW_COMPILER_IS_HIP_CLANG __shared__ __align__( alignof(T)) char shared_mem_raw[TileSizeI * (TileSizeJ + 1) * sizeof(T)]; typedef T(*SharedMemoryTile)[TileSizeJ + 1]; diff --git a/tensorflow/core/lib/bfloat16/bfloat16.h b/tensorflow/core/lib/bfloat16/bfloat16.h index a133f7e0f17b64..f8a94e346f6d31 100644 --- a/tensorflow/core/lib/bfloat16/bfloat16.h +++ b/tensorflow/core/lib/bfloat16/bfloat16.h @@ -21,8 +21,8 @@ limitations under the License. #include "tensorflow/core/platform/byte_order.h" -#ifdef __CUDACC__ -// All functions callable from CUDA code must be qualified with __device__ +#if defined(__CUDACC__) || (defined(__HIPCC__) && defined(__HIP__)) +// All functions callable from CUDA and hipclang code must be qualified with __device__ #define B16_DEVICE_FUNC __host__ __device__ #else diff --git a/tensorflow/tools/ci_build/Dockerfile.rocm b/tensorflow/tools/ci_build/Dockerfile.rocm index a083bc6debd9e6..70029d2a9a90cc 100644 --- a/tensorflow/tools/ci_build/Dockerfile.rocm +++ b/tensorflow/tools/ci_build/Dockerfile.rocm @@ -58,8 +58,7 @@ RUN apt-get update --allow-insecure-repositories && DEBIAN_FRONTEND=noninteracti RUN apt-get update --allow-insecure-repositories && \ DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \ rocm-dev rocm-libs hipcub rocm-utils rocm-cmake \ - rocfft miopen-hip miopengemm rocblas hipblas rocrand rccl \ - rocm-profiler cxlactivitylogger && \ + rocfft miopen-hip miopengemm rocblas hipblas rocrand rccl && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* diff --git a/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_rocm.tpl b/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_rocm.tpl index 8a94afbfde1f34..f5ac7b39dfdb9a 100755 --- a/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_rocm.tpl +++ b/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_rocm.tpl @@ -173,6 +173,13 @@ def InvokeHipcc(argv, log=False): out = ' -o ' + out_file[0] hipccopts = ' ' + # In hip-clang environment, we need to make sure that hip header is included + # before some standard math header like is included in any source. + # Otherwise, we get build error. + # Also we need to retain warning about uninitialised shared variable as + # warning only, even when -Werror option is specified. + if HIPCC_IS_HIPCLANG: + hipccopts += ' --include=hip/hip_runtime.h -Wno-error=cuda-shared-init ' hipccopts += ' ' + hipcc_compiler_options # Use -fno-gpu-rdc by default for early GPU kernel finalization # This flag would trigger GPU kernels be generated at compile time, instead diff --git a/third_party/gpus/rocm_configure.bzl b/third_party/gpus/rocm_configure.bzl index 7a02bb4a280a34..8a8728fa4f7f16 100644 --- a/third_party/gpus/rocm_configure.bzl +++ b/third_party/gpus/rocm_configure.bzl @@ -203,6 +203,7 @@ def _rocm_include_path(repository_ctx, rocm_config): inc_dirs.append("/opt/rocm/llvm/lib/clang/8.0/include") inc_dirs.append("/opt/rocm/llvm/lib/clang/9.0.0/include") inc_dirs.append("/opt/rocm/llvm/lib/clang/10.0.0/include") + inc_dirs.append("/opt/rocm/llvm/lib/clang/11.0.0/include") # Add rocrand and hiprand headers inc_dirs.append("/opt/rocm/rocrand/include") @@ -237,6 +238,10 @@ def _rocm_include_path(repository_ctx, rocm_config): inc_dirs.append("/opt/rocm/hcc/compiler/lib/clang/10.0.0/include/") inc_dirs.append("/opt/rocm/hcc/lib/clang/10.0.0/include") + # Support hcc based off clang 11.0.0, included in ROCm3.1 + inc_dirs.append("/opt/rocm/hcc/compiler/lib/clang/11.0.0/include/") + inc_dirs.append("/opt/rocm/hcc/lib/clang/11.0.0/include") + return inc_dirs def _enable_rocm(repository_ctx): @@ -323,7 +328,7 @@ def _hipcc_is_hipclang(repository_ctx): ["grep", "HIP_COMPILER=clang", "/opt/rocm/hip/lib/.hipInfo"], empty_stdout_fine = True, ) - result = grep_result.stdout + result = grep_result.stdout.strip() if result == "HIP_COMPILER=clang": return "True" return "False" From c3101204da30477cf76374b2ca4e466342eaf7b1 Mon Sep 17 00:00:00 2001 From: Shane Smiskol Date: Fri, 6 Dec 2019 11:52:25 -0600 Subject: [PATCH 010/243] Fix dynamic display for PyCharm --- tensorflow/python/keras/utils/generic_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tensorflow/python/keras/utils/generic_utils.py b/tensorflow/python/keras/utils/generic_utils.py index 8ff27a38d77294..12dff66dffa550 100644 --- a/tensorflow/python/keras/utils/generic_utils.py +++ b/tensorflow/python/keras/utils/generic_utils.py @@ -453,7 +453,8 @@ def __init__(self, target, width=30, verbose=1, interval=0.05, self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()) or 'ipykernel' in sys.modules or - 'posix' in sys.modules) + 'posix' in sys.modules or + 'PYCHARM_HOSTED' in os.environ) self._total_width = 0 self._seen_so_far = 0 # We use a dict + list to avoid garbage collection From e516e79186350c9ef94c2cd508b68419da2b3e08 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 5 May 2020 08:57:14 -0700 Subject: [PATCH 011/243] Increase Apache Spark version to 2.4.5 to handle GitHub Security Alert Handles CVE-2019-10099, CVE-2018-17190, CVE-2018-11770. To be cherrypicked on r1.15, r2.0, r2.1 and r2.2 branches PiperOrigin-RevId: 309955549 Change-Id: I5ee68fdd3270534066487be67232c1abc687f968 --- tensorflow/java/maven/spark-tensorflow-connector/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/java/maven/spark-tensorflow-connector/pom.xml b/tensorflow/java/maven/spark-tensorflow-connector/pom.xml index 727f18d8b6d8f7..f40090ac45d6d9 100644 --- a/tensorflow/java/maven/spark-tensorflow-connector/pom.xml +++ b/tensorflow/java/maven/spark-tensorflow-connector/pom.xml @@ -33,7 +33,7 @@ 2.2.6 3.0 1.8 - 2.3.1 + 2.4.5 2.7.3 4.11 From e78e17b524ffb68427c8586c60b3c63924ac85a8 Mon Sep 17 00:00:00 2001 From: TensorFlow Release Automation Date: Sat, 9 May 2020 09:48:04 -0700 Subject: [PATCH 012/243] Insert release notes place-fill --- RELEASE.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/RELEASE.md b/RELEASE.md index 3468c459f4242b..8600767bd14e70 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,3 +1,7 @@ +# Release 2.1.1 + + + # Release 2.1.0 TensorFlow 2.1 will be the last TF release supporting Python 2. Python 2 support [officially ends an January 1, 2020](https://www.python.org/dev/peps/pep-0373/#update). [As announced earlier](https://groups.google.com/a/tensorflow.org/d/msg/announce/gVwS5RC8mds/dCt1ka2XAAAJ), TensorFlow will also stop supporting Python 2 starting January 1, 2020, and no more releases are expected in 2019. From 8f63e2bd523d231d6013f145c6c682804ae1afdb Mon Sep 17 00:00:00 2001 From: TensorFlow Release Automation Date: Sat, 9 May 2020 09:53:44 -0700 Subject: [PATCH 013/243] Update version numbers to 2.1.1 --- tensorflow/core/public/version.h | 2 +- tensorflow/tensorflow.bzl | 2 +- tensorflow/tools/pip_package/setup.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tensorflow/core/public/version.h b/tensorflow/core/public/version.h index 10d6b545b2a254..5460bceaab1ed0 100644 --- a/tensorflow/core/public/version.h +++ b/tensorflow/core/public/version.h @@ -22,7 +22,7 @@ limitations under the License. // tensorflow/tools/pip_package/setup.py #define TF_MAJOR_VERSION 2 #define TF_MINOR_VERSION 1 -#define TF_PATCH_VERSION 0 +#define TF_PATCH_VERSION 1 // TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1", // "-beta", "-rc", "-rc.1") diff --git a/tensorflow/tensorflow.bzl b/tensorflow/tensorflow.bzl index 740f24ec4a4afe..de1d35be4f0040 100644 --- a/tensorflow/tensorflow.bzl +++ b/tensorflow/tensorflow.bzl @@ -54,7 +54,7 @@ def register_extension_info(**kwargs): # not contain rc or alpha, only numbers. # Also update tensorflow/core/public/version.h # and tensorflow/tools/pip_package/setup.py -VERSION = "2.1.0" +VERSION = "2.1.1" VERSION_MAJOR = VERSION.split(".")[0] def if_v2(a): diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py index c1cc78e3269300..2de9cc5f105846 100644 --- a/tensorflow/tools/pip_package/setup.py +++ b/tensorflow/tools/pip_package/setup.py @@ -47,7 +47,7 @@ # result for pip. # Also update tensorflow/tensorflow.bzl and # tensorflow/core/public/version.h -_VERSION = '2.1.0' +_VERSION = '2.1.1' REQUIRED_PACKAGES = [ 'absl-py >= 0.7.0', From 6446439b4c681c3d35c89d1efd379e62c8c26806 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Tue, 7 Jan 2020 14:48:25 -0800 Subject: [PATCH 014/243] [Tensorflow Metrics] Add tracker for enable_v2_behavior() and disable_v2_behavior. DOC=https://docs.google.com/document/d/1Xk21sJBxtJAUvvLtQSYopiBdHe_8wUzv4kiICVYz8Jg/edit#heading=h.ng49b3y8n8x4 PiperOrigin-RevId: 288577048 Change-Id: Icdeb43ed4ec4606fdcd2da2930d5018042e72a4c --- tensorflow/python/compat/BUILD | 1 + tensorflow/python/compat/v2_compat.py | 9 ++++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/tensorflow/python/compat/BUILD b/tensorflow/python/compat/BUILD index 78f14631055b5d..cac04d9eb00dcb 100644 --- a/tensorflow/python/compat/BUILD +++ b/tensorflow/python/compat/BUILD @@ -15,6 +15,7 @@ py_library( "//tensorflow/python:control_flow_v2_toggles", "//tensorflow/python:tf2", "//tensorflow/python:util", + "//tensorflow/python/eager:monitoring", ], ) diff --git a/tensorflow/python/compat/v2_compat.py b/tensorflow/python/compat/v2_compat.py index 6c16e600d74636..c563a215c102bc 100644 --- a/tensorflow/python/compat/v2_compat.py +++ b/tensorflow/python/compat/v2_compat.py @@ -25,6 +25,7 @@ from tensorflow.python.data.experimental.ops import readers as exp_readers from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import readers +from tensorflow.python.eager import monitoring from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import control_flow_v2_toggles @@ -32,6 +33,11 @@ from tensorflow.python.util.tf_export import tf_export +# Metrics to track the status of v2_behavior +_v2_behavior_usage_gauge = monitoring.BoolGauge( + "/tensorflow/version/v2_behavior", + "whether v2_behavior is enabled or disabled", "status") + @tf_export(v1=["enable_v2_behavior"]) def enable_v2_behavior(): @@ -45,6 +51,7 @@ def enable_v2_behavior(): This function is called in the main TensorFlow `__init__.py` file, user should not need to call it, except during complex migrations. """ + _v2_behavior_usage_gauge.get_cell("enable").set(True) # TF2 behavior is enabled if either 1) enable_v2_behavior() is called or # 2) the TF2_BEHAVIOR=1 environment variable is set. In the latter case, # the modules below independently check if tf2.enabled(). @@ -82,7 +89,7 @@ def disable_v2_behavior(): User can call this function to disable 2.x behavior during complex migrations. """ - tf2.disable() + _v2_behavior_usage_gauge.get_cell("disable").set(True) ops.disable_eager_execution() tensor_shape.disable_v2_tensorshape() # Also switched by tf2 variable_scope.disable_resource_variables() From 19ecdfd02209558163355e8f7939f44cc12bd46c Mon Sep 17 00:00:00 2001 From: Rohan Jain Date: Wed, 4 Mar 2020 21:14:55 -0800 Subject: [PATCH 015/243] Disabling v2 in the disable_v2_behavior() method PiperOrigin-RevId: 299012811 Change-Id: I311f4b8a6ecbabb658717fff248bea072ff7a366 --- tensorflow/python/compat/v2_compat.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/python/compat/v2_compat.py b/tensorflow/python/compat/v2_compat.py index c563a215c102bc..eac841fb2fee92 100644 --- a/tensorflow/python/compat/v2_compat.py +++ b/tensorflow/python/compat/v2_compat.py @@ -90,6 +90,7 @@ def disable_v2_behavior(): User can call this function to disable 2.x behavior during complex migrations. """ _v2_behavior_usage_gauge.get_cell("disable").set(True) + tf2.disable() ops.disable_eager_execution() tensor_shape.disable_v2_tensorshape() # Also switched by tf2 variable_scope.disable_resource_variables() From 2cf567d4d17260cda4c26d52e49aacfecb8dcfcb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franc=CC=A7ois=20Chollet?= Date: Wed, 25 Mar 2020 15:11:07 -0700 Subject: [PATCH 016/243] Make tf2.enabled() default to True. --- tensorflow/python/tf2.py | 2 +- tensorflow/tools/api/tests/module_test.py | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/tensorflow/python/tf2.py b/tensorflow/python/tf2.py index fd1c8c1757a7a6..caf3c78648027d 100644 --- a/tensorflow/python/tf2.py +++ b/tensorflow/python/tf2.py @@ -43,6 +43,6 @@ def disable(): def enabled(): """Returns True iff TensorFlow 2.0 behavior should be enabled.""" if _force_enable is None: - return os.getenv("TF2_BEHAVIOR", "0") != "0" + return os.getenv("TF2_BEHAVIOR", "1") == "1" else: return _force_enable diff --git a/tensorflow/tools/api/tests/module_test.py b/tensorflow/tools/api/tests/module_test.py index 1732ba41e70b56..bf4e462391d127 100644 --- a/tensorflow/tools/api/tests/module_test.py +++ b/tensorflow/tools/api/tests/module_test.py @@ -24,6 +24,7 @@ import tensorflow as tf from tensorflow.python import tf2 +from tensorflow.python.keras import layers from tensorflow.python.platform import test @@ -79,6 +80,15 @@ def testSummaryMerged(self): tf.summary.FileWriter # pylint: enable=pointless-statement + def testInternalKerasImport(self): + normalization_parent = layers.BatchNormalization.__module__.split('.')[-1] + if tf._major_api_version == 2: + self.assertEqual('normalization_v2', normalization_parent) + self.assertTrue(layers.BatchNormalization._USE_V2_BEHAVIOR) + else: + self.assertEqual('normalization', normalization_parent) + self.assertFalse(layers.BatchNormalization._USE_V2_BEHAVIOR) + if __name__ == '__main__': test.main() From 5df01bc5af4bfaa955eb7ed330cfaaebe96bb109 Mon Sep 17 00:00:00 2001 From: Anna R Date: Sat, 21 Dec 2019 11:02:47 -0800 Subject: [PATCH 017/243] Add _major_api_version to top level __init__.py file to tell when we import tensorflow version 1 or version 2 api. + Minor change to the way root_init_template flag is passed in (now it should be a location path instead of file name). PiperOrigin-RevId: 286729526 Change-Id: I55ebaa0cfe0fe3db3f4d1e699082b1f7b11df4da --- tensorflow/BUILD | 4 ++-- tensorflow/api_template.__init__.py | 1 + tensorflow/api_template_v1.__init__.py | 2 ++ tensorflow/python/tools/api/generator/api_gen.bzl | 4 ++-- tensorflow/tools/api/tests/api_compatibility_test.py | 4 +++- tensorflow/tools/api/tests/module_test.py | 2 +- 6 files changed, 11 insertions(+), 6 deletions(-) diff --git a/tensorflow/BUILD b/tensorflow/BUILD index 2ccb9854622282..7e87e58d37e1bf 100644 --- a/tensorflow/BUILD +++ b/tensorflow/BUILD @@ -875,7 +875,7 @@ gen_api_init_files( output_files = TENSORFLOW_API_INIT_FILES_V1, output_package = "tensorflow._api.v1", root_file_name = "v1.py", - root_init_template = "api_template_v1.__init__.py", + root_init_template = "$(location api_template_v1.__init__.py)", ) gen_api_init_files( @@ -898,7 +898,7 @@ gen_api_init_files( output_files = TENSORFLOW_API_INIT_FILES_V2, output_package = "tensorflow._api.v2", root_file_name = "v2.py", - root_init_template = "api_template.__init__.py", + root_init_template = "$(location api_template.__init__.py)", ) py_library( diff --git a/tensorflow/api_template.__init__.py b/tensorflow/api_template.__init__.py index c515cc76b9aacd..2a53f973f32f49 100644 --- a/tensorflow/api_template.__init__.py +++ b/tensorflow/api_template.__init__.py @@ -89,6 +89,7 @@ # Enable TF2 behaviors from tensorflow.python.compat import v2_compat as _compat # pylint: disable=g-import-not-at-top _compat.enable_v2_behavior() +_major_api_version = 2 # Load all plugin libraries from site-packages/tensorflow-plugins if we are diff --git a/tensorflow/api_template_v1.__init__.py b/tensorflow/api_template_v1.__init__.py index 2b2899c3fe031e..b6dba2d35dae9a 100644 --- a/tensorflow/api_template_v1.__init__.py +++ b/tensorflow/api_template_v1.__init__.py @@ -104,6 +104,8 @@ _current_module.app.flags = flags # pylint: disable=undefined-variable setattr(_current_module, "flags", flags) +_major_api_version = 1 + # Load all plugin libraries from site-packages/tensorflow-plugins if we are # running under pip. # TODO(gunan): Enable setting an environment variable to define arbitrary plugin diff --git a/tensorflow/python/tools/api/generator/api_gen.bzl b/tensorflow/python/tools/api/generator/api_gen.bzl index b567a2291774fb..6595960c34107a 100644 --- a/tensorflow/python/tools/api/generator/api_gen.bzl +++ b/tensorflow/python/tools/api/generator/api_gen.bzl @@ -84,10 +84,10 @@ def gen_api_init_files( """ root_init_template_flag = "" if root_init_template: - root_init_template_flag = "--root_init_template=$(location " + root_init_template + ")" + root_init_template_flag = "--root_init_template=" + root_init_template primary_package = packages[0] - api_gen_binary_target = ("create_" + primary_package + "_api_%d_%s") % (api_version, name) + api_gen_binary_target = ("create_" + primary_package + "_api_%s") % name native.py_binary( name = api_gen_binary_target, srcs = ["//tensorflow/python/tools/api/generator:create_python_api.py"], diff --git a/tensorflow/tools/api/tests/api_compatibility_test.py b/tensorflow/tools/api/tests/api_compatibility_test.py index 383dbb4ab1f94f..321fc381290a34 100644 --- a/tensorflow/tools/api/tests/api_compatibility_test.py +++ b/tensorflow/tools/api/tests/api_compatibility_test.py @@ -367,7 +367,9 @@ def _ReadFileToProto(filename): api_version=api_version) def testAPIBackwardsCompatibility(self): - api_version = 2 if '_api.v2' in tf.bitwise.__name__ else 1 + api_version = 1 + if hasattr(tf, '_major_api_version') and tf._major_api_version == 2: + api_version = 2 golden_file_pattern = os.path.join( resource_loader.get_root_dir_with_all_resources(), _KeyToFilePath('*', api_version)) diff --git a/tensorflow/tools/api/tests/module_test.py b/tensorflow/tools/api/tests/module_test.py index bf4e462391d127..692ce94d006041 100644 --- a/tensorflow/tools/api/tests/module_test.py +++ b/tensorflow/tools/api/tests/module_test.py @@ -74,7 +74,7 @@ def testSummaryMerged(self): tf.summary.image # If we use v2 API, check for create_file_writer, # otherwise check for FileWriter. - if '._api.v2' in tf.bitwise.__name__: + if hasattr(tf, '_major_api_version') and tf._major_api_version == 2: tf.summary.create_file_writer else: tf.summary.FileWriter From 9ee266b0648a1862414b10f901850479698d812c Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 12 May 2020 12:46:48 -0700 Subject: [PATCH 018/243] Update RELEASE.md --- RELEASE.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/RELEASE.md b/RELEASE.md index 8600767bd14e70..65943834236909 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,6 +1,11 @@ # Release 2.1.1 - +## Bug Fixes and Other Changes +* Updates `sqlite3` to `3.31.01` to handle [CVE-2019-19880](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-19880), [CVE-2019-19244](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-19244) and [CVE-2019-19645](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-19645) +* Updates `curl` to `7.69.1` to handle [CVE-2019-15601](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-15601) +* Updates `libjpeg-turbo` to `2.0.4` to handle [CVE-2018-19664](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-19664), [CVE-2018-20330](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-20330) and [CVE-2019-13960](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-13960) +* Updates Apache Spark to `2.4.5` to handle [CVE-2019-10099](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-10099), [CVE-2018-17190](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-17190) and [CVE-2018-11770](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-11770) +* Fixes a versioning bug which causes Keras layers from TF 1.x to be used instead of those from TF 2.x # Release 2.1.0 From 721f6e6c9462b253d8db2297635b5b4ac9cffe26 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 12 May 2020 17:21:46 -0700 Subject: [PATCH 019/243] Pin keras_preprocessing to 1.1.0 instead of accidentally installing 1.1.1 during test --- tensorflow/tools/pip_package/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py index 2de9cc5f105846..cf796b4e61cf3e 100644 --- a/tensorflow/tools/pip_package/setup.py +++ b/tensorflow/tools/pip_package/setup.py @@ -57,7 +57,7 @@ 'gast == 0.2.2', 'google_pasta >= 0.1.6', 'keras_applications >= 1.0.8', - 'keras_preprocessing >= 1.1.0', + 'keras_preprocessing == 1.1.0', 'numpy >= 1.16.0, < 2.0', 'opt_einsum >= 2.3.2', 'protobuf >= 3.8.0', From d0106f72ea1f5a859dcd7a93674fce2a54890b07 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 13 May 2020 18:33:18 -0700 Subject: [PATCH 020/243] Fix tests when `tf._major_api_version` does not exist --- tensorflow/tools/api/tests/module_test.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/tensorflow/tools/api/tests/module_test.py b/tensorflow/tools/api/tests/module_test.py index 692ce94d006041..c0870e6c134cb3 100644 --- a/tensorflow/tools/api/tests/module_test.py +++ b/tensorflow/tools/api/tests/module_test.py @@ -74,20 +74,22 @@ def testSummaryMerged(self): tf.summary.image # If we use v2 API, check for create_file_writer, # otherwise check for FileWriter. - if hasattr(tf, '_major_api_version') and tf._major_api_version == 2: - tf.summary.create_file_writer - else: - tf.summary.FileWriter + if hasattr(tf, '_major_api_version'): + if tf._major_api_version == 2: + tf.summary.create_file_writer + else: + tf.summary.FileWriter # pylint: enable=pointless-statement def testInternalKerasImport(self): normalization_parent = layers.BatchNormalization.__module__.split('.')[-1] - if tf._major_api_version == 2: - self.assertEqual('normalization_v2', normalization_parent) - self.assertTrue(layers.BatchNormalization._USE_V2_BEHAVIOR) - else: - self.assertEqual('normalization', normalization_parent) - self.assertFalse(layers.BatchNormalization._USE_V2_BEHAVIOR) + if hasattr(tf, '_major_api_version'): + if tf._major_api_version == 2: + self.assertEqual('normalization_v2', normalization_parent) + self.assertTrue(layers.BatchNormalization._USE_V2_BEHAVIOR) + else: + self.assertEqual('normalization', normalization_parent) + self.assertFalse(layers.BatchNormalization._USE_V2_BEHAVIOR) if __name__ == '__main__': From dcb6a33c40c2f6999a6e5c9a497f4a87b5136c3e Mon Sep 17 00:00:00 2001 From: Terry Heo Date: Tue, 11 Feb 2020 22:08:31 -0800 Subject: [PATCH 021/243] Update Makefile of TFLite not to include main() function Following files are excluded. - tensorflow/lite/experimental/ruy/tune_tool.cc - tensorflow/lite/tools/make/downloads/absl/absl/hash/internal/print_hash_of.cc PiperOrigin-RevId: 294592839 Change-Id: Ia6856ca6e6a018923f7f8c1f9ab427c00152370f --- tensorflow/lite/tools/make/Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/lite/tools/make/Makefile b/tensorflow/lite/tools/make/Makefile index f67094f37b409f..da9e2bdd92004b 100644 --- a/tensorflow/lite/tools/make/Makefile +++ b/tensorflow/lite/tools/make/Makefile @@ -129,6 +129,8 @@ $(wildcard tensorflow/lite/*/*/*test.cc) \ $(wildcard tensorflow/lite/*/*/*/*test.cc) \ $(wildcard tensorflow/lite/kernels/*test_main.cc) \ $(wildcard tensorflow/lite/kernels/*test_util*.cc) \ +tensorflow/lite/experimental/ruy/tune_tool.cc \ +tensorflow/lite/tools/make/downloads/absl/absl/hash/internal/print_hash_of.cc \ $(MINIMAL_SRCS) BUILD_WITH_MMAP ?= true From 7f190ecb9f4298f1ece101e2a6c39294befa6ed5 Mon Sep 17 00:00:00 2001 From: Vincent ABRIOU Date: Mon, 10 Feb 2020 11:46:28 +0100 Subject: [PATCH 022/243] TFLite: tools: make: remove hash and flags files from the build sources for libtensorflow-lite.a By a file dependency game, hashtablez_sampler.cc and flags files include the absl/synchronization/mutex.h file but all files related to absl synchronization are not part of the build and as a result it generates a linking issue while linking the generated libtensorflow-lite.a library into a C/C++ application. It is not obvious that hash and flags files are needed so simply remove them from the list of the build sources. Signed-off-by: Vincent ABRIOU --- tensorflow/lite/tools/make/Makefile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tensorflow/lite/tools/make/Makefile b/tensorflow/lite/tools/make/Makefile index da9e2bdd92004b..4e8df02ac62590 100644 --- a/tensorflow/lite/tools/make/Makefile +++ b/tensorflow/lite/tools/make/Makefile @@ -115,7 +115,7 @@ tensorflow/lite/tools/make/downloads/fft2d/fftsg.c \ tensorflow/lite/tools/make/downloads/flatbuffers/src/util.cpp CORE_CC_ALL_SRCS += \ $(shell find tensorflow/lite/tools/make/downloads/absl/absl/ \ - -type f -name \*.cc | grep -v test | grep -v benchmark | grep -v synchronization | grep -v debugging) + -type f -name \*.cc | grep -v test | grep -v benchmark | grep -v synchronization | grep -v debugging | grep -v hash | grep -v flags) endif # Remove any duplicates. CORE_CC_ALL_SRCS := $(sort $(CORE_CC_ALL_SRCS)) @@ -130,7 +130,6 @@ $(wildcard tensorflow/lite/*/*/*/*test.cc) \ $(wildcard tensorflow/lite/kernels/*test_main.cc) \ $(wildcard tensorflow/lite/kernels/*test_util*.cc) \ tensorflow/lite/experimental/ruy/tune_tool.cc \ -tensorflow/lite/tools/make/downloads/absl/absl/hash/internal/print_hash_of.cc \ $(MINIMAL_SRCS) BUILD_WITH_MMAP ?= true From 5b1fbe02689875b02b373d0aaea27c45ae30741d Mon Sep 17 00:00:00 2001 From: Vincent ABRIOU Date: Tue, 11 Feb 2020 10:06:13 +0100 Subject: [PATCH 023/243] TFLite: tools: make: add fftsg2d.c file in the build resources for libtensorflow-lite.a lite/kernels/rfft2d.cc has reference to rdft2d. As a consequence, libtensorflow-lite.a need to include fft2d/fftsg2d.c source in its build. If fftsg2d.c is not part of libtensorflow-lite.a, a C/C++ application that use the libtensorflow-lite.a static library is not able to link with the following error: rfft2d.cc:(.text+0x594): undefined reference to `rdft2d' Signed-off-by: Vincent ABRIOU --- tensorflow/lite/tools/make/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/lite/tools/make/Makefile b/tensorflow/lite/tools/make/Makefile index 4e8df02ac62590..1aac9e28d7ee68 100644 --- a/tensorflow/lite/tools/make/Makefile +++ b/tensorflow/lite/tools/make/Makefile @@ -112,6 +112,7 @@ $(wildcard tensorflow/lite/kernels/internal/reference/*.cc) \ $(PROFILER_SRCS) \ tensorflow/lite/tools/make/downloads/farmhash/src/farmhash.cc \ tensorflow/lite/tools/make/downloads/fft2d/fftsg.c \ +tensorflow/lite/tools/make/downloads/fft2d/fftsg2d.c \ tensorflow/lite/tools/make/downloads/flatbuffers/src/util.cpp CORE_CC_ALL_SRCS += \ $(shell find tensorflow/lite/tools/make/downloads/absl/absl/ \ From e7661b6437a3ca18707e8488d606f5c921744904 Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Thu, 21 May 2020 22:21:22 +0000 Subject: [PATCH 024/243] Relocatable ROCm changes --- configure.py | 7 ++- tensorflow/tools/ci_build/Dockerfile.rocm | 21 ++++++- third_party/gpus/rocm/rocm_config.h.tpl | 2 +- third_party/gpus/rocm_configure.bzl | 76 ++++++++++++----------- 4 files changed, 66 insertions(+), 40 deletions(-) diff --git a/configure.py b/configure.py index 8ec47294b47e88..4d1964f427c7ce 100644 --- a/configure.py +++ b/configure.py @@ -1170,7 +1170,8 @@ def system_specific_test_config(env): write_to_bazelrc('test --test_tag_filters=-gpu,-nomac,-no_mac') write_to_bazelrc('test --build_tag_filters=-gpu,-nomac,-no_mac') elif is_linux(): - if env.get('TF_NEED_CUDA', None) == '1': + if ((env.get('TF_NEED_CUDA', None) == '1') or + (env.get('TF_NEED_ROCM', None) == '1')): write_to_bazelrc('test --test_tag_filters=-no_gpu') write_to_bazelrc('test --build_tag_filters=-no_gpu') write_to_bazelrc('test --test_env=LD_LIBRARY_PATH') @@ -1414,6 +1415,10 @@ def main(): write_action_env_to_bazelrc('LD_LIBRARY_PATH', environ_cp.get('LD_LIBRARY_PATH')) + if (environ_cp.get('TF_NEED_ROCM') == '1' and environ_cp.get('ROCM_PATH')): + write_action_env_to_bazelrc('ROCM_PATH', environ_cp.get('ROCM_PATH')) + write_action_env_to_bazelrc('ROCM_ROOT', environ_cp.get('ROCM_PATH')) + environ_cp['TF_NEED_CUDA'] = str( int(get_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False))) if (environ_cp.get('TF_NEED_CUDA') == '1' and diff --git a/tensorflow/tools/ci_build/Dockerfile.rocm b/tensorflow/tools/ci_build/Dockerfile.rocm index 70029d2a9a90cc..130d198ece08e6 100644 --- a/tensorflow/tools/ci_build/Dockerfile.rocm +++ b/tensorflow/tools/ci_build/Dockerfile.rocm @@ -62,6 +62,7 @@ RUN apt-get update --allow-insecure-repositories && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* +# Set up paths ENV HCC_HOME=$ROCM_PATH/hcc ENV HIP_PATH=$ROCM_PATH/hip ENV OPENCL_ROOT=$ROCM_PATH/opencl @@ -70,7 +71,21 @@ ENV PATH="$ROCM_PATH/bin:${PATH}" ENV PATH="$OPENCL_ROOT/bin:${PATH}" # Add target file to help determine which device(s) to build for -RUN bash -c 'echo -e "gfx803\ngfx900\ngfx906" >> /opt/rocm/bin/target.lst' +RUN bash -c 'echo -e "gfx803\ngfx900\ngfx906" >> ${ROCM_PATH}/bin/target.lst' + +# Need to explicitly create the $ROCM_PATH/.info/version file to workaround what seems to be a bazel bug +# The env vars being set via --action_env in .bazelrc and .tf_configure.bazelrc files are sometimes +# not getting set in the build command being spawned by bazel (in theory this should not happen) +# As a consequence ROCM_PATH is sometimes not set for the hipcc commands. +# When hipcc incokes hcc, it specifies $ROCM_PATH/.../include dirs via the `-isystem` options +# If ROCM_PATH is not set, it defaults to /opt/rocm, and as a consequence a dependency is generated on the +# header files included within `/opt/rocm`, which then leads to bazel dependency errors +# Explicitly creating the $ROCM_PATH/.info/version allows ROCM path to be set correrctly, even when ROCM_PATH +# is not explicitly set, and thus avoids the eventual bazel dependency error. +# The bazel bug needs to be root-caused and addressed, but that is out of our control and may take a long time +# to come to fruition, so implementing the workaround to make do till then +# Filed https://github.com/bazelbuild/bazel/issues/11163 for tracking this +RUN touch ${ROCM_PATH}/.info/version # Copy and run the install scripts. COPY install/*.sh /install/ @@ -89,3 +104,7 @@ COPY install/.bazelrc /etc/bazel.bazelrc # Configure the build for our ROCm configuration. ENV TF_NEED_ROCM 1 +# This is a temporary workaround to fix Out-Of-Memory errors we are running into with XLA perf tests +# By default, HIP runtime "hides" 256MB from the TF Runtime, but with recent changes (update to ROCm2.3, dynamic loading of roc* libs, et al) +# it seems that we need to up the threshold slightly to 320MB +ENV HIP_HIDDEN_FREE_MEM=320 diff --git a/third_party/gpus/rocm/rocm_config.h.tpl b/third_party/gpus/rocm/rocm_config.h.tpl index c5f25a845cae13..957413b9acd734 100644 --- a/third_party/gpus/rocm/rocm_config.h.tpl +++ b/third_party/gpus/rocm/rocm_config.h.tpl @@ -16,6 +16,6 @@ limitations under the License. #ifndef ROCM_ROCM_CONFIG_H_ #define ROCM_ROCM_CONFIG_H_ -#define TF_ROCM_TOOLKIT_PATH "/opt/rocm" +#define TF_ROCM_TOOLKIT_PATH "%{rocm_toolkit_path}" #endif // ROCM_ROCM_CONFIG_H_ diff --git a/third_party/gpus/rocm_configure.bzl b/third_party/gpus/rocm_configure.bzl index 8a8728fa4f7f16..7f89b3ccbb4d3f 100644 --- a/third_party/gpus/rocm_configure.bzl +++ b/third_party/gpus/rocm_configure.bzl @@ -22,7 +22,7 @@ load( _GCC_HOST_COMPILER_PATH = "GCC_HOST_COMPILER_PATH" _GCC_HOST_COMPILER_PREFIX = "GCC_HOST_COMPILER_PREFIX" -_ROCM_TOOLKIT_PATH = "ROCM_TOOLKIT_PATH" +_ROCM_TOOLKIT_PATH = "ROCM_PATH" _TF_ROCM_VERSION = "TF_ROCM_VERSION" _TF_MIOPEN_VERSION = "TF_MIOPEN_VERSION" _TF_ROCM_AMDGPU_TARGETS = "TF_ROCM_AMDGPU_TARGETS" @@ -192,55 +192,55 @@ def _rocm_include_path(repository_ctx, rocm_config): inc_dirs.append(rocm_config.rocm_toolkit_path + "/include") # Add HSA headers - inc_dirs.append("/opt/rocm/hsa/include") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/hsa/include") # Add HIP headers - inc_dirs.append("/opt/rocm/include/hip") - inc_dirs.append("/opt/rocm/include/hip/hcc_detail") - inc_dirs.append("/opt/rocm/hip/include") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/include/hip") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/include/hip/hcc_detail") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/hip/include") # Add HIP-Clang headers - inc_dirs.append("/opt/rocm/llvm/lib/clang/8.0/include") - inc_dirs.append("/opt/rocm/llvm/lib/clang/9.0.0/include") - inc_dirs.append("/opt/rocm/llvm/lib/clang/10.0.0/include") - inc_dirs.append("/opt/rocm/llvm/lib/clang/11.0.0/include") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/llvm/lib/clang/8.0/include") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/llvm/lib/clang/9.0.0/include") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/llvm/lib/clang/10.0.0/include") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/llvm/lib/clang/11.0.0/include") # Add rocrand and hiprand headers - inc_dirs.append("/opt/rocm/rocrand/include") - inc_dirs.append("/opt/rocm/hiprand/include") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/rocrand/include") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/hiprand/include") # Add rocfft headers - inc_dirs.append("/opt/rocm/rocfft/include") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/rocfft/include") # Add rocBLAS headers - inc_dirs.append("/opt/rocm/rocblas/include") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/rocblas/include") # Add MIOpen headers - inc_dirs.append("/opt/rocm/miopen/include") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/miopen/include") # Add RCCL headers - inc_dirs.append("/opt/rocm/rccl/include") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/rccl/include") # Add hcc headers - inc_dirs.append("/opt/rocm/hcc/include") - inc_dirs.append("/opt/rocm/hcc/compiler/lib/clang/7.0.0/include/") - inc_dirs.append("/opt/rocm/hcc/lib/clang/7.0.0/include") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/hcc/include") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/hcc/compiler/lib/clang/7.0.0/include/") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/hcc/lib/clang/7.0.0/include") # Newer hcc builds use/are based off of clang 8.0.0. - inc_dirs.append("/opt/rocm/hcc/compiler/lib/clang/8.0.0/include/") - inc_dirs.append("/opt/rocm/hcc/lib/clang/8.0.0/include") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/hcc/compiler/lib/clang/8.0.0/include/") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/hcc/lib/clang/8.0.0/include") # Support hcc based off clang 9.0.0, included in ROCm2.2 - inc_dirs.append("/opt/rocm/hcc/compiler/lib/clang/9.0.0/include/") - inc_dirs.append("/opt/rocm/hcc/lib/clang/9.0.0/include") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/hcc/compiler/lib/clang/9.0.0/include/") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/hcc/lib/clang/9.0.0/include") # Support hcc based off clang 10.0.0, included in ROCm2.8 - inc_dirs.append("/opt/rocm/hcc/compiler/lib/clang/10.0.0/include/") - inc_dirs.append("/opt/rocm/hcc/lib/clang/10.0.0/include") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/hcc/compiler/lib/clang/10.0.0/include/") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/hcc/lib/clang/10.0.0/include") # Support hcc based off clang 11.0.0, included in ROCm3.1 - inc_dirs.append("/opt/rocm/hcc/compiler/lib/clang/11.0.0/include/") - inc_dirs.append("/opt/rocm/hcc/lib/clang/11.0.0/include") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/hcc/compiler/lib/clang/11.0.0/include/") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/hcc/lib/clang/11.0.0/include") return inc_dirs @@ -306,11 +306,12 @@ def _hipcc_env(repository_ctx): repository_ctx.os.environ[name].strip() + "\";") return hipcc_env.strip() -def _hipcc_is_hipclang(repository_ctx): +def _hipcc_is_hipclang(repository_ctx,rocm_config): """Returns if hipcc is based on hip-clang toolchain. Args: repository_ctx: The repository context. + rocm_config: The path to the hip compiler. Returns: A string "True" if hipcc is based on hip-clang toolchain. @@ -325,7 +326,7 @@ def _hipcc_is_hipclang(repository_ctx): # grep for "HIP_COMPILER=clang" in /opt/rocm/hip/lib/.hipInfo grep_result = _execute( repository_ctx, - ["grep", "HIP_COMPILER=clang", "/opt/rocm/hip/lib/.hipInfo"], + ["grep", "HIP_COMPILER=clang", rocm_config.rocm_toolkit_path + "/hip/lib/.hipInfo"], empty_stdout_fine = True, ) result = grep_result.stdout.strip() @@ -333,13 +334,14 @@ def _hipcc_is_hipclang(repository_ctx): return "True" return "False" -def _if_hipcc_is_hipclang(repository_ctx, if_true, if_false = []): +def _if_hipcc_is_hipclang(repository_ctx, rocm_config, if_true, if_false = []): """ Returns either the if_true or if_false arg based on whether hipcc is based on the hip-clang toolchain Args : repository_ctx: The repository context. + rocm_config: The path to the hip compiler. if_true : value to return if hipcc is hip-clang based if_false : value to return if hipcc is not hip-clang based (optional, defaults to empty list) @@ -347,7 +349,7 @@ def _if_hipcc_is_hipclang(repository_ctx, if_true, if_false = []): Returns : either the if_true arg or the of_False arg """ - if _hipcc_is_hipclang(repository_ctx) == "True": + if _hipcc_is_hipclang(repository_ctx,rocm_config) == "True": return if_true return if_false @@ -768,7 +770,7 @@ def _create_local_rocm_repository(repository_ctx): rocm_defines["%{host_compiler_prefix}"] = host_compiler_prefix - rocm_defines["%{linker_bin_path}"] = "/opt/rocm/hcc/compiler/bin" + rocm_defines["%{linker_bin_path}"] = rocm_config.rocm_toolkit_path + "/hcc/compiler/bin" # For gcc, do not canonicalize system header paths; some versions of gcc # pick the shortest possible path for system includes when creating the @@ -781,7 +783,7 @@ def _create_local_rocm_repository(repository_ctx): "-DTENSORFLOW_USE_ROCM=1", "-D__HIP_PLATFORM_HCC__", "-DEIGEN_USE_HIP", - ] + _if_hipcc_is_hipclang(repository_ctx, [ + ] + _if_hipcc_is_hipclang(repository_ctx, rocm_config, [ # # define "TENSORFLOW_COMPILER_IS_HIP_CLANG" when we are using clang # based hipcc to compile/build tensorflow @@ -823,14 +825,14 @@ def _create_local_rocm_repository(repository_ctx): "crosstool:clang/bin/crosstool_wrapper_driver_rocm", { "%{cpu_compiler}": str(cc), - "%{hipcc_path}": "/opt/rocm/bin/hipcc", + "%{hipcc_path}": rocm_config.rocm_toolkit_path + "/bin/hipcc", "%{hipcc_env}": _hipcc_env(repository_ctx), - "%{hipcc_is_hipclang}": _hipcc_is_hipclang(repository_ctx), - "%{rocr_runtime_path}": "/opt/rocm/lib", + "%{hipcc_is_hipclang}": _hipcc_is_hipclang(repository_ctx,rocm_config), + "%{rocr_runtime_path}": rocm_config.rocm_toolkit_path + "/lib", "%{rocr_runtime_library}": "hsa-runtime64", - "%{hip_runtime_path}": "/opt/rocm/hip/lib", + "%{hip_runtime_path}": rocm_config.rocm_toolkit_path + "/hip/lib", "%{hip_runtime_library}": "hip_hcc", - "%{hcc_runtime_path}": "/opt/rocm/hcc/lib", + "%{hcc_runtime_path}": rocm_config.rocm_toolkit_path + "/hcc/lib", "%{hcc_runtime_library}": "mcwamp", "%{crosstool_verbose}": _crosstool_verbose(repository_ctx), "%{gcc_host_compiler_path}": str(cc), From 67a86e11467eb3d7961ddb68d5193944d4fe9eb7 Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Fri, 22 May 2020 13:21:17 +0000 Subject: [PATCH 025/243] Disabling 3 subtests within the //tensorflow/core/grappler/optimizers:constant_folding_test The "MulConvPushDownTest" subtest basically compares the result of the following graph, before and after optimizations ``` // Tests if the following rewrite is performed: // // * Conv2D // / \ / \ // c Conv2D --> x (c * filter) // / \ // x filter ``` In 3 variations of the above subtests, we get miscompares ``` [ FAILED ] 3 tests, listed below: [ FAILED ] ConstantFoldingTest.MulConvPushDownTest_Conv2D_ScalarConst [ FAILED ] ConstantFoldingTest.MulConvPushDownTest_Conv2D_SingletonConst [ FAILED ] ConstantFoldingTest.MulConvPushDownTest_Conv3D_NDHWC_1x1x3Const ``` The nature of the failure, is a miscompare in the output value ``` ... Expected equality of these values: a Which is: 31415.762 b Which is: 31415.771 ... Expected equality of these values: a Which is: 371645.47 b Which is: 371645.62 ... Expected equality of these values: a Which is: 238958.75 b Which is: 238958.83 ... ``` The differences are minor, and can be removed if we use non-irrational numbers for the filter values (currently filter[i] == sqrt(i)), but I think that change will be frowned upon. So taking the route of disabling these 3 variants on ROCm --- .../core/grappler/optimizers/constant_folding_test.cc | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tensorflow/core/grappler/optimizers/constant_folding_test.cc b/tensorflow/core/grappler/optimizers/constant_folding_test.cc index 8bf543f914ddad..50b2c057f11606 100644 --- a/tensorflow/core/grappler/optimizers/constant_folding_test.cc +++ b/tensorflow/core/grappler/optimizers/constant_folding_test.cc @@ -548,6 +548,8 @@ TEST_F(ConstantFoldingTest, ConstantPushDownBiasAdd) { } } +// This test fails on ROCm platform (see commit message for details) +#ifndef TENSORFLOW_USE_ROCM TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv2D_ScalarConst) { for (string data_format : { "NHWC", @@ -565,7 +567,10 @@ TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv2D_ScalarConst) { /*expect_folded=*/true); } } +#endif +// This test fails on ROCm platform (see commit message for details) +#ifndef TENSORFLOW_USE_ROCM TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv2D_SingletonConst) { for (string data_format : { "NHWC", @@ -585,6 +590,7 @@ TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv2D_SingletonConst) { } } } +#endif TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv2D_SingletonConst_ShapeMismatch) { @@ -668,6 +674,8 @@ TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv2D_3x1Const) { } } +// This test fails on ROCm platform (see commit message for details) +#ifndef TENSORFLOW_USE_ROCM TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv3D_NDHWC_1x1x3Const) { MulConvPushDownTest( /*input_shape=*/{3, 3, 3, 3, 3}, @@ -678,6 +686,7 @@ TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv3D_NDHWC_1x1x3Const) { /*data_format=*/"NDHWC", /*expect_folded=*/true); } +#endif TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv3D_NCDHW_3x1x1x1Const) { MulConvPushDownTest( From 5a843a0fb6335821f45351525f5a2ff821e97cbb Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Fri, 22 May 2020 13:25:48 +0000 Subject: [PATCH 026/243] Enabling NCHW layout specific subtests on the ROCm platform --- .../optimizers/constant_folding_test.cc | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tensorflow/core/grappler/optimizers/constant_folding_test.cc b/tensorflow/core/grappler/optimizers/constant_folding_test.cc index 50b2c057f11606..616b91308e25a6 100644 --- a/tensorflow/core/grappler/optimizers/constant_folding_test.cc +++ b/tensorflow/core/grappler/optimizers/constant_folding_test.cc @@ -553,9 +553,9 @@ TEST_F(ConstantFoldingTest, ConstantPushDownBiasAdd) { TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv2D_ScalarConst) { for (string data_format : { "NHWC", -#if GOOGLE_CUDA +#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM "NCHW" -#endif // GOOGLE_CUDA +#endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM }) { MulConvPushDownTest( /*input_shape=*/data_format == "NHWC" ? TensorShape{4, 10, 10, 3} @@ -574,9 +574,9 @@ TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv2D_ScalarConst) { TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv2D_SingletonConst) { for (string data_format : { "NHWC", -#if GOOGLE_CUDA +#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM "NCHW" -#endif // GOOGLE_CUDA +#endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM }) { for (auto mul_const_input_shape : {TensorShape{1}, TensorShape{1, 1, 1, 1}}) { @@ -596,9 +596,9 @@ TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv2D_SingletonConst_ShapeMismatch) { for (string data_format : { "NHWC", -#if GOOGLE_CUDA +#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM "NCHW" -#endif // GOOGLE_CUDA +#endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM }) { MulConvPushDownTest( /*input_shape=*/data_format == "NHWC" ? TensorShape{4, 10, 10, 3} @@ -614,9 +614,9 @@ TEST_F(ConstantFoldingTest, TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv2D_3x1x3Const) { for (auto data_format : { "NHWC", -#if GOOGLE_CUDA +#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM "NCHW" -#endif // GOOGLE_CUDA +#endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM }) { MulConvPushDownTest( /*input_shape=*/{3, 3, 3, 3}, @@ -641,7 +641,7 @@ TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv2D_NHWC_VectorLikeConst) { } } -#if GOOGLE_CUDA +#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv2D_NCHW_VectorLikeConst) { for (auto mul_const_input_shape : {TensorShape{3}, TensorShape{3, 1, 1}, TensorShape{1, 3, 1, 1}}) { @@ -655,14 +655,14 @@ TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv2D_NCHW_VectorLikeConst) { /*expect_folded=*/false); } } -#endif // GOOGLE_CUDA +#endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv2D_3x1Const) { for (auto data_format : { "NHWC", -#if GOOGLE_CUDA +#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM "NCHW" -#endif // GOOGLE_CUDA +#endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM }) { MulConvPushDownTest( /*input_shape=*/{3, 3, 3, 3}, From 86ed16986298d1d3e1bd2002164aa8edd2fc32af Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Fri, 22 May 2020 13:26:38 +0000 Subject: [PATCH 027/243] Disabling 3D Pooling specific subtests on the ROCm platform --- tensorflow/cc/gradients/nn_grad_test.cc | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tensorflow/cc/gradients/nn_grad_test.cc b/tensorflow/cc/gradients/nn_grad_test.cc index f5a09e09dcda3e..942ec08f451a2d 100644 --- a/tensorflow/cc/gradients/nn_grad_test.cc +++ b/tensorflow/cc/gradients/nn_grad_test.cc @@ -259,6 +259,9 @@ TEST_F(NNGradTest, MaxPoolGradV2Helper) { RunTest(x, x_init_value, y, y_shape); } +// TODO(rocm): +// Re-enable this test once 3D pooling is supported on ROCm platform +#ifndef TENSORFLOW_USE_ROCM TEST_F(NNGradTest, MaxPool3DGradHelper) { TensorShape x_shape({1, 3, 3, 3, 1}); TensorShape y_shape({1, 1, 1, 1, 1}); @@ -271,6 +274,7 @@ TEST_F(NNGradTest, MaxPool3DGradHelper) { SetRandomValuesForMaxPooling(&x_init_value); RunTest(x, x_init_value, y, y_shape); } +#endif TEST_F(NNGradTest, AvgPoolGradHelper) { TensorShape x_shape({1, 2, 2, 1}); @@ -283,6 +287,9 @@ TEST_F(NNGradTest, AvgPoolGradHelper) { RunTest(x, x_shape, y, y_shape); } +// TODO(rocm): +// Re-enable this test once 3D pooling is supported on ROCm platform +#ifndef TENSORFLOW_USE_ROCM TEST_F(NNGradTest, AvgPool3DGradHelper) { TensorShape x_shape({1, 3, 3, 3, 1}); TensorShape y_shape({1, 1, 1, 1, 1}); @@ -293,6 +300,7 @@ TEST_F(NNGradTest, AvgPool3DGradHelper) { auto y = AvgPool3D(scope_, x, ksize, strides, "SAME"); RunTest(x, x_shape, y, y_shape); } +#endif TEST_F(NNGradTest, LRN) { TensorShape x_shape({1, 1, 2, 1}); From dab722f8b3a661644a70c551c495d925997cf3fd Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Fri, 22 May 2020 13:31:28 +0000 Subject: [PATCH 028/243] Disabling non-GPU subtests on the ROCm platform --- .../collective_param_resolver_distributed_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/core/distributed_runtime/collective_param_resolver_distributed_test.cc b/tensorflow/core/distributed_runtime/collective_param_resolver_distributed_test.cc index aba84864f087eb..bf83e7962f1343 100644 --- a/tensorflow/core/distributed_runtime/collective_param_resolver_distributed_test.cc +++ b/tensorflow/core/distributed_runtime/collective_param_resolver_distributed_test.cc @@ -315,7 +315,7 @@ TEST_F(DeviceResDistTest, Workers2Devices2) { ValidateCollectiveParams(num_workers, num_devices); } -#ifndef GOOGLE_CUDA +#if !GOOGLE_CUDA && !TENSORFLOW_USE_ROCM namespace { // A mock NcclReducer for testing group runtime details initialization with CPU // builds. The only meaningful function in this class is From a76192b05cb47ec7991629561c8b71ce7614c634 Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Fri, 22 May 2020 13:32:39 +0000 Subject: [PATCH 029/243] Minor tweaks to the ROCm CI test scripts --- .../tools/ci_build/linux/rocm/run_cc_core.sh | 24 +++++--- .../tools/ci_build/linux/rocm/run_py3_core.sh | 23 ++++++-- .../tools/ci_build/xla/linux/rocm/run_py3.sh | 56 ++++++++++++++++--- 3 files changed, 82 insertions(+), 21 deletions(-) diff --git a/tensorflow/tools/ci_build/linux/rocm/run_cc_core.sh b/tensorflow/tools/ci_build/linux/rocm/run_cc_core.sh index 0286d0aea4c372..bcd7241d6ca537 100755 --- a/tensorflow/tools/ci_build/linux/rocm/run_cc_core.sh +++ b/tensorflow/tools/ci_build/linux/rocm/run_cc_core.sh @@ -35,10 +35,20 @@ export TF_GPU_COUNT=${N_GPUS} yes "" | $PYTHON_BIN_PATH configure.py # Run bazel test command. Double test timeouts to avoid flakes. -bazel test --config=rocm --test_tag_filters=-no_oss,-oss_serial,-no_gpu,-no_rocm,-benchmark-test -k \ - --test_lang_filters=cc --jobs=${N_JOBS} --test_timeout 300,450,1200,3600 \ - --build_tests_only --test_output=errors --local_test_jobs=${TF_GPU_COUNT} --config=opt \ - --test_sharding_strategy=disabled \ - --test_size_filters=small,medium \ - --run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute -- \ - //tensorflow/... -//tensorflow/compiler/... -//tensorflow/contrib/... +bazel test \ + --config=rocm \ + -k \ + --test_tag_filters=-no_oss,-oss_serial,-no_gpu,-no_rocm,-benchmark-test,-rocm_multi_gpu,-v1only \ + --test_lang_filters=cc \ + --jobs=${N_JOBS} \ + --local_test_jobs=${TF_GPU_COUNT}\ + --test_timeout 600,900,2400,7200 \ + --build_tests_only \ + --test_output=errors \ + --test_sharding_strategy=disabled \ + --test_size_filters=small,medium,large \ + --run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute \ + -- \ + //tensorflow/... \ + -//tensorflow/compiler/... \ + -//tensorflow/lite/... diff --git a/tensorflow/tools/ci_build/linux/rocm/run_py3_core.sh b/tensorflow/tools/ci_build/linux/rocm/run_py3_core.sh index 424b3e6fa0a0b8..6169de0514a205 100755 --- a/tensorflow/tools/ci_build/linux/rocm/run_py3_core.sh +++ b/tensorflow/tools/ci_build/linux/rocm/run_py3_core.sh @@ -35,9 +35,20 @@ export TF_GPU_COUNT=${N_GPUS} yes "" | $PYTHON_BIN_PATH configure.py # Run bazel test command. Double test timeouts to avoid flakes. -bazel test --config=rocm --test_tag_filters=-no_oss,-oss_serial,-no_gpu,-no_rocm,-benchmark-test -k \ - --test_lang_filters=py --jobs=${N_JOBS} --test_timeout 600,900,2400,7200 \ - --build_tests_only --test_output=errors --local_test_jobs=${TF_GPU_COUNT} --config=opt \ - --test_sharding_strategy=disabled \ - --run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute -- \ - //tensorflow/... -//tensorflow/compiler/... -//tensorflow/contrib/... +bazel test \ + --config=rocm \ + -k \ + --test_tag_filters=-no_oss,-oss_serial,-no_gpu,-no_rocm,-benchmark-test,-rocm_multi_gpu,-v1only \ + --test_lang_filters=py \ + --jobs=${N_JOBS} \ + --local_test_jobs=${TF_GPU_COUNT} \ + --test_timeout 600,900,2400,7200 \ + --build_tests_only \ + --test_output=errors \ + --test_sharding_strategy=disabled \ + --test_size_filters=small,medium \ + --run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute \ + -- \ + //tensorflow/... \ + -//tensorflow/compiler/... \ + -//tensorflow/lite/... diff --git a/tensorflow/tools/ci_build/xla/linux/rocm/run_py3.sh b/tensorflow/tools/ci_build/xla/linux/rocm/run_py3.sh index 72924fb1c44d1b..c2300d749c6ba5 100755 --- a/tensorflow/tools/ci_build/xla/linux/rocm/run_py3.sh +++ b/tensorflow/tools/ci_build/xla/linux/rocm/run_py3.sh @@ -27,6 +27,7 @@ echo "" # Run configure. export PYTHON_BIN_PATH=`which python3` +export CC_OPT_FLAGS='-mavx' export TF_NEED_ROCM=1 export TF_GPU_COUNT=${N_GPUS} @@ -34,12 +35,51 @@ export TF_GPU_COUNT=${N_GPUS} yes "" | $PYTHON_BIN_PATH configure.py echo "build --distinct_host_configuration=false" >> .tf_configure.bazelrc -bazel clean # Run bazel test command. Double test timeouts to avoid flakes. -bazel test --config=rocm --test_tag_filters=-no_gpu,-benchmark-test,-no_oss,-no_rocm -k \ - --jobs=${N_JOBS} --test_timeout 600,900,2400,7200 \ - --build_tests_only --test_output=errors --local_test_jobs=${TF_GPU_COUNT} \ - --test_sharding_strategy=disabled \ - --run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute \ - --config=xla -- \ - //tensorflow/compiler/... +bazel test \ + --config=rocm \ + --config=xla \ + -k \ + --test_tag_filters=-no_oss,-oss_serial,-no_gpu,-no_rocm,-benchmark-test,-rocm_multi_gpu,-v1only \ + --jobs=${N_JOBS} \ + --local_test_jobs=${TF_GPU_COUNT} \ + --test_timeout 600,900,2400,7200 \ + --build_tests_only \ + --test_output=errors \ + --test_sharding_strategy=disabled \ + --test_size_filters=small,medium \ + --run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute \ + -- \ + //tensorflow/compiler/... \ + -//tensorflow/compiler/tests:dense_layer_test \ + -//tensorflow/compiler/tests:dense_layer_test_gpu \ + -//tensorflow/compiler/tests:jit_test \ + -//tensorflow/compiler/tests:jit_test_gpu \ + -//tensorflow/compiler/tests:matrix_triangular_solve_op_test \ + -//tensorflow/compiler/tests:tensor_array_ops_test \ + -//tensorflow/compiler/tests:xla_ops_test \ + -//tensorflow/compiler/xla/client/lib:svd_test \ + -//tensorflow/compiler/tests:lstm_test \ +&& bazel test \ + --config=rocm \ + --config=xla \ + -k \ + --test_tag_filters=-no_oss,-oss_serial,-no_gpu,-no_rocm,-benchmark-test,-rocm_multi_gpu,-v1only \ + --jobs=${N_JOBS} \ + --local_test_jobs=${TF_GPU_COUNT} \ + --test_timeout 600,900,2400,7200 \ + --build_tests_only \ + --test_output=errors \ + --test_sharding_strategy=disabled \ + --test_env=TF2_BEHAVIOR=0 \ + --run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute \ + -- \ + //tensorflow/compiler/tests:dense_layer_test \ + //tensorflow/compiler/tests:dense_layer_test_gpu \ + //tensorflow/compiler/tests:jit_test \ + //tensorflow/compiler/tests:jit_test_gpu \ + //tensorflow/compiler/tests:matrix_triangular_solve_op_test \ + //tensorflow/compiler/tests:tensor_array_ops_test \ + //tensorflow/compiler/tests:xla_ops_test \ + //tensorflow/compiler/xla/client/lib:svd_test \ + //tensorflow/compiler/tests:lstm_test From a4cd73b1ba24070176679e83ed5381d5859524fc Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Tue, 26 May 2020 01:00:12 +0000 Subject: [PATCH 030/243] Adding no_rocm tag to unit-tests currently failing on the ROCm platform --- tensorflow/python/BUILD | 8 +++++++- .../experimental/kernel_tests/serialization/BUILD | 1 + tensorflow/python/debug/BUILD | 2 ++ tensorflow/python/eager/BUILD | 1 + tensorflow/python/feature_column/BUILD | 2 ++ tensorflow/python/keras/BUILD | 13 +++++++++++-- tensorflow/python/ops/ragged/BUILD | 1 + tensorflow/python/tpu/BUILD | 2 ++ tensorflow/tools/compatibility/BUILD | 1 + tensorflow/tools/docs/BUILD | 1 + 10 files changed, 29 insertions(+), 3 deletions(-) diff --git a/tensorflow/python/BUILD b/tensorflow/python/BUILD index f2ca67521f257c..3586f48ee0ff96 100644 --- a/tensorflow/python/BUILD +++ b/tensorflow/python/BUILD @@ -2864,6 +2864,7 @@ tf_py_test( ":framework_for_generated_wrappers", "//third_party/py/numpy", ], + tags = ["no_rocm"], ) cuda_py_test( @@ -5909,6 +5910,7 @@ tf_py_test( "client_testlib", "framework_test_lib", ], + tags = ["no_rocm"], ) tf_py_test( @@ -5931,7 +5933,10 @@ tf_py_test( ":errors", ":lib", ], - tags = ["no_windows"], + tags = [ + "no_rocm", + "no_windows", + ], ) tf_py_test( @@ -6633,6 +6638,7 @@ tf_py_test( ":random_ops", ], main = "layers/pooling_test.py", + tags = ["no_rocm"], ) cuda_py_test( diff --git a/tensorflow/python/data/experimental/kernel_tests/serialization/BUILD b/tensorflow/python/data/experimental/kernel_tests/serialization/BUILD index 7770323fc487ee..89f618f1dafee2 100644 --- a/tensorflow/python/data/experimental/kernel_tests/serialization/BUILD +++ b/tensorflow/python/data/experimental/kernel_tests/serialization/BUILD @@ -105,6 +105,7 @@ py_test( srcs_version = "PY2AND3", tags = [ "no_pip", + "no_rocm", "no_windows", "notsan", ], diff --git a/tensorflow/python/debug/BUILD b/tensorflow/python/debug/BUILD index 7eb9baac19b8e4..3b2687360f5aa4 100644 --- a/tensorflow/python/debug/BUILD +++ b/tensorflow/python/debug/BUILD @@ -878,6 +878,7 @@ py_test( srcs = ["wrappers/framework_test.py"], python_version = "PY2", srcs_version = "PY2AND3", + tags = ["no_rocm"], deps = [ ":debug_data", ":framework", @@ -1094,6 +1095,7 @@ py_test( srcs = ["cli/debugger_cli_common_test.py"], python_version = "PY2", srcs_version = "PY2AND3", + tags = ["no_rocm"], deps = [ ":debugger_cli_common", "//tensorflow/python:framework_test_lib", diff --git a/tensorflow/python/eager/BUILD b/tensorflow/python/eager/BUILD index 5bc654c21849b4..199f3264a200ab 100644 --- a/tensorflow/python/eager/BUILD +++ b/tensorflow/python/eager/BUILD @@ -790,6 +790,7 @@ tpu_py_test( name = "remote_cloud_tpu_test", srcs = ["remote_cloud_tpu_test.py"], tags = [ + "no_rocm", "notap", ], deps = [ diff --git a/tensorflow/python/feature_column/BUILD b/tensorflow/python/feature_column/BUILD index 38c3657ef58ec8..355834b8d03422 100644 --- a/tensorflow/python/feature_column/BUILD +++ b/tensorflow/python/feature_column/BUILD @@ -113,6 +113,7 @@ tf_py_test( tags = [ "no_cuda_on_cpu_tap", "no_pip", + "no_rocm", "no_windows", ], ) @@ -163,6 +164,7 @@ tf_py_test( tags = [ "no_cuda_on_cpu_tap", "no_pip", + "no_rocm", "no_windows", ], ) diff --git a/tensorflow/python/keras/BUILD b/tensorflow/python/keras/BUILD index 05809accba348b..d68ff2a9f88581 100755 --- a/tensorflow/python/keras/BUILD +++ b/tensorflow/python/keras/BUILD @@ -585,7 +585,10 @@ tf_py_test( "//tensorflow/python:nn_ops", ], shard_count = 16, - tags = ["notsan"], + tags = [ + "no_rocm", + "notsan", + ], ) tf_py_test( @@ -750,6 +753,7 @@ tf_py_test( "//tensorflow/python:client_testlib", ], shard_count = 11, + tags = ["no_rocm"], ) tf_py_test( @@ -1034,7 +1038,10 @@ tf_py_test( "//tensorflow/python:client_testlib", ], shard_count = 4, - tags = ["notsan"], # http://b/62136390 + tags = [ + "no_rocm", + "notsan", # http://b/62136390 + ], ) tf_py_test( @@ -1049,6 +1056,7 @@ tf_py_test( ], shard_count = 4, tags = [ + "no_rocm", "noasan", # times out b/63678675 "notsan", # http://b/62189182 ], @@ -1065,6 +1073,7 @@ tf_py_test( "//tensorflow/python:client_testlib", ], shard_count = 10, + tags = ["no_rocm"], ) cuda_py_test( diff --git a/tensorflow/python/ops/ragged/BUILD b/tensorflow/python/ops/ragged/BUILD index 0ab663dd347971..57655003df312c 100644 --- a/tensorflow/python/ops/ragged/BUILD +++ b/tensorflow/python/ops/ragged/BUILD @@ -977,6 +977,7 @@ py_test( srcs = ["ragged_map_fn_op_test.py"], python_version = "PY2", srcs_version = "PY2AND3", + tags = ["no_rocm"], deps = [ ":ragged", # fixdeps: keep ":ragged_factory_ops", diff --git a/tensorflow/python/tpu/BUILD b/tensorflow/python/tpu/BUILD index f2262c395f6611..e394964977d332 100644 --- a/tensorflow/python/tpu/BUILD +++ b/tensorflow/python/tpu/BUILD @@ -32,6 +32,7 @@ py_test( "no_oss_py2", "no_oss_py35", "no_pip", + "no_rocm", ], deps = [ "//tensorflow/python:client_testlib", @@ -76,6 +77,7 @@ tpu_py_test( size = "medium", srcs = ["async_checkpoint_test.py"], disable_experimental = True, + tags = ["no_rocm"], deps = [ ":async_checkpoint", ":tpu_estimator", diff --git a/tensorflow/tools/compatibility/BUILD b/tensorflow/tools/compatibility/BUILD index ea4d532091f05c..769abd60f2ca71 100644 --- a/tensorflow/tools/compatibility/BUILD +++ b/tensorflow/tools/compatibility/BUILD @@ -261,6 +261,7 @@ py_test( srcs = ["test_file_v2_0.py"], python_version = "PY3", srcs_version = "PY2AND3", + tags = ["no_rocm"], deps = [ "//tensorflow:tensorflow_py", ], diff --git a/tensorflow/tools/docs/BUILD b/tensorflow/tools/docs/BUILD index 68f04f20dc3c1e..6cdfa736581b45 100644 --- a/tensorflow/tools/docs/BUILD +++ b/tensorflow/tools/docs/BUILD @@ -16,6 +16,7 @@ py_test( python_version = "PY3", tags = [ "no_oss_py2", + "no_rocm", "noasan", "nomsan", "notsan", From f8963b6ca48856504e0b3cacfacaf4d5e6d6b7ba Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Wed, 27 May 2020 15:07:32 +0000 Subject: [PATCH 031/243] Updating ROCm Dockerfile + CI scripts to use ROCm 3.3 --- tensorflow/tools/ci_build/Dockerfile.rocm | 4 ++-- tensorflow/tools/ci_build/linux/rocm/run_cc_core.sh | 1 + tensorflow/tools/ci_build/linux/rocm/run_csb_tests.sh | 1 + tensorflow/tools/ci_build/linux/rocm/run_py3_core.sh | 1 + tensorflow/tools/ci_build/xla/linux/rocm/run_py3.sh | 1 + 5 files changed, 6 insertions(+), 2 deletions(-) diff --git a/tensorflow/tools/ci_build/Dockerfile.rocm b/tensorflow/tools/ci_build/Dockerfile.rocm index 130d198ece08e6..6d124204ed82d9 100644 --- a/tensorflow/tools/ci_build/Dockerfile.rocm +++ b/tensorflow/tools/ci_build/Dockerfile.rocm @@ -3,8 +3,8 @@ FROM ubuntu:xenial MAINTAINER Jeff Poznanovic -ARG DEB_ROCM_REPO=http://repo.radeon.com/rocm/apt/2.8.0/ -ARG ROCM_PATH=/opt/rocm +ARG DEB_ROCM_REPO=http://repo.radeon.com/rocm/apt/3.3/ +ARG ROCM_PATH=/opt/rocm-3.3.0 ENV DEBIAN_FRONTEND noninteractive ENV TF_NEED_ROCM 1 diff --git a/tensorflow/tools/ci_build/linux/rocm/run_cc_core.sh b/tensorflow/tools/ci_build/linux/rocm/run_cc_core.sh index bcd7241d6ca537..49b23363e4e7d9 100755 --- a/tensorflow/tools/ci_build/linux/rocm/run_cc_core.sh +++ b/tensorflow/tools/ci_build/linux/rocm/run_cc_core.sh @@ -30,6 +30,7 @@ export PYTHON_BIN_PATH=`which python3` export CC_OPT_FLAGS='-mavx' export TF_NEED_ROCM=1 +export ROCM_PATH=/opt/rocm-3.3.0 export TF_GPU_COUNT=${N_GPUS} yes "" | $PYTHON_BIN_PATH configure.py diff --git a/tensorflow/tools/ci_build/linux/rocm/run_csb_tests.sh b/tensorflow/tools/ci_build/linux/rocm/run_csb_tests.sh index 61813dfde30e7a..5e481112f12a11 100755 --- a/tensorflow/tools/ci_build/linux/rocm/run_csb_tests.sh +++ b/tensorflow/tools/ci_build/linux/rocm/run_csb_tests.sh @@ -30,6 +30,7 @@ export PYTHON_BIN_PATH=`which python3` export CC_OPT_FLAGS='-mavx' export TF_NEED_ROCM=1 +export ROCM_PATH=/opt/rocm-3.3.0 export TF_GPU_COUNT=${N_GPUS} yes "" | $PYTHON_BIN_PATH configure.py diff --git a/tensorflow/tools/ci_build/linux/rocm/run_py3_core.sh b/tensorflow/tools/ci_build/linux/rocm/run_py3_core.sh index 6169de0514a205..d55fa56f970f20 100755 --- a/tensorflow/tools/ci_build/linux/rocm/run_py3_core.sh +++ b/tensorflow/tools/ci_build/linux/rocm/run_py3_core.sh @@ -30,6 +30,7 @@ export PYTHON_BIN_PATH=`which python3` export CC_OPT_FLAGS='-mavx' export TF_NEED_ROCM=1 +export ROCM_PATH=/opt/rocm-3.3.0 export TF_GPU_COUNT=${N_GPUS} yes "" | $PYTHON_BIN_PATH configure.py diff --git a/tensorflow/tools/ci_build/xla/linux/rocm/run_py3.sh b/tensorflow/tools/ci_build/xla/linux/rocm/run_py3.sh index c2300d749c6ba5..6ce1fad9cc7542 100755 --- a/tensorflow/tools/ci_build/xla/linux/rocm/run_py3.sh +++ b/tensorflow/tools/ci_build/xla/linux/rocm/run_py3.sh @@ -30,6 +30,7 @@ export PYTHON_BIN_PATH=`which python3` export CC_OPT_FLAGS='-mavx' export TF_NEED_ROCM=1 +export ROCM_PATH=/opt/rocm-3.3.0 export TF_GPU_COUNT=${N_GPUS} yes "" | $PYTHON_BIN_PATH configure.py From eed2b383c2270d0440af3b4b301fed1911f59315 Mon Sep 17 00:00:00 2001 From: TensorFlower Gardener Date: Tue, 23 Jun 2020 14:05:10 -0700 Subject: [PATCH 032/243] Merge pull request #40705 from Intel-tensorflow:chuanqiw/upgrade_sqlite PiperOrigin-RevId: 317934381 Change-Id: I95cdf789f7f5a89d75d45a1b6d67f1ad993cafab --- tensorflow/workspace.bzl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl index 0d94c21025132c..79b2bcf1c10298 100755 --- a/tensorflow/workspace.bzl +++ b/tensorflow/workspace.bzl @@ -275,12 +275,12 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""): tf_http_archive( name = "org_sqlite", build_file = clean_dep("//third_party:sqlite.BUILD"), - sha256 = "f3c79bc9f4162d0b06fa9fe09ee6ccd23bb99ce310b792c5145f87fbcc30efca", - strip_prefix = "sqlite-amalgamation-3310100", + sha256 = "e9cec01d4519e2d49b3810615237325263fe1feaceae390ee12b4a29bd73dbe2", + strip_prefix = "sqlite-amalgamation-3320300", system_build_file = clean_dep("//third_party/systemlibs:sqlite.BUILD"), urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/www.sqlite.org/2020/sqlite-amalgamation-3310100.zip", - "https://www.sqlite.org/2020/sqlite-amalgamation-3310100.zip", + "https://storage.googleapis.com/mirror.tensorflow.org/www.sqlite.org/2020/sqlite-amalgamation-3320300.zip", + "https://www.sqlite.org/2020/sqlite-amalgamation-3320300.zip", ], ) From eebfff6aee0f2742532757a574840cc1ba5780f6 Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Wed, 24 Jun 2020 14:31:44 +0000 Subject: [PATCH 033/243] Prepping for switch to ROCm 3.5+ Starting with ROCm 3.5 the underlying compiler used by hipcc will change from HCC to hip-clang. There will be a corresponding change in the HIP Runtime as well. This commit is part of a series which are intended to make the transition to ROCm 3.5+ easier. This commit adds an alternative lookup path for `ld.lld` (since its location within the rocm install, will move in ROCm 3.5+). --- .../xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc index 84616f3a37b2d8..38d0a48aa08739 100644 --- a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc +++ b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc @@ -620,8 +620,10 @@ StatusOr> EmitModuleToHsaco( // Locate lld. // TODO(whchung@gmail.com): change to tensorflow::ROCmRoot() after // ROCm-Device-Libs PR. - std::string lld_path = tensorflow::io::JoinPath("/opt/rocm", "hcc/bin"); - auto lld_program = llvm::sys::findProgramByName("ld.lld", {lld_path}); + std::string lld_path_1 = tensorflow::io::JoinPath("/opt/rocm", "hcc/bin"); + std::string lld_path_2 = tensorflow::io::JoinPath("/opt/rocm", "llvm/bin"); + auto lld_program = + llvm::sys::findProgramByName("ld.lld", {lld_path_1, lld_path_2}); if (!lld_program) { return xla::InternalError("unable to find ld.lld in PATH: %s", lld_program.getError().message()); From a2ea9dde0c661ba4a204993219a3c9e8039503cd Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Wed, 24 Jun 2020 14:43:54 +0000 Subject: [PATCH 034/243] Prepping for switch to ROCm 3.5+ Starting with ROCm 3.5 the underlying compiler used by hipcc will change from HCC to hip-clang. There will be a corresponding change in the HIP Runtime as well. This commit is part of a series which are intended to make the transition to ROCm 3.5+ easier. The hipcc in ROCm 3.5+ uses 256 as the default value for `__launch_bounds__` attribute value. So for kernels that will be invoked using a higher threads_per_block value, we need to explicitly specify the value for the `__launch_bounds__` attribute. This change affects the following unit-tests (i.e. makes them not regress with ROCm 3.5+) ``` //tensorflow/python/keras:model_subclassing_compiled_test //tensorflow/python/debug:check_numerics_callback_test_gpu ``` --- tensorflow/core/kernels/reduction_gpu_kernels.cu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/reduction_gpu_kernels.cu.h b/tensorflow/core/kernels/reduction_gpu_kernels.cu.h index e26b9fd5ad1c72..68a31e16286812 100644 --- a/tensorflow/core/kernels/reduction_gpu_kernels.cu.h +++ b/tensorflow/core/kernels/reduction_gpu_kernels.cu.h @@ -337,7 +337,7 @@ __global__ void ColumnReduceMax16ColumnsKernel( // Maps each block to a column range TF_RED_WARPSIZE wide template -__global__ void ColumnReduceKernel( +__global__ __launch_bounds__(1024) void ColumnReduceKernel( T in, OUT_T out, int num_rows, int num_cols, Op op, typename std::iterator_traits::value_type initVal) { typedef typename std::iterator_traits::value_type value_type; From c2f059d5234d2a80ca1944aafeac68109816e4b0 Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Wed, 24 Jun 2020 14:48:16 +0000 Subject: [PATCH 035/243] Prepping for switch to ROCm 3.5+ Starting with ROCm 3.5 the underlying compiler used by hipcc will change from HCC to hip-clang. There will be a corresponding change in the HIP Runtime as well. This commit is part of a series which are intended to make the transition to ROCm 3.5+ easier. The path to the ROCDL files changes with ROCm 3.5, and hence this change. The macro TENSORFLOW_COMPILE_IS_HIP_CLANG is only true when compiling TF with ROCm 3.5 and higher. The macro is a temporary construct to aid with the transition. Once the transition is complete, it will removed and the code updated appropriately. --- tensorflow/core/platform/default/rocm_rocdl_path.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/core/platform/default/rocm_rocdl_path.cc b/tensorflow/core/platform/default/rocm_rocdl_path.cc index 0831544f616958..7331c6625a1c6a 100644 --- a/tensorflow/core/platform/default/rocm_rocdl_path.cc +++ b/tensorflow/core/platform/default/rocm_rocdl_path.cc @@ -36,7 +36,11 @@ string RocmRoot() { } string RocdlRoot() { +#if TENSORFLOW_COMPILER_IS_HIP_CLANG + return tensorflow::io::JoinPath(tensorflow::RocmRoot(), "lib"); +#else return tensorflow::io::JoinPath(tensorflow::RocmRoot(), "hcc/lib"); +#endif } } // namespace tensorflow From 3d94212a914751f535becaeacee7ea29f33d3ef1 Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Wed, 24 Jun 2020 14:57:33 +0000 Subject: [PATCH 036/243] Prepping for switch to ROCm 3.5+ Starting with ROCm 3.5 the underlying compiler used by hipcc will change from HCC to hip-clang. There will be a corresponding change in the HIP Runtime as well. This commit is part of a series which are intended to make the transition to ROCm 3.5+ easier. ROCm 3.5+ (more specifically the hip-clang compiler) seems to be picky about not using a `__launch_bounds__` attribute value which is lower than the threads_per_warp value (64 on the ROCm platform). This change is to accomodate that pickiness. The macro TENSORFLOW_COMPILE_IS_HIP_CLANG is only true when compiling TF with ROCm 3.5 and higher. The macro is a temporary construct to aid with the transition. Once the transition is complete, it will removed and the code updated appropriately. --- tensorflow/core/kernels/scan_ops_gpu.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tensorflow/core/kernels/scan_ops_gpu.h b/tensorflow/core/kernels/scan_ops_gpu.h index 27da21982af197..2fca556f2fc862 100644 --- a/tensorflow/core/kernels/scan_ops_gpu.h +++ b/tensorflow/core/kernels/scan_ops_gpu.h @@ -263,6 +263,12 @@ void LaunchScan(const GPUDevice& d, typename TTypes::ConstTensor in, int num_blocks = dimx * dimz; int ideal_block_size = dimy / items_per_thread; +#if TENSORFLOW_COMPILER_IS_HIP_CLANG + const int rocm_threads_per_warp = 64; + ideal_block_size = (ideal_block_size > rocm_threads_per_warp) + ? ideal_block_size + : rocm_threads_per_warp; +#endif // There seems to be a bug when the type is not float and block_size 1024. // Launch on the smallest power of 2 block size that we can. From 91aa814fd2c7efe08a011e052e7ba6169f5400a1 Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Wed, 24 Jun 2020 15:08:24 +0000 Subject: [PATCH 037/243] Prepping for switch to ROCm 3.5+ Starting with ROCm 3.5 the underlying compiler used by hipcc will change from HCC to hip-clang. There will be a corresponding change in the HIP Runtime as well. This commit is part of a series which are intended to make the transition to ROCm 3.5+ easier. Disabling some unit-tests (by adding the no_rocm tag to them) because they regerss after the switch to ROCm 3.5+ ``` # XLA related failure //tensorflow/python/eager:def_function_xla_jit_test //tensorflow/python/keras:backend_test # eigen packet then scalar JIRA ticket 236756 //tensorflow/python/ops/ragged:ragged_dispatch_test # genuine miscompares //tensorflow/python/kernel_tests:bias_op_deterministic_test //tensorflow/python/kernel_tests:array_ops_test # Memory access fault by GPU ... Reason: Page not present or supervisor privilege. //tensorflow/python/ops/parallel_for:math_test ``` --- tensorflow/python/eager/BUILD | 1 + tensorflow/python/keras/BUILD | 1 + tensorflow/python/kernel_tests/BUILD | 2 ++ tensorflow/python/ops/parallel_for/BUILD | 5 ++++- tensorflow/python/ops/ragged/BUILD | 1 + 5 files changed, 9 insertions(+), 1 deletion(-) diff --git a/tensorflow/python/eager/BUILD b/tensorflow/python/eager/BUILD index 199f3264a200ab..47fc2dce1745e6 100644 --- a/tensorflow/python/eager/BUILD +++ b/tensorflow/python/eager/BUILD @@ -703,6 +703,7 @@ cuda_py_test( ], tags = [ "no_mac", + "no_rocm", "no_windows", ], xla_enabled = True, diff --git a/tensorflow/python/keras/BUILD b/tensorflow/python/keras/BUILD index d68ff2a9f88581..1a2bed91ded41d 100755 --- a/tensorflow/python/keras/BUILD +++ b/tensorflow/python/keras/BUILD @@ -1821,6 +1821,7 @@ tf_py_test( "//tensorflow/python:util", ], shard_count = 4, + tags = ["no_rocm"], ) tf_py_test( diff --git a/tensorflow/python/kernel_tests/BUILD b/tensorflow/python/kernel_tests/BUILD index 6716597333966d..3ba9dbf376c356 100644 --- a/tensorflow/python/kernel_tests/BUILD +++ b/tensorflow/python/kernel_tests/BUILD @@ -1582,6 +1582,7 @@ cuda_py_test( ], shard_count = 10, tags = [ + "no_rocm", "noasan", # times out "optonly", # times out ], @@ -1677,6 +1678,7 @@ cuda_py_test( additional_deps = [ ":bias_op_base", ], + tags = ["no_rocm"], xla_enable_strict_auto_jit = False, ) diff --git a/tensorflow/python/ops/parallel_for/BUILD b/tensorflow/python/ops/parallel_for/BUILD index dff4b92203616e..50c31f5c728d51 100644 --- a/tensorflow/python/ops/parallel_for/BUILD +++ b/tensorflow/python/ops/parallel_for/BUILD @@ -170,7 +170,10 @@ cuda_py_test( "//tensorflow/python:util", ], shard_count = 5, - tags = ["optonly"], # Too slow in non-opt mode + tags = [ + "no_rocm", + "optonly", # Too slow in non-opt mode + ], ) py_library( diff --git a/tensorflow/python/ops/ragged/BUILD b/tensorflow/python/ops/ragged/BUILD index 57655003df312c..f6549fc90d9b94 100644 --- a/tensorflow/python/ops/ragged/BUILD +++ b/tensorflow/python/ops/ragged/BUILD @@ -936,6 +936,7 @@ py_test( srcs = ["ragged_dispatch_test.py"], python_version = "PY2", srcs_version = "PY2AND3", + tags = ["no_rocm"], deps = [ ":ragged", # fixdeps: keep ":ragged_factory_ops", From 073aec301d9685f1435355fbd4c998c4f47d4d6d Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Wed, 24 Jun 2020 23:45:06 +0000 Subject: [PATCH 038/243] Prepping for switch to ROCm 3.5+ Starting with ROCm 3.5 the underlying compiler used by hipcc will change from HCC to hip-clang. There will be a corresponding change in the HIP Runtime as well. This commit is part of a series which are intended to make the transition to ROCm 3.5+ easier. This commit updates the ROCm crosstool wrapper/driver * to add a HIP-clang specific compiler option ( `-lrt` ) * remove the `gtest` + `gmock` headers from the ROCm install (so that the ones used by TF get picked up instead) This commit is already present in the `master` branch --- .../bin/crosstool_wrapper_driver_rocm.tpl | 4 +++- third_party/gpus/cuda_configure.bzl | 19 ++++++++++++++----- third_party/gpus/rocm_configure.bzl | 1 + 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_rocm.tpl b/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_rocm.tpl index f5ac7b39dfdb9a..89275128a9c6b0 100755 --- a/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_rocm.tpl +++ b/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_rocm.tpl @@ -179,7 +179,7 @@ def InvokeHipcc(argv, log=False): # Also we need to retain warning about uninitialised shared variable as # warning only, even when -Werror option is specified. if HIPCC_IS_HIPCLANG: - hipccopts += ' --include=hip/hip_runtime.h -Wno-error=cuda-shared-init ' + hipccopts += ' --include=hip/hip_runtime.h ' hipccopts += ' ' + hipcc_compiler_options # Use -fno-gpu-rdc by default for early GPU kernel finalization # This flag would trigger GPU kernels be generated at compile time, instead @@ -258,6 +258,8 @@ def main(): gpu_linker_flags.append('-L' + HIP_RUNTIME_PATH) gpu_linker_flags.append('-Wl,-rpath=' + HIP_RUNTIME_PATH) gpu_linker_flags.append('-l' + HIP_RUNTIME_LIBRARY) + if HIPCC_IS_HIPCLANG: + gpu_linker_flags.append("-lrt") if VERBOSE: print(' '.join([CPU_COMPILER] + gpu_linker_flags)) return subprocess.call([CPU_COMPILER] + gpu_linker_flags) diff --git a/third_party/gpus/cuda_configure.bzl b/third_party/gpus/cuda_configure.bzl index af1bc96f000e9c..9b9262f6327f51 100644 --- a/third_party/gpus/cuda_configure.bzl +++ b/third_party/gpus/cuda_configure.bzl @@ -932,23 +932,32 @@ def make_copy_files_rule(repository_ctx, name, srcs, outs): cmd = \"""%s \""", )""" % (name, "\n".join(outs), " && \\\n".join(cmds)) -def make_copy_dir_rule(repository_ctx, name, src_dir, out_dir): - """Returns a rule to recursively copy a directory.""" +def make_copy_dir_rule(repository_ctx, name, src_dir, out_dir, exceptions=None): + """Returns a rule to recursively copy a directory. + If exceptions is not None, it must be a list of files or directories in + 'src_dir'; these will be excluded from copying. + """ src_dir = _norm_path(src_dir) out_dir = _norm_path(out_dir) outs = _read_dir(repository_ctx, src_dir) + post_cmd='' + if exceptions!=None: + outs = [x for x in outs if not any([x.startswith(src_dir+"/"+y) + for y in exceptions])] outs = [(' "%s",' % out.replace(src_dir, out_dir)) for out in outs] - # '@D' already contains the relative path for a single file, see # http://docs.bazel.build/versions/master/be/make-variables.html#predefined_genrule_variables out_dir = "$(@D)/%s" % out_dir if len(outs) > 1 else "$(@D)" + if exceptions!=None: + for x in exceptions: + post_cmd+=" ; rm -fR " + out_dir + "/" + x return """genrule( name = "%s", outs = [ %s ], - cmd = \"""cp -rLf "%s/." "%s/" \""", -)""" % (name, "\n".join(outs), src_dir, out_dir) + cmd = \"""cp -rLf "%s/." "%s/" %s\""", +)""" % (name, "\n".join(outs), src_dir, out_dir, post_cmd) def _read_dir(repository_ctx, src_dir): """Returns a string with all files in a directory. diff --git a/third_party/gpus/rocm_configure.bzl b/third_party/gpus/rocm_configure.bzl index 7f89b3ccbb4d3f..dff65f19e0c724 100644 --- a/third_party/gpus/rocm_configure.bzl +++ b/third_party/gpus/rocm_configure.bzl @@ -686,6 +686,7 @@ def _create_local_rocm_repository(repository_ctx): name = "rocm-include", src_dir = rocm_toolkit_path + "/include", out_dir = "rocm/include", + exceptions = ["gtest", "gmock"], ), make_copy_dir_rule( repository_ctx, From 02306939d3f5b0cb4059d275dfec61d48dc3073d Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Wed, 24 Jun 2020 23:47:47 +0000 Subject: [PATCH 039/243] Prepping for switch to ROCm 3.5+ Starting with ROCm 3.5 the underlying compiler used by hipcc will change from HCC to hip-clang. There will be a corresponding change in the HIP Runtime as well. This commit is part of a series which are intended to make the transition to ROCm 3.5+ easier. This commit makes some minor tweaks. It is already present in the `master` branch --- .../xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc | 2 +- tensorflow/stream_executor/rocm/rocm_gpu_executor.cc | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc index 38d0a48aa08739..6969448b03008e 100644 --- a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc +++ b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc @@ -687,7 +687,7 @@ std::unique_ptr AMDGPUGetTargetMachine( llvm::Triple target_triple, int amdgpu_version, const HloModuleConfig& hlo_module_config) { return GetTargetMachine(target_triple, absl::StrCat("gfx", amdgpu_version), - hlo_module_config, "-code-object-v3"); + hlo_module_config, "+code-object-v3"); } void AMDGPUBackendInit(const HloModuleConfig& hlo_module_config) { diff --git a/tensorflow/stream_executor/rocm/rocm_gpu_executor.cc b/tensorflow/stream_executor/rocm/rocm_gpu_executor.cc index e22a243a70bd3a..fd3b5f19913ba2 100644 --- a/tensorflow/stream_executor/rocm/rocm_gpu_executor.cc +++ b/tensorflow/stream_executor/rocm/rocm_gpu_executor.cc @@ -132,6 +132,11 @@ bool GpuExecutor::UnloadGpuBinary(const void* gpu_binary) { VLOG(3) << "Unloading HSACO module " << module; GpuDriver::UnloadModule(context_, module); gpu_binary_to_module_.erase(module_it); + const char* mem_it = nullptr; + for (auto x : in_memory_modules_) { + if (x.second == module) mem_it = x.first; + } + if (mem_it != nullptr) in_memory_modules_.erase(mem_it); } return true; } From 3d2641ecdfe1d9756ceafa1116a8fbfefd083d78 Mon Sep 17 00:00:00 2001 From: Austin Anderson Date: Fri, 24 Jul 2020 09:06:12 -0700 Subject: [PATCH 040/243] Backport rel/ build layout to r2.1 Files copied from their current state on the 2.1 branch. --- .../ci_build/rel/macos/cpu_libtensorflow.sh | 23 +++++++ .../ci_build/rel/macos/cpu_py35_nonpip.sh | 53 ++++++++++++++ .../tools/ci_build/rel/macos/cpu_py35_pip.sh | 55 +++++++++++++++ .../ci_build/rel/macos/cpu_py36_nonpip.sh | 54 +++++++++++++++ .../tools/ci_build/rel/macos/cpu_py36_pip.sh | 55 +++++++++++++++ .../ci_build/rel/macos/cpu_py37_nonpip.sh | 53 ++++++++++++++ .../tools/ci_build/rel/macos/cpu_py37_pip.sh | 55 +++++++++++++++ .../ci_build/rel/ubuntu/cpu_py35_nonpip.sh | 46 +++++++++++++ .../tools/ci_build/rel/ubuntu/cpu_py35_pip.sh | 52 ++++++++++++++ .../ci_build/rel/ubuntu/cpu_py36_nonpip.sh | 46 +++++++++++++ .../tools/ci_build/rel/ubuntu/cpu_py36_pip.sh | 52 ++++++++++++++ .../ci_build/rel/ubuntu/cpu_py37_nonpip.sh | 46 +++++++++++++ .../tools/ci_build/rel/ubuntu/cpu_py37_pip.sh | 52 ++++++++++++++ .../ci_build/rel/ubuntu/gpu_pip_on_cpu.sh | 56 +++++++++++++++ .../ci_build/rel/ubuntu/gpu_py35_nonpip.sh | 58 ++++++++++++++++ .../tools/ci_build/rel/ubuntu/gpu_py35_pip.sh | 69 +++++++++++++++++++ .../ci_build/rel/ubuntu/gpu_py36_nonpip.sh | 58 ++++++++++++++++ .../tools/ci_build/rel/ubuntu/gpu_py36_pip.sh | 69 +++++++++++++++++++ .../ci_build/rel/ubuntu/gpu_py37_nonpip.sh | 58 ++++++++++++++++ .../tools/ci_build/rel/ubuntu/gpu_py37_pip.sh | 69 +++++++++++++++++++ .../tools/ci_build/rel/ubuntu/sanity.sh | 35 ++++++++++ .../rel/windows/cpu_libtensorflow.bat | 20 ++++++ .../tools/ci_build/rel/windows/cpu_py35.bat | 20 ++++++ .../tools/ci_build/rel/windows/cpu_py36.bat | 20 ++++++ .../tools/ci_build/rel/windows/cpu_py37.bat | 20 ++++++ .../rel/windows/gpu_libtensorflow.bat | 20 ++++++ .../ci_build/rel/windows/gpu_pip_on_cpu.bat | 21 ++++++ .../tools/ci_build/rel/windows/gpu_py35.bat | 23 +++++++ .../tools/ci_build/rel/windows/gpu_py36.bat | 23 +++++++ .../tools/ci_build/rel/windows/gpu_py37.bat | 23 +++++++ 30 files changed, 1304 insertions(+) create mode 100644 tensorflow/tools/ci_build/rel/macos/cpu_libtensorflow.sh create mode 100644 tensorflow/tools/ci_build/rel/macos/cpu_py35_nonpip.sh create mode 100644 tensorflow/tools/ci_build/rel/macos/cpu_py35_pip.sh create mode 100644 tensorflow/tools/ci_build/rel/macos/cpu_py36_nonpip.sh create mode 100644 tensorflow/tools/ci_build/rel/macos/cpu_py36_pip.sh create mode 100644 tensorflow/tools/ci_build/rel/macos/cpu_py37_nonpip.sh create mode 100644 tensorflow/tools/ci_build/rel/macos/cpu_py37_pip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/cpu_py35_nonpip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/cpu_py35_pip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/cpu_py36_nonpip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/cpu_py36_pip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/cpu_py37_nonpip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/cpu_py37_pip.sh create mode 100755 tensorflow/tools/ci_build/rel/ubuntu/gpu_pip_on_cpu.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/gpu_py35_nonpip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/gpu_py35_pip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/gpu_py36_nonpip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/gpu_py36_pip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/gpu_py37_nonpip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/gpu_py37_pip.sh create mode 100644 tensorflow/tools/ci_build/rel/ubuntu/sanity.sh create mode 100644 tensorflow/tools/ci_build/rel/windows/cpu_libtensorflow.bat create mode 100644 tensorflow/tools/ci_build/rel/windows/cpu_py35.bat create mode 100644 tensorflow/tools/ci_build/rel/windows/cpu_py36.bat create mode 100644 tensorflow/tools/ci_build/rel/windows/cpu_py37.bat create mode 100644 tensorflow/tools/ci_build/rel/windows/gpu_libtensorflow.bat create mode 100644 tensorflow/tools/ci_build/rel/windows/gpu_pip_on_cpu.bat create mode 100644 tensorflow/tools/ci_build/rel/windows/gpu_py35.bat create mode 100644 tensorflow/tools/ci_build/rel/windows/gpu_py36.bat create mode 100644 tensorflow/tools/ci_build/rel/windows/gpu_py37.bat diff --git a/tensorflow/tools/ci_build/rel/macos/cpu_libtensorflow.sh b/tensorflow/tools/ci_build/rel/macos/cpu_libtensorflow.sh new file mode 100644 index 00000000000000..c0846a05236227 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/macos/cpu_libtensorflow.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +echo "chmod go+w lib_package/*" >> tensorflow/tools/ci_build/linux/libtensorflow.sh +echo "bazel clean --expunge" >> tensorflow/tools/ci_build/linux/libtensorflow.sh + +# Install latest bazel +source tensorflow/tools/ci_build/release/common.sh +update_bazel_macos + +tensorflow/tools/ci_build/osx/libtensorflow_cpu.sh diff --git a/tensorflow/tools/ci_build/rel/macos/cpu_py35_nonpip.sh b/tensorflow/tools/ci_build/rel/macos/cpu_py35_nonpip.sh new file mode 100644 index 00000000000000..d821656ba12efe --- /dev/null +++ b/tensorflow/tools/ci_build/rel/macos/cpu_py35_nonpip.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh +# Install latest bazel +update_bazel_macos +which bazel +bazel version +set_bazel_outdir + +# Pick a more recent version of xcode +export DEVELOPER_DIR=/Applications/Xcode_10.3.app/Contents/Developer +sudo xcode-select -s "${DEVELOPER_DIR}" +python3.5 -m virtualenv tf_build_env --system-site-packages +source tf_build_env/bin/activate + +# Install macos pip dependencies +install_macos_pip_deps sudo pip3.5 + +# Run configure. +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export TF2_BEHAVIOR=1 +export PYTHON_BIN_PATH=$(which python3.5) +yes "" | "$PYTHON_BIN_PATH" configure.py + +tag_filters="-no_oss,-oss_serial,-nomac,-no_mac,-no_oss_py35,-v1only,-gpu,-tpu,-benchmark-test" + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Run tests +bazel test --test_output=errors --config=opt \ + --action_env=TF2_BEHAVIOR="${TF2_BEHAVIOR}" \ + --build_tag_filters="${tag_filters}" \ + --test_tag_filters="${tag_filters}" -- \ + ${DEFAULT_BAZEL_TARGETS} \ + -//tensorflow/lite/... diff --git a/tensorflow/tools/ci_build/rel/macos/cpu_py35_pip.sh b/tensorflow/tools/ci_build/rel/macos/cpu_py35_pip.sh new file mode 100644 index 00000000000000..4559c1896164eb --- /dev/null +++ b/tensorflow/tools/ci_build/rel/macos/cpu_py35_pip.sh @@ -0,0 +1,55 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh +# Install latest bazel +update_bazel_macos +which bazel +bazel version +set_bazel_outdir + +# Pick a more recent version of xcode +export DEVELOPER_DIR=/Applications/Xcode_10.3.app/Contents/Developer +sudo xcode-select -s "${DEVELOPER_DIR}" + +# Install macos pip dependencies +install_macos_pip_deps sudo pip3.5 + +# Export required variables for running pip_new.sh +export OS_TYPE="MACOS" +export CONTAINER_TYPE="CPU" +export TF_PYTHON_VERSION='python3.5' +export TF_BUILD_BOTH_CPU_PACKAGES=1 + +# Run configure. +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION}) +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Export optional variables for running pip.sh +export TF_BUILD_FLAGS="--config=opt --config=v2" +export TF_TEST_FLAGS="--define=no_tensorflow_py_deps=true --test_lang_filters=py --test_output=errors --verbose_failures=true --keep_going --test_env=TF2_BEHAVIOR=1" +export TF_TEST_TARGETS="//tensorflow/python/..." +export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean" +export TF_TEST_FILTER_TAGS='-nomac,-no_mac,-no_oss,-oss_serial,-no_oss_py35,-gpu,-tpu,-benchmark-test' +export IS_NIGHTLY=0 # Not nightly +export TF_PROJECT_NAME="tensorflow" +export TF_PIP_TEST_ROOT="pip_test" + +./tensorflow/tools/ci_build/builds/pip_new.sh diff --git a/tensorflow/tools/ci_build/rel/macos/cpu_py36_nonpip.sh b/tensorflow/tools/ci_build/rel/macos/cpu_py36_nonpip.sh new file mode 100644 index 00000000000000..93205f8a60d458 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/macos/cpu_py36_nonpip.sh @@ -0,0 +1,54 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh +# Install latest bazel +update_bazel_macos +which bazel +bazel version +set_bazel_outdir + +# Pick a more recent version of xcode +export DEVELOPER_DIR=/Applications/Xcode_10.3.app/Contents/Developer +sudo xcode-select -s "${DEVELOPER_DIR}" +python3.6 -m virtualenv tf_build_env --system-site-packages +source tf_build_env/bin/activate + +# Install macos pip dependencies +install_macos_pip_deps sudo pip3.6 + +# Run configure. +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export TF2_BEHAVIOR=1 +export PYTHON_BIN_PATH=$(which python3.6) +yes "" | "$PYTHON_BIN_PATH" configure.py + +tag_filters="-no_oss,-oss_serial,-nomac,-no_mac,-no_oss_py36,-v1only,-gpu,-tpu,-benchmark-test" + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Run tests +bazel test --test_output=errors --config=opt \ + --action_env=TF2_BEHAVIOR="${TF2_BEHAVIOR}" \ + --build_tag_filters="${tag_filters}" \ + --test_tag_filters="${tag_filters}" -- \ + ${DEFAULT_BAZEL_TARGETS} \ + -//tensorflow/lite/... + diff --git a/tensorflow/tools/ci_build/rel/macos/cpu_py36_pip.sh b/tensorflow/tools/ci_build/rel/macos/cpu_py36_pip.sh new file mode 100644 index 00000000000000..0ae2c3b4069667 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/macos/cpu_py36_pip.sh @@ -0,0 +1,55 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh +# Install latest bazel +update_bazel_macos +which bazel +bazel version +set_bazel_outdir + +# Pick a more recent version of xcode +export DEVELOPER_DIR=/Applications/Xcode_10.3.app/Contents/Developer +sudo xcode-select -s "${DEVELOPER_DIR}" + +# Install macos pip dependencies +install_macos_pip_deps sudo pip3.6 + +# Export required variables for running pip_new.sh +export OS_TYPE="MACOS" +export CONTAINER_TYPE="CPU" +export TF_PYTHON_VERSION='python3.6' +export TF_BUILD_BOTH_CPU_PACKAGES=1 + +# Run configure. +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION}) +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Export optional variables for running pip.sh +export TF_BUILD_FLAGS="--config=opt --config=v2" +export TF_TEST_FLAGS="--define=no_tensorflow_py_deps=true --test_lang_filters=py --test_output=errors --verbose_failures=true --keep_going --test_env=TF2_BEHAVIOR=1" +export TF_TEST_TARGETS="//tensorflow/python/..." +export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean" +export TF_TEST_FILTER_TAGS='-nomac,-no_mac,-no_oss,-oss_serial,-no_oss_py35,-v1only,-gpu,-tpu,-benchmark-test' +export IS_NIGHTLY=0 # Not nightly +export TF_PROJECT_NAME="tensorflow" +export TF_PIP_TEST_ROOT="pip_test" + +./tensorflow/tools/ci_build/builds/pip_new.sh diff --git a/tensorflow/tools/ci_build/rel/macos/cpu_py37_nonpip.sh b/tensorflow/tools/ci_build/rel/macos/cpu_py37_nonpip.sh new file mode 100644 index 00000000000000..de34e7be8e33e1 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/macos/cpu_py37_nonpip.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh +# Install latest bazel +update_bazel_macos +which bazel +bazel version +set_bazel_outdir + +# Pick a more recent version of xcode +export DEVELOPER_DIR=/Applications/Xcode_10.3.app/Contents/Developer +sudo xcode-select -s "${DEVELOPER_DIR}" +python -m virtualenv tf_build_env --system-site-packages +source tf_build_env/bin/activate + +# Install macos pip dependencies +install_macos_pip_deps sudo pip3.7 + +# Run configure. +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export TF2_BEHAVIOR=1 +export PYTHON_BIN_PATH=$(which python3.7) +yes "" | "$PYTHON_BIN_PATH" configure.py + +tag_filters="-no_oss,-oss_serial,-nomac,-no_mac$(maybe_skip_v1)" + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Run tests +bazel test --test_output=errors --config=opt \ + --action_env=TF2_BEHAVIOR="${TF2_BEHAVIOR}" \ + --build_tag_filters="${tag_filters}" \ + --test_tag_filters="${tag_filters}" -- \ + ${DEFAULT_BAZEL_TARGETS} \ + -//tensorflow/lite/... diff --git a/tensorflow/tools/ci_build/rel/macos/cpu_py37_pip.sh b/tensorflow/tools/ci_build/rel/macos/cpu_py37_pip.sh new file mode 100644 index 00000000000000..2d5fb071913aff --- /dev/null +++ b/tensorflow/tools/ci_build/rel/macos/cpu_py37_pip.sh @@ -0,0 +1,55 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh +# Install latest bazel +update_bazel_macos +which bazel +bazel version +set_bazel_outdir + +# Pick a more recent version of xcode +export DEVELOPER_DIR=/Applications/Xcode_10.3.app/Contents/Developer +sudo xcode-select -s "${DEVELOPER_DIR}" + +# Install macos pip dependencies +install_macos_pip_deps sudo pip3.7 + +# Export required variables for running pip_new.sh +export OS_TYPE="MACOS" +export CONTAINER_TYPE="CPU" +export TF_PYTHON_VERSION='python3.7' +export TF_BUILD_BOTH_CPU_PACKAGES=1 + +# Run configure. +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION}) +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Export optional variables for running pip.sh +export TF_BUILD_FLAGS="--config=opt --config=v2" +export TF_TEST_FLAGS="--define=no_tensorflow_py_deps=true --test_lang_filters=py --test_output=errors --verbose_failures=true --keep_going --test_env=TF2_BEHAVIOR=1" +export TF_TEST_TARGETS="//tensorflow/python/..." +export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean" +export TF_TEST_FILTER_TAGS='-nomac,-no_mac,-no_oss,-oss_serial,-no_oss_py37,-v1only,-gpu,-tpu,-benchmark-test' +export IS_NIGHTLY=0 # Not nightly +export TF_PROJECT_NAME="tensorflow" +export TF_PIP_TEST_ROOT="pip_test" + +./tensorflow/tools/ci_build/builds/pip_new.sh diff --git a/tensorflow/tools/ci_build/rel/ubuntu/cpu_py35_nonpip.sh b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py35_nonpip.sh new file mode 100644 index 00000000000000..c134888d7c3c15 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py35_nonpip.sh @@ -0,0 +1,46 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.5 +# Update bazel +update_bazel_linux + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which python3.5) +export TF2_BEHAVIOR=1 +yes "" | "$PYTHON_BIN_PATH" configure.py +tag_filters="-no_oss,-oss_serial,-gpu,-tpu,-benchmark-test,-no_oss_py35,-v1only" + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Run tests +bazel test --test_output=errors --config=opt --test_lang_filters=py \ + --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.0:toolchain \ + --linkopt=-lrt \ + --action_env=TF2_BEHAVIOR="${TF2_BEHAVIOR}" \ + --build_tag_filters="${tag_filters}" \ + --test_tag_filters="${tag_filters}" -- \ + ${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... diff --git a/tensorflow/tools/ci_build/rel/ubuntu/cpu_py35_pip.sh b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py35_pip.sh new file mode 100644 index 00000000000000..6cfc7cfb97a8e9 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py35_pip.sh @@ -0,0 +1,52 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.5 +# Update bazel +update_bazel_linux + +# Export required variables for running pip.sh +export OS_TYPE="UBUNTU" +export CONTAINER_TYPE="CPU" +export TF_PYTHON_VERSION='python3.5' + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION}) +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Export optional variables for running pip.sh +export TF_BUILD_FLAGS="--config=opt --config=v2 --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.0:toolchain" +export TF_TEST_FLAGS="--define=no_tensorflow_py_deps=true --test_lang_filters=py --test_output=errors --verbose_failures=true --keep_going --test_env=TF2_BEHAVIOR=1" +export TF_TEST_TARGETS="${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... " +export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean" +export TF_TEST_FILTER_TAGS='-no_oss,-oss_serial,-no_oss_py35,-v1only' +export IS_NIGHTLY=0 # Not nightly +export TF_PROJECT_NAME="tensorflow_cpu" +export TF_PIP_TEST_ROOT="pip_test" + +./tensorflow/tools/ci_build/builds/pip_new.sh diff --git a/tensorflow/tools/ci_build/rel/ubuntu/cpu_py36_nonpip.sh b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py36_nonpip.sh new file mode 100644 index 00000000000000..3c199a667b05d9 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py36_nonpip.sh @@ -0,0 +1,46 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.6 +# Update bazel +update_bazel_linux + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which python3.6) +export TF2_BEHAVIOR=1 +yes "" | "$PYTHON_BIN_PATH" configure.py +tag_filters="-no_oss,-oss_serial,-gpu,-tpu,-benchmark-test,-no_oss_py36,-v1only" + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Run tests +bazel test --test_output=errors --config=opt --test_lang_filters=py \ + --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.0:toolchain \ + --linkopt=-lrt \ + --action_env=TF2_BEHAVIOR="${TF2_BEHAVIOR}" \ + --build_tag_filters="${tag_filters}" \ + --test_tag_filters="${tag_filters}" -- \ + ${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... diff --git a/tensorflow/tools/ci_build/rel/ubuntu/cpu_py36_pip.sh b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py36_pip.sh new file mode 100644 index 00000000000000..7dff4ccddcde75 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py36_pip.sh @@ -0,0 +1,52 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.6 +# Update bazel +update_bazel_linux + +# Export required variables for running pip.sh +export OS_TYPE="UBUNTU" +export CONTAINER_TYPE="CPU" +export TF_PYTHON_VERSION='python3.6' + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION}) +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Export optional variables for running pip.sh +export TF_BUILD_FLAGS="--config=opt --config=v2 --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.0:toolchain" +export TF_TEST_FLAGS="--define=no_tensorflow_py_deps=true --test_lang_filters=py --test_output=errors --verbose_failures=true --keep_going --test_env=TF2_BEHAVIOR=1" +export TF_TEST_TARGETS="${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... " +export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean" +export TF_TEST_FILTER_TAGS='-no_oss,-oss_serial,-no_oss_py36,-v1only' +export IS_NIGHTLY=0 # Not nightly +export TF_PROJECT_NAME="tensorflow_cpu" +export TF_PIP_TEST_ROOT="pip_test" + +./tensorflow/tools/ci_build/builds/pip_new.sh diff --git a/tensorflow/tools/ci_build/rel/ubuntu/cpu_py37_nonpip.sh b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py37_nonpip.sh new file mode 100644 index 00000000000000..7b68de4bc4f4ef --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py37_nonpip.sh @@ -0,0 +1,46 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.7 +# Update bazel +update_bazel_linux + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which python3.7) +export TF2_BEHAVIOR=1 +yes "" | "$PYTHON_BIN_PATH" configure.py +tag_filters="-no_oss,-oss_serial,-gpu,-tpu,-benchmark-test,-no_oss_py37,-v1only" + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Run tests +bazel test --test_output=errors --config=opt --test_lang_filters=py \ + --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.0:toolchain \ + --linkopt=-lrt \ + --action_env=TF2_BEHAVIOR="${TF2_BEHAVIOR}" \ + --build_tag_filters="${tag_filters}" \ + --test_tag_filters="${tag_filters}" -- \ + ${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... diff --git a/tensorflow/tools/ci_build/rel/ubuntu/cpu_py37_pip.sh b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py37_pip.sh new file mode 100644 index 00000000000000..1f77390eee06c3 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/cpu_py37_pip.sh @@ -0,0 +1,52 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.7 +# Update bazel +update_bazel_linux + +# Export required variables for running pip.sh +export OS_TYPE="UBUNTU" +export CONTAINER_TYPE="CPU" +export TF_PYTHON_VERSION='python3.7' + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=0 +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION}) +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Export optional variables for running pip.sh +export TF_BUILD_FLAGS="--config=opt --config=v2 --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.0:toolchain" +export TF_TEST_FLAGS="--define=no_tensorflow_py_deps=true --test_lang_filters=py --test_output=errors --verbose_failures=true --keep_going --test_env=TF2_BEHAVIOR=1" +export TF_TEST_TARGETS="${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... " +export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean" +export TF_TEST_FILTER_TAGS='-no_oss,-oss_serial,-no_oss_py37,-v1only' +export IS_NIGHTLY=0 # Not nightly +export TF_PROJECT_NAME="tensorflow_cpu" +export TF_PIP_TEST_ROOT="pip_test" + +./tensorflow/tools/ci_build/builds/pip_new.sh diff --git a/tensorflow/tools/ci_build/rel/ubuntu/gpu_pip_on_cpu.sh b/tensorflow/tools/ci_build/rel/ubuntu/gpu_pip_on_cpu.sh new file mode 100755 index 00000000000000..d6c2df745e1f26 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/gpu_pip_on_cpu.sh @@ -0,0 +1,56 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.6 +# Update Bazel to the desired version +update_bazel_linux + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=1 +export TF_CUDA_VERSION=10 +export TF_CUDNN_VERSION=7 +export TF_NEED_TENSORRT=1 +export TENSORRT_INSTALL_PATH=/usr/local/tensorrt +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which python3.6) +export LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:$TENSORRT_INSTALL_PATH/lib" +export TF_CUDA_COMPUTE_CAPABILITIES=3.5,3.7,5.2,6.0,6.1,7.0 + +yes "" | "$PYTHON_BIN_PATH" configure.py + +######################## +## Build GPU pip package +######################## +bazel build --config=opt \ + --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain \ + tensorflow/tools/pip_package:build_pip_package + +PIP_WHL_DIR=whl +mkdir -p ${PIP_WHL_DIR} +PIP_WHL_DIR=$(readlink -f ${PIP_WHL_DIR}) # Get absolute path +bazel-bin/tensorflow/tools/pip_package/build_pip_package "${PIP_WHL_DIR}" +WHL_PATH=$(ls "${PIP_WHL_DIR}"/*.whl) + +cp "${WHL_PATH}" "$(pwd)"/. +chmod +x tensorflow/tools/ci_build/builds/docker_cpu_pip.sh +docker run -e "BAZEL_VERSION=${BAZEL_VERSION}" -e "CI_BUILD_USER=$(id -u -n)" -e "CI_BUILD_UID=$(id -u)" -e "CI_BUILD_GROUP=$(id -g -n)" -e "CI_BUILD_GID=$(id -g)" -e "CI_BUILD_HOME=/bazel_pip" -v "$(pwd)":/bazel_pip tensorflow/tensorflow:devel-py3 "./bazel_pip/tensorflow/tools/ci_build/builds/with_the_same_user" "./bazel_pip/tensorflow/tools/ci_build/builds/docker_cpu_pip.sh" diff --git a/tensorflow/tools/ci_build/rel/ubuntu/gpu_py35_nonpip.sh b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py35_nonpip.sh new file mode 100644 index 00000000000000..13f6ce837a9717 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py35_nonpip.sh @@ -0,0 +1,58 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.5 +# Update bazel +update_bazel_linux + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=1 +export TF_CUDA_VERSION=10.1 +export TF_CUDNN_VERSION=7 +export TF_NEED_TENSORRT=1 +export TENSORRT_INSTALL_PATH=/usr/local/tensorrt +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which python3.5) +export TF2_BEHAVIOR=1 +export PROJECT_NAME="tensorflow_gpu" +export LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:$TENSORRT_INSTALL_PATH/lib" +export TF_CUDA_COMPUTE_CAPABILITIES=3.5,3.7,5.2,6.0,6.1,7.0 + +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +tag_filters="gpu,requires-gpu,-no_gpu,-nogpu,-no_oss,-oss_serial,-no_oss_py35" + +bazel test --config=cuda --config=opt \ + --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain \ + --linkopt=-lrt \ + --action_env=TF2_BEHAVIOR="${TF2_BEHAVIOR}" \ + --test_lang_filters=py \ + --test_tag_filters=${tag_filters} \ + --build_tag_filters=${tag_filters} \ + --test_timeout="300,450,1200,3600" --local_test_jobs=4 \ + --test_output=errors --verbose_failures=true --keep_going \ + --run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute \ + -- ${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... diff --git a/tensorflow/tools/ci_build/rel/ubuntu/gpu_py35_pip.sh b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py35_pip.sh new file mode 100644 index 00000000000000..4fe4edb8d9cad9 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py35_pip.sh @@ -0,0 +1,69 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.5 +# Update bazel +update_bazel_linux + +# Export required variables for running pip.sh +export OS_TYPE="UBUNTU" +export CONTAINER_TYPE="GPU" +export TF_PYTHON_VERSION='python3.5' + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=1 +export TF_CUDA_VERSION=10.1 +export TF_CUDNN_VERSION=7 +export TF_NEED_TENSORRT=1 +export TENSORRT_INSTALL_PATH=/usr/local/tensorrt +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION}) +export PROJECT_NAME="tensorflow_gpu" +export LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:$TENSORRT_INSTALL_PATH/lib" +export TF_CUDA_COMPUTE_CAPABILITIES=3.5,3.7,5.2,6.0,6.1,7.0 + +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Export optional variables for running pip.sh +export TF_TEST_FILTER_TAGS='gpu,requires-gpu,-no_gpu,-nogpu,-no_oss,-oss_serial,-no_oss_py35' +export TF_BUILD_FLAGS="--config=opt --config=v2 --config=cuda --distinct_host_configuration=false \ +--action_env=TF_CUDA_VERSION --action_env=TF_CUDNN_VERSION --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain " +export TF_TEST_FLAGS="--test_tag_filters=${TF_TEST_FILTER_TAGS} --build_tag_filters=${TF_TEST_FILTER_TAGS} \ +--distinct_host_configuration=false \ +--action_env=TF_CUDA_VERSION --action_env=TF_CUDNN_VERSION --test_env=TF2_BEHAVIOR=1 \ +--config=cuda --test_output=errors --local_test_jobs=4 --test_lang_filters=py \ +--verbose_failures=true --keep_going --define=no_tensorflow_py_deps=true \ +--run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute " +export TF_TEST_TARGETS="${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... " +export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean" +export IS_NIGHTLY=0 # Not nightly +export TF_PROJECT_NAME=${PROJECT_NAME} +export TF_PIP_TEST_ROOT="pip_test" + +# To build both tensorflow and tensorflow-gpu pip packages +export TF_BUILD_BOTH_GPU_PACKAGES=1 + +./tensorflow/tools/ci_build/builds/pip_new.sh diff --git a/tensorflow/tools/ci_build/rel/ubuntu/gpu_py36_nonpip.sh b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py36_nonpip.sh new file mode 100644 index 00000000000000..38ce102e990e5b --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py36_nonpip.sh @@ -0,0 +1,58 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.6 +# Update bazel +update_bazel_linux + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=1 +export TF_CUDA_VERSION=10.1 +export TF_CUDNN_VERSION=7 +export TF_NEED_TENSORRT=1 +export TENSORRT_INSTALL_PATH=/usr/local/tensorrt +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which python3.6) +export TF2_BEHAVIOR=1 +export PROJECT_NAME="tensorflow_gpu" +export LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:$TENSORRT_INSTALL_PATH/lib" +export TF_CUDA_COMPUTE_CAPABILITIES=3.5,3.7,5.2,6.0,6.1,7.0 + +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +tag_filters="gpu,requires-gpu,-no_gpu,-nogpu,-no_oss,-oss_serial,-no_oss_py36" + +bazel test --config=cuda --config=opt \ + --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain \ + --linkopt=-lrt \ + --action_env=TF2_BEHAVIOR="${TF2_BEHAVIOR}" \ + --test_lang_filters=py \ + --test_tag_filters=${tag_filters} \ + --build_tag_filters=${tag_filters} \ + --test_timeout="300,450,1200,3600" --local_test_jobs=4 \ + --test_output=errors --verbose_failures=true --keep_going \ + --run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute \ + -- ${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... diff --git a/tensorflow/tools/ci_build/rel/ubuntu/gpu_py36_pip.sh b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py36_pip.sh new file mode 100644 index 00000000000000..e24b9f5019f249 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py36_pip.sh @@ -0,0 +1,69 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.6 +# Update bazel +update_bazel_linux + +# Export required variables for running pip.sh +export OS_TYPE="UBUNTU" +export CONTAINER_TYPE="GPU" +export TF_PYTHON_VERSION='python3.6' + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=1 +export TF_CUDA_VERSION=10.1 +export TF_CUDNN_VERSION=7 +export TF_NEED_TENSORRT=1 +export TENSORRT_INSTALL_PATH=/usr/local/tensorrt +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION}) +export PROJECT_NAME="tensorflow_gpu" +export LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:$TENSORRT_INSTALL_PATH/lib" +export TF_CUDA_COMPUTE_CAPABILITIES=3.5,3.7,5.2,6.0,6.1,7.0 + +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Export optional variables for running pip.sh +export TF_TEST_FILTER_TAGS='gpu,requires-gpu,-no_gpu,-nogpu,-no_oss,-oss_serial,-no_oss_py36' +export TF_BUILD_FLAGS="--config=opt --config=v2 --config=cuda --distinct_host_configuration=false \ +--action_env=TF_CUDA_VERSION --action_env=TF_CUDNN_VERSION --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain " +export TF_TEST_FLAGS="--test_tag_filters=${TF_TEST_FILTER_TAGS} --build_tag_filters=${TF_TEST_FILTER_TAGS} \ +--distinct_host_configuration=false \ +--action_env=TF_CUDA_VERSION --action_env=TF_CUDNN_VERSION --test_env=TF2_BEHAVIOR=1 \ +--config=cuda --test_output=errors --local_test_jobs=4 --test_lang_filters=py \ +--verbose_failures=true --keep_going --define=no_tensorflow_py_deps=true \ +--run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute " +export TF_TEST_TARGETS="${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... " +export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean" +export IS_NIGHTLY=0 # Not nightly +export TF_PROJECT_NAME=${PROJECT_NAME} +export TF_PIP_TEST_ROOT="pip_test" + +# To build both tensorflow and tensorflow-gpu pip packages +export TF_BUILD_BOTH_GPU_PACKAGES=1 + +./tensorflow/tools/ci_build/builds/pip_new.sh diff --git a/tensorflow/tools/ci_build/rel/ubuntu/gpu_py37_nonpip.sh b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py37_nonpip.sh new file mode 100644 index 00000000000000..0a7bbb381378aa --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py37_nonpip.sh @@ -0,0 +1,58 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.7 +# Update bazel +update_bazel_linux + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=1 +export TF_CUDA_VERSION=10.1 +export TF_CUDNN_VERSION=7 +export TF_NEED_TENSORRT=1 +export TENSORRT_INSTALL_PATH=/usr/local/tensorrt +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which python3.7) +export TF2_BEHAVIOR=1 +export PROJECT_NAME="tensorflow_gpu" +export LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:$TENSORRT_INSTALL_PATH/lib" +export TF_CUDA_COMPUTE_CAPABILITIES=3.5,3.7,5.2,6.0,6.1,7.0 + +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +tag_filters="gpu,requires-gpu,-no_gpu,-nogpu,-no_oss,-oss_serial,-no_oss_py37" + +bazel test --config=cuda --config=opt \ + --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain \ + --linkopt=-lrt \ + --action_env=TF2_BEHAVIOR="${TF2_BEHAVIOR}" \ + --test_lang_filters=py \ + --build_tag_filters=${tag_filters} \ + --test_tag_filters=${tag_filters} \ + --test_timeout="300,450,1200,3600" --local_test_jobs=4 \ + --test_output=errors --verbose_failures=true --keep_going \ + --run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute \ + -- ${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... diff --git a/tensorflow/tools/ci_build/rel/ubuntu/gpu_py37_pip.sh b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py37_pip.sh new file mode 100644 index 00000000000000..ff30c1e88af401 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/gpu_py37_pip.sh @@ -0,0 +1,69 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e +set -x + +source tensorflow/tools/ci_build/release/common.sh + +install_ubuntu_16_pip_deps pip3.7 +# Update bazel +update_bazel_linux + +# Export required variables for running pip.sh +export OS_TYPE="UBUNTU" +export CONTAINER_TYPE="GPU" +export TF_PYTHON_VERSION='python3.7' + +# Run configure. +export TF_NEED_GCP=1 +export TF_NEED_HDFS=1 +export TF_NEED_S3=1 +export TF_NEED_CUDA=1 +export TF_CUDA_VERSION=10.1 +export TF_CUDNN_VERSION=7 +export TF_NEED_TENSORRT=1 +export TENSORRT_INSTALL_PATH=/usr/local/tensorrt +export CC_OPT_FLAGS='-mavx' +export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION}) +export PROJECT_NAME="tensorflow_gpu" +export LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:$TENSORRT_INSTALL_PATH/lib" +export TF_CUDA_COMPUTE_CAPABILITIES=3.5,3.7,5.2,6.0,6.1,7.0 + +yes "" | "$PYTHON_BIN_PATH" configure.py + +# Get the default test targets for bazel. +source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh + +# Export optional variables for running pip.sh +export TF_TEST_FILTER_TAGS='gpu,requires-gpu,-no_gpu,-nogpu,-no_oss,-oss_serial,-no_oss_py37' +export TF_BUILD_FLAGS="--config=opt --config=v2 --config=cuda --distinct_host_configuration=false \ +--action_env=TF_CUDA_VERSION --action_env=TF_CUDNN_VERSION --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain " +export TF_TEST_FLAGS="--test_tag_filters=${TF_TEST_FILTER_TAGS} --build_tag_filters=${TF_TEST_FILTER_TAGS} \ +--distinct_host_configuration=false \ +--action_env=TF_CUDA_VERSION --action_env=TF_CUDNN_VERSION --test_env=TF2_BEHAVIOR=1 \ +--config=cuda --test_output=errors --local_test_jobs=4 --test_lang_filters=py \ +--verbose_failures=true --keep_going --define=no_tensorflow_py_deps=true \ +--run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute " +export TF_TEST_TARGETS="${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... " +export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean" +export IS_NIGHTLY=0 # Not nightly +export TF_PROJECT_NAME=${PROJECT_NAME} +export TF_PIP_TEST_ROOT="pip_test" + +# To build both tensorflow and tensorflow-gpu pip packages +export TF_BUILD_BOTH_GPU_PACKAGES=1 + +./tensorflow/tools/ci_build/builds/pip_new.sh diff --git a/tensorflow/tools/ci_build/rel/ubuntu/sanity.sh b/tensorflow/tools/ci_build/rel/ubuntu/sanity.sh new file mode 100644 index 00000000000000..d504650da458fe --- /dev/null +++ b/tensorflow/tools/ci_build/rel/ubuntu/sanity.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +set -e + +# Install latest bazel +source tensorflow/tools/ci_build/release/common.sh +update_bazel_linux +which bazel + +# We need py3 lint +sudo pip3 install pep8 + +# TODO(gunan): figure out why we get stuck with later versions of pylint. +# Install pylint. +sudo python2 -m pip install pylint==1.6.4 +sudo python3 -m pip install pylint==1.6.4 + +# TODO(yifeif): print pylint version for debug. remove later. +python3 -m pylint --version + +# Run tensorflow sanity checks. +tensorflow/tools/ci_build/ci_sanity.sh diff --git a/tensorflow/tools/ci_build/rel/windows/cpu_libtensorflow.bat b/tensorflow/tools/ci_build/rel/windows/cpu_libtensorflow.bat new file mode 100644 index 00000000000000..67941234b155c0 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/windows/cpu_libtensorflow.bat @@ -0,0 +1,20 @@ +:: Copyright 2019 The TensorFlow Authors. All Rights Reserved. +:: +:: Licensed under the Apache License, Version 2.0 (the "License"); +:: you may not use this file except in compliance with the License. +:: You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License. +:: ============================================================================= + +CALL tensorflow\tools\ci_build\release\common_win.bat + +call tensorflow\tools\ci_build\windows\cpu\bazel\run_libtensorflow.bat || exit /b 1 + +copy lib_package %TF_ARTIFACTS_DIR%\lib_package diff --git a/tensorflow/tools/ci_build/rel/windows/cpu_py35.bat b/tensorflow/tools/ci_build/rel/windows/cpu_py35.bat new file mode 100644 index 00000000000000..bd8c217ddefe77 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/windows/cpu_py35.bat @@ -0,0 +1,20 @@ +:: Copyright 2019 The TensorFlow Authors. All Rights Reserved. +:: +:: Licensed under the Apache License, Version 2.0 (the "License"); +:: you may not use this file except in compliance with the License. +:: You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License. +:: ============================================================================= + +SET PYTHON_DIRECTORY=Python35 + +CALL tensorflow\tools\ci_build\release\common_win.bat + +call tensorflow\tools\ci_build\windows\cpu\pip\run.bat --release_build --extra_build_flags "--config=v2" --extra_test_flags "--test_env=TF2_BEHAVIOR=1" --project_name "tensorflow_cpu" diff --git a/tensorflow/tools/ci_build/rel/windows/cpu_py36.bat b/tensorflow/tools/ci_build/rel/windows/cpu_py36.bat new file mode 100644 index 00000000000000..0a81a90a43164c --- /dev/null +++ b/tensorflow/tools/ci_build/rel/windows/cpu_py36.bat @@ -0,0 +1,20 @@ +:: Copyright 2019 The TensorFlow Authors. All Rights Reserved. +:: +:: Licensed under the Apache License, Version 2.0 (the "License"); +:: you may not use this file except in compliance with the License. +:: You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License. +:: ============================================================================= + +SET PYTHON_DIRECTORY=Python36 + +CALL tensorflow\tools\ci_build\release\common_win.bat + +call tensorflow\tools\ci_build\windows\cpu\pip\run.bat --release_build --extra_build_flags "--config=v2" --extra_test_flags "--test_env=TF2_BEHAVIOR=1" --project_name "tensorflow_cpu" diff --git a/tensorflow/tools/ci_build/rel/windows/cpu_py37.bat b/tensorflow/tools/ci_build/rel/windows/cpu_py37.bat new file mode 100644 index 00000000000000..9591d7aac343bd --- /dev/null +++ b/tensorflow/tools/ci_build/rel/windows/cpu_py37.bat @@ -0,0 +1,20 @@ +:: Copyright 2019 The TensorFlow Authors. All Rights Reserved. +:: +:: Licensed under the Apache License, Version 2.0 (the "License"); +:: you may not use this file except in compliance with the License. +:: You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License. +:: ============================================================================= + +SET PYTHON_DIRECTORY=Python37 + +CALL tensorflow\tools\ci_build\release\common_win.bat + +call tensorflow\tools\ci_build\windows\cpu\pip\run.bat --release_build --extra_build_flags "--config=v2" --extra_test_flags "--test_env=TF2_BEHAVIOR=1" --project_name "tensorflow_cpu" diff --git a/tensorflow/tools/ci_build/rel/windows/gpu_libtensorflow.bat b/tensorflow/tools/ci_build/rel/windows/gpu_libtensorflow.bat new file mode 100644 index 00000000000000..8ab78bef3ca0af --- /dev/null +++ b/tensorflow/tools/ci_build/rel/windows/gpu_libtensorflow.bat @@ -0,0 +1,20 @@ +:: Copyright 2019 The TensorFlow Authors. All Rights Reserved. +:: +:: Licensed under the Apache License, Version 2.0 (the "License"); +:: you may not use this file except in compliance with the License. +:: You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License. +:: ============================================================================= + +CALL tensorflow\tools\ci_build\release\common_win.bat + +call tensorflow\tools\ci_build\windows\gpu\bazel\run_libtensorflow.bat || exit /b + +copy lib_package %TF_ARTIFACTS_DIR%\lib_package diff --git a/tensorflow/tools/ci_build/rel/windows/gpu_pip_on_cpu.bat b/tensorflow/tools/ci_build/rel/windows/gpu_pip_on_cpu.bat new file mode 100644 index 00000000000000..213de532069244 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/windows/gpu_pip_on_cpu.bat @@ -0,0 +1,21 @@ +:: Copyright 2019 The TensorFlow Authors. All Rights Reserved. +:: +:: Licensed under the Apache License, Version 2.0 (the "License"); +:: you may not use this file except in compliance with the License. +:: You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License. +:: ============================================================================= + +SET PYTHON_DIRECTORY=Python36 + +CALL tensorflow\tools\ci_build\release\common_win.bat + +call tensorflow\tools\ci_build\windows\integration\gpu_pip_on_cpu\run.bat + diff --git a/tensorflow/tools/ci_build/rel/windows/gpu_py35.bat b/tensorflow/tools/ci_build/rel/windows/gpu_py35.bat new file mode 100644 index 00000000000000..cba62225bee4fe --- /dev/null +++ b/tensorflow/tools/ci_build/rel/windows/gpu_py35.bat @@ -0,0 +1,23 @@ +:: Copyright 2019 The TensorFlow Authors. All Rights Reserved. +:: +:: Licensed under the Apache License, Version 2.0 (the "License"); +:: you may not use this file except in compliance with the License. +:: You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License. +:: ============================================================================= + +SET PYTHON_DIRECTORY=Python35 + +CALL tensorflow\tools\ci_build\release\common_win.bat + +call tensorflow\tools\ci_build\windows\gpu\pip\run.bat --release_build --extra_build_flags "--config=v2" --extra_test_flags "--test_env=TF2_BEHAVIOR=1" --project_name "tensorflow" + +for %%a in ("%~dp0\.") do set "PARENT_DIR=%%~nxa" +bash -l tensorflow\tools\ci_build\release\windows\%PARENT_DIR%\release_pip_rename.sh diff --git a/tensorflow/tools/ci_build/rel/windows/gpu_py36.bat b/tensorflow/tools/ci_build/rel/windows/gpu_py36.bat new file mode 100644 index 00000000000000..ede8bd35f52f24 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/windows/gpu_py36.bat @@ -0,0 +1,23 @@ +:: Copyright 2019 The TensorFlow Authors. All Rights Reserved. +:: +:: Licensed under the Apache License, Version 2.0 (the "License"); +:: you may not use this file except in compliance with the License. +:: You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License. +:: ============================================================================= + +SET PYTHON_DIRECTORY=Python36 + +CALL tensorflow\tools\ci_build\release\common_win.bat + +call tensorflow\tools\ci_build\windows\gpu\pip\run.bat --release_build --extra_build_flags "--config=v2" --extra_test_flags "--test_env=TF2_BEHAVIOR=1" --project_name "tensorflow" + +for %%a in ("%~dp0\.") do set "PARENT_DIR=%%~nxa" +bash -l tensorflow\tools\ci_build\release\windows\%PARENT_DIR%\release_pip_rename.sh \ No newline at end of file diff --git a/tensorflow/tools/ci_build/rel/windows/gpu_py37.bat b/tensorflow/tools/ci_build/rel/windows/gpu_py37.bat new file mode 100644 index 00000000000000..7509270fc43796 --- /dev/null +++ b/tensorflow/tools/ci_build/rel/windows/gpu_py37.bat @@ -0,0 +1,23 @@ +:: Copyright 2019 The TensorFlow Authors. All Rights Reserved. +:: +:: Licensed under the Apache License, Version 2.0 (the "License"); +:: you may not use this file except in compliance with the License. +:: You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License. +:: ============================================================================= + +SET PYTHON_DIRECTORY=Python37 + +CALL tensorflow\tools\ci_build\release\common_win.bat + +call tensorflow\tools\ci_build\windows\gpu\pip\run.bat --release_build --extra_build_flags "--config=v2" --extra_test_flags "--test_env=TF2_BEHAVIOR=1" --project_name "tensorflow" + +for %%a in ("%~dp0\.") do set "PARENT_DIR=%%~nxa" +bash -l tensorflow\tools\ci_build\release\windows\%PARENT_DIR%\release_pip_rename.sh \ No newline at end of file From c406e34393033d78e0cd62aec5381157752c4682 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 29 Jul 2020 10:46:31 -0700 Subject: [PATCH 041/243] Remove scipy dependency. See #40884, #35709, #40789. --- tensorflow/tools/pip_package/setup.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py index cf796b4e61cf3e..f38c356d2401e8 100644 --- a/tensorflow/tools/pip_package/setup.py +++ b/tensorflow/tools/pip_package/setup.py @@ -73,10 +73,6 @@ # functools comes with python3, need to install the backport for python2 'functools32 >= 3.2.3;python_version<"3"', 'six >= 1.12.0', - # scipy < 1.4.1 causes segfaults due to pybind11 - # Latest scipy pip for py2 is scipy==1.2.2 - 'scipy == 1.4.1;python_version>="3"', - 'scipy == 1.2.2;python_version<"3"', ] if sys.byteorder == 'little': From a4db2bc15a2c76a942d5032587bd53db8aa484bd Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Thu, 13 Aug 2020 20:25:53 +0000 Subject: [PATCH 042/243] Changing the name of the HIP Runtime library to amdhip64 for ROCm 3.5+ --- tensorflow/stream_executor/platform/default/dso_loader.cc | 8 +++++++- third_party/gpus/rocm_configure.bzl | 4 ++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/tensorflow/stream_executor/platform/default/dso_loader.cc b/tensorflow/stream_executor/platform/default/dso_loader.cc index 9ae8b41ccf47f8..4ed5f84c51c1c9 100644 --- a/tensorflow/stream_executor/platform/default/dso_loader.cc +++ b/tensorflow/stream_executor/platform/default/dso_loader.cc @@ -134,7 +134,13 @@ port::StatusOr GetRocrandDsoHandle() { return GetDsoHandle("rocrand", ""); } -port::StatusOr GetHipDsoHandle() { return GetDsoHandle("hip_hcc", ""); } +port::StatusOr GetHipDsoHandle() { +#if TENSORFLOW_COMPILER_IS_HIP_CLANG + return GetDsoHandle("amdhip64", ""); +#else + return GetDsoHandle("hip_hcc", ""); +#endif +} } // namespace DsoLoader diff --git a/third_party/gpus/rocm_configure.bzl b/third_party/gpus/rocm_configure.bzl index dff65f19e0c724..d72b3f1d14e1f0 100644 --- a/third_party/gpus/rocm_configure.bzl +++ b/third_party/gpus/rocm_configure.bzl @@ -457,7 +457,7 @@ def _find_libs(repository_ctx, rocm_config): """ return { "hip": _find_rocm_lib( - "hip_hcc", + _if_hipcc_is_hipclang(repository_ctx, rocm_config, "amdhip64", "hip_hcc"), repository_ctx, rocm_config.rocm_toolkit_path, ), @@ -832,7 +832,7 @@ def _create_local_rocm_repository(repository_ctx): "%{rocr_runtime_path}": rocm_config.rocm_toolkit_path + "/lib", "%{rocr_runtime_library}": "hsa-runtime64", "%{hip_runtime_path}": rocm_config.rocm_toolkit_path + "/hip/lib", - "%{hip_runtime_library}": "hip_hcc", + "%{hip_runtime_library}": _if_hipcc_is_hipclang(repository_ctx, rocm_config, "amdhip64", "hip_hcc"), "%{hcc_runtime_path}": rocm_config.rocm_toolkit_path + "/hcc/lib", "%{hcc_runtime_library}": "mcwamp", "%{crosstool_verbose}": _crosstool_verbose(repository_ctx), From bc2230f741793a0932eb186b4ca7254a4908648e Mon Sep 17 00:00:00 2001 From: Geeta Chavan Date: Thu, 20 Aug 2020 16:28:51 -0700 Subject: [PATCH 043/243] Pin numpy version to 1.19 --- tensorflow/tools/pip_package/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py index cf796b4e61cf3e..1a7d677e69c4dd 100644 --- a/tensorflow/tools/pip_package/setup.py +++ b/tensorflow/tools/pip_package/setup.py @@ -58,7 +58,7 @@ 'google_pasta >= 0.1.6', 'keras_applications >= 1.0.8', 'keras_preprocessing == 1.1.0', - 'numpy >= 1.16.0, < 2.0', + 'numpy >= 1.16.0, < 1.19.0', 'opt_einsum >= 2.3.2', 'protobuf >= 3.8.0', 'tensorboard >= 2.1.0, < 2.2.0', From d4951377480d10ba5edf1d6dd1d09823d549afa2 Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Fri, 28 Aug 2020 12:37:55 -0700 Subject: [PATCH 044/243] Updating ROCm CI scripts to use ROCm 3.7 --- .../tools/ci_build/linux/rocm/run_cc_core.sh | 23 +++++++++----- .../ci_build/linux/rocm/run_csb_tests.sh | 30 ++++++++++++------- .../tools/ci_build/linux/rocm/run_py3_core.sh | 23 +++++++++----- 3 files changed, 52 insertions(+), 24 deletions(-) diff --git a/tensorflow/tools/ci_build/linux/rocm/run_cc_core.sh b/tensorflow/tools/ci_build/linux/rocm/run_cc_core.sh index 49b23363e4e7d9..bd879e5ba109a9 100755 --- a/tensorflow/tools/ci_build/linux/rocm/run_cc_core.sh +++ b/tensorflow/tools/ci_build/linux/rocm/run_cc_core.sh @@ -18,20 +18,27 @@ set -e set -x -N_JOBS=$(grep -c ^processor /proc/cpuinfo) -N_GPUS=$(lspci|grep 'controller'|grep 'AMD/ATI'|wc -l) +N_BUILD_JOBS=$(grep -c ^processor /proc/cpuinfo) +TF_GPU_COUNT=$(lspci|grep 'controller'|grep 'AMD/ATI'|wc -l) +TF_TESTS_PER_GPU=1 +N_TEST_JOBS=$(expr ${TF_GPU_COUNT} \* ${TF_TESTS_PER_GPU}) echo "" -echo "Bazel will use ${N_JOBS} concurrent build job(s) and ${N_GPUS} concurrent test job(s)." +echo "Bazel will use ${N_BUILD_JOBS} concurrent build job(s) and ${N_TEST_JOBS} concurrent test job(s)." echo "" +# First positional argument (if any) specifies the ROCM_INSTALL_DIR +ROCM_INSTALL_DIR=/opt/rocm-3.7.0 +if [[ -n $1 ]]; then + ROCM_INSTALL_DIR=$1 +fi + # Run configure. export PYTHON_BIN_PATH=`which python3` export CC_OPT_FLAGS='-mavx' export TF_NEED_ROCM=1 -export ROCM_PATH=/opt/rocm-3.3.0 -export TF_GPU_COUNT=${N_GPUS} +export ROCM_PATH=$ROCM_INSTALL_DIR yes "" | $PYTHON_BIN_PATH configure.py @@ -41,8 +48,10 @@ bazel test \ -k \ --test_tag_filters=-no_oss,-oss_serial,-no_gpu,-no_rocm,-benchmark-test,-rocm_multi_gpu,-v1only \ --test_lang_filters=cc \ - --jobs=${N_JOBS} \ - --local_test_jobs=${TF_GPU_COUNT}\ + --jobs=${N_BUILD_JOBS} \ + --local_test_jobs=${N_TEST_JOBS} \ + --test_env=TF_GPU_COUNT=$TF_GPU_COUNT \ + --test_env=TF_TESTS_PER_GPU=$TF_TESTS_PER_GPU \ --test_timeout 600,900,2400,7200 \ --build_tests_only \ --test_output=errors \ diff --git a/tensorflow/tools/ci_build/linux/rocm/run_csb_tests.sh b/tensorflow/tools/ci_build/linux/rocm/run_csb_tests.sh index 5e481112f12a11..31d10da0596b0b 100755 --- a/tensorflow/tools/ci_build/linux/rocm/run_csb_tests.sh +++ b/tensorflow/tools/ci_build/linux/rocm/run_csb_tests.sh @@ -18,20 +18,27 @@ set -e set -x -N_JOBS=$(grep -c ^processor /proc/cpuinfo) -N_GPUS=$(lspci|grep 'controller'|grep 'AMD/ATI'|wc -l) +N_BUILD_JOBS=$(grep -c ^processor /proc/cpuinfo) +TF_GPU_COUNT=$(lspci|grep 'controller'|grep 'AMD/ATI'|wc -l) +TF_TESTS_PER_GPU=1 +N_TEST_JOBS=$(expr ${TF_GPU_COUNT} \* ${TF_TESTS_PER_GPU}) echo "" -echo "Bazel will use ${N_JOBS} concurrent build job(s) and ${N_GPUS} concurrent test job(s)." +echo "Bazel will use ${N_BUILD_JOBS} concurrent build job(s) and ${N_TEST_JOBS} concurrent test job(s)." echo "" +# First positional argument (if any) specifies the ROCM_INSTALL_DIR +ROCM_INSTALL_DIR=/opt/rocm-3.7.0 +if [[ -n $1 ]]; then + ROCM_INSTALL_DIR=$1 +fi + # Run configure. export PYTHON_BIN_PATH=`which python3` export CC_OPT_FLAGS='-mavx' export TF_NEED_ROCM=1 -export ROCM_PATH=/opt/rocm-3.3.0 -export TF_GPU_COUNT=${N_GPUS} +export ROCM_PATH=$ROCM_INSTALL_DIR yes "" | $PYTHON_BIN_PATH configure.py @@ -39,12 +46,15 @@ yes "" | $PYTHON_BIN_PATH configure.py bazel test \ --config=rocm \ -k \ - --test_tag_filters=gpu,-no_gpu,-no_rocm,-benchmark-test,-no_oss,-oss_serial,-rocm_multi_gpu, \ + --test_tag_filters=gpu,-no_oss,-oss_serial,-no_gpu,-no_rocm,-benchmark-test,-rocm_multi_gpu,-v1only \ + --jobs=${N_BUILD_JOBS} \ + --local_test_jobs=${N_TEST_JOBS} \ + --test_env=TF_GPU_COUNT=$TF_GPU_COUNT \ + --test_env=TF_TESTS_PER_GPU=$TF_TESTS_PER_GPU \ --test_timeout 600,900,2400,7200 \ --test_output=errors \ - --jobs=${N_JOBS} \ - --local_test_jobs=${TF_GPU_COUNT} \ --test_sharding_strategy=disabled \ + --test_size_filters=small,medium \ --run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute \ -- \ //tensorflow/... \ @@ -57,8 +67,8 @@ bazel test \ --test_tag_filters=gpu \ --test_timeout 600,900,2400,7200 \ --test_output=errors \ - --jobs=${N_JOBS} \ - --local_test_jobs=1 \ + --jobs=${N_BUILD_JOBS} \ + --local_test_jobs=${N_TEST_JOBS} \ --test_sharding_strategy=disabled \ -- \ //tensorflow/core/nccl:nccl_manager_test diff --git a/tensorflow/tools/ci_build/linux/rocm/run_py3_core.sh b/tensorflow/tools/ci_build/linux/rocm/run_py3_core.sh index d55fa56f970f20..4ad67546dc16a2 100755 --- a/tensorflow/tools/ci_build/linux/rocm/run_py3_core.sh +++ b/tensorflow/tools/ci_build/linux/rocm/run_py3_core.sh @@ -18,20 +18,27 @@ set -e set -x -N_JOBS=$(grep -c ^processor /proc/cpuinfo) -N_GPUS=$(lspci|grep 'controller'|grep 'AMD/ATI'|wc -l) +N_BUILD_JOBS=$(grep -c ^processor /proc/cpuinfo) +TF_GPU_COUNT=$(lspci|grep 'controller'|grep 'AMD/ATI'|wc -l) +TF_TESTS_PER_GPU=1 +N_TEST_JOBS=$(expr ${TF_GPU_COUNT} \* ${TF_TESTS_PER_GPU}) echo "" -echo "Bazel will use ${N_JOBS} concurrent build job(s) and ${N_GPUS} concurrent test job(s)." +echo "Bazel will use ${N_BUILD_JOBS} concurrent build job(s) and ${N_TEST_JOBS} concurrent test job(s)." echo "" +# First positional argument (if any) specifies the ROCM_INSTALL_DIR +ROCM_INSTALL_DIR=/opt/rocm-3.7.0 +if [[ -n $1 ]]; then + ROCM_INSTALL_DIR=$1 +fi + # Run configure. export PYTHON_BIN_PATH=`which python3` export CC_OPT_FLAGS='-mavx' export TF_NEED_ROCM=1 -export ROCM_PATH=/opt/rocm-3.3.0 -export TF_GPU_COUNT=${N_GPUS} +export ROCM_PATH=$ROCM_INSTALL_DIR yes "" | $PYTHON_BIN_PATH configure.py @@ -41,8 +48,10 @@ bazel test \ -k \ --test_tag_filters=-no_oss,-oss_serial,-no_gpu,-no_rocm,-benchmark-test,-rocm_multi_gpu,-v1only \ --test_lang_filters=py \ - --jobs=${N_JOBS} \ - --local_test_jobs=${TF_GPU_COUNT} \ + --jobs=${N_BUILD_JOBS} \ + --local_test_jobs=${N_TEST_JOBS} \ + --test_env=TF_GPU_COUNT=$TF_GPU_COUNT \ + --test_env=TF_TESTS_PER_GPU=$TF_TESTS_PER_GPU \ --test_timeout 600,900,2400,7200 \ --build_tests_only \ --test_output=errors \ From a7bca9620dca0aab0863d4aa502fee8e36cfd9d2 Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Fri, 28 Aug 2020 12:44:41 -0700 Subject: [PATCH 045/243] Updating Dockerfile.rocm to use ROCm 3.7 --- tensorflow/tools/ci_build/Dockerfile.rocm | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/tensorflow/tools/ci_build/Dockerfile.rocm b/tensorflow/tools/ci_build/Dockerfile.rocm index 6d124204ed82d9..f63e5fec60451a 100644 --- a/tensorflow/tools/ci_build/Dockerfile.rocm +++ b/tensorflow/tools/ci_build/Dockerfile.rocm @@ -3,8 +3,10 @@ FROM ubuntu:xenial MAINTAINER Jeff Poznanovic -ARG DEB_ROCM_REPO=http://repo.radeon.com/rocm/apt/3.3/ -ARG ROCM_PATH=/opt/rocm-3.3.0 +ARG ROCM_DEB_REPO=http://repo.radeon.com/rocm/apt/3.7/ +ARG ROCM_BUILD_NAME=xenial +ARG ROCM_BUILD_NUM=main +ARG ROCM_PATH=/opt/rocm-3.7.0 ENV DEBIAN_FRONTEND noninteractive ENV TF_NEED_ROCM 1 @@ -13,8 +15,12 @@ RUN apt update && apt install -y wget software-properties-common # Add rocm repository RUN apt-get clean all -RUN wget -qO - $DEB_ROCM_REPO/rocm.gpg.key | apt-key add - -RUN sh -c "echo deb [arch=amd64] $DEB_ROCM_REPO xenial main > /etc/apt/sources.list.d/rocm.list" +RUN bin/bash -c 'if [[ $ROCM_DEB_REPO == http://repo.radeon.com/rocm/* ]] ; then \ + wget -qO - $ROCM_DEB_REPO/rocm.gpg.key | apt-key add -; \ + echo "deb [arch=amd64] $ROCM_DEB_REPO $ROCM_BUILD_NAME $ROCM_BUILD_NUM" > /etc/apt/sources.list.d/rocm.list; \ + else \ + echo "deb [arch=amd64 trusted=yes] $ROCM_DEB_REPO $ROCM_BUILD_NAME $ROCM_BUILD_NUM" > /etc/apt/sources.list.d/rocm.list ; \ + fi' # Install misc pkgs RUN apt-get update --allow-insecure-repositories && DEBIAN_FRONTEND=noninteractive apt-get install -y \ From 244da03c374e7143a178b3d38e069c6fc4a7ba04 Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Sat, 29 Aug 2020 12:11:05 -0700 Subject: [PATCH 046/243] Updating Dockerfile.rocm and install scripts to use Ubuntu 18.04 (required for ROCm 3.7) --- tensorflow/tools/ci_build/Dockerfile.rocm | 10 ++++------ .../ci_build/install/install_deb_packages.sh | 6 +++++- .../ci_build/install/install_pip_packages.sh | 18 ++++++++++++++++-- 3 files changed, 25 insertions(+), 9 deletions(-) diff --git a/tensorflow/tools/ci_build/Dockerfile.rocm b/tensorflow/tools/ci_build/Dockerfile.rocm index f63e5fec60451a..d209173258ada0 100644 --- a/tensorflow/tools/ci_build/Dockerfile.rocm +++ b/tensorflow/tools/ci_build/Dockerfile.rocm @@ -1,6 +1,6 @@ # This Dockerfile provides a starting point for a ROCm installation of # MIOpen and tensorflow. -FROM ubuntu:xenial +FROM ubuntu:bionic MAINTAINER Jeff Poznanovic ARG ROCM_DEB_REPO=http://repo.radeon.com/rocm/apt/3.7/ @@ -25,9 +25,9 @@ RUN bin/bash -c 'if [[ $ROCM_DEB_REPO == http://repo.radeon.com/rocm/* ]] ; the # Install misc pkgs RUN apt-get update --allow-insecure-repositories && DEBIAN_FRONTEND=noninteractive apt-get install -y \ build-essential \ - clang-3.8 \ - clang-format-3.8 \ - clang-tidy-3.8 \ + clang-6.0 \ + clang-format-6.0 \ + clang-tidy-6.0 \ cmake \ cmake-qt-gui \ ssh \ @@ -97,8 +97,6 @@ RUN touch ${ROCM_PATH}/.info/version COPY install/*.sh /install/ ARG DEBIAN_FRONTEND=noninteractive RUN /install/install_bootstrap_deb_packages.sh -RUN add-apt-repository -y ppa:openjdk-r/ppa && \ - add-apt-repository -y ppa:george-edison55/cmake-3.x RUN /install/install_deb_packages.sh RUN /install/install_pip_packages.sh RUN /install/install_bazel.sh diff --git a/tensorflow/tools/ci_build/install/install_deb_packages.sh b/tensorflow/tools/ci_build/install/install_deb_packages.sh index bd810016d2a050..ae9bf52309b8bc 100755 --- a/tensorflow/tools/ci_build/install/install_deb_packages.sh +++ b/tensorflow/tools/ci_build/install/install_deb_packages.sh @@ -38,12 +38,16 @@ if [[ "$ubuntu_version" == "14" ]]; then apt-get dist-upgrade -y fi +if [[ "$ubuntu_version" == "16" ]]; then + apt-get install -y --no-install-recommends \ + clang-format-3.8 +fi + ## TODO(yifeif) remove ffmpeg once ffmpeg is removed from contrib apt-get install -y --no-install-recommends \ autoconf \ automake \ build-essential \ - clang-format-3.8 \ curl \ ffmpeg \ git \ diff --git a/tensorflow/tools/ci_build/install/install_pip_packages.sh b/tensorflow/tools/ci_build/install/install_pip_packages.sh index 170482b45657c7..88b2d9a227e0d0 100755 --- a/tensorflow/tools/ci_build/install/install_pip_packages.sh +++ b/tensorflow/tools/ci_build/install/install_pip_packages.sh @@ -15,10 +15,24 @@ # ============================================================================== set -e +ubuntu_version=$(cat /etc/issue | grep -i ubuntu | awk '{print $2}' | \ + awk -F'.' '{print $1}') + +if [[ "$1" != "" ]] && [[ "$1" != "--without_cmake" ]]; then + echo "Unknown argument '$1'" + exit 1 +fi # Get the latest version of pip so it recognize manylinux2010 -easy_install3 -U pip -easy_install -U pip +if [[ "$ubuntu_version" == "18" ]]; then + wget https://bootstrap.pypa.io/get-pip.py + python3 get-pip.py + python get-pip.py + rm -f get-pip.py +else + easy_install3 -U pip + easy_install -U pip +fi # Install pip packages from whl files to avoid the time-consuming process of # building from source. From 9cd97df9a439d0e9ccd74f76169df04347d7022c Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 16 Sep 2020 19:54:12 -0700 Subject: [PATCH 047/243] Disable test that fails with newer absl_py syntax --- tensorflow/python/BUILD | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/python/BUILD b/tensorflow/python/BUILD index f2ca67521f257c..e0f7bc9de3530c 100644 --- a/tensorflow/python/BUILD +++ b/tensorflow/python/BUILD @@ -4412,6 +4412,7 @@ cuda_py_test( "//tensorflow/python/eager:def_function", ], shard_count = 2, + tags = ["no_pip"], ) cuda_py_test( From 31c2b47c26180f15f295612fe4716045466cd0c5 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 16 Sep 2020 20:10:36 -0700 Subject: [PATCH 048/243] Update common_win.bat Pin numpy --- tensorflow/tools/ci_build/release/common_win.bat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/tools/ci_build/release/common_win.bat b/tensorflow/tools/ci_build/release/common_win.bat index 95b09008c542e0..8c7f763fa5ecc2 100644 --- a/tensorflow/tools/ci_build/release/common_win.bat +++ b/tensorflow/tools/ci_build/release/common_win.bat @@ -30,7 +30,7 @@ SET PATH=%PATH%;C:\%PYTHON_DIRECTORY% %PIP_EXE% install future>=0.17.1 --no-deps %PIP_EXE% install tf-estimator-nightly --no-deps %PIP_EXE% install tb-nightly --no-deps -%PIP_EXE% install numpy --upgrade --no-deps +%PIP_EXE% install numpy==1.18.5 --upgrade --no-deps %PIP_EXE% install opt_einsum --upgrade %PIP_EXE% install pandas --upgrade --no-deps %PIP_EXE% install protobuf --upgrade --no-deps From a5f55dba6ae4e6a056c1e7411b2ac2c3c73b7bdf Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 17 Sep 2020 10:41:05 -0700 Subject: [PATCH 049/243] Pin estimator nightly to the latest 2.1 version --- tensorflow/tools/ci_build/release/common.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tensorflow/tools/ci_build/release/common.sh b/tensorflow/tools/ci_build/release/common.sh index 7b273fbfed1cc5..2b4565ec4a09e4 100644 --- a/tensorflow/tools/ci_build/release/common.sh +++ b/tensorflow/tools/ci_build/release/common.sh @@ -147,7 +147,7 @@ function install_pip_deps { ${SUDO_CMD} ${PIP_CMD} install scikit-learn==0.20.3 ${SUDO_CMD} ${PIP_CMD} install --upgrade tb-nightly ${PIP_CMD} install --user --upgrade attrs - ${PIP_CMD} install --user --upgrade tf-estimator-nightly + ${PIP_CMD} install --user --upgrade tf-estimator-nightly==2.1.0.dev2020031101 ${PIP_CMD} install --user --upgrade "future>=0.17.1" # =================================================================== } @@ -181,7 +181,7 @@ function install_ubuntu_16_pip_deps { "${PIP_CMD}" install portpicker --user "${PIP_CMD}" install scipy --user "${PIP_CMD}" install scikit-learn --user - "${PIP_CMD}" install --user --upgrade tf-estimator-nightly + "${PIP_CMD}" install --user --upgrade tf-estimator-nightly==2.1.0.dev2020031101 "${PIP_CMD}" install --user --upgrade tb-nightly # =================================================================== } @@ -224,7 +224,7 @@ function install_macos_pip_deps { ${SUDO_CMD} ${PIP_CMD} install --upgrade grpcio ${SUDO_CMD} ${PIP_CMD} install --upgrade tb-nightly ${PIP_CMD} install --user --upgrade attrs - ${PIP_CMD} install --user --upgrade tf-estimator-nightly + ${PIP_CMD} install --user --upgrade tf-estimator-nightly==2.1.0.dev2020031101 ${PIP_CMD} install --user --upgrade "future>=0.17.1" } From 262fe212ef3d0978c56714360fb1dc27752457e4 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 17 Sep 2020 10:42:59 -0700 Subject: [PATCH 050/243] Also pin on Windows --- tensorflow/tools/ci_build/release/common_win.bat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/tools/ci_build/release/common_win.bat b/tensorflow/tools/ci_build/release/common_win.bat index 8c7f763fa5ecc2..62c88f46e62c67 100644 --- a/tensorflow/tools/ci_build/release/common_win.bat +++ b/tensorflow/tools/ci_build/release/common_win.bat @@ -28,7 +28,7 @@ SET PATH=%PATH%;C:\%PYTHON_DIRECTORY% %PIP_EXE% install setuptools --upgrade %PIP_EXE% install future>=0.17.1 --no-deps -%PIP_EXE% install tf-estimator-nightly --no-deps +%PIP_EXE% install tf-estimator-nightly==2.1.0.dev2020031101 --no-deps %PIP_EXE% install tb-nightly --no-deps %PIP_EXE% install numpy==1.18.5 --upgrade --no-deps %PIP_EXE% install opt_einsum --upgrade From fe3bf71dbc73ddfe247245f37dd0ca3ada912064 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 17 Sep 2020 13:56:05 -0700 Subject: [PATCH 051/243] Update common.sh --- tensorflow/tools/ci_build/release/common.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tensorflow/tools/ci_build/release/common.sh b/tensorflow/tools/ci_build/release/common.sh index 2b4565ec4a09e4..fe1c9f7769da59 100644 --- a/tensorflow/tools/ci_build/release/common.sh +++ b/tensorflow/tools/ci_build/release/common.sh @@ -147,7 +147,7 @@ function install_pip_deps { ${SUDO_CMD} ${PIP_CMD} install scikit-learn==0.20.3 ${SUDO_CMD} ${PIP_CMD} install --upgrade tb-nightly ${PIP_CMD} install --user --upgrade attrs - ${PIP_CMD} install --user --upgrade tf-estimator-nightly==2.1.0.dev2020031101 + ${PIP_CMD} install --user --upgrade tf-estimator-nightly==2.0.0.dev2020011309 ${PIP_CMD} install --user --upgrade "future>=0.17.1" # =================================================================== } @@ -181,7 +181,7 @@ function install_ubuntu_16_pip_deps { "${PIP_CMD}" install portpicker --user "${PIP_CMD}" install scipy --user "${PIP_CMD}" install scikit-learn --user - "${PIP_CMD}" install --user --upgrade tf-estimator-nightly==2.1.0.dev2020031101 + "${PIP_CMD}" install --user --upgrade tf-estimator-nightly==2.0.0.dev2020011309 "${PIP_CMD}" install --user --upgrade tb-nightly # =================================================================== } @@ -224,7 +224,7 @@ function install_macos_pip_deps { ${SUDO_CMD} ${PIP_CMD} install --upgrade grpcio ${SUDO_CMD} ${PIP_CMD} install --upgrade tb-nightly ${PIP_CMD} install --user --upgrade attrs - ${PIP_CMD} install --user --upgrade tf-estimator-nightly==2.1.0.dev2020031101 + ${PIP_CMD} install --user --upgrade tf-estimator-nightly==2.0.0.dev2020011309 ${PIP_CMD} install --user --upgrade "future>=0.17.1" } From 9939886cf7021627148da9dbb66748dde731b866 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 17 Sep 2020 13:56:58 -0700 Subject: [PATCH 052/243] Update common_win.bat --- tensorflow/tools/ci_build/release/common_win.bat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/tools/ci_build/release/common_win.bat b/tensorflow/tools/ci_build/release/common_win.bat index 62c88f46e62c67..3b19e5a35fdce8 100644 --- a/tensorflow/tools/ci_build/release/common_win.bat +++ b/tensorflow/tools/ci_build/release/common_win.bat @@ -28,7 +28,7 @@ SET PATH=%PATH%;C:\%PYTHON_DIRECTORY% %PIP_EXE% install setuptools --upgrade %PIP_EXE% install future>=0.17.1 --no-deps -%PIP_EXE% install tf-estimator-nightly==2.1.0.dev2020031101 --no-deps +%PIP_EXE% install tf-estimator-nightly==2.0.0.dev2020011309 --no-deps %PIP_EXE% install tb-nightly --no-deps %PIP_EXE% install numpy==1.18.5 --upgrade --no-deps %PIP_EXE% install opt_einsum --upgrade From 44d1ce1d3d607645fa87403972e351c4727645c0 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 17 Sep 2020 19:28:59 -0700 Subject: [PATCH 053/243] Fix rename of gpu pips for single pip package on Windows GPU --- tensorflow/tools/ci_build/rel/windows/gpu_py35.bat | 3 +-- tensorflow/tools/ci_build/rel/windows/gpu_py36.bat | 3 +-- tensorflow/tools/ci_build/rel/windows/gpu_py37.bat | 3 +-- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/tensorflow/tools/ci_build/rel/windows/gpu_py35.bat b/tensorflow/tools/ci_build/rel/windows/gpu_py35.bat index cba62225bee4fe..8a21961fdef3db 100644 --- a/tensorflow/tools/ci_build/rel/windows/gpu_py35.bat +++ b/tensorflow/tools/ci_build/rel/windows/gpu_py35.bat @@ -19,5 +19,4 @@ CALL tensorflow\tools\ci_build\release\common_win.bat call tensorflow\tools\ci_build\windows\gpu\pip\run.bat --release_build --extra_build_flags "--config=v2" --extra_test_flags "--test_env=TF2_BEHAVIOR=1" --project_name "tensorflow" -for %%a in ("%~dp0\.") do set "PARENT_DIR=%%~nxa" -bash -l tensorflow\tools\ci_build\release\windows\%PARENT_DIR%\release_pip_rename.sh +bash -l tensorflow\tools\ci_build\release\windows\gpu_py35_full\release_pip_rename.sh diff --git a/tensorflow/tools/ci_build/rel/windows/gpu_py36.bat b/tensorflow/tools/ci_build/rel/windows/gpu_py36.bat index ede8bd35f52f24..7c4a395f62dd11 100644 --- a/tensorflow/tools/ci_build/rel/windows/gpu_py36.bat +++ b/tensorflow/tools/ci_build/rel/windows/gpu_py36.bat @@ -19,5 +19,4 @@ CALL tensorflow\tools\ci_build\release\common_win.bat call tensorflow\tools\ci_build\windows\gpu\pip\run.bat --release_build --extra_build_flags "--config=v2" --extra_test_flags "--test_env=TF2_BEHAVIOR=1" --project_name "tensorflow" -for %%a in ("%~dp0\.") do set "PARENT_DIR=%%~nxa" -bash -l tensorflow\tools\ci_build\release\windows\%PARENT_DIR%\release_pip_rename.sh \ No newline at end of file +bash -l tensorflow\tools\ci_build\release\windows\gpu_py36_full\release_pip_rename.sh diff --git a/tensorflow/tools/ci_build/rel/windows/gpu_py37.bat b/tensorflow/tools/ci_build/rel/windows/gpu_py37.bat index 7509270fc43796..97eb1168d1ce0d 100644 --- a/tensorflow/tools/ci_build/rel/windows/gpu_py37.bat +++ b/tensorflow/tools/ci_build/rel/windows/gpu_py37.bat @@ -19,5 +19,4 @@ CALL tensorflow\tools\ci_build\release\common_win.bat call tensorflow\tools\ci_build\windows\gpu\pip\run.bat --release_build --extra_build_flags "--config=v2" --extra_test_flags "--test_env=TF2_BEHAVIOR=1" --project_name "tensorflow" -for %%a in ("%~dp0\.") do set "PARENT_DIR=%%~nxa" -bash -l tensorflow\tools\ci_build\release\windows\%PARENT_DIR%\release_pip_rename.sh \ No newline at end of file +bash -l tensorflow\tools\ci_build\release\windows\gpu_py37_full\release_pip_rename.sh From 07bdb00c8c42141db0f12442343829043a3725ee Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 10:56:45 -0700 Subject: [PATCH 054/243] Bump sqlite to 3.33.0 This should handle CVE-2020-15358. PiperOrigin-RevId: 332484006 Change-Id: Id2e7c4e877fcfaa53184fd21139a00f3234a5e3d --- tensorflow/workspace.bzl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl index 79b2bcf1c10298..24340f36be9c2c 100755 --- a/tensorflow/workspace.bzl +++ b/tensorflow/workspace.bzl @@ -275,12 +275,12 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""): tf_http_archive( name = "org_sqlite", build_file = clean_dep("//third_party:sqlite.BUILD"), - sha256 = "e9cec01d4519e2d49b3810615237325263fe1feaceae390ee12b4a29bd73dbe2", - strip_prefix = "sqlite-amalgamation-3320300", + sha256 = "b34f4c0c0eefad9a7e515c030c18702e477f4ef7d8ade6142bdab8011b487ac6", + strip_prefix = "sqlite-amalgamation-3330000", system_build_file = clean_dep("//third_party/systemlibs:sqlite.BUILD"), urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/www.sqlite.org/2020/sqlite-amalgamation-3320300.zip", - "https://www.sqlite.org/2020/sqlite-amalgamation-3320300.zip", + "https://storage.googleapis.com/mirror.tensorflow.org/www.sqlite.org/2020/sqlite-amalgamation-3330000.zip", + "https://www.sqlite.org/2020/sqlite-amalgamation-3330000.zip", ], ) From e4ca1a4a6a9943de9d4aabcbe080dcaf5490db96 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 16:26:21 -0700 Subject: [PATCH 055/243] [tflite] Don't check for buffers on every subgraph. Buffers in the model are allocated globally, hence it makes sense to check for their presence only once (O(1)) instead of on every subgraph (O(n)). PiperOrigin-RevId: 323677724 Change-Id: I2da0c381093006828cc4c80f03dec8a917782861 --- tensorflow/lite/model.cc | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/tensorflow/lite/model.cc b/tensorflow/lite/model.cc index eb757270448508..8998ce2299befb 100644 --- a/tensorflow/lite/model.cc +++ b/tensorflow/lite/model.cc @@ -567,6 +567,11 @@ TfLiteStatus InterpreterBuilder::operator()( return cleanup_and_error(); } + if (!buffers) { + error_reporter_->Report("No buffers in the model.\n"); + return cleanup_and_error(); + } + interpreter->reset(new Interpreter(error_reporter_)); (*interpreter)->SetNumThreads(num_threads); if (subgraphs->Length() > 1) { @@ -580,9 +585,9 @@ TfLiteStatus InterpreterBuilder::operator()( (*interpreter)->subgraph(subgraph_index); auto operators = subgraph->operators(); auto tensors = subgraph->tensors(); - if (!operators || !tensors || !buffers) { + if (!operators || !tensors) { error_reporter_->Report( - "Did not get operators, tensors, or buffers in subgraph %d.\n", + "Did not get operators or tensors in subgraph %d.\n", subgraph_index); return cleanup_and_error(); } From e6b213cebb56f485bd400961a2ed109aeeac9d3c Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 13:10:41 -0700 Subject: [PATCH 056/243] [tflite] Test for `kTfLiteOptionalTensor` in `GetInput`. `GetInput`, `GetVariableInput` and `GetOutput` all fail to check for the case where `node->inputs->data[index]` is the special `kTfLiteOptionalTensor` value (-1) which then causes `context->tensors[node->inputs->data[index]]` to read from invalid memory location. This fix makes `GetInput` and related return `nullptr` in those cases, asking the caller to check for `nullptr`. This is better than having `GetOptionalInputTensor` and `GetOptionalOutputTensor` (does not exist but could be added) as using the patched `GetInput` in error would be caught by a sanitizer test in the default optimized build (due to the `-fsanitize=null` option). PiperOrigin-RevId: 332512190 Change-Id: Iabca54da2f2de02b6ece3c38b54f76d4277d689e --- tensorflow/lite/kernels/kernel_util.h | 35 ++++++++++++++++++++------- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/tensorflow/lite/kernels/kernel_util.h b/tensorflow/lite/kernels/kernel_util.h index 1026cfcefeb2da..7575f12c835026 100644 --- a/tensorflow/lite/kernels/kernel_util.h +++ b/tensorflow/lite/kernels/kernel_util.h @@ -30,28 +30,45 @@ inline int SizeOfDimension(const TfLiteTensor* t, int dim) { } inline const TfLiteTensor* GetInput(TfLiteContext* context, const TfLiteNode* node, int index) { - return &context - ->tensors[flatbuffers::EndianScalar(node->inputs->data[index])]; + const int tensor_index = flatbuffers::EndianScalar(node->inputs->data[index]); + if (tensor_index < 0) { + return nullptr; + } + return &context->tensors[tensor_index]; } inline TfLiteTensor* GetVariableInput(TfLiteContext* context, const TfLiteNode* node, int index) { - TfLiteTensor* tensor = - &context->tensors[flatbuffers::EndianScalar(node->inputs->data[index])]; + const int tensor_index = flatbuffers::EndianScalar(node->inputs->data[index]); + if (tensor_index < 0) { + return nullptr; + } + TfLiteTensor* tensor = &context->tensors[tensor_index]; return (tensor->is_variable) ? tensor : nullptr; } inline TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node, int index) { - return &context - ->tensors[flatbuffers::EndianScalar(node->outputs->data[index])]; + const int tensor_index = flatbuffers::EndianScalar(node->outputs->data[index]); + if (tensor_index < 0) { + return nullptr; + } + return &context->tensors[tensor_index]; } inline TfLiteTensor* GetTemporary(TfLiteContext* context, const TfLiteNode* node, int index) { - return &context->tensors[flatbuffers::EndianScalar( - node->temporaries->data[index])]; + const int tensor_index = flatbuffers::EndianScalar(node->temporaries->data[index]); + if (tensor_index < 0) { + return nullptr; + } + return &context->tensors[tensor_index]; } + inline const TfLiteTensor* GetIntermediates(TfLiteContext* context, const TfLiteNode* node, int index) { - return &context->tensors[node->intermediates->data[index]]; + const int tensor_index = flatbuffers::EndianScalar(node->intermediates->data[index]); + if (tensor_index < 0) { + return nullptr; + } + return &context->tensors[tensor_index]; } inline int NumInputs(const TfLiteNode* node) { return node->inputs->size; } inline int NumOutputs(const TfLiteNode* node) { return node->outputs->size; } From e47eb1453f35666795a31e208c28922b08756c69 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 18:40:41 -0700 Subject: [PATCH 057/243] [tflite] Make `GetOptionalInputTensor` the same as `GetInput`. With the previous change, there is no more need for two separate APIs. We would deprecate `GetOptionalInputTensor` in the future. PiperOrigin-RevId: 332513386 Change-Id: Id7110271c25ebd6126ad8c82a493e37e0e0756b3 --- tensorflow/lite/kernels/kernel_util.h | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/tensorflow/lite/kernels/kernel_util.h b/tensorflow/lite/kernels/kernel_util.h index 7575f12c835026..62b61fc36701b5 100644 --- a/tensorflow/lite/kernels/kernel_util.h +++ b/tensorflow/lite/kernels/kernel_util.h @@ -91,12 +91,7 @@ inline int64_t NumElements(const TfLiteTensor* t) { inline const TfLiteTensor* GetOptionalInputTensor(TfLiteContext* context, const TfLiteNode* node, int index) { - const bool use_tensor = node->inputs->data[index] != kOptionalTensor; - if (use_tensor) { - return &context - ->tensors[flatbuffers::EndianScalar(node->inputs->data[index])]; - } - return nullptr; + return GetInput(context, node, index); } // Determines whether tensor is constant. From 1c8709b437fec10875b0cf271889afec9bbf582e Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 19:22:10 -0700 Subject: [PATCH 058/243] [tflite] Ensure inputs and outputs don't overlap. If a model uses the same tensor for both an input and an output then this can result in data loss and memory corruption. This should not happen. PiperOrigin-RevId: 332522916 Change-Id: If0905b142415a9dfceaf2d181872f2a8fb88f48a --- tensorflow/lite/core/subgraph.cc | 37 ++++++++++++++++++++++++++++++++ tensorflow/lite/core/subgraph.h | 9 ++++++++ 2 files changed, 46 insertions(+) diff --git a/tensorflow/lite/core/subgraph.cc b/tensorflow/lite/core/subgraph.cc index b298ffa769b101..8b1c144a8613b7 100644 --- a/tensorflow/lite/core/subgraph.cc +++ b/tensorflow/lite/core/subgraph.cc @@ -465,6 +465,33 @@ TfLiteStatus Subgraph::CheckTensorIndices(const char* label, const int* indices, return kTfLiteOk; } +// We have two arrays and we need to check that elements from one array don't +// show up in the other. We could sort both arrays and then iterate with two +// pointers from start to finish always increasing the smaller one but since +// these arrays are usually short (<25 elements for inputs, usually <3 for +// outputs), this might be slower than the naive approach (if arrays have size n +// and m, with n >> m ~ O(1), first approach is O(nlogn) whereas the other is +// O(n)). Plus, sorting the input and output arrays might not be something we +// want as it destroys ordering of elements. +// +// If it turns out that this is an issue, we can switch to the other algorithm. +TfLiteStatus Subgraph::CheckInputAndOutputForOverlap(const int* input_indices, + int num_inputs, + const int* output_indices, + int num_outputs) { + for (int i = 0; i < num_inputs; i++) { + for (int j = 0; j < num_outputs; j++) { + if (input_indices[i] == output_indices[j]) { + ReportError("Tensor %d is both input %d and output %d\n", + input_indices[i], i, j); + consistent_ = false; + return kTfLiteError; + } + } + } + return kTfLiteOk; +} + TfLiteStatus Subgraph::BytesRequired(TfLiteType type, const int* dims, size_t dims_size, size_t* bytes) { // TODO(aselle): Check for overflow here using overflow.h in TensorFlow @@ -552,6 +579,16 @@ TfLiteStatus Subgraph::AddNodeWithParameters( &context_, CheckTensorIndices("node outputs", outputs.data(), outputs.size())); + // For builtin ops, inputs and outputs must not overlap. Custom ops must do + // this check by themselves if they don't support overlapping tensors. This + // distinction is to allow custom ops to just forward a tensor, reusing it as + // both input and output. + if (builtin_data != nullptr) { + TF_LITE_ENSURE_OK(&context_, CheckInputAndOutputForOverlap( + inputs.data(), inputs.size(), + outputs.data(), outputs.size())); + } + int new_node_index = nodes_and_registration_.size(); if (node_index) *node_index = new_node_index; nodes_and_registration_.resize(nodes_and_registration_.size() + 1); diff --git a/tensorflow/lite/core/subgraph.h b/tensorflow/lite/core/subgraph.h index 17310447c163d2..91e098b7059d1e 100644 --- a/tensorflow/lite/core/subgraph.h +++ b/tensorflow/lite/core/subgraph.h @@ -399,6 +399,15 @@ class Subgraph { TfLiteStatus CheckTensorIndices(const char* label, const int* indices, int length); + // Check that the input indices and the output indices don't overlap. + // This is needed because same tensor must not be used both as input and + // output for an operator. + // NOTE: this changes consistent_ to be false if indices are out of bounds. + TfLiteStatus CheckInputAndOutputForOverlap(const int* input_indices, + int num_inputs, + const int* output_indices, + int num_outputs); + // Compute the number of bytes required to represent a tensor with dimensions // specified by the array dims (of length dims_size). Returns the status code // and bytes. From 6b9c3f29a35a2d8547692f561da35f2eff4b77d9 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sat, 19 Sep 2020 18:00:09 -0700 Subject: [PATCH 059/243] [tflite] Ensure input tensors don't have `nullptr` buffers. A crafted TFLite model can force a node to have as input a tensor backed by a `nullptr` buffer. That is, by carefully changing the buffer index in the flatbuffer serialization, we can force the TFLite interpreter to consider a read-only tensor to be a read-write one and assume that there is an operator that has this tensor as output, writing to it and allocating memory before the tensor is used as input. If this does not happen, we get memory corruption. PiperOrigin-RevId: 332524692 Change-Id: I57ef175152a29020af9ab041dc959e5631dce40f --- tensorflow/lite/core/subgraph.cc | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tensorflow/lite/core/subgraph.cc b/tensorflow/lite/core/subgraph.cc index 8b1c144a8613b7..7ac4df08bb3e58 100644 --- a/tensorflow/lite/core/subgraph.cc +++ b/tensorflow/lite/core/subgraph.cc @@ -18,6 +18,7 @@ limitations under the License. #include #include "tensorflow/lite/arena_planner.h" +#include "third_party/tensorflow/lite/builtin_ops.h" #include "tensorflow/lite/c/c_api_internal.h" #include "tensorflow/lite/context_util.h" #include "tensorflow/lite/core/api/tensor_utils.h" @@ -792,6 +793,19 @@ TfLiteStatus Subgraph::Invoke() { tensor->data_is_stale) { TF_LITE_ENSURE_STATUS(EnsureTensorDataIsReadable(tensor_index)); } + if (tensor->data.raw == nullptr && tensor->bytes > 0) { + if (registration.builtin_code == kTfLiteBuiltinReshape && i == 1) { + // In general, having a tensor here with no buffer will be an error. + // However, for the reshape operator, the second input tensor is only + // used for the shape, not for the data. Thus, null buffer is ok. + continue; + } else { + // In all other cases, we need to return an error as otherwise we will + // trigger a null pointer dereference (likely). + ReportError("Input tensor %d lacks data", tensor_index); + return kTfLiteError; + } + } } if (check_cancelled_func_ != nullptr && From 5ba2508c5fd26396387235784834509b55d0315b Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 14:19:26 -0700 Subject: [PATCH 060/243] [tflite] Ensure `MatchingDim` does not allow buffer overflow. We check in `MatchingDim` that both arguments have the same dimensionality, however that is a `DCHECK` only enabled if building in debug mode. Hence, it could be possible to cause buffer overflows by passing in a tensor with larger dimensions as the second argument. To fix, we now make `MatchingDim` return the minimum of the two sizes. A much better fix would be to return a status object but that requires refactoring a large part of the codebase for minor benefits. PiperOrigin-RevId: 332526127 Change-Id: If627d0d2c80a685217b6e0d1e64b0872dbf1c5e4 --- tensorflow/lite/kernels/internal/types.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/lite/kernels/internal/types.h b/tensorflow/lite/kernels/internal/types.h index 38769d1bc0bb05..309c52ba52242f 100644 --- a/tensorflow/lite/kernels/internal/types.h +++ b/tensorflow/lite/kernels/internal/types.h @@ -432,7 +432,7 @@ int MatchingArraySize(const ArrayType1& array1, int index1, inline int MatchingDim(const RuntimeShape& shape1, int index1, const RuntimeShape& shape2, int index2) { TFLITE_DCHECK_EQ(shape1.Dims(index1), shape2.Dims(index2)); - return shape1.Dims(index1); + return std::min(shape1.Dims(index1), shape2.Dims(index2)); } template From e632175dea4e0399f8c139de98bdbba7e570b0c7 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sat, 19 Sep 2020 18:18:51 -0700 Subject: [PATCH 061/243] [tflite] Ensure `ResolveAxis` properly handles negative inputs. In Python, a list `l` of length `n` allows indexing with negative indices, `l[i]`. The only constraint is that `n + i` becomes positive. Code in `ResolveAxis` assumes the constraints and only checks it using a `DCHECK`. But the macro is a no-op in non-debug builds and that can result in reading from negative offsets (buffer underflows). PiperOrigin-RevId: 332530683 Change-Id: I464e073fee618054ae3719a3679739007bb3f3bc --- tensorflow/lite/kernels/internal/reference/reference_ops.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/lite/kernels/internal/reference/reference_ops.h b/tensorflow/lite/kernels/internal/reference/reference_ops.h index 8a0ab56a68912e..63d5d317571f5d 100644 --- a/tensorflow/lite/kernels/internal/reference/reference_ops.h +++ b/tensorflow/lite/kernels/internal/reference/reference_ops.h @@ -2654,6 +2654,9 @@ inline bool ResolveAxis(const int num_dims, const int* axis, // Handle negative index. int current = axis[idx] < 0 ? (axis[idx] + num_dims) : axis[idx]; TFLITE_DCHECK(current >= 0 && current < num_dims); + if (current < 0 || current >= num_dims) { + return false; + } bool is_dup = false; for (int j = 0; j < *out_num_axis; ++j) { if (out_axis[j] == current) { From 188b047647e05ed07077a779e548b6b3909b9b44 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sat, 19 Sep 2020 18:38:39 -0700 Subject: [PATCH 062/243] Properly handle negative shape dimensions from improper saved models. PiperOrigin-RevId: 308283636 Change-Id: Ib10849425de7d541d8dacfe4d0c709fbac9180b6 --- tensorflow/cc/saved_model/loader.cc | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/tensorflow/cc/saved_model/loader.cc b/tensorflow/cc/saved_model/loader.cc index 7815dbd3a4c65c..075ff2e2e21545 100644 --- a/tensorflow/cc/saved_model/loader.cc +++ b/tensorflow/cc/saved_model/loader.cc @@ -19,12 +19,16 @@ limitations under the License. #include "tensorflow/cc/saved_model/constants.h" #include "tensorflow/cc/saved_model/reader.h" +#include "tensorflow/core/framework/attr_value.pb.h" +#include "tensorflow/core/framework/node_def.pb.h" +#include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/monitoring/counter.h" #include "tensorflow/core/lib/monitoring/sampler.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/env.h" +#include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/protobuf_internal.h" #include "tensorflow/core/protobuf/graph_debug_info.pb.h" #include "tensorflow/core/protobuf/saver.pb.h" @@ -65,12 +69,34 @@ uint64 GetLatencyMicroseconds(const uint64 start_microseconds) { return end_microseconds - start_microseconds; } +// Ensure that constant tensors loaded from the saved model have valid shape. +// TODO(b/154763635): this is temporary and will be replaced with a better audit +static Status ValidateSavedTensors(const GraphDef& graph_def) { + for (const auto& node : graph_def.node()) { + const auto node_iterator = node.attr().find("value"); + if (node_iterator != node.attr().end()) { + AttrValue node_value = node_iterator->second; + if (node_value.has_tensor()) { + const PartialTensorShape node_shape(node_value.tensor().tensor_shape()); + if (node_shape.num_elements() < 0) { + return errors::FailedPrecondition( + "Saved model contains node \"", node.name(), "\" (op \"", + node.op(), "\") which initializes from a tensor with ", + node_shape.num_elements(), " elements"); + } + } + } + } + return Status::OK(); +} + Status LoadMetaGraphIntoSession(const MetaGraphDef& meta_graph_def, const SessionOptions& session_options, std::unique_ptr* session) { Session* session_p = nullptr; TF_RETURN_IF_ERROR(NewSession(session_options, &session_p)); session->reset(session_p); + RETURN_IF_ERROR(ValidateSavedTensors(meta_graph_def.graph_def())); return (*session)->Create(meta_graph_def.graph_def()); } From 009c21ae1dd31f900715b3ee52e4c735726f6006 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sat, 19 Sep 2020 18:40:34 -0700 Subject: [PATCH 063/243] Prevent loading saved models where constant nodes have no tensor value. Also reorder fuzz generated test cases following f760f88b4267d981e13f4b302c437ae800445968 PiperOrigin-RevId: 308339007 Change-Id: I11d825203964cf3397846c57fd4a6f458e8536f3 --- tensorflow/cc/saved_model/loader.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/cc/saved_model/loader.cc b/tensorflow/cc/saved_model/loader.cc index 075ff2e2e21545..1bf2c280857e5f 100644 --- a/tensorflow/cc/saved_model/loader.cc +++ b/tensorflow/cc/saved_model/loader.cc @@ -70,6 +70,7 @@ uint64 GetLatencyMicroseconds(const uint64 start_microseconds) { } // Ensure that constant tensors loaded from the saved model have valid shape. +// Also ensure that constant nodes have a value assigned to them. // TODO(b/154763635): this is temporary and will be replaced with a better audit static Status ValidateSavedTensors(const GraphDef& graph_def) { for (const auto& node : graph_def.node()) { @@ -85,6 +86,10 @@ static Status ValidateSavedTensors(const GraphDef& graph_def) { node_shape.num_elements(), " elements"); } } + } else if (node.op() == "Const") { + return errors::FailedPrecondition( + "Saved model contains node \"", node.name(), + "\" which is a constant tensor but no value has been provided"); } } return Status::OK(); From 186d563b38a601abe7c9b71bc2a3cfd40cc2c944 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sat, 19 Sep 2020 18:43:13 -0700 Subject: [PATCH 064/243] Validate `NodeDef`s from `FunctionDefLibrary` of a `GraphDef`. We already validated `NodeDef`s from a `GraphDef` but missed validating those from the `FunctionDefLibrary`. Thus, some maliciously crafted models could evade detection and cause denial of service due to a `CHECK`-fail. PiperOrigin-RevId: 332536309 Change-Id: I052efe919ff1fe2f90815e286a1aa4c54c7b94ff --- tensorflow/cc/saved_model/loader.cc | 46 +++++++++++++++++++---------- 1 file changed, 31 insertions(+), 15 deletions(-) diff --git a/tensorflow/cc/saved_model/loader.cc b/tensorflow/cc/saved_model/loader.cc index 1bf2c280857e5f..6af7c716b4ce7a 100644 --- a/tensorflow/cc/saved_model/loader.cc +++ b/tensorflow/cc/saved_model/loader.cc @@ -20,6 +20,7 @@ limitations under the License. #include "tensorflow/cc/saved_model/constants.h" #include "tensorflow/cc/saved_model/reader.h" #include "tensorflow/core/framework/attr_value.pb.h" +#include "tensorflow/core/framework/function.proto.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/lib/io/path.h" @@ -72,26 +73,41 @@ uint64 GetLatencyMicroseconds(const uint64 start_microseconds) { // Ensure that constant tensors loaded from the saved model have valid shape. // Also ensure that constant nodes have a value assigned to them. // TODO(b/154763635): this is temporary and will be replaced with a better audit +static Status ValidateNode(const NodeDef& node) { + const auto node_iterator = node.attr().find("value"); + if (node_iterator != node.attr().end()) { + AttrValue node_value = node_iterator->second; + if (node_value.has_tensor()) { + const PartialTensorShape node_shape(node_value.tensor().tensor_shape()); + if (node_shape.num_elements() < 0) { + return errors::FailedPrecondition( + "Saved model contains node \"", node.name(), "\" (op \"", node.op(), + "\") which initializes from a tensor with ", + node_shape.num_elements(), " elements"); + } + } + } else if (node.op() == "Const") { + return errors::FailedPrecondition( + "Saved model contains node \"", node.name(), + "\" which is a constant tensor but no value has been provided"); + } + return Status::OK(); +} + static Status ValidateSavedTensors(const GraphDef& graph_def) { for (const auto& node : graph_def.node()) { - const auto node_iterator = node.attr().find("value"); - if (node_iterator != node.attr().end()) { - AttrValue node_value = node_iterator->second; - if (node_value.has_tensor()) { - const PartialTensorShape node_shape(node_value.tensor().tensor_shape()); - if (node_shape.num_elements() < 0) { - return errors::FailedPrecondition( - "Saved model contains node \"", node.name(), "\" (op \"", - node.op(), "\") which initializes from a tensor with ", - node_shape.num_elements(), " elements"); - } + TF_RETURN_IF_ERROR(ValidateNode(node)); + } + + if (graph_def.has_library()) { + const FunctionDefLibrary& library = graph_def.library(); + for (const auto& function : library.function()) { + for (const auto& node : function.node_def()) { + TF_RETURN_IF_ERROR(ValidateNode(node)); } - } else if (node.op() == "Const") { - return errors::FailedPrecondition( - "Saved model contains node \"", node.name(), - "\" which is a constant tensor but no value has been provided"); } } + return Status::OK(); } From 13ce9fe15a429e3bc179e91bceefdfda26e4540a Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sat, 19 Sep 2020 18:49:51 -0700 Subject: [PATCH 065/243] Fix bad import --- tensorflow/lite/core/subgraph.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/lite/core/subgraph.cc b/tensorflow/lite/core/subgraph.cc index 7ac4df08bb3e58..483aa98a96de4d 100644 --- a/tensorflow/lite/core/subgraph.cc +++ b/tensorflow/lite/core/subgraph.cc @@ -18,7 +18,7 @@ limitations under the License. #include #include "tensorflow/lite/arena_planner.h" -#include "third_party/tensorflow/lite/builtin_ops.h" +#include "tensorflow/lite/builtin_ops.h" #include "tensorflow/lite/c/c_api_internal.h" #include "tensorflow/lite/context_util.h" #include "tensorflow/lite/core/api/tensor_utils.h" From 503d7d1cca9da3b721964f087c6c8b6132e3c06d Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 15:52:05 -0700 Subject: [PATCH 066/243] Validate `data_splits` for `tf.StringNGrams`. Without validation, we can cause a heap buffer overflow which results in data leakage and/or segfaults. PiperOrigin-RevId: 332543478 Change-Id: Iee5bda24497a195d09d122355502480830b1b317 --- tensorflow/core/kernels/string_ngrams_op.cc | 13 ++++++++++++ tensorflow/python/ops/raw_ops_test.py | 23 ++++++++++++++++++++- 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/string_ngrams_op.cc b/tensorflow/core/kernels/string_ngrams_op.cc index dc757a01fcf4d9..2fd36bf9771186 100644 --- a/tensorflow/core/kernels/string_ngrams_op.cc +++ b/tensorflow/core/kernels/string_ngrams_op.cc @@ -19,6 +19,7 @@ limitations under the License. #include "absl/strings/ascii.h" #include "absl/strings/str_cat.h" #include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/platform/errors.h" namespace tensorflow { namespace text { @@ -60,6 +61,18 @@ class StringNGramsOp : public tensorflow::OpKernel { OP_REQUIRES_OK(context, context->input("data_splits", &splits)); const auto& splits_vec = splits->flat(); + // Validate that the splits are valid indices into data + const int input_data_size = data->flat().size(); + const int splits_vec_size = splits_vec.size(); + for (int i = 0; i < splits_vec_size; ++i) { + bool valid_splits = splits_vec(i) >= 0; + valid_splits = valid_splits && (splits_vec(i) <= input_data_size); + OP_REQUIRES( + context, valid_splits, + errors::InvalidArgument("Invalid split value ", splits_vec(i), + ", must be in [0,", input_data_size, "]")); + } + // If there is no data or size, return an empty RT. if (data->flat().size() == 0 || splits_vec.size() == 0) { tensorflow::Tensor* empty; diff --git a/tensorflow/python/ops/raw_ops_test.py b/tensorflow/python/ops/raw_ops_test.py index fff94f5c25ae8c..ad4f991a6a100e 100644 --- a/tensorflow/python/ops/raw_ops_test.py +++ b/tensorflow/python/ops/raw_ops_test.py @@ -18,16 +18,21 @@ from __future__ import division from __future__ import print_function +from absl.testing import parameterized + from tensorflow.python.eager import context from tensorflow.python.framework import constant_op +from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import gen_math_ops +from tensorflow.python.ops import gen_string_ops from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes -class RawOpsTest(test.TestCase): +@test_util.disable_tfrt +class RawOpsTest(test.TestCase, parameterized.TestCase): def testSimple(self): x = constant_op.constant(1) @@ -58,6 +63,22 @@ def testDefaults(self): gen_math_ops.Any(input=x, axis=0), gen_math_ops.Any(input=x, axis=0, keep_dims=False)) + @parameterized.parameters([[0, 8]], [[-1, 6]]) + def testStringNGramsBadDataSplits(self, splits): + data = ["aa", "bb", "cc", "dd", "ee", "ff"] + with self.assertRaisesRegex(errors.InvalidArgumentError, + "Invalid split value"): + self.evaluate( + gen_string_ops.string_n_grams( + data=data, + data_splits=splits, + separator="", + ngram_widths=[2], + left_pad="", + right_pad="", + pad_width=0, + preserve_short_sequences=False)) + if __name__ == "__main__": ops.enable_eager_execution() From 1b9795ae2277c2044f2773cc7a53714a773f3909 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 16:23:20 -0700 Subject: [PATCH 067/243] Prevent segfault in `GetSessionHandle{,V2}`. In eager mode, session state is null. PiperOrigin-RevId: 332548597 Change-Id: If094812c2e094044220b9ba28f7d7601be042f38 --- tensorflow/core/kernels/session_ops.cc | 8 +++++++- tensorflow/python/ops/raw_ops_test.py | 8 ++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/session_ops.cc b/tensorflow/core/kernels/session_ops.cc index d83a714452f2af..e7e73549bc32f3 100644 --- a/tensorflow/core/kernels/session_ops.cc +++ b/tensorflow/core/kernels/session_ops.cc @@ -16,6 +16,7 @@ limitations under the License. // See docs in ../ops/data_flow_ops.cc. #include + #include #include "tensorflow/core/common_runtime/device.h" @@ -27,6 +28,7 @@ limitations under the License. #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/map_util.h" +#include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/platform/mutex.h" @@ -42,7 +44,11 @@ class GetSessionHandleOp : public OpKernel { void Compute(OpKernelContext* ctx) override { const Tensor& val = ctx->input(0); - int64 id = ctx->session_state()->GetNewId(); + auto session_state = ctx->session_state(); + OP_REQUIRES(ctx, session_state != nullptr, + errors::FailedPrecondition( + "GetSessionHandle called on null session state")); + int64 id = session_state->GetNewId(); TensorStore::TensorAndKey tk{val, id, requested_device()}; OP_REQUIRES_OK(ctx, ctx->tensor_store()->AddTensor(name(), tk)); diff --git a/tensorflow/python/ops/raw_ops_test.py b/tensorflow/python/ops/raw_ops_test.py index ad4f991a6a100e..0dbd7dcb9169e7 100644 --- a/tensorflow/python/ops/raw_ops_test.py +++ b/tensorflow/python/ops/raw_ops_test.py @@ -25,6 +25,7 @@ from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util +from tensorflow.python.ops import gen_data_flow_ops from tensorflow.python.ops import gen_math_ops from tensorflow.python.ops import gen_string_ops from tensorflow.python.platform import test @@ -79,6 +80,13 @@ def testStringNGramsBadDataSplits(self, splits): pad_width=0, preserve_short_sequences=False)) + def testGetSessionHandle(self): + if context.executing_eagerly(): + with self.assertRaisesRegex( + errors.FailedPreconditionError, + "GetSessionHandle called on null session state"): + gen_data_flow_ops.GetSessionHandle(value=[1]) + if __name__ == "__main__": ops.enable_eager_execution() From 15c5a3ac6895a29517ceda7d22e6297306114904 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 16:54:17 -0700 Subject: [PATCH 068/243] Prevent format string vulnerability in `tf.strings.as_string`. The `printf` format specifier only allows `#`, `0`, `-`, `+` and space as flag characters. Others are interpreted as width/precision/length modifier or conversion specifiers. If a character does not fit into any of these sets `printf` just displays it. Also add a test suite for `tf.strings.as_string`. Also fix the issue where the flag character was used only if width was specified. PiperOrigin-RevId: 332553548 Change-Id: Ie57cf2a7c14d1a36097642794c14329db669bbba --- tensorflow/core/kernels/BUILD | 18 ++ tensorflow/core/kernels/as_string_op.cc | 19 +- tensorflow/core/kernels/as_string_op_test.cc | 245 +++++++++++++++++++ 3 files changed, 281 insertions(+), 1 deletion(-) create mode 100644 tensorflow/core/kernels/as_string_op_test.cc diff --git a/tensorflow/core/kernels/BUILD b/tensorflow/core/kernels/BUILD index ac9c6299833e44..ca79757899b98c 100644 --- a/tensorflow/core/kernels/BUILD +++ b/tensorflow/core/kernels/BUILD @@ -5713,6 +5713,24 @@ tf_kernel_library( deps = STRING_DEPS, ) +tf_cc_test( + name = "as_string_op_test", + size = "small", + srcs = ["as_string_op_test.cc"], + deps = [ + ":as_string_op", + ":ops_testutil", + ":ops_util", + "//tensorflow/core:core_cpu", + "//tensorflow/core:framework", + "//tensorflow/core:lib", + "//tensorflow/core:protos_all_cc", + "//tensorflow/core:test", + "//tensorflow/core:test_main", + "//tensorflow/core:testlib", + ], +) + tf_kernel_library( name = "unicode_ops", prefix = "unicode_ops", diff --git a/tensorflow/core/kernels/as_string_op.cc b/tensorflow/core/kernels/as_string_op.cc index 8341909fbc8409..b9af976a654d99 100644 --- a/tensorflow/core/kernels/as_string_op.cc +++ b/tensorflow/core/kernels/as_string_op.cc @@ -65,9 +65,26 @@ class AsStringOp : public OpKernel { OP_REQUIRES(ctx, !(scientific && shortest), errors::InvalidArgument( "Cannot select both scientific and shortest notation")); + format_ = "%"; + if (!fill_string.empty()) { + switch (fill_string[0]) { + case ' ': + case '+': + case '-': + case '0': + case '#': + strings::Appendf(&format_, "%s", fill_string.c_str()); + break; + default: + bool fill_not_supported = true; + OP_REQUIRES(ctx, !fill_not_supported, + errors::InvalidArgument("Fill argument not supported: \"", + fill_string, "\"")); + } + } if (width > -1) { - strings::Appendf(&format_, "%s%d", fill_string.c_str(), width); + strings::Appendf(&format_, "%d", width); } if (precision > -1) { strings::Appendf(&format_, ".%d", precision); diff --git a/tensorflow/core/kernels/as_string_op_test.cc b/tensorflow/core/kernels/as_string_op_test.cc new file mode 100644 index 00000000000000..dff78e25e72025 --- /dev/null +++ b/tensorflow/core/kernels/as_string_op_test.cc @@ -0,0 +1,245 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/core/framework/fake_input.h" +#include "tensorflow/core/framework/node_def_builder.h" +#include "tensorflow/core/framework/tensor.h" +#include "tensorflow/core/framework/tensor_testutil.h" +#include "tensorflow/core/framework/types.h" +#include "tensorflow/core/kernels/ops_testutil.h" +#include "tensorflow/core/kernels/ops_util.h" +#include "tensorflow/core/lib/core/status_test_util.h" + +namespace tensorflow { +namespace { + +class AsStringGraphTest : public OpsTestBase { + protected: + Status Init(DataType input_type, const string& fill = "", int width = -1, + int precision = -1, bool scientific = false, + bool shortest = false) { + TF_CHECK_OK(NodeDefBuilder("op", "AsString") + .Input(FakeInput(input_type)) + .Attr("fill", fill) + .Attr("precision", precision) + .Attr("scientific", scientific) + .Attr("shortest", shortest) + .Attr("width", width) + .Finalize(node_def())); + return InitOp(); + } +}; + +TEST_F(AsStringGraphTest, Int8) { + TF_ASSERT_OK(Init(DT_INT8)); + + AddInputFromArray(TensorShape({3}), {-42, 0, 42}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({3})); + test::FillValues(&expected, {"-42", "0", "42"}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, Int64) { + TF_ASSERT_OK(Init(DT_INT64)); + + AddInputFromArray(TensorShape({3}), {-42, 0, 42}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({3})); + test::FillValues(&expected, {"-42", "0", "42"}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, FloatDefault) { + TF_ASSERT_OK(Init(DT_FLOAT)); + + AddInputFromArray(TensorShape({4}), {-42, 0, 3.14159, 42}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({4})); + test::FillValues( + &expected, {"-42.000000", "0.000000", "3.141590", "42.000000"}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, FloatScientific) { + TF_ASSERT_OK(Init(DT_FLOAT, /*fill=*/"", /*width=*/-1, /*precision=*/-1, + /*scientific=*/true)); + + AddInputFromArray(TensorShape({4}), {-42, 0, 3.14159, 42}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({4})); + test::FillValues(&expected, {"-4.200000e+01", "0.000000e+00", + "3.141590e+00", "4.200000e+01"}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, FloatShortest) { + TF_ASSERT_OK(Init(DT_FLOAT, /*fill=*/"", /*width=*/-1, /*precision=*/-1, + /*scientific=*/false, /*shortest=*/true)); + + AddInputFromArray(TensorShape({4}), {-42, 0, 3.14159, 42}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({4})); + test::FillValues(&expected, {"-42", "0", "3.14159", "42"}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, FloatPrecisionOnly) { + TF_ASSERT_OK(Init(DT_FLOAT, /*fill=*/"", /*width=*/-1, /*precision=*/2)); + + AddInputFromArray(TensorShape({4}), {-42, 0, 3.14159, 42}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({4})); + test::FillValues(&expected, {"-42.00", "0.00", "3.14", "42.00"}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, FloatWidthOnly) { + TF_ASSERT_OK(Init(DT_FLOAT, /*fill=*/"", /*width=*/5)); + + AddInputFromArray(TensorShape({4}), {-42, 0, 3.14159, 42}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({4})); + test::FillValues( + &expected, {"-42.000000", "0.000000", "3.141590", "42.000000"}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, Float_5_2_Format) { + TF_ASSERT_OK(Init(DT_FLOAT, /*fill=*/"", /*width=*/5, /*precision=*/2)); + + AddInputFromArray(TensorShape({4}), {-42, 0, 3.14159, 42}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({4})); + test::FillValues(&expected, {"-42.00", " 0.00", " 3.14", "42.00"}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, Complex) { + TF_ASSERT_OK(Init(DT_COMPLEX64, /*fill=*/"", /*width=*/5, /*precision=*/2)); + + AddInputFromArray(TensorShape({3}), {{-4, 2}, {0}, {3.14159, -1}}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({3})); + test::FillValues( + &expected, {"(-4.00, 2.00)", "( 0.00, 0.00)", "( 3.14,-1.00)"}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, Bool) { + TF_ASSERT_OK(Init(DT_BOOL)); + + AddInputFromArray(TensorShape({2}), {true, false}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({2})); + test::FillValues(&expected, {"true", "false"}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, String) { + Status s = Init(DT_STRING); + ASSERT_EQ(error::INVALID_ARGUMENT, s.code()); + ASSERT_TRUE(absl::StrContains( + s.error_message(), + "Value for attr 'T' of string is not in the list of allowed values")); +} + +TEST_F(AsStringGraphTest, OnlyOneOfScientificAndShortest) { + Status s = Init(DT_FLOAT, /*fill=*/"", /*width=*/-1, /*precision=*/-1, + /*scientific=*/true, /*shortest=*/true); + ASSERT_EQ(error::INVALID_ARGUMENT, s.code()); + ASSERT_TRUE( + absl::StrContains(s.error_message(), + "Cannot select both scientific and shortest notation")); +} + +TEST_F(AsStringGraphTest, NoShortestForNonFloat) { + Status s = Init(DT_INT32, /*fill=*/"", /*width=*/-1, /*precision=*/-1, + /*scientific=*/false, /*shortest=*/true); + ASSERT_EQ(error::INVALID_ARGUMENT, s.code()); + ASSERT_TRUE(absl::StrContains( + s.error_message(), + "scientific and shortest format not supported for datatype")); +} + +TEST_F(AsStringGraphTest, NoScientificForNonFloat) { + Status s = Init(DT_INT32, /*fill=*/"", /*width=*/-1, /*precision=*/-1, + /*scientific=*/true); + ASSERT_EQ(error::INVALID_ARGUMENT, s.code()); + ASSERT_TRUE(absl::StrContains( + s.error_message(), + "scientific and shortest format not supported for datatype")); +} + +TEST_F(AsStringGraphTest, NoPrecisionForNonFloat) { + Status s = Init(DT_INT32, /*fill=*/"", /*width=*/-1, /*precision=*/5); + ASSERT_EQ(error::INVALID_ARGUMENT, s.code()); + ASSERT_TRUE(absl::StrContains(s.error_message(), + "precision not supported for datatype")); +} + +TEST_F(AsStringGraphTest, LongFill) { + Status s = Init(DT_INT32, /*fill=*/"asdf"); + ASSERT_EQ(error::INVALID_ARGUMENT, s.code()); + ASSERT_TRUE(absl::StrContains(s.error_message(), + "Fill string must be one or fewer characters")); +} + +TEST_F(AsStringGraphTest, FillWithZero) { + TF_ASSERT_OK(Init(DT_INT64, /*fill=*/"0", /*width=*/4)); + + AddInputFromArray(TensorShape({3}), {-42, 0, 42}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({3})); + test::FillValues(&expected, {"-042", "0000", "0042"}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, FillWithSpace) { + TF_ASSERT_OK(Init(DT_INT64, /*fill=*/" ", /*width=*/4)); + + AddInputFromArray(TensorShape({3}), {-42, 0, 42}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({3})); + test::FillValues(&expected, {" -42", " 0", " 42"}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, FillWithChar1) { + TF_ASSERT_OK(Init(DT_INT64, /*fill=*/"-", /*width=*/4)); + + AddInputFromArray(TensorShape({3}), {-42, 0, 42}); + TF_ASSERT_OK(RunOpKernel()); + Tensor expected(allocator(), DT_STRING, TensorShape({3})); + test::FillValues(&expected, {"-42 ", "0 ", "42 "}); + test::ExpectTensorEqual(expected, *GetOutput(0)); +} + +TEST_F(AsStringGraphTest, FillWithChar3) { + Status s = Init(DT_INT32, /*fill=*/"s"); + ASSERT_EQ(error::INVALID_ARGUMENT, s.code()); + ASSERT_TRUE( + absl::StrContains(s.error_message(), "Fill argument not supported")); +} + +TEST_F(AsStringGraphTest, FillWithChar4) { + Status s = Init(DT_INT32, /*fill=*/"n"); + ASSERT_EQ(error::INVALID_ARGUMENT, s.code()); + ASSERT_TRUE( + absl::StrContains(s.error_message(), "Fill argument not supported")); +} + +} // end namespace +} // end namespace tensorflow From da8a73a80cceb20f1dd5ecec26508f8e506a511e Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 17:21:24 -0700 Subject: [PATCH 069/243] Prevent `int64` to `int` truncation in `Shard` API usage. The function argument in `Shard` must be a function of two `int64` arguments. However, we are passing in a function with two `int` arguments. Thus, for large workloads, these arguments get truncated from positive `int64` values to negative `int` ones, resulting in a buffer out of bounds write. PiperOrigin-RevId: 332557334 Change-Id: I236c9a2e7f53580e520571da8ba941a3aa9fa0b5 --- tensorflow/core/kernels/random_op.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/random_op.cc b/tensorflow/core/kernels/random_op.cc index 2fa93fb529cbcb..45379414710ae8 100644 --- a/tensorflow/core/kernels/random_op.cc +++ b/tensorflow/core/kernels/random_op.cc @@ -204,7 +204,7 @@ class RandomGammaOp : public OpKernel { // avoid a couple flops which can be done on a per-alpha basis. auto DoWork = [num_samples, num_alphas, &rng, samples_flat, alpha_flat]( - int start_output, int limit_output) { + int64 start_output, int64 limit_output) { using Eigen::numext::exp; using Eigen::numext::log; using Eigen::numext::pow; From 1740085f238d49cfa6e49b0c6bdb2383e9ffec1c Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 17:49:02 -0700 Subject: [PATCH 070/243] Prevent integer truncation from 64 to 32 bits. The `tensorflow::Shard` functions last argument must be a 2 argument function where both arguments are `int64` (`long long`, 64 bits). However, there are usages where code passes in a function where arguments are `int` or `int32` (32 bits). In these cases, it is possible that the integer truncation would later cause a segfault or other unexpected behavior. PiperOrigin-RevId: 332560414 Change-Id: Ief649406babc8d4f60b3e7a9d573cbcc5ce5b767 --- tensorflow/core/kernels/boosted_trees/prediction_ops.cc | 6 +++--- tensorflow/core/kernels/nth_element_op.cc | 3 ++- .../core/kernels/parameterized_truncated_normal_op.cc | 4 ++-- tensorflow/core/kernels/random_binomial_op.cc | 2 +- tensorflow/core/kernels/random_poisson_op.cc | 2 +- tensorflow/core/kernels/topk_op.cc | 2 +- 6 files changed, 10 insertions(+), 9 deletions(-) diff --git a/tensorflow/core/kernels/boosted_trees/prediction_ops.cc b/tensorflow/core/kernels/boosted_trees/prediction_ops.cc index 19be606f184939..e3a908d1b6b20d 100644 --- a/tensorflow/core/kernels/boosted_trees/prediction_ops.cc +++ b/tensorflow/core/kernels/boosted_trees/prediction_ops.cc @@ -121,7 +121,7 @@ class BoostedTreesTrainingPredictOp : public OpKernel { auto do_work = [&resource, &bucketized_features, &cached_tree_ids, &cached_node_ids, &output_partial_logits, &output_node_ids, latest_tree, - this](int32 start, int32 end) { + this](int64 start, int64 end) { for (int32 i = start; i < end; ++i) { int32 tree_id = cached_tree_ids(i); int32 node_id = cached_node_ids(i); @@ -237,7 +237,7 @@ class BoostedTreesPredictOp : public OpKernel { const int32 last_tree = resource->num_trees() - 1; auto do_work = [&resource, &bucketized_features, &output_logits, last_tree, - this](int32 start, int32 end) { + this](int64 start, int64 end) { for (int32 i = start; i < end; ++i) { std::vector tree_logits(logits_dimension_, 0.0); int32 tree_id = 0; @@ -340,7 +340,7 @@ class BoostedTreesExampleDebugOutputsOp : public OpKernel { // path. Note: feature_ids has one less value than logits_path because the // first value of each logit path will be the bias. auto do_work = [&resource, &bucketized_features, &output_debug_info, - last_tree](int32 start, int32 end) { + last_tree](int64 start, int64 end) { for (int32 i = start; i < end; ++i) { // Proto to store debug outputs, per example. boosted_trees::DebugOutput example_debug_info; diff --git a/tensorflow/core/kernels/nth_element_op.cc b/tensorflow/core/kernels/nth_element_op.cc index 0e43cc19aae513..bd523f51e27e2d 100644 --- a/tensorflow/core/kernels/nth_element_op.cc +++ b/tensorflow/core/kernels/nth_element_op.cc @@ -95,7 +95,8 @@ struct NthElementFunctor { const int last_dim = input_tensor.dim_size(input_tensor.dims() - 1); // Allocate each row to different shard. - auto SubNthElement = [&, input, output, last_dim, n](int start, int limit) { + auto SubNthElement = [&, input, output, last_dim, n](int64 start, + int64 limit) { // std::nth_element would rearrange the array, so we need a new buffer. std::vector buf(last_dim); diff --git a/tensorflow/core/kernels/parameterized_truncated_normal_op.cc b/tensorflow/core/kernels/parameterized_truncated_normal_op.cc index 09dc3ffd12910f..b0b720b4e030fa 100644 --- a/tensorflow/core/kernels/parameterized_truncated_normal_op.cc +++ b/tensorflow/core/kernels/parameterized_truncated_normal_op.cc @@ -69,8 +69,8 @@ struct TruncatedNormalFunctor { auto DoWork = [samples_per_batch, num_elements, &ctx, &means, &stddevs, &minvals, &maxvals, &gen, &output, - kStdDevsInsideBoundsToUseRandnSampler](int start_batch, - int limit_batch) { + kStdDevsInsideBoundsToUseRandnSampler](int64 start_batch, + int64 limit_batch) { // Capturing "gen" by-value would only make a copy for the _shared_ // lambda. Since we want to let each worker have its own copy, we pass // "gen" by reference and explicitly do a copy assignment here. diff --git a/tensorflow/core/kernels/random_binomial_op.cc b/tensorflow/core/kernels/random_binomial_op.cc index df27541bb66efa..f89ff5e986d882 100644 --- a/tensorflow/core/kernels/random_binomial_op.cc +++ b/tensorflow/core/kernels/random_binomial_op.cc @@ -176,7 +176,7 @@ struct RandomBinomialFunctor { auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads()); auto DoWork = [samples_per_batch, num_elements, &counts, &probs, &gen, - &output](int start_batch, int limit_batch) { + &output](int64 start_batch, int64 limit_batch) { // Capturing "gen" by-value would only make a copy for the _shared_ // lambda. Since we want to let each worker have its own copy, we pass // "gen" by reference and explicitly do a copy assignment here. diff --git a/tensorflow/core/kernels/random_poisson_op.cc b/tensorflow/core/kernels/random_poisson_op.cc index 64fb4a5c228480..7962c4322075a6 100644 --- a/tensorflow/core/kernels/random_poisson_op.cc +++ b/tensorflow/core/kernels/random_poisson_op.cc @@ -103,7 +103,7 @@ struct PoissonFunctor { typedef random::UniformDistribution Uniform; auto DoWork = [num_samples, num_rate, &rng, samples_flat, rate_flat]( - int start_output, int limit_output) { + int64 start_output, int64 limit_output) { // Capturing "rng" by value would only make a copy for the _shared_ // lambda. Since we want to let each worker have its own copy, we pass // "rng" by reference and explicitly do a copy assignment. diff --git a/tensorflow/core/kernels/topk_op.cc b/tensorflow/core/kernels/topk_op.cc index 02b99e44880a56..327ed5bfa25f29 100644 --- a/tensorflow/core/kernels/topk_op.cc +++ b/tensorflow/core/kernels/topk_op.cc @@ -136,7 +136,7 @@ struct TopKFunctor { return Status::OK(); } - auto SortIndices = [&](int start_batch, int limit_batch) { + auto SortIndices = [&](int64 start_batch, int64 limit_batch) { for (int32 b = start_batch; b < limit_batch; ++b) { const T* input_data = &input(b, 0); const auto stable_comp = [input_data](const int32 a, const int32 b) { From 89e10b0ea5bdf9f2050e4db0194d79dfc289ffbd Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 18:43:38 -0700 Subject: [PATCH 071/243] Fix heap buffer overflow in `tf.raw_ops.SparseFillEmptyRowsGrad`. Also add tests as they were lacking PiperOrigin-RevId: 332566071 Change-Id: I44277578e26ff5fb3fdb0dcbba6e91b2ec3e7859 --- .../core/kernels/sparse_fill_empty_rows_op.cc | 12 ++++- tensorflow/python/ops/sparse_ops_test.py | 54 +++++++++++++++++++ 2 files changed, 64 insertions(+), 2 deletions(-) diff --git a/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc b/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc index c9365be5119391..f674836cb8036f 100644 --- a/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc +++ b/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc @@ -213,6 +213,9 @@ class SparseFillEmptyRowsGradOp : public OpKernel { context, TensorShapeUtils::IsVector(reverse_index_map_t->shape()), errors::InvalidArgument("reverse_index_map must be a vector, saw: ", reverse_index_map_t->shape().DebugString())); + OP_REQUIRES(context, TensorShapeUtils::IsVector(grad_values_t->shape()), + errors::InvalidArgument("grad_values must be a vector, saw: ", + grad_values_t->shape().DebugString())); const auto reverse_index_map = reverse_index_map_t->vec(); const auto grad_values = grad_values_t->vec(); @@ -241,8 +244,13 @@ class SparseFillEmptyRowsGradOp : public OpKernel { // Locate the index of the output of the forward prop associated // with this location in the input of the forward prop. Copy // the gradient into it. Mark it as visited. - d_values(i) = grad_values(reverse_index_map(i)); - visited(reverse_index_map(i)) = true; + int64 reverse_index = reverse_index_map(i); + OP_REQUIRES( + context, 0 <= reverse_index && reverse_index < N_full, + errors::InvalidArgument("Elements in reverse index must be in [0, ", + N_full, ") but got ", reverse_index)); + d_values(i) = grad_values(reverse_index); + visited(reverse_index) = true; } for (int j = 0; j < N_full; ++j) { // The default value gradient gets the accumulated remainder of diff --git a/tensorflow/python/ops/sparse_ops_test.py b/tensorflow/python/ops/sparse_ops_test.py index 90dbded64329f2..1c34e16898fae9 100644 --- a/tensorflow/python/ops/sparse_ops_test.py +++ b/tensorflow/python/ops/sparse_ops_test.py @@ -21,6 +21,7 @@ from absl.testing import parameterized import numpy as np +from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops @@ -28,6 +29,7 @@ from tensorflow.python.framework import test_util # Need array_grad to register gradient for Identity. from tensorflow.python.ops import array_grad # pylint: disable=unused-import +from tensorflow.python.ops import gen_sparse_ops from tensorflow.python.ops import gradient_checker_v2 as gradient_checker from tensorflow.python.ops import math_ops # Need sparse_grad to register gradient for SparseToDense. @@ -144,5 +146,57 @@ def testSparseTensorToDenseString(self): self.assertAllEqual(expected_dense, result_dense) +@test_util.run_all_in_graph_and_eager_modes +class RawOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase): + + def testSparseFillEmptyRowsGrad(self): + reverse_index_map = [2, 1] + grad_values = [0, 1, 2, 3] + d_values, d_default_value = self.evaluate( + gen_sparse_ops.SparseFillEmptyRowsGrad( + reverse_index_map=reverse_index_map, grad_values=grad_values)) + self.assertAllEqual([2, 1], d_values) + self.assertEqual(3, d_default_value) + + def testSparseFillEmptyRowsGradNegativeIndexMapValue(self): + reverse_index_map = [2, -1] + grad_values = [0, 1, 2, 3] + with self.assertRaisesRegex( + errors.InvalidArgumentError, + r'Elements in reverse index must be in \[0, 4\)'): + self.evaluate( + gen_sparse_ops.SparseFillEmptyRowsGrad( + reverse_index_map=reverse_index_map, grad_values=grad_values)) + + def testSparseFillEmptyRowsGradLargeIndexMapValue(self): + reverse_index_map = [2, 10] + grad_values = [0, 1, 2, 3] + with self.assertRaisesRegex( + errors.InvalidArgumentError, + r'Elements in reverse index must be in \[0, 4\)'): + self.evaluate( + gen_sparse_ops.SparseFillEmptyRowsGrad( + reverse_index_map=reverse_index_map, grad_values=grad_values)) + + def testSparseFillEmptyRowsGradMatrix(self): + reverse_index_map = [0, 1] + grad_values = [[0, 1], [2, 3]] + # Note: Eager mode and graph mode throw different errors here. Graph mode + # will fail with a ValueError from the shape checking logic, while Eager + # will fail with an InvalidArgumentError from the kernel itself. + if context.executing_eagerly(): + with self.assertRaisesRegex(errors.InvalidArgumentError, + r'grad_values must be a vector'): + self.evaluate( + gen_sparse_ops.SparseFillEmptyRowsGrad( + reverse_index_map=reverse_index_map, grad_values=grad_values)) + else: + with self.assertRaisesRegex(ValueError, + r'Shape must be rank 1 but is rank 2'): + self.evaluate( + gen_sparse_ops.SparseFillEmptyRowsGrad( + reverse_index_map=reverse_index_map, grad_values=grad_values)) + + if __name__ == '__main__': googletest.main() From 7a8ccaca69ddd0863abee77bd5542d06623bcba4 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 18 Sep 2020 21:16:05 -0700 Subject: [PATCH 072/243] Fix undefined behavior in `tf.raw_ops.Switch` in eager mode. PiperOrigin-RevId: 332578058 Change-Id: I9727571d2f21476b10d8aa27c1b7176564b76ac9 --- tensorflow/core/common_runtime/eager/kernel_and_device.cc | 7 ++++++- .../python/kernel_tests/control_flow_ops_py_test.py | 8 ++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/common_runtime/eager/kernel_and_device.cc b/tensorflow/core/common_runtime/eager/kernel_and_device.cc index 3afc9ad9a62bf8..e7760b74d841ff 100644 --- a/tensorflow/core/common_runtime/eager/kernel_and_device.cc +++ b/tensorflow/core/common_runtime/eager/kernel_and_device.cc @@ -323,7 +323,12 @@ Status KernelAndDeviceOp::Run( if (outputs != nullptr) { outputs->clear(); for (int i = 0; i < context.num_outputs(); ++i) { - outputs->push_back(Tensor(*context.mutable_output(i))); + const auto* output_tensor = context.mutable_output(i); + if (output_tensor != nullptr) { + outputs->push_back(Tensor(*output_tensor)); + } else { + outputs->push_back(Tensor()); + } } } return Status::OK(); diff --git a/tensorflow/python/kernel_tests/control_flow_ops_py_test.py b/tensorflow/python/kernel_tests/control_flow_ops_py_test.py index 64da8352419761..3c3fcdfdd096dc 100644 --- a/tensorflow/python/kernel_tests/control_flow_ops_py_test.py +++ b/tensorflow/python/kernel_tests/control_flow_ops_py_test.py @@ -4542,6 +4542,14 @@ def testUInt64SwitchMerge(self): result = control_flow_ops.merge([v_f, v_t]) self.evaluate(result) + def testSwitchEagerMode(self): + if not context.executing_eagerly(): + return + input_data = [1, 2, 3, 4] + vf, vt = control_flow_ops.switch(input_data, False) + self.assertAllEqual(vf, input_data) + self.assertAllEqual(vt, []) + @test_util.run_deprecated_v1 def testQIntArgAndRet(self): From 1e3a2e234def95565f77202bd1e468e7be945f4c Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sun, 20 Sep 2020 15:24:40 -0700 Subject: [PATCH 073/243] Solve leftover from merge conflict --- tensorflow/cc/saved_model/loader.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/cc/saved_model/loader.cc b/tensorflow/cc/saved_model/loader.cc index 6af7c716b4ce7a..aa5b697d070d7b 100644 --- a/tensorflow/cc/saved_model/loader.cc +++ b/tensorflow/cc/saved_model/loader.cc @@ -20,7 +20,7 @@ limitations under the License. #include "tensorflow/cc/saved_model/constants.h" #include "tensorflow/cc/saved_model/reader.h" #include "tensorflow/core/framework/attr_value.pb.h" -#include "tensorflow/core/framework/function.proto.h" +#include "tensorflow/core/framework/function.pb.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/lib/io/path.h" From 7a377c92fa06ca46b71bca74549b172162df7f32 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sun, 20 Sep 2020 15:58:56 -0700 Subject: [PATCH 074/243] Fix import path --- tensorflow/cc/saved_model/loader.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/cc/saved_model/loader.cc b/tensorflow/cc/saved_model/loader.cc index aa5b697d070d7b..1cb602a03e42cd 100644 --- a/tensorflow/cc/saved_model/loader.cc +++ b/tensorflow/cc/saved_model/loader.cc @@ -23,13 +23,13 @@ limitations under the License. #include "tensorflow/core/framework/function.pb.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/tensor.pb.h" +#include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/monitoring/counter.h" #include "tensorflow/core/lib/monitoring/sampler.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/env.h" -#include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/protobuf_internal.h" #include "tensorflow/core/protobuf/graph_debug_info.pb.h" #include "tensorflow/core/protobuf/saver.pb.h" From d2f89833e04f9729aaad50f97917eabe01010e00 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sun, 20 Sep 2020 16:36:54 -0700 Subject: [PATCH 075/243] Fix typo in macro --- tensorflow/cc/saved_model/loader.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/cc/saved_model/loader.cc b/tensorflow/cc/saved_model/loader.cc index 1cb602a03e42cd..fa62816637f8c9 100644 --- a/tensorflow/cc/saved_model/loader.cc +++ b/tensorflow/cc/saved_model/loader.cc @@ -117,7 +117,7 @@ Status LoadMetaGraphIntoSession(const MetaGraphDef& meta_graph_def, Session* session_p = nullptr; TF_RETURN_IF_ERROR(NewSession(session_options, &session_p)); session->reset(session_p); - RETURN_IF_ERROR(ValidateSavedTensors(meta_graph_def.graph_def())); + TF_RETURN_IF_ERROR(ValidateSavedTensors(meta_graph_def.graph_def())); return (*session)->Create(meta_graph_def.graph_def()); } From 8db974a1add4ba633b54b44000a2a75722bfbac1 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sun, 20 Sep 2020 17:04:53 -0700 Subject: [PATCH 076/243] Fix import path --- tensorflow/core/kernels/string_ngrams_op.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/string_ngrams_op.cc b/tensorflow/core/kernels/string_ngrams_op.cc index 2fd36bf9771186..62d5d22413544f 100644 --- a/tensorflow/core/kernels/string_ngrams_op.cc +++ b/tensorflow/core/kernels/string_ngrams_op.cc @@ -19,7 +19,7 @@ limitations under the License. #include "absl/strings/ascii.h" #include "absl/strings/str_cat.h" #include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/platform/errors.h" +#include "tensorflow/core/lib/core/errors.h" namespace tensorflow { namespace text { From 92ac854068e9b83e5ec682cf61943a5fbcbadce7 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sun, 20 Sep 2020 17:49:20 -0700 Subject: [PATCH 077/243] Remove import that is not needed --- tensorflow/core/kernels/session_ops.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/tensorflow/core/kernels/session_ops.cc b/tensorflow/core/kernels/session_ops.cc index e7e73549bc32f3..c2d382b49de531 100644 --- a/tensorflow/core/kernels/session_ops.cc +++ b/tensorflow/core/kernels/session_ops.cc @@ -28,7 +28,6 @@ limitations under the License. #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/map_util.h" -#include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/platform/mutex.h" From 468f38f529957c6dc8550cd8942ff8385321bddf Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sun, 20 Sep 2020 20:16:38 -0700 Subject: [PATCH 078/243] No `disable_tfrt` present on this branch --- tensorflow/python/ops/raw_ops_test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tensorflow/python/ops/raw_ops_test.py b/tensorflow/python/ops/raw_ops_test.py index 0dbd7dcb9169e7..089db57576c00e 100644 --- a/tensorflow/python/ops/raw_ops_test.py +++ b/tensorflow/python/ops/raw_ops_test.py @@ -32,7 +32,6 @@ @test_util.run_all_in_graph_and_eager_modes -@test_util.disable_tfrt class RawOpsTest(test.TestCase, parameterized.TestCase): def testSimple(self): From 9178a24df6bd16a53ca7717465a1d0579f8915d4 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sun, 20 Sep 2020 20:17:26 -0700 Subject: [PATCH 079/243] Add missing import --- tensorflow/python/ops/sparse_ops_test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/python/ops/sparse_ops_test.py b/tensorflow/python/ops/sparse_ops_test.py index 1c34e16898fae9..7a43639c9831fb 100644 --- a/tensorflow/python/ops/sparse_ops_test.py +++ b/tensorflow/python/ops/sparse_ops_test.py @@ -24,6 +24,7 @@ from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util From 6cab6db4bb50130268655dd0149fdf3aab8fcd3f Mon Sep 17 00:00:00 2001 From: TensorFlow Release Automation Date: Mon, 21 Sep 2020 17:16:21 -0700 Subject: [PATCH 080/243] Insert release notes place-fill --- RELEASE.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/RELEASE.md b/RELEASE.md index 65943834236909..ecda274fc93ad4 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,3 +1,7 @@ +# Release 2.1.2 + + + # Release 2.1.1 ## Bug Fixes and Other Changes From 8d60c3f32f37fedf9da89f28c56f8d077eece1c5 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 21 Sep 2020 18:47:06 -0700 Subject: [PATCH 081/243] Update RELEASE.md --- RELEASE.md | 43 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) diff --git a/RELEASE.md b/RELEASE.md index ecda274fc93ad4..aecf5e2a0e4cb7 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,6 +1,47 @@ # Release 2.1.2 - +## Bug Fixes and Other Changes +* Fixes an undefined behavior causing a segfault in `tf.raw_ops.Switch` + ([CVE-2020-15190](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15190)) +* Fixes three vulnerabilities in conversion to DLPack format + ([CVE-2020-15191](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15191), + [CVE-2020-15192](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15192), + [CVE-2020-15193](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15193)) +* Fixes two vulnerabilities in `SparseFillEmptyRowsGrad` + ([CVE-2020-15194](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15194), + [CVE-2020-15195](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15195)) +* Fixes an integer truncation vulnerability in code using the work sharder API + ([CVE-2020-15202](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15202)) +* Fixes a format string vulnerability in `tf.strings.as_string` + ([CVE-2020-15203](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15203)) +* Fixes segfault raised by calling session-only ops in eager mode + ([CVE-2020-15204](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15204)) +* Fixes data leak and potential ASLR violation from `tf.raw_ops.StringNGrams` + ([CVE-2020-15205](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15205)) +* Fixes segfaults caused by incomplete `SavedModel` validation + ([CVE-2020-15206](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15206)) +* Fixes a data corruption due to a bug in negative indexing support in TFLite + ([CVE-2020-15207](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15207)) +* Fixes a data corruption due to dimension mismatch in TFLite + ([CVE-2020-15208](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15208)) +* Fixes several vulnerabilities in TFLite saved model format + ([CVE-2020-15209](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15209), + [CVE-2020-15210](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15210), + [CVE-2020-15211](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15211)) +* Updates `sqlite3` to `3.33.00` to handle + [CVE-2020-9327](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-9327), + [CVE-2020-11655](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-11655), + [CVE-2020-11656](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-11656), + [CVE-2020-13434](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-13434), + [CVE-2020-13435](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-13435), + [CVE-2020-13630](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-13630), + [CVE-2020-13631](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-13631), + [CVE-2020-13871](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-13871), + and + [CVE-2020-15358](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15358). +* Removes `scipy` dependency from `setup.py` since TensorFlow does not need it + to install the pip package +* Switches ROCM builds to use ROCM 3.7 # Release 2.1.1 From d5ba28c046d38dac42f2a6f740436e4bb3565e08 Mon Sep 17 00:00:00 2001 From: TensorFlow Release Automation Date: Mon, 21 Sep 2020 18:51:14 -0700 Subject: [PATCH 082/243] Update version numbers to 2.1.2 --- tensorflow/core/public/version.h | 2 +- tensorflow/tensorflow.bzl | 2 +- tensorflow/tools/pip_package/setup.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tensorflow/core/public/version.h b/tensorflow/core/public/version.h index 5460bceaab1ed0..c54b9b6ac22b4f 100644 --- a/tensorflow/core/public/version.h +++ b/tensorflow/core/public/version.h @@ -22,7 +22,7 @@ limitations under the License. // tensorflow/tools/pip_package/setup.py #define TF_MAJOR_VERSION 2 #define TF_MINOR_VERSION 1 -#define TF_PATCH_VERSION 1 +#define TF_PATCH_VERSION 2 // TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1", // "-beta", "-rc", "-rc.1") diff --git a/tensorflow/tensorflow.bzl b/tensorflow/tensorflow.bzl index de1d35be4f0040..7ba22b6b49c553 100644 --- a/tensorflow/tensorflow.bzl +++ b/tensorflow/tensorflow.bzl @@ -54,7 +54,7 @@ def register_extension_info(**kwargs): # not contain rc or alpha, only numbers. # Also update tensorflow/core/public/version.h # and tensorflow/tools/pip_package/setup.py -VERSION = "2.1.1" +VERSION = "2.1.2" VERSION_MAJOR = VERSION.split(".")[0] def if_v2(a): diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py index e0468fa82ae4bf..fea8b38b129f98 100644 --- a/tensorflow/tools/pip_package/setup.py +++ b/tensorflow/tools/pip_package/setup.py @@ -47,7 +47,7 @@ # result for pip. # Also update tensorflow/tensorflow.bzl and # tensorflow/core/public/version.h -_VERSION = '2.1.1' +_VERSION = '2.1.2' REQUIRED_PACKAGES = [ 'absl-py >= 0.7.0', From c82c51e156334917513143646b84bb3b2156e1be Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Wed, 23 Sep 2020 17:03:42 +0000 Subject: [PATCH 083/243] Updating rocm_configure.bzl to pull in LLVM 12.0 headers (for ROCm 3.9) --- third_party/gpus/rocm_configure.bzl | 1 + 1 file changed, 1 insertion(+) diff --git a/third_party/gpus/rocm_configure.bzl b/third_party/gpus/rocm_configure.bzl index d72b3f1d14e1f0..760f213a0114c9 100644 --- a/third_party/gpus/rocm_configure.bzl +++ b/third_party/gpus/rocm_configure.bzl @@ -204,6 +204,7 @@ def _rocm_include_path(repository_ctx, rocm_config): inc_dirs.append(rocm_config.rocm_toolkit_path + "/llvm/lib/clang/9.0.0/include") inc_dirs.append(rocm_config.rocm_toolkit_path + "/llvm/lib/clang/10.0.0/include") inc_dirs.append(rocm_config.rocm_toolkit_path + "/llvm/lib/clang/11.0.0/include") + inc_dirs.append(rocm_config.rocm_toolkit_path + "/llvm/lib/clang/12.0.0/include") # Add rocrand and hiprand headers inc_dirs.append(rocm_config.rocm_toolkit_path + "/rocrand/include") From df9ab6f4804727f8735ef0de9367247837a2825d Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Mon, 21 Sep 2020 17:03:00 +0000 Subject: [PATCH 084/243] Fix for TF build failure with ROCm 3.9 (error: call to 'min' is ambiguous) When building TF with ROCm 3.9, we are running into the following compile error ``` In file included from tensorflow/core/kernels/reduction_ops_half_mean_sum.cu.cc:20: ./tensorflow/core/kernels/reduction_gpu_kernels.cu.h:430:9: error: call to 'min' is ambiguous min(blockDim.y, num_rows - blockIdx.y * blockDim.y); ^~~ /opt/rocm-3.9.0-3805/llvm/lib/clang/12.0.0/include/__clang_hip_math.h:1183:23: note: candidate function __DEVICE__ inline int min(int __arg1, int __arg2) { ^ /opt/rocm-3.9.0-3805/llvm/lib/clang/12.0.0/include/__clang_hip_math.h:1197:14: note: candidate function inline float min(float __x, float __y) { return fminf(__x, __y); } ^ /opt/rocm-3.9.0-3805/llvm/lib/clang/12.0.0/include/__clang_hip_math.h:1200:15: note: candidate function inline double min(double __x, double __y) { return fmin(__x, __y); } ^ 1 error generated when compiling for gfx803. ``` The build error seems to be because ROCm 3.9 uses llvm header files from `llvm/lib/clang/12.0.0/include` (ROCm 3.8 uses the `11.0.0` version). `12.0.0` has a new `__clang_hip_math.h` file, which is not present in `11.0.0`. This file has the `min` function overloaded for the `float` and `double` types. The first argument in the call to `min` (which leads to the error) is `blockDim.y` which has a `uint` type, and hence the compiler gets confused as to which overloaded type to resole to. Previously (i.e. ROCm 3.8 and before) there was only one option (`int`), with ROCm 3.9 there are three (`int`, `float`, and `double`) and hence the error. The "fix" is to explicitly cast the first argument to `int` to remove the ambiguity (the second argument is already an `int` type). --- tensorflow/core/kernels/reduction_gpu_kernels.cu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/reduction_gpu_kernels.cu.h b/tensorflow/core/kernels/reduction_gpu_kernels.cu.h index 68a31e16286812..23d913038598e3 100644 --- a/tensorflow/core/kernels/reduction_gpu_kernels.cu.h +++ b/tensorflow/core/kernels/reduction_gpu_kernels.cu.h @@ -388,7 +388,7 @@ __global__ __launch_bounds__(1024) void ColumnReduceKernel( // - = // = const int numRowsThisBlock = - min(blockDim.y, num_rows - blockIdx.y * blockDim.y); + min(int(blockDim.y), num_rows - blockIdx.y * blockDim.y); for (int row = 1; row < numRowsThisBlock; ++row) { value_type t = partial_sums[threadIdx.x * (TF_RED_WARPSIZE + 1) + row]; From ab35f2bf7132f9d20a0bea9a5d1849862737d4b4 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 23 Sep 2020 16:30:37 -0700 Subject: [PATCH 085/243] Disable a bunch of tests on Windows GPU --- tensorflow/python/BUILD | 1 + tensorflow/python/kernel_tests/BUILD | 36 +++++++++++++++++---- tensorflow/python/kernel_tests/linalg/BUILD | 2 ++ tensorflow/python/kernel_tests/signal/BUILD | 1 + tensorflow/python/ops/parallel_for/BUILD | 1 + 5 files changed, 35 insertions(+), 6 deletions(-) diff --git a/tensorflow/python/BUILD b/tensorflow/python/BUILD index ad0e41d5584f6a..b105890c29084b 100644 --- a/tensorflow/python/BUILD +++ b/tensorflow/python/BUILD @@ -4460,6 +4460,7 @@ cuda_py_test( ":platform", "//third_party/py/numpy", ], + tags = ["no_windows_gpu"], ) cuda_py_test( diff --git a/tensorflow/python/kernel_tests/BUILD b/tensorflow/python/kernel_tests/BUILD index 3ba9dbf376c356..a77aac8cac1546 100644 --- a/tensorflow/python/kernel_tests/BUILD +++ b/tensorflow/python/kernel_tests/BUILD @@ -232,6 +232,7 @@ cuda_py_test( shard_count = 5, tags = [ "no_rocm", # TODO(rocm): feature not supported on ROCm platform + "no_windows_gpu", "nomsan", # TODO(b/131773093): Re-enable. ], ) @@ -711,7 +712,10 @@ cuda_py_test( "//tensorflow/python:linalg_ops", "//tensorflow/python:math_ops", ], - tags = ["optonly"], + tags = [ + "no_windows_gpu", + "optonly", + ], ) cuda_py_test( @@ -726,6 +730,7 @@ cuda_py_test( "//tensorflow/python:linalg_ops", "//tensorflow/python:math_ops", ], + tags = ["no_windows_gpu"], ) cuda_py_test( @@ -1628,6 +1633,7 @@ cuda_py_test( "//tensorflow/python:math_ops", ], shard_count = 20, + tags = ["no_windows_gpu"], ) cuda_py_test( @@ -2183,6 +2189,7 @@ cuda_py_test( "//tensorflow/python:variables", ], shard_count = 20, + tags = ["no_windows_gpu"], ) cuda_py_test( @@ -3295,6 +3302,7 @@ cuda_py_test( "//tensorflow/python:variables", ], shard_count = 50, + tags = ["no_windows_gpu"], ) cuda_py_test( @@ -3337,6 +3345,7 @@ cuda_py_test( "//tensorflow/python:variables", ], shard_count = 50, + tags = ["no_windows_gpu"], # b/140155706: nans in result xla_enable_strict_auto_jit = False, ) @@ -3807,7 +3816,10 @@ cuda_py_test( "//tensorflow/python:linalg_ops", ], shard_count = 10, - tags = ["no_rocm"], + tags = [ + "no_rocm", + "no_windows_gpu", + ], ) cuda_py_test( @@ -3820,7 +3832,10 @@ cuda_py_test( ], main = "sparse_csr_matrix_ops_test.py", shard_count = 10, - tags = ["no_rocm"], + tags = [ + "no_rocm", + "no_windows_gpu", + ], ) cuda_py_test( @@ -3831,7 +3846,10 @@ cuda_py_test( "//tensorflow/python/ops/linalg/sparse", ], main = "csr_sparse_matrix_test.py", - tags = ["no_rocm"], + tags = [ + "no_rocm", + "no_windows_gpu", + ], ) cuda_py_test( @@ -3855,7 +3873,10 @@ cuda_py_test( ], main = "sparse_csr_matrix_dense_mat_mul_grad_test.py", shard_count = 50, - tags = ["no_rocm"], + tags = [ + "no_rocm", + "no_windows_gpu", + ], ) cuda_py_test( @@ -3867,5 +3888,8 @@ cuda_py_test( ], main = "sparse_csr_matrix_sparse_mat_mul_grad_test.py", shard_count = 50, - tags = ["no_rocm"], + tags = [ + "no_rocm", + "no_windows_gpu", + ], ) diff --git a/tensorflow/python/kernel_tests/linalg/BUILD b/tensorflow/python/kernel_tests/linalg/BUILD index b428356cc24095..4c7f619a2ed20a 100644 --- a/tensorflow/python/kernel_tests/linalg/BUILD +++ b/tensorflow/python/kernel_tests/linalg/BUILD @@ -57,6 +57,7 @@ cuda_py_test( shard_count = 5, tags = [ "noasan", # times out, b/63678675 + "no_windows_gpu", "optonly", # times out ], ) @@ -140,6 +141,7 @@ cuda_py_test( shard_count = 10, tags = [ "no_rocm", # calls BLAS ops for complex types + "no_windows_gpu", "noasan", # times out, b/63678675 "optonly", # times out, b/79171797 ], diff --git a/tensorflow/python/kernel_tests/signal/BUILD b/tensorflow/python/kernel_tests/signal/BUILD index 7836d4778cdf86..9096d933d42cc6 100644 --- a/tensorflow/python/kernel_tests/signal/BUILD +++ b/tensorflow/python/kernel_tests/signal/BUILD @@ -48,6 +48,7 @@ cuda_py_tests( shard_count = 8, tags = [ "no_rocm", + "no_windows_gpu", "optonly", ], ) diff --git a/tensorflow/python/ops/parallel_for/BUILD b/tensorflow/python/ops/parallel_for/BUILD index 50c31f5c728d51..be05700d0d26fd 100644 --- a/tensorflow/python/ops/parallel_for/BUILD +++ b/tensorflow/python/ops/parallel_for/BUILD @@ -172,6 +172,7 @@ cuda_py_test( shard_count = 5, tags = [ "no_rocm", + "no_windows_gpu", "optonly", # Too slow in non-opt mode ], ) From 5dbd0a2046df47c6f8a28891284c1ee3e0ed0d9c Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Sat, 26 Sep 2020 00:21:19 +0000 Subject: [PATCH 086/243] Adding "-fcuda-flush-denormals-to-zero" as a default hipcc option Prior to ROCm 3.8, hipcc (hipclang) flushed denormal values to zero by default. Starting with ROCm 3.8 that is no longer true, denormal values are kept as is. TF expects denormals to be flushed to zero. This is enforced on the CUDA side by explicitly passing the "-fcuda-flush-denormals-to-zero" (see tensorflow.bzl). This commit does the same for the ROCm side. Also removing the no_rocm tag from the corresponding unit test - //tensorflow/python/kernel_tests:denormal_test_gpu --- .../gpus/crosstool/clang/bin/crosstool_wrapper_driver_rocm.tpl | 1 + 1 file changed, 1 insertion(+) diff --git a/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_rocm.tpl b/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_rocm.tpl index 89275128a9c6b0..df473138244291 100755 --- a/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_rocm.tpl +++ b/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_rocm.tpl @@ -186,6 +186,7 @@ def InvokeHipcc(argv, log=False): # of link time. This allows the default host compiler (gcc) be used as the # linker for TensorFlow on ROCm platform. hipccopts += ' -fno-gpu-rdc ' + hipccopts += ' -fcuda-flush-denormals-to-zero ' hipccopts += undefines hipccopts += defines hipccopts += std_options From 472207d95c85ba548f663a1cdd39c41839e9aedc Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Thu, 17 Sep 2020 00:30:15 +0000 Subject: [PATCH 087/243] Updating XLA code to account for the device lib files location change in ROCm 3.9 The location of the ROCm device lib files is changing in ROCm 3.9 Current (ROCm 3.8 and before) location is $ROCM_PATH/lib ``` root@ixt-rack-04:/opt/rocm-3.8.0# find . -name *.bc ./lib/oclc_isa_version_701.amdgcn.bc ./lib/ocml.amdgcn.bc ./lib/oclc_daz_opt_on.amdgcn.bc ./lib/oclc_isa_version_700.amdgcn.bc ./lib/oclc_isa_version_810.amdgcn.bc ./lib/oclc_unsafe_math_off.amdgcn.bc ./lib/oclc_wavefrontsize64_off.amdgcn.bc ./lib/oclc_isa_version_803.amdgcn.bc ./lib/oclc_isa_version_1011.amdgcn.bc ./lib/oclc_isa_version_1012.amdgcn.bc ./lib/opencl.amdgcn.bc ./lib/oclc_unsafe_math_on.amdgcn.bc ./lib/oclc_isa_version_1010.amdgcn.bc ./lib/oclc_finite_only_off.amdgcn.bc ./lib/oclc_correctly_rounded_sqrt_on.amdgcn.bc ./lib/oclc_daz_opt_off.amdgcn.bc ./lib/oclc_isa_version_802.amdgcn.bc ./lib/ockl.amdgcn.bc ./lib/oclc_isa_version_906.amdgcn.bc ./lib/oclc_isa_version_1030.amdgcn.bc ./lib/oclc_correctly_rounded_sqrt_off.amdgcn.bc ./lib/hip.amdgcn.bc ./lib/oclc_isa_version_908.amdgcn.bc ./lib/oclc_isa_version_900.amdgcn.bc ./lib/oclc_isa_version_702.amdgcn.bc ./lib/oclc_wavefrontsize64_on.amdgcn.bc ./lib/hc.amdgcn.bc ./lib/oclc_isa_version_902.amdgcn.bc ./lib/oclc_isa_version_801.amdgcn.bc ./lib/oclc_finite_only_on.amdgcn.bc ./lib/oclc_isa_version_904.amdgcn.bc ``` New (ROCm 3.9 and above) location is $ROCM_PATH/amdgcn/bitcode ``` root@ixt-hq-99:/opt/rocm-3.9.0-3703# find -name *.bc ./amdgcn/bitcode/oclc_isa_version_700.bc ./amdgcn/bitcode/ocml.bc ./amdgcn/bitcode/oclc_isa_version_1030.bc ./amdgcn/bitcode/oclc_isa_version_1010.bc ./amdgcn/bitcode/oclc_isa_version_904.bc ./amdgcn/bitcode/hip.bc ./amdgcn/bitcode/hc.bc ./amdgcn/bitcode/oclc_daz_opt_off.bc ./amdgcn/bitcode/oclc_wavefrontsize64_off.bc ./amdgcn/bitcode/oclc_wavefrontsize64_on.bc ./amdgcn/bitcode/oclc_isa_version_900.bc ./amdgcn/bitcode/oclc_isa_version_1012.bc ./amdgcn/bitcode/oclc_isa_version_702.bc ./amdgcn/bitcode/oclc_daz_opt_on.bc ./amdgcn/bitcode/oclc_unsafe_math_off.bc ./amdgcn/bitcode/ockl.bc ./amdgcn/bitcode/oclc_isa_version_803.bc ./amdgcn/bitcode/oclc_isa_version_908.bc ./amdgcn/bitcode/oclc_isa_version_802.bc ./amdgcn/bitcode/oclc_correctly_rounded_sqrt_off.bc ./amdgcn/bitcode/oclc_finite_only_on.bc ./amdgcn/bitcode/oclc_isa_version_701.bc ./amdgcn/bitcode/oclc_unsafe_math_on.bc ./amdgcn/bitcode/oclc_isa_version_902.bc ./amdgcn/bitcode/oclc_finite_only_off.bc ./amdgcn/bitcode/opencl.bc ./amdgcn/bitcode/oclc_isa_version_906.bc ./amdgcn/bitcode/oclc_isa_version_810.bc ./amdgcn/bitcode/oclc_isa_version_801.bc ./amdgcn/bitcode/oclc_correctly_rounded_sqrt_on.bc ./amdgcn/bitcode/oclc_isa_version_1011.bc ``` Also not the change in the filename(s) This commit updates the XLA code, that has the device lib path + filename(s) hardcoded, to account for the change in location / filename --- .../xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc | 9 ++++----- tensorflow/core/platform/default/rocm_rocdl_path.cc | 2 +- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc index 6969448b03008e..5f617babcb777f 100644 --- a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc +++ b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc @@ -542,10 +542,9 @@ static std::vector GetROCDLPaths(int amdgpu_version, const string& rocdl_dir_path) { // AMDGPU version-neutral bitcodes. static std::vector* rocdl_filenames = new std::vector( - {"hc.amdgcn.bc", "opencl.amdgcn.bc", "ocml.amdgcn.bc", "ockl.amdgcn.bc", - "oclc_finite_only_off.amdgcn.bc", "oclc_daz_opt_off.amdgcn.bc", - "oclc_correctly_rounded_sqrt_on.amdgcn.bc", - "oclc_unsafe_math_off.amdgcn.bc"}); + {"hc.bc", "opencl.bc", "ocml.bc", "ockl.bc", "oclc_finite_only_off.bc", + "oclc_daz_opt_off.bc", "oclc_correctly_rounded_sqrt_on.bc", + "oclc_unsafe_math_off.bc"}); // Construct full path to ROCDL bitcode libraries. std::vector result; @@ -556,7 +555,7 @@ static std::vector GetROCDLPaths(int amdgpu_version, // Add AMDGPU version-specific bitcodes. result.push_back(tensorflow::io::JoinPath( rocdl_dir_path, - absl::StrCat("oclc_isa_version_", amdgpu_version, ".amdgcn.bc"))); + absl::StrCat("oclc_isa_version_", amdgpu_version, ".bc"))); return result; } diff --git a/tensorflow/core/platform/default/rocm_rocdl_path.cc b/tensorflow/core/platform/default/rocm_rocdl_path.cc index 7331c6625a1c6a..001a87ab83de57 100644 --- a/tensorflow/core/platform/default/rocm_rocdl_path.cc +++ b/tensorflow/core/platform/default/rocm_rocdl_path.cc @@ -37,7 +37,7 @@ string RocmRoot() { string RocdlRoot() { #if TENSORFLOW_COMPILER_IS_HIP_CLANG - return tensorflow::io::JoinPath(tensorflow::RocmRoot(), "lib"); + return tensorflow::io::JoinPath(tensorflow::RocmRoot(), "amdgcn/bitcode"); #else return tensorflow::io::JoinPath(tensorflow::RocmRoot(), "hcc/lib"); #endif From c43ac22a0c6b1153a76549b40ccbbeab77aaacc9 Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Wed, 30 Sep 2020 14:31:39 +0000 Subject: [PATCH 088/243] Updating the unit-test //tensorflow/core/platform:rocm_rocdl_path_test to account device lib files name change in ROCm 3.9 --- tensorflow/core/platform/rocm_rocdl_path_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/core/platform/rocm_rocdl_path_test.cc b/tensorflow/core/platform/rocm_rocdl_path_test.cc index 4a4d9b89c59882..4f20749fb49e9e 100644 --- a/tensorflow/core/platform/rocm_rocdl_path_test.cc +++ b/tensorflow/core/platform/rocm_rocdl_path_test.cc @@ -27,7 +27,7 @@ TEST(RocmRocdlPathTest, ROCDLPath) { VLOG(2) << "ROCm-Deivce-Libs root = " << RocdlRoot(); std::vector rocdl_files; TF_EXPECT_OK(Env::Default()->GetMatchingPaths( - io::JoinPath(RocdlRoot(), "*.amdgcn.bc"), &rocdl_files)); + io::JoinPath(RocdlRoot(), "*.bc"), &rocdl_files)); EXPECT_LT(0, rocdl_files.size()); } #endif From aac4e83cc185a844e0f360c48ac89c5fb414ef7a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Oct 2020 07:07:12 +0000 Subject: [PATCH 089/243] Bump junit in /tensorflow/java/maven/tensorflow-hadoop Bumps [junit](https://github.com/junit-team/junit4) from 4.11 to 4.13.1. - [Release notes](https://github.com/junit-team/junit4/releases) - [Changelog](https://github.com/junit-team/junit4/blob/main/doc/ReleaseNotes4.11.md) - [Commits](https://github.com/junit-team/junit4/compare/r4.11...r4.13.1) Signed-off-by: dependabot[bot] --- tensorflow/java/maven/tensorflow-hadoop/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/java/maven/tensorflow-hadoop/pom.xml b/tensorflow/java/maven/tensorflow-hadoop/pom.xml index e900d81e5dab50..675a3369cf1ff3 100644 --- a/tensorflow/java/maven/tensorflow-hadoop/pom.xml +++ b/tensorflow/java/maven/tensorflow-hadoop/pom.xml @@ -16,7 +16,7 @@ 1.6 2.6.0 3.5.1 - 4.11 + 4.13.1 From 3ff563bf469a6b80eff0189ad40845250a931215 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Oct 2020 06:46:11 +0000 Subject: [PATCH 090/243] Bump junit in /tensorflow/java/maven/spark-tensorflow-connector Bumps [junit](https://github.com/junit-team/junit4) from 4.11 to 4.13.1. - [Release notes](https://github.com/junit-team/junit4/releases) - [Changelog](https://github.com/junit-team/junit4/blob/main/doc/ReleaseNotes4.11.md) - [Commits](https://github.com/junit-team/junit4/compare/r4.11...r4.13.1) Signed-off-by: dependabot[bot] --- tensorflow/java/maven/spark-tensorflow-connector/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/java/maven/spark-tensorflow-connector/pom.xml b/tensorflow/java/maven/spark-tensorflow-connector/pom.xml index f40090ac45d6d9..19f5e29da2bf38 100644 --- a/tensorflow/java/maven/spark-tensorflow-connector/pom.xml +++ b/tensorflow/java/maven/spark-tensorflow-connector/pom.xml @@ -35,7 +35,7 @@ 1.8 2.4.5 2.7.3 - 4.11 + 4.13.1 From 34ec724850f3bb1c566bfc0c35ee8f96ce01773f Mon Sep 17 00:00:00 2001 From: Deven Desai Date: Thu, 29 Oct 2020 02:14:04 +0000 Subject: [PATCH 091/243] Updating Dockerfile.rocm and ROCm CI scripts to use ROCm 3.9 --- tensorflow/tools/ci_build/Dockerfile.rocm | 4 ++-- tensorflow/tools/ci_build/linux/rocm/run_cc_core.sh | 2 +- tensorflow/tools/ci_build/linux/rocm/run_csb_tests.sh | 2 +- tensorflow/tools/ci_build/linux/rocm/run_py3_core.sh | 2 +- tensorflow/tools/ci_build/xla/linux/rocm/run_py3.sh | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tensorflow/tools/ci_build/Dockerfile.rocm b/tensorflow/tools/ci_build/Dockerfile.rocm index d209173258ada0..a0f78eefe1dc44 100644 --- a/tensorflow/tools/ci_build/Dockerfile.rocm +++ b/tensorflow/tools/ci_build/Dockerfile.rocm @@ -3,10 +3,10 @@ FROM ubuntu:bionic MAINTAINER Jeff Poznanovic -ARG ROCM_DEB_REPO=http://repo.radeon.com/rocm/apt/3.7/ +ARG ROCM_DEB_REPO=http://repo.radeon.com/rocm/apt/3.9/ ARG ROCM_BUILD_NAME=xenial ARG ROCM_BUILD_NUM=main -ARG ROCM_PATH=/opt/rocm-3.7.0 +ARG ROCM_PATH=/opt/rocm-3.9.0 ENV DEBIAN_FRONTEND noninteractive ENV TF_NEED_ROCM 1 diff --git a/tensorflow/tools/ci_build/linux/rocm/run_cc_core.sh b/tensorflow/tools/ci_build/linux/rocm/run_cc_core.sh index bd879e5ba109a9..53fef4c3c2a448 100755 --- a/tensorflow/tools/ci_build/linux/rocm/run_cc_core.sh +++ b/tensorflow/tools/ci_build/linux/rocm/run_cc_core.sh @@ -28,7 +28,7 @@ echo "Bazel will use ${N_BUILD_JOBS} concurrent build job(s) and ${N_TEST_JOBS} echo "" # First positional argument (if any) specifies the ROCM_INSTALL_DIR -ROCM_INSTALL_DIR=/opt/rocm-3.7.0 +ROCM_INSTALL_DIR=/opt/rocm-3.9.0 if [[ -n $1 ]]; then ROCM_INSTALL_DIR=$1 fi diff --git a/tensorflow/tools/ci_build/linux/rocm/run_csb_tests.sh b/tensorflow/tools/ci_build/linux/rocm/run_csb_tests.sh index 31d10da0596b0b..301382a5364c67 100755 --- a/tensorflow/tools/ci_build/linux/rocm/run_csb_tests.sh +++ b/tensorflow/tools/ci_build/linux/rocm/run_csb_tests.sh @@ -28,7 +28,7 @@ echo "Bazel will use ${N_BUILD_JOBS} concurrent build job(s) and ${N_TEST_JOBS} echo "" # First positional argument (if any) specifies the ROCM_INSTALL_DIR -ROCM_INSTALL_DIR=/opt/rocm-3.7.0 +ROCM_INSTALL_DIR=/opt/rocm-3.9.0 if [[ -n $1 ]]; then ROCM_INSTALL_DIR=$1 fi diff --git a/tensorflow/tools/ci_build/linux/rocm/run_py3_core.sh b/tensorflow/tools/ci_build/linux/rocm/run_py3_core.sh index 4ad67546dc16a2..385d45de448393 100755 --- a/tensorflow/tools/ci_build/linux/rocm/run_py3_core.sh +++ b/tensorflow/tools/ci_build/linux/rocm/run_py3_core.sh @@ -28,7 +28,7 @@ echo "Bazel will use ${N_BUILD_JOBS} concurrent build job(s) and ${N_TEST_JOBS} echo "" # First positional argument (if any) specifies the ROCM_INSTALL_DIR -ROCM_INSTALL_DIR=/opt/rocm-3.7.0 +ROCM_INSTALL_DIR=/opt/rocm-3.9.0 if [[ -n $1 ]]; then ROCM_INSTALL_DIR=$1 fi diff --git a/tensorflow/tools/ci_build/xla/linux/rocm/run_py3.sh b/tensorflow/tools/ci_build/xla/linux/rocm/run_py3.sh index 6ce1fad9cc7542..143221ef4733de 100755 --- a/tensorflow/tools/ci_build/xla/linux/rocm/run_py3.sh +++ b/tensorflow/tools/ci_build/xla/linux/rocm/run_py3.sh @@ -30,7 +30,7 @@ export PYTHON_BIN_PATH=`which python3` export CC_OPT_FLAGS='-mavx' export TF_NEED_ROCM=1 -export ROCM_PATH=/opt/rocm-3.3.0 +export ROCM_PATH=/opt/rocm-3.9.0 export TF_GPU_COUNT=${N_GPUS} yes "" | $PYTHON_BIN_PATH configure.py From 7cfd8d66dfa0bff1557c26ed6c21517c1bb9850a Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Wed, 11 Nov 2020 21:35:49 +0000 Subject: [PATCH 092/243] Bump libjpeg-turbo from 2.0.4 to 2.0.5 It looks like the latest libjpeg-turbo is 2.0.5 so this PR bumps the version (currently on 2.0.4). Signed-off-by: Yong Tang --- third_party/jpeg/workspace.bzl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/third_party/jpeg/workspace.bzl b/third_party/jpeg/workspace.bzl index c458ff12ba8248..60f989df722152 100644 --- a/third_party/jpeg/workspace.bzl +++ b/third_party/jpeg/workspace.bzl @@ -6,11 +6,11 @@ def repo(): third_party_http_archive( name = "libjpeg_turbo", urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/github.com/libjpeg-turbo/libjpeg-turbo/archive/2.0.4.tar.gz", - "https://github.com/libjpeg-turbo/libjpeg-turbo/archive/2.0.4.tar.gz", + "https://storage.googleapis.com/mirror.tensorflow.org/github.com/libjpeg-turbo/libjpeg-turbo/archive/2.0.5.tar.gz", + "https://github.com/libjpeg-turbo/libjpeg-turbo/archive/2.0.5.tar.gz", ], - sha256 = "7777c3c19762940cff42b3ba4d7cd5c52d1671b39a79532050c85efb99079064", - strip_prefix = "libjpeg-turbo-2.0.4", + sha256 = "b3090cd37b5a8b3e4dbd30a1311b3989a894e5d3c668f14cbc6739d77c9402b7", + strip_prefix = "libjpeg-turbo-2.0.5", build_file = "//third_party/jpeg:BUILD.bazel", system_build_file = "//third_party/jpeg:BUILD.system", ) From 5c7130ab1c33564cd7e570e62459978cdd4acde8 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 24 Nov 2020 11:40:42 -0800 Subject: [PATCH 093/243] Default initialize fixed point Eigen types. In certain cases, tensors are filled with default values of the type. But, for these fixed point types, these values were uninitialized. Thus, we would have uninitialized memory access bugs, some of which were caught by MSAN. PiperOrigin-RevId: 344101137 Change-Id: I14555fda74dca3b5f1582da9008901937e3f14e2 --- .../Eigen/CXX11/src/FixedPoint/FixedPointTypes.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/third_party/eigen3/unsupported/Eigen/CXX11/src/FixedPoint/FixedPointTypes.h b/third_party/eigen3/unsupported/Eigen/CXX11/src/FixedPoint/FixedPointTypes.h index ff359cedced961..fd35360da28208 100644 --- a/third_party/eigen3/unsupported/Eigen/CXX11/src/FixedPoint/FixedPointTypes.h +++ b/third_party/eigen3/unsupported/Eigen/CXX11/src/FixedPoint/FixedPointTypes.h @@ -49,7 +49,7 @@ struct scalar_product_traits { // the compiler from silently type cast the mantissa into a bigger or a smaller // representation. struct QInt8 { - QInt8() {} + QInt8() : value(0) {} QInt8(const int8_t v) : value(v) {} QInt8(const QInt32 v); @@ -59,7 +59,7 @@ struct QInt8 { }; struct QUInt8 { - QUInt8() {} + QUInt8() : value(0) {} QUInt8(const uint8_t v) : value(v) {} QUInt8(const QInt32 v); @@ -69,7 +69,7 @@ struct QUInt8 { }; struct QInt16 { - QInt16() {} + QInt16() : value(0) {} QInt16(const int16_t v) : value(v) {} QInt16(const QInt32 v); operator int() const { return static_cast(value); } @@ -78,7 +78,7 @@ struct QInt16 { }; struct QUInt16 { - QUInt16() {} + QUInt16() : value(0) {} QUInt16(const uint16_t v) : value(v) {} QUInt16(const QInt32 v); operator int() const { return static_cast(value); } @@ -87,7 +87,7 @@ struct QUInt16 { }; struct QInt32 { - QInt32() {} + QInt32() : value(0) {} QInt32(const int8_t v) : value(v) {} QInt32(const int32_t v) : value(v) {} QInt32(const uint32_t v) : value(static_cast(v)) {} From 6f788c5d3ee689460528796ec02e08c8538bcddf Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 7 Dec 2020 11:15:21 -0800 Subject: [PATCH 094/243] Validate that `DataFormat*` attributes form a permutation. The `src_format` and `dst_format` attributes for the `DataFormatDimMap` and `DataFormatVecPermute` raw ops are supposed to determine a permutation. However, this was not validated and could result in unitialized memory accesses as well as writes outside of bounds and potential crashes. While here, we also test that the format attributes have the needed length, add tests for all validation failure cases, remove unnecessary calls to `strings::StrCat`, and fix a few grammar errors. This will be cherry-picked on the supported release branches. PiperOrigin-RevId: 346135579 Change-Id: I1c76392382c89ad8f072d5bc93d70669851eb404 --- tensorflow/core/kernels/data_format_ops.cc | 91 +++++++++++++++++++-- tensorflow/python/ops/nn_test.py | 95 ++++++++++++++++++++++ 2 files changed, 177 insertions(+), 9 deletions(-) diff --git a/tensorflow/core/kernels/data_format_ops.cc b/tensorflow/core/kernels/data_format_ops.cc index 0b4241dbb9312c..7f1be77d8dbfb2 100644 --- a/tensorflow/core/kernels/data_format_ops.cc +++ b/tensorflow/core/kernels/data_format_ops.cc @@ -18,16 +18,52 @@ limitations under the License. #define EIGEN_USE_THREADS #include "tensorflow/core/kernels/data_format_ops.h" + +#include + #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" +#include "tensorflow/core/platform/errors.h" namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; +// Ensure that `src` and `dst` define a valid permutation. +// Ops defined in this file assume that user specifies a permutation via two +// string attributes. This check validates that these attributes properly define +// it to prevent security vulnerabilities. +static bool IsValidPermutation(const std::string& src, const std::string& dst) { + if (src.size() != dst.size()) { + return false; + } + + std::map characters; + + // Every character in `src` must be present only once + for (const auto c : src) { + if (characters[c]) { + return false; + } + characters[c] = true; + } + + // Every character in `dst` must show up in `src` exactly once + for (const auto c : dst) { + if (!characters[c]) { + return false; + } + characters[c] = false; + } + + // At this point, characters[] has been switched to true and false exactly + // once for all character in `src` (and `dst`) so we have a valid permutation + return true; +} + template class DataFormatDimMapOp : public OpKernel { public: @@ -37,15 +73,20 @@ class DataFormatDimMapOp : public OpKernel { OP_REQUIRES_OK(context, context->GetAttr("src_format", &src_format)); string dst_format; OP_REQUIRES_OK(context, context->GetAttr("dst_format", &dst_format)); - OP_REQUIRES(context, src_format.size() == 4, - errors::InvalidArgument(strings::StrCat( - "Source format must of length 4, received src_format = ", - src_format))); + OP_REQUIRES(context, src_format.size() == 4 || src_format.size() == 5, + errors::InvalidArgument( + "Source format must be of length 4 or 5, received " + "src_format = ", + src_format)); + OP_REQUIRES(context, dst_format.size() == 4 || dst_format.size() == 5, + errors::InvalidArgument("Destination format must be of length " + "4 or 5, received dst_format = ", + dst_format)); OP_REQUIRES( - context, dst_format.size() == 4, - errors::InvalidArgument(strings::StrCat( - "Destination format must of length 4, received dst_format = ", - dst_format))); + context, IsValidPermutation(src_format, dst_format), + errors::InvalidArgument( + "Destination and source format must determine a permutation, got ", + src_format, " and ", dst_format)); dst_idx_ = Tensor(DT_INT32, {static_cast(src_format.size())}); for (int i = 0; i < src_format.size(); ++i) { for (int j = 0; j < dst_format.size(); ++j) { @@ -77,8 +118,22 @@ class DataFormatVecPermuteOp : public OpKernel { : OpKernel(context) { string src_format; OP_REQUIRES_OK(context, context->GetAttr("src_format", &src_format)); + OP_REQUIRES(context, src_format.size() == 4 || src_format.size() == 5, + errors::InvalidArgument( + "Source format must be of length 4 or 5, received " + "src_format = ", + src_format)); string dst_format; OP_REQUIRES_OK(context, context->GetAttr("dst_format", &dst_format)); + OP_REQUIRES(context, dst_format.size() == 4 || dst_format.size() == 5, + errors::InvalidArgument("Destination format must be of length " + "4 or 5, received dst_format = ", + dst_format)); + OP_REQUIRES( + context, IsValidPermutation(src_format, dst_format), + errors::InvalidArgument( + "Destination and source format must determine a permutation, got ", + src_format, " and ", dst_format)); src_format_ = src_format; dst_format_ = dst_format; } @@ -112,7 +167,25 @@ class DataFormatVecPermuteOp : public OpKernel { context->allocate_output(0, input.shape(), &output)); // Support 1D and 2D cases. Eigen::DSizes dst_idx; - ComputeDstIndex(input.dims(), &dst_idx); + string src_format_str = src_format_; + string dst_format_str = dst_format_; + if (input.dim_size(0) == 2) { + // If the input is a vector of size 2, treat the two elements as spatial + // dimensions. + auto keep_only_spatial_dimensions = [](string* format_str) -> void { + auto new_end = std::remove_if( + format_str->begin(), format_str->end(), + [](const char dim) { return dim != 'H' && dim != 'W'; }); + format_str->erase(new_end, format_str->end()); + }; + keep_only_spatial_dimensions(&src_format_str); + keep_only_spatial_dimensions(&dst_format_str); + OP_REQUIRES(context, + src_format_str.size() == 2 && dst_format_str.size() == 2, + errors::InvalidArgument( + "Format specifier must contain H and W for 2D case")); + } + ComputeDstIndex(src_format_str, dst_format_str, input.dims(), &dst_idx); functor::DataFormatVecPermute()(context->eigen_device(), input.flat(), diff --git a/tensorflow/python/ops/nn_test.py b/tensorflow/python/ops/nn_test.py index e2389e70f80240..f102ad9d479bc1 100644 --- a/tensorflow/python/ops/nn_test.py +++ b/tensorflow/python/ops/nn_test.py @@ -27,6 +27,7 @@ from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import test_util @@ -1184,6 +1185,46 @@ def testArbitraryASCII(self): y_val = self.evaluate(y) self.assertAllEqual(y_val, y_val_expected) + @test_util.disable_xla("XLA catches the error and rethrows as different one") + def testInvalidLength(self): + x = [-4, -3, -2, -1, 0, 1, 2, 3] + with self.assertRaisesRegex(errors.InvalidArgumentError, + "Source format must be of length 4 or 5"): + op = nn_ops.data_format_dim_map( + x, src_format="12345678", dst_format="87654321") + with test_util.use_gpu(): + self.evaluate(op) + + @test_util.disable_xla("XLA catches the error and rethrows as different one") + def testDuplicateSrc(self): + x = [-4, -3, -2, -1, 0, 1, 2, 3] + with self.assertRaisesRegex( + errors.InvalidArgumentError, + "Destination and source format must determine a permutation"): + op = nn_ops.data_format_dim_map(x, src_format="1233", dst_format="4321") + with test_util.use_gpu(): + self.evaluate(op) + + @test_util.disable_xla("XLA catches the error and rethrows as different one") + def testDuplicateDst(self): + x = [-4, -3, -2, -1, 0, 1, 2, 3] + with self.assertRaisesRegex( + errors.InvalidArgumentError, + "Destination and source format must determine a permutation"): + op = nn_ops.data_format_dim_map(x, src_format="1234", dst_format="3321") + with test_util.use_gpu(): + self.evaluate(op) + + @test_util.disable_xla("XLA catches the error and rethrows as different one") + def testExtraSpecifiers(self): + x = [-4, -3, -2, -1, 0, 1, 2, 3] + with self.assertRaisesRegex( + errors.InvalidArgumentError, + "Destination and source format must determine a permutation"): + op = nn_ops.data_format_dim_map(x, src_format="1234", dst_format="5321") + with test_util.use_gpu(): + self.evaluate(op) + class DataFormatVectorPermuteTest(test_lib.TestCase): @@ -1251,6 +1292,60 @@ def testNCHWToNHWC2D(self): y_val = self.evaluate(y) self.assertAllEqual(y_val, [[7, 4], [4, 5], [5, 1], [9, 3]]) + @test_util.disable_xla("XLA catches the error and rethrows as different one") + def testInvalidLength(self): + x = [0, 1, 2, 3] + with self.assertRaisesRegex(errors.InvalidArgumentError, + "Source format must be of length 4 or 5"): + op = nn_ops.data_format_vec_permute( + x, src_format="12345678", dst_format="87654321") + with test_util.use_gpu(): + self.evaluate(op) + + @test_util.disable_xla("XLA catches the error and rethrows as different one") + def testDuplicateSrc(self): + x = [0, 1, 2, 3] + with self.assertRaisesRegex( + errors.InvalidArgumentError, + "Destination and source format must determine a permutation"): + op = nn_ops.data_format_vec_permute( + x, src_format="1233", dst_format="4321") + with test_util.use_gpu(): + self.evaluate(op) + + @test_util.disable_xla("XLA catches the error and rethrows as different one") + def testDuplicateDst(self): + x = [0, 1, 2, 3] + with self.assertRaisesRegex( + errors.InvalidArgumentError, + "Destination and source format must determine a permutation"): + op = nn_ops.data_format_vec_permute( + x, src_format="1234", dst_format="3321") + with test_util.use_gpu(): + self.evaluate(op) + + @test_util.disable_xla("XLA catches the error and rethrows as different one") + def testExtraSpecifiers(self): + x = [0, 1, 2, 3] + with self.assertRaisesRegex( + errors.InvalidArgumentError, + "Destination and source format must determine a permutation"): + op = nn_ops.data_format_vec_permute( + x, src_format="1234", dst_format="5321") + with test_util.use_gpu(): + self.evaluate(op) + + @test_util.disable_xla("XLA catches the error and rethrows as different one") + def test2DNoWH(self): + x = [[0, 1], [2, 3]] + with self.assertRaisesRegex( + errors.InvalidArgumentError, + "Format specifier must contain H and W for 2D case"): + op = nn_ops.data_format_vec_permute( + x, src_format="1234", dst_format="4321") + with test_util.use_gpu(): + self.evaluate(op) + @test_util.run_all_in_graph_and_eager_modes class AvgPoolTest(test_lib.TestCase): From 35fbc5ce82b56553ec7fda36456ae4414346ed96 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 4 Dec 2020 17:06:23 -0800 Subject: [PATCH 095/243] Mark `MemmappedTensorAllocator` as returning opaque handle. This allocator is used for `ImmutableConstantOp` and it returns a handle to the contents of a memory mapped file which is supposed to represent a tensor. For tensors of complex types (resources, variables and strings), allocators which are not marked as returning opaque handles will call placement new to initialize each element. This means writing to the buffer. However, in our case, the buffer is immutable and already contains the tensor data. Hence, writing to it is both destructive and causes a crash. PiperOrigin-RevId: 345786451 Change-Id: I46369c50fa60b3431709ffe068a728d3061f49c4 --- tensorflow/core/kernels/immutable_constant_op.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tensorflow/core/kernels/immutable_constant_op.cc b/tensorflow/core/kernels/immutable_constant_op.cc index 0dd08c694eb6c5..1cfbdb82778913 100644 --- a/tensorflow/core/kernels/immutable_constant_op.cc +++ b/tensorflow/core/kernels/immutable_constant_op.cc @@ -62,6 +62,12 @@ class MemmappedTensorAllocator : public Allocator { void set_delete_on_deallocate() { delete_on_deallocate_ = true; } + // Make sure tensors or complex types (strings, variants, resources) don't get + // their constructor called via a placement new since that would require + // writing to immutable data. + // See also: tensorflow/core/framework/typed_allocator.h + bool AllocatesOpaqueHandle() const override { return true; } + private: std::unique_ptr memory_region_; // If there is an error during allocation we keep it in this status. From ec544f8099981be897463a6b39b8a7a1d6f0f62d Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 7 Dec 2020 20:31:31 -0800 Subject: [PATCH 096/243] Prevent CHECK-fail in LSTM/GRU with zero-length input. PiperOrigin-RevId: 346239181 Change-Id: I5f233dbc076aab7bb4e31ba24f5abd4eaf99ea4f --- tensorflow/stream_executor/cuda/cuda_dnn.cc | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tensorflow/stream_executor/cuda/cuda_dnn.cc b/tensorflow/stream_executor/cuda/cuda_dnn.cc index 70cc11a3e03148..b53ad905991367 100755 --- a/tensorflow/stream_executor/cuda/cuda_dnn.cc +++ b/tensorflow/stream_executor/cuda/cuda_dnn.cc @@ -1383,7 +1383,9 @@ class CudnnRnnSequenceTensorDescriptor static port::StatusOr Create( GpuExecutor* parent, int max_seq_length, int batch_size, int data_size, cudnnDataType_t data_type) { - CHECK_GT(max_seq_length, 0); + if (max_seq_length <= 0) { + return port::Status(port::error::INVALID_ARGUMENT, "max_seq_length <= 0"); + } int dims[] = {batch_size, data_size, 1}; int strides[] = {dims[1] * dims[2], dims[2], 1}; TensorDescriptor tensor_desc = CreateTensorDescriptor(); @@ -1404,7 +1406,9 @@ class CudnnRnnSequenceTensorDescriptor const absl::Span& seq_lengths, bool time_major, cudnnDataType_t data_type) { #if CUDNN_VERSION >= 7201 - CHECK_GT(max_seq_length, 0); + if (max_seq_length <= 0) { + return port::Status(port::error::INVALID_ARGUMENT, "max_seq_length <= 0"); + } int dims[] = {batch_size, data_size, 1}; int strides[] = {dims[1] * dims[2], dims[2], 1}; TensorDescriptor tensor_desc = CreateTensorDescriptor(); From b4b6bf1d35f48320b67939473ccdbff005ce41e8 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 8 Dec 2020 09:31:57 -0800 Subject: [PATCH 097/243] Prevent unitialized memory access in `GraphConstructor::MakeEdge` The `MakeEdge` implementation assumes that there exists an output at `output_index` of `src` node and an input at `input_index` of `dst` node. However, if this is not the case this results in accessing data out of bounds. Because we are accessing an array that is a private member of a class and only in read only mode, this usually results only in unitialized memory access. However, it is reasonable to think that malicious users could manipulate these indexes to actually read data outside the class, thus resulting in information leakage and further exploits. PiperOrigin-RevId: 346343288 Change-Id: I2127da27c2023d27f26efd39afa6c853385cab6f --- tensorflow/core/graph/graph_constructor.cc | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tensorflow/core/graph/graph_constructor.cc b/tensorflow/core/graph/graph_constructor.cc index 5a805520f519de..3f7a63e4d77390 100644 --- a/tensorflow/core/graph/graph_constructor.cc +++ b/tensorflow/core/graph/graph_constructor.cc @@ -1436,6 +1436,17 @@ void GraphConstructor::Undo() { Status GraphConstructor::MakeEdge(Node* src, int output_index, Node* dst, int input_index) { + if (output_index >= src->num_outputs()) { + return errors::InvalidArgument( + "Output ", output_index, " of node ", src->name(), + " does not exist. Node only has ", src->num_outputs(), " outputs."); + } + if (input_index >= dst->num_inputs()) { + return errors::InvalidArgument( + "Input ", input_index, " of node ", dst->name(), + " does not exist. Node only has ", dst->num_inputs(), " inputs."); + } + DataType src_out = src->output_type(output_index); DataType dst_in = dst->input_type(input_index); if (!TypesCompatible(dst_in, src_out)) { From 1f99950e837915f99f9c891c2f979f46e4bc05ff Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Fri, 20 Nov 2020 04:00:33 +0000 Subject: [PATCH 098/243] Update PCRE library from 8.42 to 8.44 This PR updates PCRE library from 8.42 to 8.44. Note there is a CVS related to old 8.42 (https://nvd.nist.gov/vuln/detail/CVE-2019-20838#VulnChangeHistorySection) Signed-off-by: Yong Tang --- tensorflow/workspace.bzl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl index 24340f36be9c2c..95d6033cecd530 100755 --- a/tensorflow/workspace.bzl +++ b/tensorflow/workspace.bzl @@ -477,12 +477,12 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""): tf_http_archive( name = "pcre", build_file = clean_dep("//third_party:pcre.BUILD"), - sha256 = "69acbc2fbdefb955d42a4c606dfde800c2885711d2979e356c0636efde9ec3b5", - strip_prefix = "pcre-8.42", + sha256 = "aecafd4af3bd0f3935721af77b889d9024b2e01d96b58471bd91a3063fb47728", + strip_prefix = "pcre-8.44", system_build_file = clean_dep("//third_party/systemlibs:pcre.BUILD"), urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/ftp.exim.org/pub/pcre/pcre-8.42.tar.gz", - "http://ftp.exim.org/pub/pcre/pcre-8.42.tar.gz", + "https://storage.googleapis.com/mirror.tensorflow.org/ftp.exim.org/pub/pcre/pcre-8.44.tar.gz", + "https://ftp.exim.org/pub/pcre/pcre-8.44.tar.gz", ], ) From 8eaa289740e1518b9635a516b7b30ff0cead2bc0 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 15 Dec 2020 12:06:27 -0800 Subject: [PATCH 099/243] Add upper bound to `h5py`. Newer versions of `h5py` would cause errors in keras tests due to difference between `unicode` and `str`. Since `h5py` comes from `keras` as an unbounded dependency, we have to manually patch this way. --- tensorflow/tools/pip_package/setup.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py index fea8b38b129f98..725576de0c5efb 100644 --- a/tensorflow/tools/pip_package/setup.py +++ b/tensorflow/tools/pip_package/setup.py @@ -73,6 +73,8 @@ # functools comes with python3, need to install the backport for python2 'functools32 >= 3.2.3;python_version<"3"', 'six >= 1.12.0', + # Pin h5py to at most 2.10.0 as newer versions break old keras tests + 'h5py <= 2.10.0', ] if sys.byteorder == 'little': From 7910f05739b6821abf5676f2b9e7e6944152827c Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 15 Dec 2020 15:18:59 -0800 Subject: [PATCH 100/243] Disable a few tests. These tests now segfault after some dependency updated below us. --- tensorflow/python/feature_column/BUILD | 5 +- tensorflow/python/kernel_tests/BUILD | 1 + .../python/kernel_tests/boosted_trees/BUILD | 3 + tensorflow/tools/pip_package/setup.py.orig | 313 ++++++++++++++++++ 4 files changed, 321 insertions(+), 1 deletion(-) create mode 100644 tensorflow/tools/pip_package/setup.py.orig diff --git a/tensorflow/python/feature_column/BUILD b/tensorflow/python/feature_column/BUILD index 355834b8d03422..5021a53619e766 100644 --- a/tensorflow/python/feature_column/BUILD +++ b/tensorflow/python/feature_column/BUILD @@ -244,7 +244,10 @@ py_test( srcs = ["sequence_feature_column_integration_test.py"], python_version = "PY2", srcs_version = "PY2AND3", - tags = ["no_pip"], + tags = [ + "no_mac", + "no_pip", + ], deps = [ ":feature_column_v2", "//tensorflow/python:client_testlib", diff --git a/tensorflow/python/kernel_tests/BUILD b/tensorflow/python/kernel_tests/BUILD index a77aac8cac1546..d8d00dd5891585 100644 --- a/tensorflow/python/kernel_tests/BUILD +++ b/tensorflow/python/kernel_tests/BUILD @@ -802,6 +802,7 @@ tf_py_test( "//tensorflow/python:parsing_ops", "//tensorflow/python:platform", ], + tags = ["no_mac"], ) tf_py_test( diff --git a/tensorflow/python/kernel_tests/boosted_trees/BUILD b/tensorflow/python/kernel_tests/boosted_trees/BUILD index d19284bbe55c43..aa4d0d0caf9bca 100644 --- a/tensorflow/python/kernel_tests/boosted_trees/BUILD +++ b/tensorflow/python/kernel_tests/boosted_trees/BUILD @@ -33,6 +33,7 @@ tf_py_test( "//tensorflow/python:training", "//tensorflow/python:variables", ], + tags = ["no_mac"], ) tf_py_test( @@ -47,6 +48,7 @@ tf_py_test( "//tensorflow/python:constant_op", "//tensorflow/python:resources", ], + tags = ["no_mac"], ) tf_py_test( @@ -77,6 +79,7 @@ tf_py_test( "//tensorflow/python:framework_test_lib", "//tensorflow/python:resources", ], + tags = ["no_mac"], ) tf_py_test( diff --git a/tensorflow/tools/pip_package/setup.py.orig b/tensorflow/tools/pip_package/setup.py.orig new file mode 100644 index 00000000000000..6f158a8c84db91 --- /dev/null +++ b/tensorflow/tools/pip_package/setup.py.orig @@ -0,0 +1,313 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""TensorFlow is an open source machine learning framework for everyone. + +TensorFlow is an open source software library for high performance numerical +computation. Its flexible architecture allows easy deployment of computation +across a variety of platforms (CPUs, GPUs, TPUs), and from desktops to clusters +of servers to mobile and edge devices. + +Originally developed by researchers and engineers from the Google Brain team +within Google's AI organization, it comes with strong support for machine +learning and deep learning and the flexible numerical computation core is used +across many other scientific domains. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import fnmatch +import os +import re +import sys + +from setuptools import Command +from setuptools import find_packages +from setuptools import setup +from setuptools.command.install import install as InstallCommandBase +from setuptools.dist import Distribution + +DOCLINES = __doc__.split('\n') + +# This version string is semver compatible, but incompatible with pip. +# For pip, we will remove all '-' characters from this string, and use the +# result for pip. +# Also update tensorflow/tensorflow.bzl and +# tensorflow/core/public/version.h +_VERSION = '2.1.2' + +REQUIRED_PACKAGES = [ + 'absl-py >= 0.7.0', + 'astor >= 0.6.0', + 'backports.weakref >= 1.0rc1;python_version<"3.4"', + 'enum34 >= 1.1.6;python_version<"3.4"', + 'gast == 0.2.2', + 'google_pasta >= 0.1.6', + 'keras_applications >= 1.0.8', + 'keras_preprocessing == 1.1.0', + 'numpy >= 1.16.0, < 1.19.0', + 'opt_einsum >= 2.3.2', + 'protobuf >= 3.8.0', + 'tensorboard >= 2.1.0, < 2.2.0', + 'tensorflow_estimator >= 2.1.0rc0, < 2.2.0', + 'termcolor >= 1.1.0', + 'wrapt >= 1.11.1', + # python3 requires wheel 0.26 + 'wheel >= 0.26;python_version>="3"', + 'wheel;python_version<"3"', +<<<<<<< HEAD + # mock comes with unittest.mock for python3, need to install for python2 + 'mock >= 2.0.0;python_version<"3"', + # functools comes with python3, need to install the backport for python2 + 'functools32 >= 3.2.3;python_version<"3"', + 'six >= 1.12.0', +======= + 'wrapt >= 1.11.1', + # Pin h5py to at most 2.10.0 as newer versions break old keras tests + 'h5py <= 2.10.0', +>>>>>>> 03d7ca7871b (Add upper bound to `h5py`.) +] + +if sys.byteorder == 'little': + # grpcio does not build correctly on big-endian machines due to lack of + # BoringSSL support. + # See https://github.com/tensorflow/tensorflow/issues/17882. + REQUIRED_PACKAGES.append('grpcio >= 1.8.6') + +project_name = 'tensorflow' +if '--project_name' in sys.argv: + project_name_idx = sys.argv.index('--project_name') + project_name = sys.argv[project_name_idx + 1] + sys.argv.remove('--project_name') + sys.argv.pop(project_name_idx) + +# tf-nightly should depend on tb-nightly +if 'tf_nightly' in project_name: + for i, pkg in enumerate(REQUIRED_PACKAGES): + if 'tensorboard' in pkg: + REQUIRED_PACKAGES[i] = 'tb-nightly >= 2.1.0a0, < 2.2.0a0' + elif 'tensorflow_estimator' in pkg and '2.0' in project_name: + REQUIRED_PACKAGES[i] = 'tensorflow-estimator-2.0-preview' + elif 'tensorflow_estimator' in pkg: + REQUIRED_PACKAGES[i] = 'tf-estimator-nightly' + +# pylint: disable=line-too-long +CONSOLE_SCRIPTS = [ + 'toco_from_protos = tensorflow.lite.toco.python.toco_from_protos:main', + 'tflite_convert = tensorflow.lite.python.tflite_convert:main', + 'toco = tensorflow.lite.python.tflite_convert:main', + 'saved_model_cli = tensorflow.python.tools.saved_model_cli:main', + # We need to keep the TensorBoard command, even though the console script + # is now declared by the tensorboard pip package. If we remove the + # TensorBoard command, pip will inappropriately remove it during install, + # even though the command is not removed, just moved to a different wheel. + 'tensorboard = tensorboard.main:run_main', + 'tf_upgrade_v2 = tensorflow.tools.compatibility.tf_upgrade_v2_main:main', + 'estimator_ckpt_converter = tensorflow_estimator.python.estimator.tools.checkpoint_converter:main', +] +# pylint: enable=line-too-long + +# Only keep freeze_graph console script in 1.X. +if _VERSION.startswith('1.') and '_2.0' not in project_name: + CONSOLE_SCRIPTS.append( + 'freeze_graph = tensorflow.python.tools.freeze_graph:run_main') + +# remove the tensorboard console script if building tf_nightly +if 'tf_nightly' in project_name: + CONSOLE_SCRIPTS.remove('tensorboard = tensorboard.main:run_main') + +TEST_PACKAGES = [ + 'scipy >= 0.15.1', +] + + +class BinaryDistribution(Distribution): + + def has_ext_modules(self): + return True + + +class InstallCommand(InstallCommandBase): + """Override the dir where the headers go.""" + + def finalize_options(self): + ret = InstallCommandBase.finalize_options(self) + self.install_headers = os.path.join(self.install_purelib, 'tensorflow_core', + 'include') + self.install_lib = self.install_platlib + return ret + + +class InstallHeaders(Command): + """Override how headers are copied. + + The install_headers that comes with setuptools copies all files to + the same directory. But we need the files to be in a specific directory + hierarchy for -I to work correctly. + """ + description = 'install C/C++ header files' + + user_options = [('install-dir=', 'd', + 'directory to install header files to'), + ('force', 'f', + 'force installation (overwrite existing files)'), + ] + + boolean_options = ['force'] + + def initialize_options(self): + self.install_dir = None + self.force = 0 + self.outfiles = [] + + def finalize_options(self): + self.set_undefined_options('install', + ('install_headers', 'install_dir'), + ('force', 'force')) + + def mkdir_and_copy_file(self, header): + install_dir = os.path.join(self.install_dir, os.path.dirname(header)) + # Get rid of some extra intervening directories so we can have fewer + # directories for -I + install_dir = re.sub('/google/protobuf_archive/src', '', install_dir) + install_dir = re.sub('/include/tensorflow_core/', '/include/tensorflow/', + install_dir) + + # Copy external code headers into tensorflow_core/include. + # A symlink would do, but the wheel file that gets created ignores + # symlink within the directory hierarchy. + # NOTE(keveman): Figure out how to customize bdist_wheel package so + # we can do the symlink. + external_header_locations = [ + 'tensorflow_core/include/external/eigen_archive/', + 'tensorflow_core/include/external/com_google_absl/', + ] + for location in external_header_locations: + if location in install_dir: + extra_dir = install_dir.replace(location, '') + if not os.path.exists(extra_dir): + self.mkpath(extra_dir) + self.copy_file(header, extra_dir) + + if not os.path.exists(install_dir): + self.mkpath(install_dir) + return self.copy_file(header, install_dir) + + def run(self): + hdrs = self.distribution.headers + if not hdrs: + return + + self.mkpath(self.install_dir) + for header in hdrs: + (out, _) = self.mkdir_and_copy_file(header) + self.outfiles.append(out) + + def get_inputs(self): + return self.distribution.headers or [] + + def get_outputs(self): + return self.outfiles + + +def find_files(pattern, root): + """Return all the files matching pattern below root dir.""" + for dirpath, _, files in os.walk(root): + for filename in fnmatch.filter(files, pattern): + yield os.path.join(dirpath, filename) + + +so_lib_paths = [ + i for i in os.listdir('.') + if os.path.isdir(i) and fnmatch.fnmatch(i, '_solib_*') +] + +matches = [] +for path in so_lib_paths: + matches.extend( + ['../' + x for x in find_files('*', path) if '.py' not in x] + ) + +if os.name == 'nt': + EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.pyd' +else: + EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.so' + +headers = ( + list(find_files('*.h', 'tensorflow_core/core')) + + list(find_files('*.h', 'tensorflow_core/stream_executor')) + + list(find_files('*.h', 'google/com_google_protobuf/src')) + + list(find_files('*.inc', 'google/com_google_protobuf/src')) + + list(find_files('*', 'third_party/eigen3')) + list( + find_files('*.h', 'tensorflow_core/include/external/com_google_absl')) + + list( + find_files('*.inc', 'tensorflow_core/include/external/com_google_absl')) + + list(find_files('*', 'tensorflow_core/include/external/eigen_archive'))) + +setup( + name=project_name, + version=_VERSION.replace('-', ''), + description=DOCLINES[0], + long_description='\n'.join(DOCLINES[2:]), + url='https://www.tensorflow.org/', + download_url='https://github.com/tensorflow/tensorflow/tags', + author='Google Inc.', + author_email='packages@tensorflow.org', + # Contained modules and scripts. + packages=find_packages(), + entry_points={ + 'console_scripts': CONSOLE_SCRIPTS, + }, + headers=headers, + install_requires=REQUIRED_PACKAGES, + tests_require=REQUIRED_PACKAGES + TEST_PACKAGES, + # Add in any packaged data. + include_package_data=True, + package_data={ + 'tensorflow': [ + EXTENSION_NAME, + ] + matches, + }, + zip_safe=False, + distclass=BinaryDistribution, + cmdclass={ + 'install_headers': InstallHeaders, + 'install': InstallCommand, + }, + # PyPI package information. + classifiers=[ + 'Development Status :: 5 - Production/Stable', + 'Intended Audience :: Developers', + 'Intended Audience :: Education', + 'Intended Audience :: Science/Research', + 'License :: OSI Approved :: Apache Software License', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Topic :: Scientific/Engineering', + 'Topic :: Scientific/Engineering :: Mathematics', + 'Topic :: Scientific/Engineering :: Artificial Intelligence', + 'Topic :: Software Development', + 'Topic :: Software Development :: Libraries', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], + license='Apache 2.0', + keywords='tensorflow tensor machine learning', +) From af22bcbb91c52e1d37adb35ba94a814ef682eae9 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 15 Dec 2020 15:28:26 -0800 Subject: [PATCH 101/243] Fix sanity build, reorder tags to satisfy buildifier. --- tensorflow/python/kernel_tests/linalg/BUILD | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/python/kernel_tests/linalg/BUILD b/tensorflow/python/kernel_tests/linalg/BUILD index 4c7f619a2ed20a..9927d270c43edd 100644 --- a/tensorflow/python/kernel_tests/linalg/BUILD +++ b/tensorflow/python/kernel_tests/linalg/BUILD @@ -56,8 +56,8 @@ cuda_py_test( ], shard_count = 5, tags = [ - "noasan", # times out, b/63678675 "no_windows_gpu", + "noasan", # times out, b/63678675 "optonly", # times out ], ) From ae4bf36f6e0814ec0f8025a5707863ef1d0e8f9a Mon Sep 17 00:00:00 2001 From: TensorFlow Release Automation Date: Wed, 16 Dec 2020 16:27:47 -0800 Subject: [PATCH 102/243] Insert release notes place-fill --- RELEASE.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/RELEASE.md b/RELEASE.md index aecf5e2a0e4cb7..3a3bf8cc617b5d 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,3 +1,7 @@ +# Release 2.1.3 + + + # Release 2.1.2 ## Bug Fixes and Other Changes From ae5158b15a10bf2eb84e51ceccae7a58e201ea46 Mon Sep 17 00:00:00 2001 From: TensorFlow Release Automation Date: Wed, 16 Dec 2020 16:49:12 -0800 Subject: [PATCH 103/243] Update version numbers to 2.1.3 --- tensorflow/core/public/version.h | 2 +- tensorflow/tensorflow.bzl | 2 +- tensorflow/tools/pip_package/setup.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tensorflow/core/public/version.h b/tensorflow/core/public/version.h index c54b9b6ac22b4f..e360eae2f64fb6 100644 --- a/tensorflow/core/public/version.h +++ b/tensorflow/core/public/version.h @@ -22,7 +22,7 @@ limitations under the License. // tensorflow/tools/pip_package/setup.py #define TF_MAJOR_VERSION 2 #define TF_MINOR_VERSION 1 -#define TF_PATCH_VERSION 2 +#define TF_PATCH_VERSION 3 // TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1", // "-beta", "-rc", "-rc.1") diff --git a/tensorflow/tensorflow.bzl b/tensorflow/tensorflow.bzl index 7ba22b6b49c553..1cb6af650a635c 100644 --- a/tensorflow/tensorflow.bzl +++ b/tensorflow/tensorflow.bzl @@ -54,7 +54,7 @@ def register_extension_info(**kwargs): # not contain rc or alpha, only numbers. # Also update tensorflow/core/public/version.h # and tensorflow/tools/pip_package/setup.py -VERSION = "2.1.2" +VERSION = "2.1.3" VERSION_MAJOR = VERSION.split(".")[0] def if_v2(a): diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py index 725576de0c5efb..7fd04c9869746b 100644 --- a/tensorflow/tools/pip_package/setup.py +++ b/tensorflow/tools/pip_package/setup.py @@ -47,7 +47,7 @@ # result for pip. # Also update tensorflow/tensorflow.bzl and # tensorflow/core/public/version.h -_VERSION = '2.1.2' +_VERSION = '2.1.3' REQUIRED_PACKAGES = [ 'absl-py >= 0.7.0', From 7a62de6d78fb3f0803e824f411d39601b1e4d610 Mon Sep 17 00:00:00 2001 From: Geeta Chavan Date: Thu, 17 Dec 2020 12:51:49 -0800 Subject: [PATCH 104/243] Fix import path --- tensorflow/core/kernels/data_format_ops.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/data_format_ops.cc b/tensorflow/core/kernels/data_format_ops.cc index 7f1be77d8dbfb2..d00e51b5ea5085 100644 --- a/tensorflow/core/kernels/data_format_ops.cc +++ b/tensorflow/core/kernels/data_format_ops.cc @@ -25,7 +25,7 @@ limitations under the License. #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" -#include "tensorflow/core/platform/errors.h" +#include "tensorflow/core/lib/core/errors.h" namespace tensorflow { From 165a5659a12a9da1664ed52f3d8e78efaa35ac1b Mon Sep 17 00:00:00 2001 From: Geeta Chavan Date: Thu, 17 Dec 2020 13:59:16 -0800 Subject: [PATCH 105/243] Fix bad cherrypick --- tensorflow/core/kernels/data_format_ops.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/data_format_ops.cc b/tensorflow/core/kernels/data_format_ops.cc index d00e51b5ea5085..a51bfc3b17294a 100644 --- a/tensorflow/core/kernels/data_format_ops.cc +++ b/tensorflow/core/kernels/data_format_ops.cc @@ -185,7 +185,7 @@ class DataFormatVecPermuteOp : public OpKernel { errors::InvalidArgument( "Format specifier must contain H and W for 2D case")); } - ComputeDstIndex(src_format_str, dst_format_str, input.dims(), &dst_idx); + ComputeDstIndex(input.dims(), &dst_idx); functor::DataFormatVecPermute()(context->eigen_device(), input.flat(), From 00205209cfc1e60c20bbbeee60a68d0672ac4213 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sat, 19 Dec 2020 08:53:50 -0800 Subject: [PATCH 106/243] Disable failing test --- tensorflow/python/ops/nn_test.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/tensorflow/python/ops/nn_test.py b/tensorflow/python/ops/nn_test.py index f102ad9d479bc1..34ad7549f7e4b4 100644 --- a/tensorflow/python/ops/nn_test.py +++ b/tensorflow/python/ops/nn_test.py @@ -1335,17 +1335,6 @@ def testExtraSpecifiers(self): with test_util.use_gpu(): self.evaluate(op) - @test_util.disable_xla("XLA catches the error and rethrows as different one") - def test2DNoWH(self): - x = [[0, 1], [2, 3]] - with self.assertRaisesRegex( - errors.InvalidArgumentError, - "Format specifier must contain H and W for 2D case"): - op = nn_ops.data_format_vec_permute( - x, src_format="1234", dst_format="4321") - with test_util.use_gpu(): - self.evaluate(op) - @test_util.run_all_in_graph_and_eager_modes class AvgPoolTest(test_lib.TestCase): From 49fc5d2d50ce6e5fd57f6a44659a397f21ee1066 Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Mon, 21 Dec 2020 08:14:56 -0800 Subject: [PATCH 107/243] Update SQLite to the lastest sqlite-amalgamation-3340000 This PR updates SQLite to the latest sqlite-amalgamation-3340000 Signed-off-by: Yong Tang --- tensorflow/workspace.bzl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl index 95d6033cecd530..b9a4136a3acd7f 100755 --- a/tensorflow/workspace.bzl +++ b/tensorflow/workspace.bzl @@ -275,12 +275,12 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""): tf_http_archive( name = "org_sqlite", build_file = clean_dep("//third_party:sqlite.BUILD"), - sha256 = "b34f4c0c0eefad9a7e515c030c18702e477f4ef7d8ade6142bdab8011b487ac6", - strip_prefix = "sqlite-amalgamation-3330000", + sha256 = "8ff0b79fd9118af7a760f1f6a98cac3e69daed325c8f9f0a581ecb62f797fd64", + strip_prefix = "sqlite-amalgamation-3340000", system_build_file = clean_dep("//third_party/systemlibs:sqlite.BUILD"), urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/www.sqlite.org/2020/sqlite-amalgamation-3330000.zip", - "https://www.sqlite.org/2020/sqlite-amalgamation-3330000.zip", + "https://storage.googleapis.com/mirror.tensorflow.org/www.sqlite.org/2020/sqlite-amalgamation-3340000.zip", + "https://www.sqlite.org/2020/sqlite-amalgamation-3340000.zip", ], ) From 7bd0c7a62b83a9cfd4fefb8ff8178cdbbf08c020 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 4 Jan 2021 12:00:41 -0800 Subject: [PATCH 108/243] Update RELEASE.md --- RELEASE.md | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/RELEASE.md b/RELEASE.md index 3a3bf8cc617b5d..2c5fdb45403c39 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,6 +1,29 @@ # Release 2.1.3 - +## Bug Fixes and Other Changes +* Fixes an access to unitialized memory in Eigen code + ([CVE-2020-26266](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26266)) +* Fixes a security vulnerability caused by lack of validation in + `tf.raw_ops.DataFormatVecPermute` and `tf.raw_ops.DataFormatDimMap` + ([CVE-2020-26267](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26267)) +* Fixes a vulnerability caused by attempting to write to immutable memory region in + `tf.raw_ops.ImmutableConst` + ([CVE-2020-26268](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26268) +* Fixes a `CHECK`-fail in LSTM with zero-length input + ([CVE-2020-26270](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26270)) +* Fixes a security vulnerability caused by accessing heap data outside of bounds + when loading a specially crafted `SavedModel` + ([CVE-2020-26271](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26271)) +* Updates `libjpeg-turbo` to `2.0.5` to handle + [CVE-2020-13790](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-13790). +* Updates `junit` to `4.13.1` to handle + [CVE-2020-15250](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15250). +* Updates `PCRE` to `8.44` to handle + [CVE-2019-20838](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-20838) + and + [CVE-2020-14155](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-14155). +* Updates `sqlite3` to `3.44.0` to keep in sync with master branch. +* Newer ROCm versions are supported on the 2.1 branch. # Release 2.1.2 From ede27d9b5dae16af0936ec46fa94784b9220c696 Mon Sep 17 00:00:00 2001 From: Austin Anderson Date: Tue, 18 May 2021 12:10:50 -0700 Subject: [PATCH 109/243] Add .zenodo.json for clean automated DOI numbers. See the link on the TensorFlow README for the DOI page. PiperOrigin-RevId: 374474720 Change-Id: I739c9fc95c03648c50c6a0fc1931308507cdf12c --- .zenodo.json | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 .zenodo.json diff --git a/.zenodo.json b/.zenodo.json new file mode 100644 index 00000000000000..7161180c51ae3e --- /dev/null +++ b/.zenodo.json @@ -0,0 +1,13 @@ +{ + "description": "TensorFlow is an end-to-end open source platform for machine learning. It has a comprehensive, flexible ecosystem of tools, libraries, and community resources that lets researchers push the state-of-the-art in ML and developers easily build and deploy ML-powered applications.", + "license": "Apache-2.0", + "title": "TensorFlow", + "upload_type": "software", + "creators": [ + { + "name": "TensorFlow Developers" + } + ], + "access_right": "open", + "notes": "Specific TensorFlow versions can be found in the \"Versions\" list on the right side of this page.
See the full list of authors on GitHub." +} From a1f159dbb6f8c1623919b6d113bc4368aa1a5386 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Mon, 8 Feb 2021 12:29:30 -0800 Subject: [PATCH 110/243] Allowlist certain data types to avoid a seg fault. PiperOrigin-RevId: 356326671 Change-Id: I23b65b52e93798cb5a6744632d31b0f88c6b6b31 --- tensorflow/core/kernels/immutable_constant_op.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/core/kernels/immutable_constant_op.cc b/tensorflow/core/kernels/immutable_constant_op.cc index 1cfbdb82778913..19aa865c1fbe4d 100644 --- a/tensorflow/core/kernels/immutable_constant_op.cc +++ b/tensorflow/core/kernels/immutable_constant_op.cc @@ -17,6 +17,8 @@ limitations under the License. #include +#include "tensorflow/core/framework/types.pb.h" + namespace tensorflow { namespace { @@ -86,6 +88,9 @@ ImmutableConstantOp::ImmutableConstantOp(OpKernelConstruction* context) OP_REQUIRES_OK(context, context->GetAttr(kMemoryRegionNameAttr, ®ion_name_)); OP_REQUIRES_OK(context, context->GetAttr(kDTypeAttr, &dtype_)); + OP_REQUIRES(context, dtype_ != DT_RESOURCE && dtype_ != DT_VARIANT, + errors::InvalidArgument( + "Resource and variant dtypes are invalid for this op.")); OP_REQUIRES_OK(context, context->GetAttr(kShapeAttr, &shape_)); } From 730909e302e9545b8df1de4d34abb76fd2c62918 Mon Sep 17 00:00:00 2001 From: Geeta Chavan Date: Tue, 18 May 2021 17:48:48 -0700 Subject: [PATCH 111/243] CherryPick:2.1:PR #46974: Fix crash of tf.strings.substr when pos and len have different shapes --- tensorflow/core/kernels/substr_op.cc | 5 +++++ tensorflow/python/kernel_tests/substr_op_test.py | 9 +++++++++ 2 files changed, 14 insertions(+) diff --git a/tensorflow/core/kernels/substr_op.cc b/tensorflow/core/kernels/substr_op.cc index e382381e122324..ab83efda2a2e17 100644 --- a/tensorflow/core/kernels/substr_op.cc +++ b/tensorflow/core/kernels/substr_op.cc @@ -51,6 +51,11 @@ class SubstrOp : public OpKernel { const Tensor& len_tensor = context->input(2); const TensorShape& input_shape = input_tensor.shape(); const TensorShape& pos_shape = pos_tensor.shape(); + const TensorShape& len_shape = len_tensor.shape(); + OP_REQUIRES(context, (pos_shape == len_shape), + errors::InvalidArgument( + "pos and len should have the same shape, got: ", + pos_shape.DebugString(), " vs. ", len_shape.DebugString())); bool is_scalar = TensorShapeUtils::IsScalar(pos_shape); diff --git a/tensorflow/python/kernel_tests/substr_op_test.py b/tensorflow/python/kernel_tests/substr_op_test.py index 9302152e82bfa9..eae4e10f378567 100644 --- a/tensorflow/python/kernel_tests/substr_op_test.py +++ b/tensorflow/python/kernel_tests/substr_op_test.py @@ -492,6 +492,15 @@ def testInvalidUnit(self): with self.assertRaises(ValueError): string_ops.substr(b"test", 3, 1, unit="UTF8") + def testInvalidPos(self): + # Test case for GitHub issue 46900. + with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)): + x = string_ops.substr(b"abc", len=1, pos=[1, -1]) + self.evaluate(x) + + with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)): + x = string_ops.substr(b"abc", len=1, pos=[1, 2]) + self.evaluate(x) if __name__ == "__main__": test.main() From 58a9c0d259261d6f243e4a0301d40005afd458ef Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Tue, 13 Apr 2021 14:25:01 -0700 Subject: [PATCH 112/243] Fix `tf.raw_ops.ResourceCountUpTo` null pointer dereference. PiperOrigin-RevId: 368294347 Change-Id: I2c16fbfc9b4966c402c3d8e311f0d665a9c852d8 --- tensorflow/python/lib/core/ndarray_tensor.cc | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tensorflow/python/lib/core/ndarray_tensor.cc b/tensorflow/python/lib/core/ndarray_tensor.cc index 8c8362972beef3..7bf71b47b0f3e9 100644 --- a/tensorflow/python/lib/core/ndarray_tensor.cc +++ b/tensorflow/python/lib/core/ndarray_tensor.cc @@ -16,6 +16,7 @@ limitations under the License. #include "tensorflow/python/lib/core/ndarray_tensor.h" #include +#include #include "tensorflow/core/lib/core/coding.h" #include "tensorflow/core/lib/core/errors.h" @@ -72,6 +73,13 @@ Status PyArrayDescr_to_TF_DataType(PyArray_Descr* descr, PyObject* key; PyObject* value; Py_ssize_t pos = 0; + + // Return an error if the fields attribute is null. + // Occurs with an improper conversion attempt to resource. + if (descr->fields == nullptr) { + return errors::Internal("Unexpected numpy data type"); + } + if (PyDict_Next(descr->fields, &pos, &key, &value)) { // In Python 3, the keys of numpy custom struct types are unicode, unlike // Python 2, where the keys are bytes. From 34e3aa6a20b27b1a3656dc3838a276251285add6 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sat, 17 Apr 2021 20:55:53 -0700 Subject: [PATCH 113/243] Validate `MatrixDiagV{2,3}` arguments to prevent breakage. PiperOrigin-RevId: 369056033 Change-Id: Ic2018c297d3dd6f252dc1dd3667f1ed5cb1eaa42 --- tensorflow/core/kernels/matrix_diag_op.cc | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/tensorflow/core/kernels/matrix_diag_op.cc b/tensorflow/core/kernels/matrix_diag_op.cc index ae69e7752f102c..9eef404bf47dba 100644 --- a/tensorflow/core/kernels/matrix_diag_op.cc +++ b/tensorflow/core/kernels/matrix_diag_op.cc @@ -178,9 +178,22 @@ class MatrixDiagOp : public OpKernel { upper_diag_index = diag_index.flat()(1); } } - num_rows = context->input(2).flat()(0); - num_cols = context->input(3).flat()(0); - padding_value = context->input(4).flat()(0); + + auto& num_rows_tensor = context->input(2); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(num_rows_tensor.shape()), + errors::InvalidArgument("num_rows must be a scalar")); + num_rows = num_rows_tensor.flat()(0); + + auto& num_cols_tensor = context->input(3); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(num_cols_tensor.shape()), + errors::InvalidArgument("num_cols must be a scalar")); + num_cols = num_cols_tensor.flat()(0); + + auto& padding_value_tensor = context->input(4); + OP_REQUIRES(context, + TensorShapeUtils::IsScalar(padding_value_tensor.shape()), + errors::InvalidArgument("padding_value must be a scalar")); + padding_value = padding_value_tensor.flat()(0); } // Size validations. From 8d911e80fafb03ac37713c0f68cb0290273ecbc4 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 19 Apr 2021 09:57:01 -0700 Subject: [PATCH 114/243] Handle a special grappler case resulting in crash. It might happen that a malformed input could be used to trick Grappler into trying to optimize a node with no inputs. This, in turn, would produce a null pointer dereference and a segfault. PiperOrigin-RevId: 369242852 Change-Id: I2e5cbe7aec243d34a6d60220ac8ac9b16f136f6b --- .../core/grappler/optimizers/arithmetic_optimizer.cc | 11 +++++++++++ .../core/grappler/optimizers/dependency_optimizer.cc | 6 ++++++ 2 files changed, 17 insertions(+) diff --git a/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc b/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc index d2ff480c29d3c6..fec54119117ec9 100644 --- a/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc +++ b/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc @@ -1970,6 +1970,12 @@ class ReorderCastLikeAndValuePreserving : public ArithmeticOptimizerStage { Status TrySimplify(NodeDef* consumer, string* simplified_node_name) override { NodeDef* producer; + + if (consumer->input_size() < 1) { + return errors::FailedPrecondition("Node ", simplified_node_name, + " lacks inputs"); + } + TF_RETURN_IF_ERROR(GetInputNode(consumer->input(0), &producer)); const bool producer_is_cast = IsCastLike(*producer); const bool can_optimize = @@ -2369,6 +2375,11 @@ class ReplaceMulWithSquare : public ArithmeticOptimizerStage { ~ReplaceMulWithSquare() override = default; bool IsSupported(const NodeDef* node) const override { + if (!node || node->input_size() < 2) { + // Invalid node + return false; + } + return IsAnyMul(*node) && node->input(0) == node->input(1); } diff --git a/tensorflow/core/grappler/optimizers/dependency_optimizer.cc b/tensorflow/core/grappler/optimizers/dependency_optimizer.cc index 0734c32948b159..1ad38d6a16fb31 100644 --- a/tensorflow/core/grappler/optimizers/dependency_optimizer.cc +++ b/tensorflow/core/grappler/optimizers/dependency_optimizer.cc @@ -67,6 +67,12 @@ bool DependencyOptimizer::SafeToRemoveIdentity(const NodeDef& node) const { // The output values of this node may be needed. return false; } + + if (node.input_size() < 1) { + // Node lacks input, is invalid + return false; + } + const NodeDef* input = node_map_->GetNode(NodeName(node.input(0))); CHECK(input != nullptr) << "node = " << node.name() << " input = " << node.input(0); From 2d6024286e55d3fafcd3dde55c48a7591569c262 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 19 Apr 2021 09:56:46 -0700 Subject: [PATCH 115/243] Fix 2 issues with `Conv3D`. We have an issue where the dimensions are not matching and this causes Eigen to crash on an assert. Then, we have an issue where we accidentally do a division by 0. PiperOrigin-RevId: 369242785 Change-Id: Ie94067b2d41f58699af99ebb5af335ad9defd931 --- tensorflow/core/kernels/conv_ops_3d.cc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tensorflow/core/kernels/conv_ops_3d.cc b/tensorflow/core/kernels/conv_ops_3d.cc index f0b9bf12e8e7ff..4cb96e21879f81 100644 --- a/tensorflow/core/kernels/conv_ops_3d.cc +++ b/tensorflow/core/kernels/conv_ops_3d.cc @@ -67,6 +67,11 @@ struct LaunchConvOp { errors::InvalidArgument("CPU implementation of Conv3D " "currently only supports dilated rates " "of 1.")); + OP_REQUIRES(context, filter.dim_size(3) == input.dim_size(input.dims() - 1), + errors::InvalidArgument( + "Number of channels in filter (", filter.dim_size(3), + ") must match last dimension of input (", + input.dim_size(input.dims() - 1), ")")); functor::CuboidConvolution()( context->eigen_device(), output->tensor(), input.tensor(), filter.tensor(), strides[2], strides[1], @@ -140,6 +145,8 @@ class Conv3DOp : public BinaryOp { const int64 filter_depth = filter.dim_size(3); const int64 out_depth = filter.dim_size(4); + OP_REQUIRES(context, filter_depth != 0, + errors::InvalidArgument("filter_depth must be non-zero")); OP_REQUIRES(context, in_depth % filter_depth == 0, errors::InvalidArgument( "Input depth must be evenly divisible by filter depth: ", From 2dd084ef48c80d34a5ea1a4ac66946cca9dfe65c Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Tue, 13 Apr 2021 14:24:00 -0700 Subject: [PATCH 116/243] Fix and null pointer dereferences. --- tensorflow/core/kernels/session_ops.cc | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/tensorflow/core/kernels/session_ops.cc b/tensorflow/core/kernels/session_ops.cc index c2d382b49de531..8d3bef7fc3500a 100644 --- a/tensorflow/core/kernels/session_ops.cc +++ b/tensorflow/core/kernels/session_ops.cc @@ -117,7 +117,11 @@ class GetSessionTensorOp : public OpKernel { const Tensor& handle = ctx->input(0); const string& name = handle.scalar()(); Tensor val; - OP_REQUIRES_OK(ctx, ctx->session_state()->GetTensor(name, &val)); + auto session_state = ctx->session_state(); + OP_REQUIRES(ctx, session_state != nullptr, + errors::FailedPrecondition( + "GetSessionTensor called on null session state")); + OP_REQUIRES_OK(ctx, session_state->GetTensor(name, &val)); ctx->set_output(0, val); } @@ -159,7 +163,11 @@ class DeleteSessionTensorOp : public OpKernel { void Compute(OpKernelContext* ctx) override { const Tensor& handle = ctx->input(0); const string& name = handle.scalar()(); - OP_REQUIRES_OK(ctx, ctx->session_state()->DeleteTensor(name)); + auto session_state = ctx->session_state(); + OP_REQUIRES(ctx, session_state != nullptr, + errors::FailedPrecondition( + "DeleteSessionTensor called on null session state")); + OP_REQUIRES_OK(ctx, session_state->DeleteTensor(name)); } TF_DISALLOW_COPY_AND_ASSIGN(DeleteSessionTensorOp); From bd8f3b8d71543d8d7c84d77c075e1afa07b6c6d9 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Thu, 15 Apr 2021 13:28:49 -0700 Subject: [PATCH 117/243] Fix `tf.raw_ops.RaggedTensorToTensor` failing CHECK. PiperOrigin-RevId: 368706628 Change-Id: I5c9ea4833f38835ee183ca50d63251dc89c9f3bc --- .../kernels/ragged_tensor_to_tensor_op.cc | 20 ++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc b/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc index ca9e1836c82127..6f5c07e25c3da0 100644 --- a/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc +++ b/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc @@ -208,7 +208,7 @@ class RaggedTensorToTensorBaseOp : public OpKernel { } void CalculateOutputIndexRowSplit( - const RowPartitionTensor& row_split, + OpKernelContext* context, const RowPartitionTensor& row_split, const vector& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector* result) { @@ -233,7 +233,8 @@ class RaggedTensorToTensorBaseOp : public OpKernel { } } if (row_split_size > 0) { - DCHECK_EQ(result->size(), row_split(row_split_size - 1)); + OP_REQUIRES(context, result->size() == row_split(row_split_size - 1), + errors::InvalidArgument("Invalid row split size.")); } } @@ -259,7 +260,7 @@ class RaggedTensorToTensorBaseOp : public OpKernel { // result[7] = -1 because parent_output_index[value_rowids[6]] == -1 // result[8] = parent_output_index[value_rowids[7]] void CalculateOutputIndexValueRowID( - const RowPartitionTensor& value_rowids, + OpKernelContext* context, const RowPartitionTensor& value_rowids, const vector& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector* result) { @@ -293,7 +294,8 @@ class RaggedTensorToTensorBaseOp : public OpKernel { } result->push_back(current_output_index); } - DCHECK_EQ(result->size(), value_rowids.size()); + OP_REQUIRES(context, result->size() == value_rowids.size(), + errors::InvalidArgument("Invalid row ids.")); } Status CalculateOutputIndex(OpKernelContext* context, int dimension, @@ -307,13 +309,13 @@ class RaggedTensorToTensorBaseOp : public OpKernel { switch (partition_type) { case RowPartitionType::VALUE_ROWIDS: CalculateOutputIndexValueRowID( - row_partition_tensor, parent_output_index, output_index_multiplier, - output_size, result); + context, row_partition_tensor, parent_output_index, + output_index_multiplier, output_size, result); return tensorflow::Status::OK(); case RowPartitionType::ROW_SPLITS: - CalculateOutputIndexRowSplit(row_partition_tensor, parent_output_index, - output_index_multiplier, output_size, - result); + CalculateOutputIndexRowSplit( + context, row_partition_tensor, parent_output_index, + output_index_multiplier, output_size, result); return tensorflow::Status::OK(); default: return errors::InvalidArgument( From 263e5965656518b139ad93804571a331ebaa42ed Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Tue, 13 Apr 2021 14:54:18 -0700 Subject: [PATCH 118/243] Fix `tf.raw_ops.RaggedTensorToTensor` failing CHECK in `tensor.cc`. PiperOrigin-RevId: 368300502 Change-Id: I91255d23c4bfd3aa3c029aac773937c09daf3c64 --- tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc b/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc index ca9e1836c82127..4a29cab52b1e87 100644 --- a/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc +++ b/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc @@ -345,6 +345,11 @@ class RaggedTensorToTensorBaseOp : public OpKernel { void Compute(OpKernelContext* context) override { INDEX_TYPE first_dimension; + const Tensor first_partition_tensor = + context->input(kFirstPartitionInputIndex); + OP_REQUIRES(context, first_partition_tensor.NumElements() > 0, + errors::InvalidArgument("Invalid first partition input. Tensor " + "requires at least one element.")); OP_REQUIRES_OK(context, GetFirstDimensionSize(context, &first_dimension)); vector output_size; OP_REQUIRES_OK(context, From ed53e273b83aaece49e384aa37c9307b2464588f Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 19 Apr 2021 13:46:32 -0700 Subject: [PATCH 119/243] Validate some shape requirements for `Conv3DBackpropFilter*` and `Conv3DBackpropInput*` ops. Older versions of Eigen might otherwise crash / produce OOB read on specially crafted inputs. PiperOrigin-RevId: 369293977 Change-Id: I58f51445a93936d7cf8e616f75de17677df36718 --- tensorflow/core/kernels/conv_grad_ops_3d.cc | 56 +++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/tensorflow/core/kernels/conv_grad_ops_3d.cc b/tensorflow/core/kernels/conv_grad_ops_3d.cc index 96bc41a7262e59..7cc6f098c4bb5e 100644 --- a/tensorflow/core/kernels/conv_grad_ops_3d.cc +++ b/tensorflow/core/kernels/conv_grad_ops_3d.cc @@ -236,6 +236,20 @@ class Conv3DBackpropInputOp : public OpKernel { input_shape = context->input(0).shape(); } + OP_REQUIRES( + context, input_shape.dim_size(4) == filter_shape.dim_size(3), + errors::InvalidArgument("input and filter_sizes must have the same " + "number of channels. Got ", + input_shape.dim_size(4), " for input and ", + filter_shape.dim_size(3), " for filter_sizes")); + OP_REQUIRES( + context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4), + errors::InvalidArgument("out_backprop and filter_sizes must have the " + "same number of channels. Got ", + out_backprop_shape.dim_size(4), + " for out_backprop and ", + filter_shape.dim_size(4), " for filter_sizes")); + ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( "Conv3DBackpropInputOp", /*num_spatial_dims=*/3, @@ -342,6 +356,20 @@ class Conv3DCustomBackpropInputOp : public OpKernel { input_shape = context->input(0).shape(); } + OP_REQUIRES( + context, input_shape.dim_size(4) == filter_shape.dim_size(3), + errors::InvalidArgument("input and filter_sizes must have the same " + "number of channels. Got ", + input_shape.dim_size(4), " for input and ", + filter_shape.dim_size(3), " for filter_sizes")); + OP_REQUIRES( + context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4), + errors::InvalidArgument("out_backprop and filter_sizes must have the " + "same number of channels. Got ", + out_backprop_shape.dim_size(4), + " for out_backprop and ", + filter_shape.dim_size(4), " for filter_sizes")); + ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( "Conv3DBackpropInputOp", /*num_spatial_dims=*/3, @@ -692,6 +720,20 @@ class Conv3DBackpropFilterOp : public OpKernel { filter_shape = context->input(1).shape(); } + OP_REQUIRES( + context, input_shape.dim_size(4) == filter_shape.dim_size(3), + errors::InvalidArgument("input and filter_sizes must have the same " + "number of channels. Got ", + input_shape.dim_size(4), " for input and ", + filter_shape.dim_size(3), " for filter_sizes")); + OP_REQUIRES( + context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4), + errors::InvalidArgument("out_backprop and filter_sizes must have the " + "same number of channels. Got ", + out_backprop_shape.dim_size(4), + " for out_backprop and ", + filter_shape.dim_size(4), " for filter_sizes")); + ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( @@ -804,6 +846,20 @@ class Conv3DCustomBackpropFilterOp : public OpKernel { filter_shape = context->input(1).shape(); } + OP_REQUIRES( + context, input_shape.dim_size(4) == filter_shape.dim_size(3), + errors::InvalidArgument("input and filter_sizes must have the same " + "number of channels. Got ", + input_shape.dim_size(4), " for input and ", + filter_shape.dim_size(3), " for filter_sizes")); + OP_REQUIRES( + context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4), + errors::InvalidArgument("out_backprop and filter_sizes must have the " + "same number of channels. Got ", + out_backprop_shape.dim_size(4), + " for out_backprop and ", + filter_shape.dim_size(4), " for filter_sizes")); + ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( From 518b73f51f838e730859fbd8782600b1bb47ada0 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 19 Apr 2021 16:00:40 -0700 Subject: [PATCH 120/243] Eliminate a division by 0 in 3D convolutions. --- tensorflow/core/kernels/conv_grad_ops_3d.cc | 104 ++++++++++++++++++++ 1 file changed, 104 insertions(+) diff --git a/tensorflow/core/kernels/conv_grad_ops_3d.cc b/tensorflow/core/kernels/conv_grad_ops_3d.cc index 96bc41a7262e59..08e832760924e6 100644 --- a/tensorflow/core/kernels/conv_grad_ops_3d.cc +++ b/tensorflow/core/kernels/conv_grad_ops_3d.cc @@ -236,6 +236,28 @@ class Conv3DBackpropInputOp : public OpKernel { input_shape = context->input(0).shape(); } + OP_REQUIRES(context, input_shape.dims() == 5, + errors::InvalidArgument("input tensor must have 5 dimensions")); + OP_REQUIRES( + context, filter_shape.dims() == 5, + errors::InvalidArgument("filter_sizes tensor must have 5 dimensions")); + OP_REQUIRES( + context, out_backprop_shape.dims() == 5, + errors::InvalidArgument("out_backprop tensor must have 5 dimensions")); + OP_REQUIRES( + context, input_shape.dim_size(4) == filter_shape.dim_size(3), + errors::InvalidArgument("input and filter_sizes must have the same " + "number of channels. Got ", + input_shape.dim_size(4), " for input and ", + filter_shape.dim_size(3), " for filter_sizes")); + OP_REQUIRES( + context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4), + errors::InvalidArgument("out_backprop and filter_sizes must have the " + "same number of channels. Got ", + out_backprop_shape.dim_size(4), + " for out_backprop and ", + filter_shape.dim_size(4), " for filter_sizes")); + ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( "Conv3DBackpropInputOp", /*num_spatial_dims=*/3, @@ -342,6 +364,28 @@ class Conv3DCustomBackpropInputOp : public OpKernel { input_shape = context->input(0).shape(); } + OP_REQUIRES(context, input_shape.dims() == 5, + errors::InvalidArgument("input tensor must have 5 dimensions")); + OP_REQUIRES( + context, filter_shape.dims() == 5, + errors::InvalidArgument("filter_sizes tensor must have 5 dimensions")); + OP_REQUIRES( + context, out_backprop_shape.dims() == 5, + errors::InvalidArgument("out_backprop tensor must have 5 dimensions")); + OP_REQUIRES( + context, input_shape.dim_size(4) == filter_shape.dim_size(3), + errors::InvalidArgument("input and filter_sizes must have the same " + "number of channels. Got ", + input_shape.dim_size(4), " for input and ", + filter_shape.dim_size(3), " for filter_sizes")); + OP_REQUIRES( + context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4), + errors::InvalidArgument("out_backprop and filter_sizes must have the " + "same number of channels. Got ", + out_backprop_shape.dim_size(4), + " for out_backprop and ", + filter_shape.dim_size(4), " for filter_sizes")); + ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( "Conv3DBackpropInputOp", /*num_spatial_dims=*/3, @@ -412,6 +456,11 @@ class Conv3DCustomBackpropInputOp : public OpKernel { // contraction compared to sharding and matmuls. const bool use_parallel_contraction = dims.batch_size == 1; + OP_REQUIRES( + context, work_unit_size > 0, + errors::InvalidArgument("input, filter_sizes and out_backprop tensors " + "must all have at least 1 element")); + const size_t shard_size = use_parallel_contraction ? 1 @@ -692,6 +741,31 @@ class Conv3DBackpropFilterOp : public OpKernel { filter_shape = context->input(1).shape(); } +<<<<<<< HEAD +======= + OP_REQUIRES(context, input_shape.dims() == 5, + errors::InvalidArgument("input tensor must have 5 dimensions")); + OP_REQUIRES( + context, filter_shape.dims() == 5, + errors::InvalidArgument("filter_sizes tensor must have 5 dimensions")); + OP_REQUIRES( + context, out_backprop_shape.dims() == 5, + errors::InvalidArgument("out_backprop tensor must have 5 dimensions")); + OP_REQUIRES( + context, input_shape.dim_size(4) == filter_shape.dim_size(3), + errors::InvalidArgument("input and filter_sizes must have the same " + "number of channels. Got ", + input_shape.dim_size(4), " for input and ", + filter_shape.dim_size(3), " for filter_sizes")); + OP_REQUIRES( + context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4), + errors::InvalidArgument("out_backprop and filter_sizes must have the " + "same number of channels. Got ", + out_backprop_shape.dim_size(4), + " for out_backprop and ", + filter_shape.dim_size(4), " for filter_sizes")); + +>>>>>>> 311403edbc9 (Eliminate a division by 0 in 3D convolutions.) ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( @@ -804,6 +878,31 @@ class Conv3DCustomBackpropFilterOp : public OpKernel { filter_shape = context->input(1).shape(); } +<<<<<<< HEAD +======= + OP_REQUIRES(context, input_shape.dims() == 5, + errors::InvalidArgument("input tensor must have 5 dimensions")); + OP_REQUIRES( + context, filter_shape.dims() == 5, + errors::InvalidArgument("filter_sizes tensor must have 5 dimensions")); + OP_REQUIRES( + context, out_backprop_shape.dims() == 5, + errors::InvalidArgument("out_backprop tensor must have 5 dimensions")); + OP_REQUIRES( + context, input_shape.dim_size(4) == filter_shape.dim_size(3), + errors::InvalidArgument("input and filter_sizes must have the same " + "number of channels. Got ", + input_shape.dim_size(4), " for input and ", + filter_shape.dim_size(3), " for filter_sizes")); + OP_REQUIRES( + context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4), + errors::InvalidArgument("out_backprop and filter_sizes must have the " + "same number of channels. Got ", + out_backprop_shape.dim_size(4), + " for out_backprop and ", + filter_shape.dim_size(4), " for filter_sizes")); + +>>>>>>> 311403edbc9 (Eliminate a division by 0 in 3D convolutions.) ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( @@ -876,6 +975,11 @@ class Conv3DCustomBackpropFilterOp : public OpKernel { const int64 work_unit_size = size_A + size_B + size_C; + OP_REQUIRES( + context, work_unit_size > 0, + errors::InvalidArgument("input, filter_sizes and out_backprop tensors " + "must all have at least 1 element")); + const size_t shard_size = (target_working_set_size + work_unit_size - 1) / work_unit_size; From ed14bab9a67f033ba9e53de2979dfcdc1a52371b Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Tue, 20 Apr 2021 12:14:41 -0700 Subject: [PATCH 121/243] Fix overflow CHECK issue with `tf.raw_ops.AddManySparseToTensorsMap`. PiperOrigin-RevId: 369492969 Change-Id: I1d70d6c0c92e3d7a25bc3b3aa2a0c0ac9688bf81 --- .../core/kernels/sparse_tensors_map_ops.cc | 26 ++++++++++++++----- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/tensorflow/core/kernels/sparse_tensors_map_ops.cc b/tensorflow/core/kernels/sparse_tensors_map_ops.cc index 939638b37058bf..1d51823905bf66 100644 --- a/tensorflow/core/kernels/sparse_tensors_map_ops.cc +++ b/tensorflow/core/kernels/sparse_tensors_map_ops.cc @@ -21,9 +21,6 @@ limitations under the License. #include #include -#include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/framework/register_types.h" - #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/resource_mgr.h" @@ -31,6 +28,7 @@ limitations under the License. #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/gtl/inlined_vector.h" +#include "tensorflow/core/util/overflow.h" #include "tensorflow/core/util/sparse/sparse_tensor.h" namespace tensorflow { @@ -253,7 +251,22 @@ class AddManySparseToTensorsMapOp : public SparseTensorAccessingOp { errors::InvalidArgument( "Rank of input SparseTensor should be > 1, but saw rank: ", rank)); - TensorShape tensor_input_shape(input_shape->vec()); + auto input_shape_vec = input_shape->vec(); + int new_num_elements = 1; + bool overflow_ocurred = false; + for (int i = 0; i < input_shape_vec.size(); i++) { + new_num_elements = + MultiplyWithoutOverflow(new_num_elements, input_shape_vec(i)); + if (new_num_elements < 0) { + overflow_ocurred = true; + } + } + + OP_REQUIRES( + context, !overflow_ocurred, + errors::Internal("Encountered overflow from large input shape.")); + + TensorShape tensor_input_shape(input_shape_vec); gtl::InlinedVector std_order(rank); std::iota(std_order.begin(), std_order.end(), 0); SparseTensor input_st; @@ -261,8 +274,7 @@ class AddManySparseToTensorsMapOp : public SparseTensorAccessingOp { tensor_input_shape, std_order, &input_st)); - auto input_shape_t = input_shape->vec(); - const int64 N = input_shape_t(0); + const int64 N = input_shape_vec(0); Tensor sparse_handles(DT_INT64, TensorShape({N})); auto sparse_handles_t = sparse_handles.vec(); @@ -273,7 +285,7 @@ class AddManySparseToTensorsMapOp : public SparseTensorAccessingOp { // minibatch entries. TensorShape output_shape; OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape( - input_shape_t.data() + 1, + input_shape_vec.data() + 1, input_shape->NumElements() - 1, &output_shape)); // Get groups by minibatch dimension From 59b7577f91026945305c021adda68a467df2e921 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 19 Apr 2021 17:33:11 -0700 Subject: [PATCH 122/243] Prevent another division by zero. PiperOrigin-RevId: 369338598 Change-Id: I55471d363e401fdcf8d259670ad4eef672b731e2 --- tensorflow/core/kernels/conv_grad_shape_utils.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/core/kernels/conv_grad_shape_utils.cc b/tensorflow/core/kernels/conv_grad_shape_utils.cc index 7857257658fcff..ce90e866342311 100644 --- a/tensorflow/core/kernels/conv_grad_shape_utils.cc +++ b/tensorflow/core/kernels/conv_grad_shape_utils.cc @@ -126,6 +126,10 @@ Status ConvBackpropComputeDimensionsV2( // dimensions of the filter Tensor. VLOG(2) << "input vs filter_in depth " << dims->in_depth << " " << filter_shape.dim_size(num_dims - 2); + if (filter_shape.dim_size(num_dims - 2) <= 0) { + return errors ::InvalidArgument( + label, ": filter depth must be strictly greated than zero"); + } if (dims->in_depth % filter_shape.dim_size(num_dims - 2)) { return errors::InvalidArgument( label, ": input depth must be evenly divisible by filter depth"); From 58bdebb1c7b1d5685e14d830300f01b53a2f5877 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 19 Apr 2021 18:58:47 -0700 Subject: [PATCH 123/243] Fix one FPE and remove two CHECK-fails. PiperOrigin-RevId: 369349640 Change-Id: I1fedbfc2b5bab635c5cb51f103d7c9176f79831a --- tensorflow/core/kernels/quantized_conv_ops.cc | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/tensorflow/core/kernels/quantized_conv_ops.cc b/tensorflow/core/kernels/quantized_conv_ops.cc index 5b3570edff5fee..1b83c09e1f7f18 100644 --- a/tensorflow/core/kernels/quantized_conv_ops.cc +++ b/tensorflow/core/kernels/quantized_conv_ops.cc @@ -18,6 +18,8 @@ limitations under the License. #include #include +#include "tensorflow/core/platform/errors.h" + #define EIGEN_USE_THREADS #define GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK @@ -227,8 +229,12 @@ class Im2ColConvFunctor { return; } - CHECK_GT(output_width, 0); - CHECK_GT(output_height, 0); + OP_REQUIRES( + context, output_width > 0, + errors::InvalidArgument("output_width must be strictly positive")); + OP_REQUIRES( + context, output_height > 0, + errors::InvalidArgument("output_height must be strictly positive")); int filter_left_offset; int filter_top_offset; if (padding == VALID) { @@ -255,6 +261,9 @@ class Im2ColConvFunctor { // by the width, then the height. This is the standard memory order in the // image world if it helps to visualize it. const int filter_value_count = filter_width * filter_height * input_depth; + OP_REQUIRES(context, filter_value_count > 0, + errors::InvalidArgument( + "filter patch must contain at least one element")); const int64 patches_per_chunk = kMaxChunkSize / (filter_value_count * sizeof(T1)); const int64 chunk_value_count = From 781f6d685ada855359970c40650a0d497ea9668f Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 20 Apr 2021 10:52:46 -0700 Subject: [PATCH 124/243] Fix one division by zero PiperOrigin-RevId: 369474832 Change-Id: I1082858ed78d9b2e4738ce30b231955973d49e1e --- tensorflow/core/kernels/quantized_mul_op.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/core/kernels/quantized_mul_op.cc b/tensorflow/core/kernels/quantized_mul_op.cc index 4e191f162662bb..fb56f68bf14dbb 100644 --- a/tensorflow/core/kernels/quantized_mul_op.cc +++ b/tensorflow/core/kernels/quantized_mul_op.cc @@ -347,6 +347,11 @@ class QuantizedMulOp : public OpKernel { tensor_num_elements = x.NumElements(); tensor_offset = offset_x; } + if (vector_num_elements == 0) { + context->SetStatus( + errors::InvalidArgument("vector must have at least 1 element")); + return; + } VectorTensorMultiply( vector_data, vector_offset, vector_num_elements, tensor_data, tensor_offset, tensor_num_elements, z_data); From 1dcf51e75885d0048100414e5ef8016176c0b17a Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 20 Apr 2021 14:45:33 -0700 Subject: [PATCH 125/243] Remove `OP_REQUIRES` call from helper function. Since `OP_REQUIRES` macro expands to a `return;` (among other), calling it in a helper function only ends the helper function's execution earlier, but the kernel will still run from start to end. Thus, all the expected validations are actually broken/useless as the code ploughs through the next crash anyway. PiperOrigin-RevId: 369524386 Change-Id: I54f6cf9328445675ccc392e661b04336b229c9da --- .../core/kernels/sparse/sparse_cholesky_op.cc | 67 ++++++++++--------- 1 file changed, 34 insertions(+), 33 deletions(-) diff --git a/tensorflow/core/kernels/sparse/sparse_cholesky_op.cc b/tensorflow/core/kernels/sparse/sparse_cholesky_op.cc index 3786033c98cd9f..ab328672702eff 100644 --- a/tensorflow/core/kernels/sparse/sparse_cholesky_op.cc +++ b/tensorflow/core/kernels/sparse/sparse_cholesky_op.cc @@ -17,6 +17,8 @@ limitations under the License. #include #include +#include "tensorflow/core/framework/op_requires.h" + #define EIGEN_USE_THREADS #include "third_party/eigen3/Eigen/Core" @@ -82,8 +84,8 @@ class CSRSparseCholeskyCPUOp : public OpKernel { int64 num_rows; int batch_size; - ValidateInputs(ctx, *input_matrix, input_permutation_indices, &batch_size, - &num_rows); + OP_REQUIRES_OK(ctx, ValidateInputs(*input_matrix, input_permutation_indices, + &batch_size, &num_rows)); // Allocate batch pointers. Tensor batch_ptr(cpu_allocator(), DT_INT32, TensorShape({batch_size + 1})); @@ -226,49 +228,48 @@ class CSRSparseCholeskyCPUOp : public OpKernel { } private: - void ValidateInputs(OpKernelContext* ctx, - const CSRSparseMatrix& sparse_matrix, - const Tensor& permutation_indices, int* batch_size, - int64* num_rows) { - OP_REQUIRES(ctx, sparse_matrix.dtype() == DataTypeToEnum::value, - errors::InvalidArgument( - "Asked for a CSRSparseMatrix of type ", - DataTypeString(DataTypeToEnum::value), - " but saw dtype: ", DataTypeString(sparse_matrix.dtype()))); + Status ValidateInputs(const CSRSparseMatrix& sparse_matrix, + const Tensor& permutation_indices, int* batch_size, + int64* num_rows) { + if (sparse_matrix.dtype() != DataTypeToEnum::value) + return errors::InvalidArgument( + "Asked for a CSRSparseMatrix of type ", + DataTypeString(DataTypeToEnum::value), + " but saw dtype: ", DataTypeString(sparse_matrix.dtype())); const Tensor& dense_shape = sparse_matrix.dense_shape(); const int rank = dense_shape.dim_size(0); - OP_REQUIRES(ctx, rank == 2 || rank == 3, - errors::InvalidArgument("sparse matrix must have rank 2 or 3; ", - "but dense_shape has size ", rank)); + if (rank < 2 || rank > 3) + return errors::InvalidArgument("sparse matrix must have rank 2 or 3; ", + "but dense_shape has size ", rank); const int row_dim = (rank == 2) ? 0 : 1; auto dense_shape_vec = dense_shape.vec(); *num_rows = dense_shape_vec(row_dim); const int64 num_cols = dense_shape_vec(row_dim + 1); - OP_REQUIRES(ctx, *num_rows == num_cols, - errors::InvalidArgument("sparse matrix must be square; got: ", - *num_rows, " != ", num_cols)); + if (*num_rows != num_cols) + return errors::InvalidArgument( + "sparse matrix must be square; got: ", *num_rows, " != ", num_cols); const TensorShape& perm_shape = permutation_indices.shape(); - OP_REQUIRES( - ctx, perm_shape.dims() + 1 == rank, - errors::InvalidArgument( - "sparse matrix must have the same rank as permutation; got: ", rank, - " != ", perm_shape.dims(), " + 1.")); - OP_REQUIRES( - ctx, perm_shape.dim_size(rank - 2) == *num_rows, - errors::InvalidArgument( - "permutation must have the same number of elements in each batch " - "as the number of rows in sparse matrix; got: ", - perm_shape.dim_size(rank - 2), " != ", *num_rows)); + if (perm_shape.dims() + 1 != rank) + return errors::InvalidArgument( + "sparse matrix must have the same rank as permutation; got: ", rank, + " != ", perm_shape.dims(), " + 1."); + if (perm_shape.dim_size(rank - 2) != *num_rows) + return errors::InvalidArgument( + "permutation must have the same number of elements in each batch " + "as the number of rows in sparse matrix; got: ", + perm_shape.dim_size(rank - 2), " != ", *num_rows); *batch_size = sparse_matrix.batch_size(); if (*batch_size > 1) { - OP_REQUIRES( - ctx, perm_shape.dim_size(0) == *batch_size, - errors::InvalidArgument("permutation must have the same batch size " - "as sparse matrix; got: ", - perm_shape.dim_size(0), " != ", *batch_size)); + if (perm_shape.dim_size(0) != *batch_size) + return errors::InvalidArgument( + "permutation must have the same batch size " + "as sparse matrix; got: ", + perm_shape.dim_size(0), " != ", *batch_size); } + + return Status::OK(); } }; From 3089d54f8dcbdb6a0feefecf28d69e35ed6f1fb6 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 21 Apr 2021 16:20:48 -0700 Subject: [PATCH 126/243] Fix heap buffer overflow caused by rounding. This was hard to fix. Due to the way we compute the pixels that influence an output pixel in resized images, for certain input configuration we might have issued a read to a pixel that is outside of boundary of the original image. This is because of floating errors that affected truncation results. PiperOrigin-RevId: 369757871 Change-Id: If89425fff930983829a2168203c11858883eebc9 --- tensorflow/core/kernels/quantized_resize_bilinear_op.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/core/kernels/quantized_resize_bilinear_op.cc b/tensorflow/core/kernels/quantized_resize_bilinear_op.cc index 59e33d1ac31477..620005ddf36bd2 100644 --- a/tensorflow/core/kernels/quantized_resize_bilinear_op.cc +++ b/tensorflow/core/kernels/quantized_resize_bilinear_op.cc @@ -64,6 +64,8 @@ inline void ComputeInterpolationWeights( std::max(static_cast(in_f), static_cast(0)); interpolation->upper[i] = std::min(static_cast(std::ceil(in)), in_size - 1); + interpolation->lower[i] = + std::min(interpolation->lower[i], interpolation->upper[i]); interpolation->lerp[i] = in - in_f; interpolation->ilerp[i] = static_cast((in - in_f) * (1 << resolution)); From 5818bd926ce20fd7794704115abe0ef0f64c9337 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Wed, 21 Apr 2021 13:03:47 -0700 Subject: [PATCH 127/243] Fix null CHECK issue with `tf.raw_ops.EncodePng`. PiperOrigin-RevId: 369717714 Change-Id: I24136cd99c20b8466671f4f93b670ef6f6dd1250 --- tensorflow/core/kernels/encode_png_op.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/core/kernels/encode_png_op.cc b/tensorflow/core/kernels/encode_png_op.cc index 8dbe1d377df5c6..09bcdbe5e3db0b 100644 --- a/tensorflow/core/kernels/encode_png_op.cc +++ b/tensorflow/core/kernels/encode_png_op.cc @@ -54,6 +54,8 @@ class EncodePngOp : public OpKernel { OP_REQUIRES(context, image.dims() == 3, errors::InvalidArgument("image must be 3-dimensional", image.shape().DebugString())); + OP_REQUIRES(context, image.NumElements() > 0, + errors::Internal("Invalid image provided.")); OP_REQUIRES( context, FastBoundsCheck(image.NumElements(), std::numeric_limits::max()), From 76ee87cbb100c600cddf6a26f697a30a3b50538d Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Wed, 21 Apr 2021 15:57:36 -0700 Subject: [PATCH 128/243] Fix overflow CHECK issue with `tf.raw_ops.DrawBoundingBoxes`. PiperOrigin-RevId: 369753591 Change-Id: I3b45fc98ee0d28a3c20b7e9c995aa647c976ec40 --- .../core/kernels/draw_bounding_box_op.cc | 48 ++++++++++++++----- 1 file changed, 36 insertions(+), 12 deletions(-) diff --git a/tensorflow/core/kernels/draw_bounding_box_op.cc b/tensorflow/core/kernels/draw_bounding_box_op.cc index 30de99b7d560a2..73db76333f0862 100644 --- a/tensorflow/core/kernels/draw_bounding_box_op.cc +++ b/tensorflow/core/kernels/draw_bounding_box_op.cc @@ -147,22 +147,46 @@ class DrawBoundingBoxesOp : public OpKernel { // At this point, {min,max}_box_{row,col}_clamp are inside the // image. - CHECK_GE(min_box_row_clamp, 0); - CHECK_GE(max_box_row_clamp, 0); - CHECK_LT(min_box_row_clamp, height); - CHECK_LT(max_box_row_clamp, height); - CHECK_GE(min_box_col_clamp, 0); - CHECK_GE(max_box_col_clamp, 0); - CHECK_LT(min_box_col_clamp, width); - CHECK_LT(max_box_col_clamp, width); + OP_REQUIRES( + context, min_box_row_clamp >= 0, + errors::InvalidArgument("Min box row clamp is less than 0.")); + OP_REQUIRES( + context, max_box_row_clamp >= 0, + errors::InvalidArgument("Max box row clamp is less than 0.")); + OP_REQUIRES(context, min_box_row_clamp <= height, + errors::InvalidArgument( + "Min box row clamp is greater than height.")); + OP_REQUIRES(context, max_box_row_clamp <= height, + errors::InvalidArgument( + "Max box row clamp is greater than height.")); + + OP_REQUIRES( + context, min_box_col_clamp >= 0, + errors::InvalidArgument("Min box col clamp is less than 0.")); + OP_REQUIRES( + context, max_box_col_clamp >= 0, + errors::InvalidArgument("Max box col clamp is less than 0.")); + OP_REQUIRES(context, min_box_col_clamp <= width, + errors::InvalidArgument( + "Min box col clamp is greater than width.")); + OP_REQUIRES(context, max_box_col_clamp <= width, + errors::InvalidArgument( + "Max box col clamp is greater than width.")); // At this point, the min_box_row and min_box_col are either // in the image or above/left of it, and max_box_row and // max_box_col are either in the image or below/right or it. - CHECK_LT(min_box_row, height); - CHECK_GE(max_box_row, 0); - CHECK_LT(min_box_col, width); - CHECK_GE(max_box_col, 0); + + OP_REQUIRES( + context, min_box_row <= height, + errors::InvalidArgument("Min box row is greater than height.")); + OP_REQUIRES(context, max_box_row >= 0, + errors::InvalidArgument("Max box row is less than 0.")); + OP_REQUIRES( + context, min_box_col <= width, + errors::InvalidArgument("Min box col is greater than width.")); + OP_REQUIRES(context, max_box_col >= 0, + errors::InvalidArgument("Max box col is less than 0.")); // Draw top line. if (min_box_row >= 0) { From 271f936010b418c8bc809df2f2eb352e6fd05bb6 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 21 Apr 2021 16:15:46 -0700 Subject: [PATCH 129/243] Validate inputs to `QuantizedMul` PiperOrigin-RevId: 369756982 Change-Id: I00d960cc3b9316fd7a86bd37a44e341c96e17624 --- tensorflow/core/kernels/quantized_mul_op.cc | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/tensorflow/core/kernels/quantized_mul_op.cc b/tensorflow/core/kernels/quantized_mul_op.cc index 4e191f162662bb..b5353822396230 100644 --- a/tensorflow/core/kernels/quantized_mul_op.cc +++ b/tensorflow/core/kernels/quantized_mul_op.cc @@ -284,10 +284,22 @@ class QuantizedMulOp : public OpKernel { void Compute(OpKernelContext* context) override { const Tensor& x = context->input(0); const Tensor& y = context->input(1); - const float min_x = context->input(2).flat()(0); - const float max_x = context->input(3).flat()(0); - const float min_y = context->input(4).flat()(0); - const float max_y = context->input(5).flat()(0); + auto& min_x_tensor = context->input(2); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_x_tensor.shape()), + errors::InvalidArgument("min_x must be a scalar")); + const float min_x = min_x_tensor.flat()(0); + auto& max_x_tensor = context->input(3); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_x_tensor.shape()), + errors::InvalidArgument("max_x must be a scalar")); + const float max_x = max_x_tensor.flat()(0); + auto& min_y_tensor = context->input(4); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_y_tensor.shape()), + errors::InvalidArgument("min_y must be a scalar")); + const float min_y = min_y_tensor.flat()(0); + auto& max_y_tensor = context->input(5); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_y_tensor.shape()), + errors::InvalidArgument("max_y must be a scalar")); + const float max_y = max_y_tensor.flat()(0); BCast bcast(BCast::FromShape(x.shape()), BCast::FromShape(y.shape())); if (!bcast.IsValid()) { From 88f0f60ef6188c1afd5e217cfcfbb374f802f7a3 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Wed, 21 Apr 2021 16:04:48 -0700 Subject: [PATCH 130/243] Fix overflow CHECK issue with `tf.raw_ops.AddManySparseToTensorsMap`. PiperOrigin-RevId: 369755048 Change-Id: Ia1663e49ef8387d84baa2c15dccf3506adffde84 --- tensorflow/core/kernels/sparse_concat_op.cc | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/tensorflow/core/kernels/sparse_concat_op.cc b/tensorflow/core/kernels/sparse_concat_op.cc index 3b2a0cb0f34ed3..d49f92ea556eb2 100644 --- a/tensorflow/core/kernels/sparse_concat_op.cc +++ b/tensorflow/core/kernels/sparse_concat_op.cc @@ -27,6 +27,7 @@ limitations under the License. #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/gtl/inlined_vector.h" +#include "tensorflow/core/util/overflow.h" #include "tensorflow/core/util/sparse/sparse_tensor.h" namespace tensorflow { @@ -66,13 +67,32 @@ class SparseConcatOp : public OpKernel { OP_REQUIRES(context, shapes.size() == N, errors::InvalidArgument("Expected ", N, " input shapes, got ", shapes.size())); + bool overflow_ocurred = false; for (int i = 0; i < N; i++) { + int64 new_num_elements = 1; OP_REQUIRES(context, TensorShapeUtils::IsVector(shapes[i].shape()), errors::InvalidArgument( "Input shapes should be a vector but received shape ", shapes[i].shape().DebugString(), " at position ", i)); + auto input_shape_vector = shapes[i].vec(); + for (int j = 0; j < input_shape_vector.size(); j++) { + new_num_elements = + MultiplyWithoutOverflow(new_num_elements, input_shape_vector(j)); + if (new_num_elements < 0) { + overflow_ocurred = true; + break; + } + } + + if (overflow_ocurred) { + break; + } } + OP_REQUIRES( + context, !overflow_ocurred, + errors::Internal("Encountered overflow from large input shape.")); + const TensorShape input_shape(shapes[0].vec()); const int input_rank = input_shape.dims(); const int concat_dim = (concat_dim_attr_ < 0) From 877d5fc1b2306af6202473b2ad62932b9ea953b7 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 21 Apr 2021 17:00:39 -0700 Subject: [PATCH 131/243] Cherrypick:Validate min and max arguments to . --- .../core/kernels/quantized_resize_bilinear_op.cc | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tensorflow/core/kernels/quantized_resize_bilinear_op.cc b/tensorflow/core/kernels/quantized_resize_bilinear_op.cc index 59e33d1ac31477..26ecf207830f75 100644 --- a/tensorflow/core/kernels/quantized_resize_bilinear_op.cc +++ b/tensorflow/core/kernels/quantized_resize_bilinear_op.cc @@ -701,8 +701,14 @@ class QuantizedResizeBilinearOp : public OpKernel { void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); - const float in_min = context->input(2).flat()(0); - const float in_max = context->input(3).flat()(0); + const auto& in_min_tensor = context->input(2); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(in_min_tensor.shape()), + errors::InvalidArgument("min must be a scalar")); + const float in_min = in_min_tensor.flat()(0); + const auto& in_max_tensor = context->input(3); + OP_REQUIRES(context, TensorShapeUtils::IsScalar(in_max_tensor.shape()), + errors::InvalidArgument("max must be a scalar")); + const float in_max = in_max_tensor.flat()(0); ImageResizerState st(align_corners_, false); st.ValidateAndCreateOutput(context, input); From 4cf07bffd1ddc7e1dcad75ee84509bfb886da258 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 21 Apr 2021 18:11:15 -0700 Subject: [PATCH 132/243] Validate arguments to `QuantizedReshape`. Ensure that validations from `Reshape` also terminate `QuantizedReshape` on failure. PiperOrigin-RevId: 369775421 Change-Id: If8c5342267aceea65b7cb83a4b183304886f1ce8 --- .../core/kernels/quantized_reshape_op.cc | 25 +++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/tensorflow/core/kernels/quantized_reshape_op.cc b/tensorflow/core/kernels/quantized_reshape_op.cc index bd76c94edeea7a..682f4aaa1f79e7 100644 --- a/tensorflow/core/kernels/quantized_reshape_op.cc +++ b/tensorflow/core/kernels/quantized_reshape_op.cc @@ -17,6 +17,7 @@ limitations under the License. #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" +#include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/reshape_op.h" @@ -30,9 +31,29 @@ class QuantizedReshapeOp : public ReshapeOp { void Compute(OpKernelContext* ctx) override { // This call processes inputs 1 and 2 to write output 0. ReshapeOp::Compute(ctx); + if (!ctx->status().ok()) { + return; + } + + const auto& input_min_float_tensor = ctx->input(2); + const auto& input_min_float_shape = input_min_float_tensor.shape(); + OP_REQUIRES(ctx, + TensorShapeUtils::IsScalar(input_min_float_shape) || + (TensorShapeUtils::IsVector(input_min_float_shape) && + (input_min_float_shape.dim_size(0) == 1)), + errors::InvalidArgument( + "input_min must be a scalar or a vector of 1 element")); + const float input_min_float = input_min_float_tensor.flat()(0); + const auto& input_max_float_tensor = ctx->input(3); + const auto& input_max_float_shape = input_max_float_tensor.shape(); + OP_REQUIRES(ctx, + TensorShapeUtils::IsScalar(input_max_float_shape) || + (TensorShapeUtils::IsVector(input_max_float_shape) && + (input_max_float_shape.dim_size(0) == 1)), + errors::InvalidArgument( + "input_max must be a scalar or a vector of 1 element")); + const float input_max_float = input_max_float_tensor.flat()(0); - const float input_min_float = ctx->input(2).flat()(0); - const float input_max_float = ctx->input(3).flat()(0); Tensor* output_min = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(1, TensorShape({}), &output_min)); output_min->flat()(0) = input_min_float; From 95c63a0513f092afa3abb67f12b3d7d596501248 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 21 Apr 2021 17:50:10 -0700 Subject: [PATCH 133/243] Fix issues in Conv2DBackpropFilter. PiperOrigin-RevId: 369772454 Change-Id: I49b465f2ae2ce91def61b56cea8000197d5177d8 --- tensorflow/core/kernels/conv_grad_filter_ops.cc | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tensorflow/core/kernels/conv_grad_filter_ops.cc b/tensorflow/core/kernels/conv_grad_filter_ops.cc index 594dbd0d0df74a..e2681cf8ac9154 100644 --- a/tensorflow/core/kernels/conv_grad_filter_ops.cc +++ b/tensorflow/core/kernels/conv_grad_filter_ops.cc @@ -496,6 +496,14 @@ class Conv2DCustomBackpropFilterOp : public OpKernel { const int filter_total_size = dims.spatial_dims[0].filter_size * dims.spatial_dims[1].filter_size * dims.in_depth; + OP_REQUIRES( + context, + filter_total_size * dims.out_depth == filter_backprop->NumElements(), + errors::InvalidArgument( + "filter_size does not have enough elements, requested ", + filter_total_size * dims.out_depth, ", got ", + filter_backprop->NumElements())); + // The output image size is the spatial size of the output. const int output_image_size = dims.spatial_dims[0].output_size * dims.spatial_dims[1].output_size; @@ -519,6 +527,11 @@ class Conv2DCustomBackpropFilterOp : public OpKernel { const size_t work_unit_size = size_A + size_B + size_C; + OP_REQUIRES( + context, work_unit_size != 0, + errors::InvalidArgument( + "Work size for convolution would be 0, which is not acceptable")); + const size_t shard_size = (target_working_set_size + work_unit_size - 1) / work_unit_size; From 45f35284fbe3ef9621fa8eb96765cac2df575278 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 22 Apr 2021 13:29:54 -0700 Subject: [PATCH 134/243] Enhance validation of ngram op and handle case of 0 tokens. PiperOrigin-RevId: 369940178 Change-Id: Ia82f42c09d14efe76e7dc013505b832a42282f0b --- tensorflow/core/kernels/string_ngrams_op.cc | 52 +++++++++++++++---- .../core/kernels/string_ngrams_op_test.cc | 34 ++++++++++++ 2 files changed, 75 insertions(+), 11 deletions(-) diff --git a/tensorflow/core/kernels/string_ngrams_op.cc b/tensorflow/core/kernels/string_ngrams_op.cc index 62d5d22413544f..1246c8ef0908b1 100644 --- a/tensorflow/core/kernels/string_ngrams_op.cc +++ b/tensorflow/core/kernels/string_ngrams_op.cc @@ -61,16 +61,28 @@ class StringNGramsOp : public tensorflow::OpKernel { OP_REQUIRES_OK(context, context->input("data_splits", &splits)); const auto& splits_vec = splits->flat(); - // Validate that the splits are valid indices into data + // Validate that the splits are valid indices into data, only if there are + // splits specified. const int input_data_size = data->flat().size(); const int splits_vec_size = splits_vec.size(); - for (int i = 0; i < splits_vec_size; ++i) { - bool valid_splits = splits_vec(i) >= 0; - valid_splits = valid_splits && (splits_vec(i) <= input_data_size); - OP_REQUIRES( - context, valid_splits, - errors::InvalidArgument("Invalid split value ", splits_vec(i), - ", must be in [0,", input_data_size, "]")); + if (splits_vec_size > 0) { + int prev_split = splits_vec(0); + OP_REQUIRES(context, prev_split == 0, + errors::InvalidArgument("First split value must be 0, got ", + prev_split)); + for (int i = 1; i < splits_vec_size; ++i) { + bool valid_splits = splits_vec(i) >= prev_split; + valid_splits = valid_splits && (splits_vec(i) <= input_data_size); + OP_REQUIRES(context, valid_splits, + errors::InvalidArgument( + "Invalid split value ", splits_vec(i), ", must be in [", + prev_split, ", ", input_data_size, "]")); + prev_split = splits_vec(i); + } + OP_REQUIRES(context, prev_split == input_data_size, + errors::InvalidArgument( + "Last split value must be data size. Expected ", + input_data_size, ", got ", prev_split)); } // If there is no data or size, return an empty RT. @@ -173,13 +185,31 @@ class StringNGramsOp : public tensorflow::OpKernel { ngram->append(left_pad_); ngram->append(separator_); } + // Only output first num_tokens - 1 pairs of data and separator for (int n = 0; n < num_tokens - 1; ++n) { ngram->append(data[data_start_index + n]); ngram->append(separator_); } - ngram->append(data[data_start_index + num_tokens - 1]); - for (int n = 0; n < right_padding; ++n) { - ngram->append(separator_); + // Handle case when there are no tokens or no right padding as these can + // result in consecutive separators. + if (num_tokens > 0) { + // If we have tokens, then output last and then pair each separator with + // the right padding that follows, to ensure ngram ends either with the + // token or with the right pad. + ngram->append(data[data_start_index + num_tokens - 1]); + for (int n = 0; n < right_padding; ++n) { + ngram->append(separator_); + ngram->append(right_pad_); + } + } else { + // If we don't have tokens, then the last item inserted into the ngram + // has been the separator from the left padding loop above. Hence, + // output right pad and separator and make sure to finish with a + // padding, not a separator. + for (int n = 0; n < right_padding - 1; ++n) { + ngram->append(right_pad_); + ngram->append(separator_); + } ngram->append(right_pad_); } diff --git a/tensorflow/core/kernels/string_ngrams_op_test.cc b/tensorflow/core/kernels/string_ngrams_op_test.cc index b89de9ad16dab8..0d52283bd8fb9d 100644 --- a/tensorflow/core/kernels/string_ngrams_op_test.cc +++ b/tensorflow/core/kernels/string_ngrams_op_test.cc @@ -542,6 +542,40 @@ TEST_F(NgramKernelTest, TestEmptyInput) { assert_int64_equal(expected_splits, *GetOutput(1)); } +TEST_F(NgramKernelTest, TestNoTokens) { + MakeOp("|", {3}, "L", "R", -1, false); + // Batch items are: + // 0: + // 1: "a" + AddInputFromArray(TensorShape({1}), {"a"}); + AddInputFromArray(TensorShape({3}), {0, 0, 1}); + TF_ASSERT_OK(RunOpKernel()); + + std::vector expected_values( + {"L|L|R", "L|R|R", // no input in first split + "L|L|a", "L|a|R", "a|R|R"}); // second split + std::vector expected_splits({0, 2, 5}); + + assert_string_equal(expected_values, *GetOutput(0)); + assert_int64_equal(expected_splits, *GetOutput(1)); +} + +TEST_F(NgramKernelTest, TestNoTokensNoPad) { + MakeOp("|", {3}, "", "", 0, false); + // Batch items are: + // 0: + // 1: "a" + AddInputFromArray(TensorShape({1}), {"a"}); + AddInputFromArray(TensorShape({3}), {0, 0, 1}); + TF_ASSERT_OK(RunOpKernel()); + + std::vector expected_values({}); + std::vector expected_splits({0, 0, 0}); + + assert_string_equal(expected_values, *GetOutput(0)); + assert_int64_equal(expected_splits, *GetOutput(1)); +} + TEST_F(NgramKernelTest, ShapeFn) { ShapeInferenceTestOp op("StringNGrams"); INFER_OK(op, "?;?", "[?];[?]"); From b7c33ff91b756a8fb8b9c383f1b990d7b4432743 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Thu, 22 Apr 2021 15:11:05 -0700 Subject: [PATCH 135/243] Fix `tf.raw_ops.CTCGreedyDecoder` CHECK failure. PiperOrigin-RevId: 369960465 Change-Id: If0b8b3264d5a47a24ac0970ed7b81ce6b4921fae --- tensorflow/core/kernels/ctc_decoder_ops.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/core/kernels/ctc_decoder_ops.cc b/tensorflow/core/kernels/ctc_decoder_ops.cc index 517612eecb6057..1075b17694ddcf 100644 --- a/tensorflow/core/kernels/ctc_decoder_ops.cc +++ b/tensorflow/core/kernels/ctc_decoder_ops.cc @@ -224,6 +224,8 @@ class CTCGreedyDecoderOp : public OpKernel { int prev_indices = -1; for (int t = 0; t < seq_len_t(b); ++t) { int max_class_indices; + OP_REQUIRES(ctx, input_list_t[t].dimension(1) > 0, + errors::InvalidArgument("Invalid input dimensions.")); log_prob_t(b, 0) += -RowMax(input_list_t[t], b, &max_class_indices); if (max_class_indices != blank_index && From 27cdadab71c9ac6dbf5d26c352822ac74e4326e4 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 23 Apr 2021 10:41:12 -0700 Subject: [PATCH 136/243] Fix crash in `SparseTensorToCSRSparseMatrixCPUFunctor` PiperOrigin-RevId: 370110290 Change-Id: I4451e92661a55c2180f80d38b67a9b50bf5edec5 --- tensorflow/core/kernels/sparse/kernels.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tensorflow/core/kernels/sparse/kernels.cc b/tensorflow/core/kernels/sparse/kernels.cc index 0eea9f1feed5c3..dff9aeb83ccfec 100644 --- a/tensorflow/core/kernels/sparse/kernels.cc +++ b/tensorflow/core/kernels/sparse/kernels.cc @@ -22,6 +22,7 @@ limitations under the License. #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" +#include "tensorflow/core/platform/errors.h" namespace tensorflow { namespace functor { @@ -63,6 +64,11 @@ Status SparseTensorToCSRSparseMatrixCPUFunctor::operator()( for (int64 i = 0; i < total_nnz; ++i) { // For now, the rows pointers store the corresponding row counts. + int64 ix = indices(i, 0) + 1; + if (ix >= csr_row_ptr.size()) { + return errors::InvalidArgument("Got an index ", ix, + " that is outside of csr_row_ptr"); + } csr_row_ptr(indices(i, 0) + 1) += 1; csr_col_ind(i) = indices(i, 1); } From ab3688e94d199011a95f061162fcfea05421e99a Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 23 Apr 2021 11:11:39 -0700 Subject: [PATCH 137/243] Prevent division by 0 in `QuantizedBiasAdd`. PiperOrigin-RevId: 370117454 Change-Id: I3804e2ac8dcc6d3afcc92e27853e2325a017ca4d --- tensorflow/core/kernels/quantized_bias_add_op.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/core/kernels/quantized_bias_add_op.cc b/tensorflow/core/kernels/quantized_bias_add_op.cc index 5457d290c2559c..db0e21a498011d 100644 --- a/tensorflow/core/kernels/quantized_bias_add_op.cc +++ b/tensorflow/core/kernels/quantized_bias_add_op.cc @@ -56,6 +56,8 @@ class QuantizedBiasAddOp : public OpKernel { "Must provide as many biases as the last dimension " "of the input tensor: ", bias.shape().DebugString(), " vs. ", input.shape().DebugString())); + OP_REQUIRES(context, bias.NumElements() > 0, + errors::InvalidArgument("Must provide at least 1 bias")); Tensor* output = nullptr; OP_REQUIRES_OK(context, From 57f9718251e7b39981308febf91a1650461445b3 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 23 Apr 2021 11:40:06 -0700 Subject: [PATCH 138/243] Add missing validation in `QuantizedBatchNormWithGlobalNormalization` PiperOrigin-RevId: 370123451 Change-Id: Id234d6dab1ec21230bb8e503dba30f899af87f33 --- .../core/kernels/quantized_batch_norm_op.cc | 77 ++++++++++++++++--- 1 file changed, 67 insertions(+), 10 deletions(-) diff --git a/tensorflow/core/kernels/quantized_batch_norm_op.cc b/tensorflow/core/kernels/quantized_batch_norm_op.cc index b03da7ad17fab4..6dfe07f97a4007 100644 --- a/tensorflow/core/kernels/quantized_batch_norm_op.cc +++ b/tensorflow/core/kernels/quantized_batch_norm_op.cc @@ -173,20 +173,50 @@ class QuantizedBatchNormOp : public OpKernel { void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); - const float input_min = context->input(1).flat()(0); - const float input_max = context->input(2).flat()(0); + const auto& input_min_tensor = context->input(1); + OP_REQUIRES(context, input_min_tensor.NumElements() == 1, + errors::InvalidArgument("input_min must have 1 element")); + const float input_min = input_min_tensor.flat()(0); + const auto& input_max_tensor = context->input(2); + OP_REQUIRES(context, input_max_tensor.NumElements() == 1, + errors::InvalidArgument("input_max must have 1 element")); + const float input_max = input_max_tensor.flat()(0); const Tensor& mean = context->input(3); - const float mean_min = context->input(4).flat()(0); - const float mean_max = context->input(5).flat()(0); + const auto& mean_min_tensor = context->input(4); + OP_REQUIRES(context, mean_min_tensor.NumElements() == 1, + errors::InvalidArgument("mean_min must have 1 element")); + const float mean_min = mean_min_tensor.flat()(0); + const auto& mean_max_tensor = context->input(5); + OP_REQUIRES(context, mean_max_tensor.NumElements() == 1, + errors::InvalidArgument("mean_max must have 1 element")); + const float mean_max = mean_max_tensor.flat()(0); const Tensor& var = context->input(6); - const float var_min = context->input(7).flat()(0); - const float var_max = context->input(8).flat()(0); + const auto& var_min_tensor = context->input(7); + OP_REQUIRES(context, var_min_tensor.NumElements() == 1, + errors::InvalidArgument("var_min must have 1 element")); + const float var_min = var_min_tensor.flat()(0); + const auto& var_max_tensor = context->input(8); + OP_REQUIRES(context, var_max_tensor.NumElements() == 1, + errors::InvalidArgument("var_max must have 1 element")); + const float var_max = var_max_tensor.flat()(0); const Tensor& beta = context->input(9); - const float beta_min = context->input(10).flat()(0); - const float beta_max = context->input(11).flat()(0); + const auto& beta_min_tensor = context->input(10); + OP_REQUIRES(context, beta_min_tensor.NumElements() == 1, + errors::InvalidArgument("beta_min must have 1 element")); + const float beta_min = beta_min_tensor.flat()(0); + const auto& beta_max_tensor = context->input(11); + OP_REQUIRES(context, beta_max_tensor.NumElements() == 1, + errors::InvalidArgument("beta_max must have 1 element")); + const float beta_max = beta_max_tensor.flat()(0); const Tensor& gamma = context->input(12); - const float gamma_min = context->input(13).flat()(0); - const float gamma_max = context->input(14).flat()(0); + const auto& gamma_min_tensor = context->input(13); + OP_REQUIRES(context, gamma_min_tensor.NumElements() == 1, + errors::InvalidArgument("gamma_min must have 1 element")); + const float gamma_min = gamma_min_tensor.flat()(0); + const auto& gamma_max_tensor = context->input(14); + OP_REQUIRES(context, gamma_max_tensor.NumElements() == 1, + errors::InvalidArgument("gamma_max must have 1 element")); + const float gamma_max = gamma_max_tensor.flat()(0); OP_REQUIRES(context, input.dims() == 4, errors::InvalidArgument("input must be 4-dimensional", @@ -203,6 +233,33 @@ class QuantizedBatchNormOp : public OpKernel { OP_REQUIRES(context, gamma.dims() == 1, errors::InvalidArgument("gamma must be 1-dimensional", gamma.shape().DebugString())); + OP_REQUIRES(context, mean.NumElements() > 1, + errors::InvalidArgument("Must have at least a mean value", + gamma.shape().DebugString())); + OP_REQUIRES(context, mean.NumElements() > 1, + errors::InvalidArgument("Must have at least a mean value")); + const auto last_dim = input.shape().dims() - 1; + OP_REQUIRES(context, + mean.shape().dim_size(0) == input.shape().dim_size(last_dim), + errors::InvalidArgument("Must provide as many means as the " + "last dimension of the input tensor: ", + mean.shape().DebugString(), " vs. ", + input.shape().DebugString())); + OP_REQUIRES( + context, mean.shape().dim_size(0) == var.shape().dim_size(0), + errors::InvalidArgument( + "Mean and variance tensors must have the same shape: ", + mean.shape().DebugString(), " vs. ", var.shape().DebugString())); + OP_REQUIRES( + context, mean.shape().dim_size(0) == beta.shape().dim_size(0), + errors::InvalidArgument( + "Mean and beta tensors must have the same shape: ", + mean.shape().DebugString(), " vs. ", beta.shape().DebugString())); + OP_REQUIRES( + context, mean.shape().dim_size(0) == gamma.shape().dim_size(0), + errors::InvalidArgument( + "Mean and gamma tensors must have the same shape: ", + mean.shape().DebugString(), " vs. ", gamma.shape().DebugString())); Tensor* output = nullptr; OP_REQUIRES_OK(context, From cac81f4123b88df0b4c3c6dfbce948c87e98c8ec Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 23 Apr 2021 12:00:12 -0700 Subject: [PATCH 139/243] Validate work in `QuantizedAdd`, ensure at least one element. PiperOrigin-RevId: 370127996 Change-Id: I57c6f3e01afdeada84737820a131590137463855 --- tensorflow/core/kernels/quantized_add_op.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/core/kernels/quantized_add_op.cc b/tensorflow/core/kernels/quantized_add_op.cc index 55c69de7d3ea6c..b186f00f15c061 100644 --- a/tensorflow/core/kernels/quantized_add_op.cc +++ b/tensorflow/core/kernels/quantized_add_op.cc @@ -538,6 +538,8 @@ class QuantizedAddOp : public OpKernel { tensor_min = min_x; tensor_max = max_x; } + OP_REQUIRES(context, vector_num_elements > 0, + errors::InvalidArgument("Must have some elements to add")); VectorTensorAddition( vector_data, vector_min, vector_max, vector_num_elements, tensor_data, tensor_min, tensor_max, tensor_num_elements, min_z_value, max_z_value, From 0e8c73892e0160a668a823d6f42c95eba88797bd Mon Sep 17 00:00:00 2001 From: Laura Pak Date: Thu, 29 Apr 2021 08:38:16 -0700 Subject: [PATCH 140/243] Fix divide by zero error in `fractional_pool_common.cc`. PiperOrigin-RevId: 371126221 Change-Id: Iea4b2f363aaeb116ab460e3bc592c687484af344 --- tensorflow/core/kernels/fractional_avg_pool_op.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/core/kernels/fractional_avg_pool_op.cc b/tensorflow/core/kernels/fractional_avg_pool_op.cc index dfc2382624e3fa..b8a5083e5340f1 100644 --- a/tensorflow/core/kernels/fractional_avg_pool_op.cc +++ b/tensorflow/core/kernels/fractional_avg_pool_op.cc @@ -80,6 +80,10 @@ class FractionalAvgPoolOp : public OpKernel { std::vector output_size(tensor_in_and_out_dims); for (int i = 0; i < tensor_in_and_out_dims; ++i) { input_size[i] = tensor_in.dim_size(i); + OP_REQUIRES( + context, pooling_ratio_[i] <= input_size[i], + errors::InvalidArgument( + "Pooling ratio cannot be bigger than input tensor dim size.")); } // Output size. for (int i = 0; i < tensor_in_and_out_dims; ++i) { From 081f54161f9b10ef8591e4e092288e97473e457d Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Sun, 23 May 2021 16:10:59 -0700 Subject: [PATCH 141/243] Apply suggestions from code review --- tensorflow/core/kernels/conv_grad_ops_3d.cc | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tensorflow/core/kernels/conv_grad_ops_3d.cc b/tensorflow/core/kernels/conv_grad_ops_3d.cc index 08e832760924e6..cc6e3ffd90075c 100644 --- a/tensorflow/core/kernels/conv_grad_ops_3d.cc +++ b/tensorflow/core/kernels/conv_grad_ops_3d.cc @@ -741,8 +741,6 @@ class Conv3DBackpropFilterOp : public OpKernel { filter_shape = context->input(1).shape(); } -<<<<<<< HEAD -======= OP_REQUIRES(context, input_shape.dims() == 5, errors::InvalidArgument("input tensor must have 5 dimensions")); OP_REQUIRES( @@ -765,7 +763,6 @@ class Conv3DBackpropFilterOp : public OpKernel { " for out_backprop and ", filter_shape.dim_size(4), " for filter_sizes")); ->>>>>>> 311403edbc9 (Eliminate a division by 0 in 3D convolutions.) ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( @@ -878,8 +875,6 @@ class Conv3DCustomBackpropFilterOp : public OpKernel { filter_shape = context->input(1).shape(); } -<<<<<<< HEAD -======= OP_REQUIRES(context, input_shape.dims() == 5, errors::InvalidArgument("input tensor must have 5 dimensions")); OP_REQUIRES( @@ -902,7 +897,6 @@ class Conv3DCustomBackpropFilterOp : public OpKernel { " for out_backprop and ", filter_shape.dim_size(4), " for filter_sizes")); ->>>>>>> 311403edbc9 (Eliminate a division by 0 in 3D convolutions.) ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( From fe0e8b46ceece26345d462c5ffca3ae18c69522c Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Mon, 26 Apr 2021 16:40:49 -0700 Subject: [PATCH 142/243] Fix `tf.raw_ops.SparseAdd ` invalid memory access failure. PiperOrigin-RevId: 370568774 Change-Id: I5f73b31c865f2948a1c8dfb7ebd22b3cfb6405bf --- tensorflow/core/kernels/sparse_add_op.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/core/kernels/sparse_add_op.cc b/tensorflow/core/kernels/sparse_add_op.cc index d16317af671dd6..87a17b08820be0 100644 --- a/tensorflow/core/kernels/sparse_add_op.cc +++ b/tensorflow/core/kernels/sparse_add_op.cc @@ -14,6 +14,7 @@ limitations under the License. ==============================================================================*/ #include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_util.h" @@ -101,6 +102,10 @@ class SparseAddOp : public OpKernel { std::vector out_values; const int num_dims = a_shape->dim_size(0); + OP_REQUIRES(ctx, num_dims > 0, + errors::InvalidArgument("Invalid input_a shape. Received: ", + a_shape->DebugString())); + // The input and output sparse tensors are assumed to be ordered along // increasing dimension number. int64 i = 0, j = 0; From 44a32b1f98d0350a7bfaeb20517453ad672738d3 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Mon, 26 Apr 2021 17:32:41 -0700 Subject: [PATCH 143/243] Fix `tf.raw_ops.QuantizeAndDequantizeV3` array index failure. PiperOrigin-RevId: 370577691 Change-Id: Ifeae64212f6bcd139435824fa2748d1329213c4c --- tensorflow/core/kernels/quantize_and_dequantize_op.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/core/kernels/quantize_and_dequantize_op.cc b/tensorflow/core/kernels/quantize_and_dequantize_op.cc index 8f71d09c0832e7..a66ba66a98a84f 100644 --- a/tensorflow/core/kernels/quantize_and_dequantize_op.cc +++ b/tensorflow/core/kernels/quantize_and_dequantize_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ +#include "tensorflow/core/framework/op_requires.h" #define EIGEN_USE_THREADS #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \ @@ -151,6 +152,10 @@ class QuantizeAndDequantizeV3Op : public OpKernel { void Compute(OpKernelContext* ctx) override { const Tensor& input = ctx->input(0); + OP_REQUIRES(ctx, axis_ < input.dims(), + errors::InvalidArgument( + "Axis requested is larger than input dimensions. Axis: ", + axis_, " Input Dimensions: ", input.dims())); const int depth = (axis_ == -1) ? 1 : input.dim_size(axis_); Tensor* output = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, input.shape(), &output)); From 20039b921ced6005d415be9d30edd7be27ee91b3 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Tue, 27 Apr 2021 14:41:40 -0700 Subject: [PATCH 144/243] Fix overflow CHECK issue with `tf.raw_ops.UnsortedSegmentJoin`. PiperOrigin-RevId: 370766155 Change-Id: I33e7c6626224e1060a8a4ab51ad5d861c6d4c63e --- tensorflow/core/kernels/unsorted_segment_join_op.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/core/kernels/unsorted_segment_join_op.cc b/tensorflow/core/kernels/unsorted_segment_join_op.cc index f0b9388f7cff32..77771cf0fe077a 100644 --- a/tensorflow/core/kernels/unsorted_segment_join_op.cc +++ b/tensorflow/core/kernels/unsorted_segment_join_op.cc @@ -90,6 +90,8 @@ class UnsortedSegmentJoinOp : public OpKernel { const int32 segment_dims = segment_id_shape.dims(); const Tensor& num_segments_tensor = context->input(2); + OP_REQUIRES(context, num_segments_tensor.NumElements() != 0, + errors::InvalidArgument("Number of segments cannot be empty.")); auto num_segments = num_segments_tensor.scalar()(); OP_REQUIRES(context, segment_dims != 0, From 9cf9f990ce00f5808406f70cf6cc0d3a6b97d45e Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Wed, 28 Apr 2021 11:30:28 -0700 Subject: [PATCH 145/243] Fix FPE issue with `tf.raw_ops.FusedBatchNorm`. PiperOrigin-RevId: 370948185 Change-Id: If0c8e0320062ed6363e94ff5fe38e6a301f69ac2 --- tensorflow/core/kernels/fused_batch_norm_op.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/core/kernels/fused_batch_norm_op.cc b/tensorflow/core/kernels/fused_batch_norm_op.cc index 83602cdee0847d..a4843ed527712e 100644 --- a/tensorflow/core/kernels/fused_batch_norm_op.cc +++ b/tensorflow/core/kernels/fused_batch_norm_op.cc @@ -301,6 +301,9 @@ struct FusedBatchNorm { const CPUDevice& d = context->eigen_device(); const int depth = x.dimension(3); + OP_REQUIRES( + context, depth != 0, + errors::Internal("The 4th element in the input shape cannot be 0.")); const int size = x.size(); const int rest_size = size / depth; Eigen::DSizes rest_by_depth(rest_size, depth); From 6d79ef2bcb23ccf1fe9270e90624295e3d4baf9e Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Wed, 28 Apr 2021 15:00:39 -0700 Subject: [PATCH 146/243] Fix FPE issue in external Eigen source code issue with `tf.raw_ops.SparseMatMul`. PiperOrigin-RevId: 370992919 Change-Id: Icfb276fef5fb40928b27c3e44608d2aca72c9fd7 --- tensorflow/core/kernels/sparse_matmul_op.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/core/kernels/sparse_matmul_op.cc b/tensorflow/core/kernels/sparse_matmul_op.cc index a2ee69cecd7fe7..aa9692bd5420be 100644 --- a/tensorflow/core/kernels/sparse_matmul_op.cc +++ b/tensorflow/core/kernels/sparse_matmul_op.cc @@ -1039,6 +1039,10 @@ class SparseMatMulOp : public OpKernel { if (transpose_b) { // TODO(agarwal): avoid transposing the matrix here and directly handle // transpose in CreateDenseSlices. + OP_REQUIRES(ctx, right->dim_size(0) != 0, + errors::InvalidArgument("b has an entry 0 in it's shape.")); + OP_REQUIRES(ctx, right->dim_size(1) != 0, + errors::InvalidArgument("b has an entry 0 in it's shape.")); right_tr.reset( new Tensor(right->dtype(), TensorShape({right->dim_size(1), right->dim_size(0)}))); From eb45503d821cb8a9c8fb9c6b16ac298031bd2fa3 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Thu, 29 Apr 2021 12:24:18 -0700 Subject: [PATCH 147/243] Fix FPE issue with `tf.raw_ops.Reverse`. PiperOrigin-RevId: 371176973 Change-Id: Ic6d483bfc95313ec2299c2d1c956cfe96c96626c --- tensorflow/core/kernels/reverse_op.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tensorflow/core/kernels/reverse_op.cc b/tensorflow/core/kernels/reverse_op.cc index 98bf8bf8e914c1..4a46bffc3e9c00 100644 --- a/tensorflow/core/kernels/reverse_op.cc +++ b/tensorflow/core/kernels/reverse_op.cc @@ -158,6 +158,12 @@ class ReverseOp : public OpKernel { void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); + // If input is provided, check to make sure the first dimension is valid. + if (input.dims() > 0) { + OP_REQUIRES( + context, input.dim_size(0) != 0, + errors::InvalidArgument("Invalid input first dimension. Found 0.")); + } const Tensor& dims = context->input(1); if (TensorShapeUtils::IsScalar(input.shape())) { From 3f9fac192d67207dcb8badfd1c34d6fd1350bbc5 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Wed, 28 Apr 2021 16:06:54 -0700 Subject: [PATCH 148/243] Fix OOB issue with `tf.raw_ops.SparseSparseMinimum`. PiperOrigin-RevId: 371005787 Change-Id: Ib686ccc077836e8b980b8b5a03936d36a8ecaf71 --- tensorflow/core/kernels/sparse_sparse_binary_op_shared.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/core/kernels/sparse_sparse_binary_op_shared.cc b/tensorflow/core/kernels/sparse_sparse_binary_op_shared.cc index 09cb2a6a71c7c0..8f1d0e30a7648a 100644 --- a/tensorflow/core/kernels/sparse_sparse_binary_op_shared.cc +++ b/tensorflow/core/kernels/sparse_sparse_binary_op_shared.cc @@ -180,6 +180,11 @@ class SparseSparseBinaryOpShared : public OpKernel { " for dimension ", i)); } + OP_REQUIRES( + ctx, a_indices_t->dim_size(1) == b_indices_t->dim_size(1), + errors::InvalidArgument( + "Indices' dimensions do not match: got ", a_indices_t->dim_size(1), + " and ", b_indices_t->dim_size(1), " for the second dimension.")); const int num_dims = a_indices_t->dim_size(1); const auto a_indices_mat = a_indices_t->matrix(); const auto b_indices_mat = b_indices_t->matrix(); From 5973ff09bb621333c4fc0fb59cfe22527c46423f Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Thu, 29 Apr 2021 17:58:08 -0700 Subject: [PATCH 149/243] Fix heap-buffer-overflow issue with `tf.raw_ops.SparseSplit`. PiperOrigin-RevId: 371242872 Change-Id: I482bb3d12602c7c3cc9446f97fb9f584bb98e9a4 --- tensorflow/core/util/sparse/sparse_tensor.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/core/util/sparse/sparse_tensor.h b/tensorflow/core/util/sparse/sparse_tensor.h index d33bd03db29515..4cbfa83b21d4a5 100644 --- a/tensorflow/core/util/sparse/sparse_tensor.h +++ b/tensorflow/core/util/sparse/sparse_tensor.h @@ -610,6 +610,10 @@ Status SparseTensor::Split(const SparseTensor& input_tensor, for (int i = 0; i < input_tensor.indices().dim_size(0); ++i) { const int dim = input_tensor.indices().matrix()(i, split_dim); int slice_index = GetSliceIndex(dim, split_size, residual); + if (slice_index >= num_values.size()) { + return errors::InvalidArgument("Slice index ", slice_index, + " is larger than num_split."); + } num_values[slice_index]++; } From 57fbb97d0bebae1225940f941452fcfc86cc62d5 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Fri, 30 Apr 2021 10:39:05 -0700 Subject: [PATCH 150/243] Fix the CHECK failure in tf.raw_ops.QuantizeAndDequantizeV2. --- tensorflow/core/kernels/quantize_and_dequantize_op.cc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tensorflow/core/kernels/quantize_and_dequantize_op.cc b/tensorflow/core/kernels/quantize_and_dequantize_op.cc index 8f71d09c0832e7..784bc3302e2482 100644 --- a/tensorflow/core/kernels/quantize_and_dequantize_op.cc +++ b/tensorflow/core/kernels/quantize_and_dequantize_op.cc @@ -71,6 +71,13 @@ class QuantizeAndDequantizeV2Op : public OpKernel { void Compute(OpKernelContext* ctx) override { const Tensor& input = ctx->input(0); + OP_REQUIRES( + ctx, axis_ >= -1, + errors::InvalidArgument("Axis must be at least -1. Found ", axis_)); + OP_REQUIRES( + ctx, (axis_ == -1 || axis_ < input.shape().dims()), + errors::InvalidArgument("Shape must be at least rank ", axis_ + 1, + " but is rank ", input.shape().dims())); const int depth = (axis_ == -1) ? 1 : input.dim_size(axis_); Tensor input_min_tensor; Tensor input_max_tensor; From c27493e6044ac20c3df47e6f84afb4c1645ce84f Mon Sep 17 00:00:00 2001 From: Laura Pak Date: Mon, 3 May 2021 09:53:26 -0700 Subject: [PATCH 151/243] Fix heap buffer overflow in tf.raw_ops.UnicodeEncode. PiperOrigin-RevId: 371717714 Change-Id: If33443b28f158e58078f1268f6b92f2728d219e0 --- tensorflow/core/kernels/unicode_ops.cc | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/tensorflow/core/kernels/unicode_ops.cc b/tensorflow/core/kernels/unicode_ops.cc index 14ae49c837fef9..0e7a24ddd9be79 100644 --- a/tensorflow/core/kernels/unicode_ops.cc +++ b/tensorflow/core/kernels/unicode_ops.cc @@ -533,6 +533,17 @@ class UnicodeEncodeOp : public OpKernel { const Tensor& input_splits = context->input(1); const auto input_splits_flat = input_splits.flat(); + // Operation will treat first argument in input_splits as if it were zero + // regardless of its actual value since splits should begin with zero and + // end with the length of the input values vector. + OP_REQUIRES( + context, input_splits_flat(0) == 0, + errors::InvalidArgument("First value in input_splits must be zero.")); + OP_REQUIRES(context, + input_splits_flat(input_splits_flat.size() - 1) == + input_tensor_flat.size(), + errors::InvalidArgument("Last value in input_splits must be " + "equal to length of input_tensor.")); // Since we limit to a 2-D input (flat_values of rank 1 and a single splits // tensor), our output dimension will be 1 with it's size equal to the // number of splits (outer dimension or ragged tensor). @@ -548,6 +559,14 @@ class UnicodeEncodeOp : public OpKernel { for (int i = 1; i < input_splits_flat.size(); ++i) { icu::UnicodeString unicode_string; icu::UnicodeStringAppendable appendable_unicode_string(unicode_string); + OP_REQUIRES( + context, input_splits_flat(i - 1) <= input_splits_flat(i), + errors::InvalidArgument( + "Values in input_splits must be equal or in ascending order.")); + OP_REQUIRES( + context, input_splits_flat(i) <= input_tensor_flat.size(), + errors::InvalidArgument("Values in input_splits must be less than or " + "equal to input_tensor length.")); for (; idx < input_splits_flat(i); ++idx) { int32 code_point = input_tensor_flat(idx); // Check for invalid code point From 52872dc27b5c0b4c570486af2d81f4911fe0e047 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Thu, 29 Apr 2021 17:58:08 -0700 Subject: [PATCH 152/243] Fix heap-buffer-overflow issue with `tf.raw_ops.SparseSplit`. PiperOrigin-RevId: 371242872 Change-Id: I482bb3d12602c7c3cc9446f97fb9f584bb98e9a4 --- tensorflow/core/util/sparse/sparse_tensor.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/core/util/sparse/sparse_tensor.h b/tensorflow/core/util/sparse/sparse_tensor.h index d33bd03db29515..4cbfa83b21d4a5 100644 --- a/tensorflow/core/util/sparse/sparse_tensor.h +++ b/tensorflow/core/util/sparse/sparse_tensor.h @@ -610,6 +610,10 @@ Status SparseTensor::Split(const SparseTensor& input_tensor, for (int i = 0; i < input_tensor.indices().dim_size(0); ++i) { const int dim = input_tensor.indices().matrix()(i, split_dim); int slice_index = GetSliceIndex(dim, split_size, residual); + if (slice_index >= num_values.size()) { + return errors::InvalidArgument("Slice index ", slice_index, + " is larger than num_split."); + } num_values[slice_index]++; } From 09481ef55f8dc43e77a139041a02f078b53e2548 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Tue, 4 May 2021 13:45:57 -0700 Subject: [PATCH 153/243] Fix heap-buffer-overflow issue with `tf.raw_ops.RaggedTensorToTensor`. PiperOrigin-RevId: 371986929 Change-Id: I79ab962a22c5867f36f7f45b780a1ac881b1dbdd --- tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc b/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc index 963c7eaf325659..cf3efcd49b4886 100644 --- a/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc +++ b/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc @@ -313,6 +313,12 @@ class RaggedTensorToTensorBaseOp : public OpKernel { output_index_multiplier, output_size, result); return tensorflow::Status::OK(); case RowPartitionType::ROW_SPLITS: + if (row_partition_tensor.size() - 1 > parent_output_index.size()) { + return errors::InvalidArgument( + "Row partition size is greater than output size: ", + row_partition_tensor.size() - 1, " > ", + parent_output_index.size()); + } CalculateOutputIndexRowSplit( context, row_partition_tensor, parent_output_index, output_index_multiplier, output_size, result); From 8849f473e567aae07c38f89e69c51823258d5c98 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 4 May 2021 15:46:30 -0700 Subject: [PATCH 154/243] Fix a check fail PiperOrigin-RevId: 372011072 Change-Id: I1062cfaed0aa16884e9a16312483794d188db76f --- tensorflow/core/kernels/load_and_remap_matrix_op.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/core/kernels/load_and_remap_matrix_op.cc b/tensorflow/core/kernels/load_and_remap_matrix_op.cc index cb0245a9b61261..5ec28c70358132 100644 --- a/tensorflow/core/kernels/load_and_remap_matrix_op.cc +++ b/tensorflow/core/kernels/load_and_remap_matrix_op.cc @@ -123,6 +123,11 @@ class LoadAndRemapMatrixOp : public OpKernel { // Processes the checkpoint source and the provided Tensor name. const Tensor* ckpt_path_t; OP_REQUIRES_OK(context, context->input("ckpt_path", &ckpt_path_t)); + OP_REQUIRES( + context, ckpt_path_t->NumElements() == 1, + errors::InvalidArgument("The `ckpt_path` tensor must have exactly one " + "element, got tensor of shape ", + ckpt_path_t->shape().DebugString())); const string& ckpt_path = ckpt_path_t->scalar()(); const Tensor* old_tensor_name_t; OP_REQUIRES_OK(context, From 723587ceeb11e65bf62c767336830021574c05bd Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 4 May 2021 17:11:46 -0700 Subject: [PATCH 155/243] Fix a check fail in Fast Fourier implementation PiperOrigin-RevId: 372026629 Change-Id: Id05c3362aa575271bc3e06b16316c9037085fc11 --- tensorflow/core/kernels/fft_ops.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/core/kernels/fft_ops.cc b/tensorflow/core/kernels/fft_ops.cc index 058435948394c5..f396cc0a577ae1 100644 --- a/tensorflow/core/kernels/fft_ops.cc +++ b/tensorflow/core/kernels/fft_ops.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ +#include "tensorflow/core/platform/errors.h" #define EIGEN_USE_THREADS // See docs in ../ops/fft_ops.cc. @@ -261,6 +262,9 @@ class FFTCPU : public FFTBase { i == FFTRank ? fft_shape[i - 1] / 2 + 1 : fft_shape[i - 1]; full_fft_shape.AddDim(fft_shape[i - 1]); } + OP_REQUIRES(ctx, full_fft_shape.num_elements() > 0, + errors::InvalidArgument("Obtained a FFT shape of 0 elements: ", + full_fft_shape.DebugString())); Tensor temp; OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum::v(), From 292ae19bf5e4a860d9d152b5f68781f241f5d3d0 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 4 May 2021 17:42:54 -0700 Subject: [PATCH 156/243] Prevent check fail in FFT PiperOrigin-RevId: 372031044 Change-Id: I50994e3e8a5d1342d01bde80256f6bf2730ca299 --- tensorflow/core/kernels/fft_ops.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/core/kernels/fft_ops.cc b/tensorflow/core/kernels/fft_ops.cc index 058435948394c5..462e3f28bbadfb 100644 --- a/tensorflow/core/kernels/fft_ops.cc +++ b/tensorflow/core/kernels/fft_ops.cc @@ -221,6 +221,9 @@ class FFTCPU : public FFTBase { input_slice_sizes[i] = fft_shape[i - 1]; temp_shape.AddDim(fft_shape[i - 1]); } + OP_REQUIRES(ctx, temp_shape.num_elements() > 0, + errors::InvalidArgument("Obtained a FFT shape of 0 elements: ", + temp_shape.DebugString())); auto output = out->flat_inner_dims(); const Eigen::DSizes zero_start_indices; From 5fbc82ecec988def44332702d747a4e6520494ca Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 4 May 2021 18:06:03 -0700 Subject: [PATCH 157/243] Fix multiple issues in EditDistance PiperOrigin-RevId: 372033948 Change-Id: Ieb957c29894af05bdfeb1a0402fced808dfcfd7b --- tensorflow/core/kernels/edit_distance_op.cc | 47 +++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/tensorflow/core/kernels/edit_distance_op.cc b/tensorflow/core/kernels/edit_distance_op.cc index 4aecdc9e414d36..386a1af08409f6 100644 --- a/tensorflow/core/kernels/edit_distance_op.cc +++ b/tensorflow/core/kernels/edit_distance_op.cc @@ -64,6 +64,12 @@ Status ValidateShapes(OpKernelContext* ctx, const Tensor& hypothesis_indices, return errors::InvalidArgument( "truth_shape should be a vector, but got shape: ", truth_shape.shape().DebugString()); + if (hypothesis_values.NumElements() != hypothesis_indices.dim_size(0)) + return errors::InvalidArgument( + "Expected hypothesis_values.NumElements == " + "#rows(hypothesis_indices), their shapes are: ", + hypothesis_values.shape().DebugString(), " and ", + hypothesis_indices.shape().DebugString()); if (hypothesis_shape.NumElements() != hypothesis_indices.dim_size(1)) return errors::InvalidArgument( "Expected hypothesis_shape.NumElements == " @@ -75,6 +81,12 @@ Status ValidateShapes(OpKernelContext* ctx, const Tensor& hypothesis_indices, "Input SparseTensors must have rank at least 2, but truth_shape " "rank is: ", truth_shape.NumElements()); + if (truth_values.NumElements() != truth_indices.dim_size(0)) + return errors::InvalidArgument( + "Expected truth_values.NumElements == " + "#rows(truth_indices), their shapes are: ", + truth_values.shape().DebugString(), " and ", + truth_indices.shape().DebugString()); if (truth_shape.NumElements() != truth_indices.dim_size(1)) return errors::InvalidArgument( "Expected truth_shape.NumElements == " @@ -153,6 +165,11 @@ class EditDistanceOp : public OpKernel { output_shape.AddDim(std::max(hypothesis_st_shape.dim_size(d), truth_st_shape.dim_size(d))); } + const auto output_elements = output_shape.num_elements(); + OP_REQUIRES( + ctx, output_elements > 0, + errors::InvalidArgument("Got output shape ", output_shape.DebugString(), + " which has 0 elements")); Tensor* output = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output("output", output_shape, &output)); @@ -185,6 +202,12 @@ class EditDistanceOp : public OpKernel { if (g_truth == g_hypothesis) { auto loc = std::inner_product(g_truth.begin(), g_truth.end(), output_strides.begin(), int64{0}); + OP_REQUIRES( + ctx, loc < output_elements, + errors::Internal("Got an inner product ", loc, + " which would require in writing to outside of " + "the buffer for the output tensor (max elements ", + output_elements, ")")); output_t(loc) = gtl::LevenshteinDistance(truth_seq, hypothesis_seq, cmp); if (normalize_) output_t(loc) /= truth_seq.size(); @@ -194,6 +217,12 @@ class EditDistanceOp : public OpKernel { } else if (g_truth > g_hypothesis) { // zero-length truth auto loc = std::inner_product(g_hypothesis.begin(), g_hypothesis.end(), output_strides.begin(), int64{0}); + OP_REQUIRES( + ctx, loc < output_elements, + errors::Internal("Got an inner product ", loc, + " which would require in writing to outside of " + "the buffer for the output tensor (max elements ", + output_elements, ")")); output_t(loc) = hypothesis_seq.size(); if (normalize_ && output_t(loc) != 0.0f) { output_t(loc) = std::numeric_limits::infinity(); @@ -202,6 +231,12 @@ class EditDistanceOp : public OpKernel { } else { // zero-length hypothesis auto loc = std::inner_product(g_truth.begin(), g_truth.end(), output_strides.begin(), int64{0}); + OP_REQUIRES( + ctx, loc < output_elements, + errors::Internal("Got an inner product ", loc, + " which would require in writing to outside of " + "the buffer for the output tensor (max elements ", + output_elements, ")")); output_t(loc) = (normalize_) ? 1.0 : truth_seq.size(); ++truth_iter; } @@ -212,6 +247,12 @@ class EditDistanceOp : public OpKernel { auto hypothesis_seq = hypothesis_j.values(); auto loc = std::inner_product(g_hypothesis.begin(), g_hypothesis.end(), output_strides.begin(), int64{0}); + OP_REQUIRES( + ctx, loc < output_elements, + errors::Internal("Got an inner product ", loc, + " which would require in writing to outside of the " + "buffer for the output tensor (max elements ", + output_elements, ")")); output_t(loc) = hypothesis_seq.size(); if (normalize_ && output_t(loc) != 0.0f) { output_t(loc) = std::numeric_limits::infinity(); @@ -224,6 +265,12 @@ class EditDistanceOp : public OpKernel { auto truth_seq = truth_i.values(); auto loc = std::inner_product(g_truth.begin(), g_truth.end(), output_strides.begin(), int64{0}); + OP_REQUIRES( + ctx, loc < output_elements, + errors::Internal("Got an inner product ", loc, + " which would require in writing to outside of the " + "buffer for the output tensor (max elements ", + output_elements, ")")); output_t(loc) = (normalize_) ? 1.0 : truth_seq.size(); ++truth_iter; } From 2a996db49a8c1ddf5321f99718e8a5cff6ac0e50 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 4 May 2021 18:33:28 -0700 Subject: [PATCH 158/243] Add missing validations in dillation ops. PiperOrigin-RevId: 372037158 Change-Id: I4ee304c84a02550c030288a6534000b934fc1599 --- tensorflow/core/kernels/dilation_ops.cc | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/tensorflow/core/kernels/dilation_ops.cc b/tensorflow/core/kernels/dilation_ops.cc index f2e7b8a857a0f4..fdb2716eff9284 100644 --- a/tensorflow/core/kernels/dilation_ops.cc +++ b/tensorflow/core/kernels/dilation_ops.cc @@ -130,6 +130,7 @@ class DilationOp : public OpKernel { ParseSizes(context, strides_, rates_, padding_, &stride_rows, &stride_cols, &rate_rows, &rate_cols, &pad_top, &pad_left, &out_rows, &out_cols); + if (!context->status().ok()) return; // Output tensor is of the following dimensions: // [ batch, out_rows, out_cols, depth ] @@ -229,6 +230,7 @@ class DilationBackpropInputOp : public OpKernel { ParseSizes(context, strides_, rates_, padding_, &stride_rows, &stride_cols, &rate_rows, &rate_cols, &pad_top, &pad_left, &out_rows, &out_cols); + if (!context->status().ok()) return; // Verify that the incoming gradient tensor has the expected size // [ batch, out_rows, out_cols, depth ] @@ -318,8 +320,10 @@ struct DilationBackpropInput { } } } - in_backprop(b, h_in_max, w_in_max, d) += - out_backprop(b, h_out, w_out, d); + if (h_in_max < input_rows && w_in_max < input_cols) { + in_backprop(b, h_in_max, w_in_max, d) += + out_backprop(b, h_out, w_out, d); + } } } } @@ -349,6 +353,7 @@ class DilationBackpropFilterOp : public OpKernel { ParseSizes(context, strides_, rates_, padding_, &stride_rows, &stride_cols, &rate_rows, &rate_cols, &pad_top, &pad_left, &out_rows, &out_cols); + if (!context->status().ok()) return; // Verify that the incoming gradient tensor has the expected size // [ batch, out_rows, out_cols, depth ] @@ -438,8 +443,10 @@ struct DilationBackpropFilter { } } } - filter_backprop(h_max, w_max, d) += - out_backprop(b, h_out, w_out, d); + if (h_max < filter_rows && w_max < filter_cols) { + filter_backprop(h_max, w_max, d) += + out_backprop(b, h_out, w_out, d); + } } } } From e458c49d3f9251dd0da911984598d063afb8a803 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 4 May 2021 19:14:24 -0700 Subject: [PATCH 159/243] Fix breakage in parameterized_truncated_normal_op.cc PiperOrigin-RevId: 372041718 Change-Id: Iff79e77a2bb27032423eefcb84211627b27dfe81 --- tensorflow/core/kernels/parameterized_truncated_normal_op.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/core/kernels/parameterized_truncated_normal_op.cc b/tensorflow/core/kernels/parameterized_truncated_normal_op.cc index b0b720b4e030fa..62d996ed00ab30 100644 --- a/tensorflow/core/kernels/parameterized_truncated_normal_op.cc +++ b/tensorflow/core/kernels/parameterized_truncated_normal_op.cc @@ -336,6 +336,9 @@ class ParameterizedTruncatedNormalOp : public OpKernel { ctx, TensorShapeUtils::IsVector(shape_tensor.shape()), errors::InvalidArgument("Input shape should be a vector, got shape: ", shape_tensor.shape().DebugString())); + OP_REQUIRES(ctx, shape_tensor.NumElements() > 0, + errors::InvalidArgument("Shape tensor must not be empty, got ", + shape_tensor.DebugString())); int32 num_batches = shape_tensor.flat()(0); int32 samples_per_batch = 1; From 6675965ab64ae542a59db7f8735ca0e7ba056e17 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Tue, 4 May 2021 21:30:50 -0700 Subject: [PATCH 160/243] Fix heap-buffer-overflow issue with `tf.raw_ops.SparseDenseCwiseMul`. PiperOrigin-RevId: 372054410 Change-Id: Ifcce0491e2e3816838c87e73be30a1e61b65174d --- tensorflow/core/kernels/sparse_dense_binary_op_shared.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc b/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc index 3a5e66a0e73ea6..dac4a3d3e6bfcd 100644 --- a/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc +++ b/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc @@ -78,6 +78,11 @@ class SparseDenseBinaryOpShared : public OpKernel { "but received shapes: ", values_t->shape().DebugString(), " and ", shape_t->shape().DebugString())); + OP_REQUIRES( + ctx, values_t->dim_size(0) == indices_t->dim_size(0), + errors::InvalidArgument( + "The first dimension of values and indices should match. (", + values_t->dim_size(0), " vs. ", indices_t->dim_size(0), ")")); const auto indices_mat = indices_t->matrix(); const auto shape_vec = shape_t->vec(); From bd4e7124305cf508b54f169d2244f2980e666920 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 5 May 2021 08:38:03 -0700 Subject: [PATCH 161/243] Fix heap buffer overflow PiperOrigin-RevId: 372132844 Change-Id: Idef9895efaf145f2b1c23d31983601ec980cd5e4 --- tensorflow/core/kernels/maxpooling_op.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/core/kernels/maxpooling_op.cc b/tensorflow/core/kernels/maxpooling_op.cc index 5652addd00a957..f1193f0ef4887c 100644 --- a/tensorflow/core/kernels/maxpooling_op.cc +++ b/tensorflow/core/kernels/maxpooling_op.cc @@ -984,6 +984,9 @@ struct LaunchMaxPoolingGradWithArgmax { const int input_start = start * input_size_per_batch; const int input_end = limit * input_size_per_batch; for (int64 index = input_start; index < input_end; index++) { + if (index >= argmax.NumElements()) { + break; + } int64 grad_out_index = argmax_flat(index); if (!include_batch_in_index) { const int64 cur_batch = index / input_size_per_batch; From 2b028d63551b82839d9ebf6f74a1bff957bc14b0 Mon Sep 17 00:00:00 2001 From: Laura Pak Date: Wed, 5 May 2021 08:16:13 -0700 Subject: [PATCH 162/243] Fix out of bound read in requantization_range_op.cc PiperOrigin-RevId: 372129031 Change-Id: Ie684ab98a3840c5186ead3eafffc0e0ed0e8030d --- tensorflow/core/kernels/requantization_range_op.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/core/kernels/requantization_range_op.cc b/tensorflow/core/kernels/requantization_range_op.cc index cc6e891a6b352b..f6e217499d1983 100644 --- a/tensorflow/core/kernels/requantization_range_op.cc +++ b/tensorflow/core/kernels/requantization_range_op.cc @@ -46,6 +46,10 @@ class RequantizationRangeOp : public OpKernel { void Compute(OpKernelContext* ctx) override { const Tensor& input = ctx->input(0); + OP_REQUIRES(ctx, ctx->input(1).NumElements() > 0, + errors::InvalidArgument("Input min must not be empty.")); + OP_REQUIRES(ctx, ctx->input(2).NumElements() > 0, + errors::InvalidArgument("Input max must not be empty.")); const float input_min_float = ctx->input(1).flat()(0); const float input_max_float = ctx->input(2).flat()(0); Tensor* output_min = nullptr; From ecf19b3593688c7f64576830c1a7b5f3ff1b0110 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Tue, 4 May 2021 18:05:46 -0700 Subject: [PATCH 163/243] Fix memory corruption issue with `tf.raw_ops.DrawBoundingBoxesV2`. PiperOrigin-RevId: 372033910 Change-Id: I8a9f4efc1c8ddaacbc26ec1fbe4bfdd6791c226d --- tensorflow/core/kernels/draw_bounding_box_op.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tensorflow/core/kernels/draw_bounding_box_op.cc b/tensorflow/core/kernels/draw_bounding_box_op.cc index 73db76333f0862..926ea368a58ba8 100644 --- a/tensorflow/core/kernels/draw_bounding_box_op.cc +++ b/tensorflow/core/kernels/draw_bounding_box_op.cc @@ -73,6 +73,12 @@ class DrawBoundingBoxesOp : public OpKernel { errors::InvalidArgument("Channel depth should be either 1 (GRY), " "3 (RGB), or 4 (RGBA)")); + OP_REQUIRES( + context, boxes.dim_size(2) == 4, + errors::InvalidArgument( + "The size of the third dimension of the box must be 4. Received: ", + boxes.dim_size(2))); + const int64 batch_size = images.dim_size(0); const int64 height = images.dim_size(1); const int64 width = images.dim_size(2); From a8eb24879a39e7ba7e66fdcd48ee6c44efdde766 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 5 May 2021 11:40:50 -0700 Subject: [PATCH 164/243] Add several missing validations in SDCA PiperOrigin-RevId: 372172877 Change-Id: Id366da962432e18dcbfac847d11e98488bebb70a --- tensorflow/core/kernels/sdca_internal.cc | 36 ++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/tensorflow/core/kernels/sdca_internal.cc b/tensorflow/core/kernels/sdca_internal.cc index cbc754af0e9bb1..11a3be8bf46a76 100644 --- a/tensorflow/core/kernels/sdca_internal.cc +++ b/tensorflow/core/kernels/sdca_internal.cc @@ -99,6 +99,10 @@ Status ModelWeights::Initialize(OpKernelContext* const context) { OpInputList sparse_weights_inputs; TF_RETURN_IF_ERROR( context->input_list("sparse_weights", &sparse_weights_inputs)); + if (sparse_indices_inputs.size() != sparse_weights_inputs.size()) + return errors::InvalidArgument( + "sparse_indices and sparse_weights must have the same length, got ", + sparse_indices_inputs.size(), " and ", sparse_weights_inputs.size()); OpInputList dense_weights_inputs; TF_RETURN_IF_ERROR( context->input_list("dense_weights", &dense_weights_inputs)); @@ -106,10 +110,20 @@ Status ModelWeights::Initialize(OpKernelContext* const context) { OpOutputList sparse_weights_outputs; TF_RETURN_IF_ERROR(context->output_list("out_delta_sparse_weights", &sparse_weights_outputs)); + if (sparse_weights_outputs.size() != sparse_weights_inputs.size()) + return errors::InvalidArgument( + "out_delta_sparse_weights and sparse_weights must have the same " + "length, got ", + sparse_weights_outputs.size(), " and ", sparse_weights_inputs.size()); OpOutputList dense_weights_outputs; TF_RETURN_IF_ERROR( context->output_list("out_delta_dense_weights", &dense_weights_outputs)); + if (dense_weights_outputs.size() != dense_weights_inputs.size()) + return errors::InvalidArgument( + "out_delta_dense_weights and dense_weights must have the same length, " + "got ", + dense_weights_outputs.size(), " and ", dense_weights_inputs.size()); for (int i = 0; i < sparse_weights_inputs.size(); ++i) { Tensor* delta_t; @@ -327,13 +341,28 @@ Status Examples::Initialize(OpKernelContext* const context, OpInputList sparse_example_indices_inputs; TF_RETURN_IF_ERROR(context->input_list("sparse_example_indices", &sparse_example_indices_inputs)); + if (sparse_example_indices_inputs.size() != num_sparse_features) + return errors::InvalidArgument( + "Expected ", num_sparse_features, + " tensors in sparse_example_indices but got ", + sparse_example_indices_inputs.size()); OpInputList sparse_feature_indices_inputs; TF_RETURN_IF_ERROR(context->input_list("sparse_feature_indices", &sparse_feature_indices_inputs)); + if (sparse_feature_indices_inputs.size() != num_sparse_features) + return errors::InvalidArgument( + "Expected ", num_sparse_features, + " tensors in sparse_feature_indices but got ", + sparse_feature_indices_inputs.size()); OpInputList sparse_feature_values_inputs; if (num_sparse_features_with_values > 0) { TF_RETURN_IF_ERROR(context->input_list("sparse_feature_values", &sparse_feature_values_inputs)); + if (sparse_feature_values_inputs.size() != num_sparse_features_with_values) + return errors::InvalidArgument( + "Expected ", num_sparse_features_with_values, + " tensors in sparse_feature_values but got ", + sparse_feature_values_inputs.size()); } const Tensor* example_weights_t; @@ -400,6 +429,13 @@ Status Examples::CreateSparseFeatureRepresentation( sparse_example_indices_inputs[i].template flat(); auto feature_indices = sparse_feature_indices_inputs[i].template flat(); + if (example_indices.size() != feature_indices.size()) { + mutex_lock l(mu); + result = errors::InvalidArgument( + "Found mismatched example_indices and feature_indices [", + example_indices, "] vs [", feature_indices, "]"); + return; + } // Parse features for each example. Features for a particular example // are at the offsets (start_id, end_id] From a59bcd733a29ee09ceb06423fc0232db5384df75 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 5 May 2021 12:07:57 -0700 Subject: [PATCH 165/243] Add missing validations to reverse_sequence_op PiperOrigin-RevId: 372178683 Change-Id: Iac97ebab5b342f1262c77a7d9bcb4267b305ce5b --- tensorflow/core/kernels/reverse_sequence_op.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/core/kernels/reverse_sequence_op.cc b/tensorflow/core/kernels/reverse_sequence_op.cc index 0e112133915d5a..490456fddd76b9 100644 --- a/tensorflow/core/kernels/reverse_sequence_op.cc +++ b/tensorflow/core/kernels/reverse_sequence_op.cc @@ -113,6 +113,10 @@ class ReverseSequenceOp : public OpKernel { : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("batch_dim", &batch_dim_)); OP_REQUIRES_OK(context, context->GetAttr("seq_dim", &seq_dim_)); + OP_REQUIRES(context, batch_dim_ >= 0, + errors::InvalidArgument("Invalid batch_dim ", batch_dim_)); + OP_REQUIRES(context, seq_dim_ >= 0, + errors::InvalidArgument("Invalid seq_dim ", seq_dim_)); } void Compute(OpKernelContext* context) override { From 92edd8f06b119be046094797d3212f78c85e2f5d Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 5 May 2021 14:34:54 -0700 Subject: [PATCH 166/243] Don't do any work if output tensor is null (prevent div by 0) PiperOrigin-RevId: 372208700 Change-Id: Iea6b6293e887ade8538facfdb50fb931e17f511e --- tensorflow/core/kernels/maxpooling_op.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/core/kernels/maxpooling_op.cc b/tensorflow/core/kernels/maxpooling_op.cc index 5652addd00a957..9ac7ac6132c4e1 100644 --- a/tensorflow/core/kernels/maxpooling_op.cc +++ b/tensorflow/core/kernels/maxpooling_op.cc @@ -1049,6 +1049,8 @@ class MaxPoolingGradWithArgmaxOp : public OpKernel { OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 0, out_shape, &grad_out)); + if (out_shape.num_elements() == 0) return; // nothing to be done + LaunchMaxPoolingGradWithArgmax::launch( context, params, grad_in, argmax, grad_out, include_batch_in_index_); } From 5c0f8d9abe5a0a7a70de03eb359e8ffe8adeb8cb Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 5 May 2021 15:20:14 -0700 Subject: [PATCH 167/243] Add missing validation to pooling_ops_3d PiperOrigin-RevId: 372218727 Change-Id: I6b9ed4266aa7286c02f1f230d7bea922c1be547e --- tensorflow/core/kernels/pooling_ops_3d.cc | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tensorflow/core/kernels/pooling_ops_3d.cc b/tensorflow/core/kernels/pooling_ops_3d.cc index 7345ccf69eeba9..40c69a4f2a8501 100644 --- a/tensorflow/core/kernels/pooling_ops_3d.cc +++ b/tensorflow/core/kernels/pooling_ops_3d.cc @@ -702,6 +702,19 @@ class MaxPooling3dGradGradOp : public OpKernel { OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {2}, 0, tensor_out.shape(), &output)); + // Given access patterns in LaunchMaxPooling3dGradGradOp, these tensors must + // have elements. + OP_REQUIRES(context, tensor_in.NumElements() > 0, + errors::InvalidArgument("received empty tensor tensor_in: ", + tensor_in.DebugString())); + OP_REQUIRES(context, tensor_out.NumElements() > 0, + errors::InvalidArgument("received empty tensor tensor_out: ", + tensor_out.DebugString())); + OP_REQUIRES( + context, out_grad_backprop.NumElements() > 0, + errors::InvalidArgument("received empty tensor out_grad_backprop: ", + out_grad_backprop.DebugString())); + LaunchMaxPooling3dGradGradOp::launch( context, params, tensor_in, tensor_out, out_grad_backprop, output); } From d2eaffe94684081a59c820cc5b2ec037020758ec Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 5 May 2021 18:07:02 -0700 Subject: [PATCH 168/243] [CherryPick]Add missing validation, prevent heap OOB --- tensorflow/core/kernels/pooling_ops_3d.cc | 25 +++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/tensorflow/core/kernels/pooling_ops_3d.cc b/tensorflow/core/kernels/pooling_ops_3d.cc index 7345ccf69eeba9..f292032d883b4f 100644 --- a/tensorflow/core/kernels/pooling_ops_3d.cc +++ b/tensorflow/core/kernels/pooling_ops_3d.cc @@ -697,11 +697,36 @@ class MaxPooling3dGradGradOp : public OpKernel { Pool3dParameters params{context, ksize_, stride_, padding_, data_format_, tensor_in.shape()}; + if (!context->status().ok()) return; // params is invalid Tensor* output = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {2}, 0, tensor_out.shape(), &output)); + // Given access patterns in LaunchMaxPooling3dGradGradOp, these tensors must + // have elements. + OP_REQUIRES(context, tensor_in.NumElements() > 0, + errors::InvalidArgument("received empty tensor tensor_in: ", + tensor_in.DebugString())); + OP_REQUIRES(context, tensor_out.NumElements() > 0, + errors::InvalidArgument("received empty tensor tensor_out: ", + tensor_out.DebugString())); + OP_REQUIRES( + context, out_grad_backprop.NumElements() > 0, + errors::InvalidArgument("received empty tensor out_grad_backprop: ", + out_grad_backprop.DebugString())); + OP_REQUIRES(context, + tensor_in.NumElements() == out_grad_backprop.NumElements(), + errors::InvalidArgument("tensor_in and out_grad_backprop must " + "have same number of elements, got <", + tensor_in.DebugString(), "> and <", + out_grad_backprop.DebugString(), ">")); + OP_REQUIRES( + context, tensor_out.NumElements() == output->NumElements(), + errors::InvalidArgument( + "tensor_out and output must have same number of elements, got <", + tensor_out.DebugString(), "> and <", output->DebugString(), ">")); + LaunchMaxPooling3dGradGradOp::launch( context, params, tensor_in, tensor_out, out_grad_backprop, output); } From b4abc8bc80ad14751382516450ec681a99d156db Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Wed, 5 May 2021 21:09:21 -0700 Subject: [PATCH 169/243] Fix nullptr deref in `tf.raw_ops.CTCLoss`. PiperOrigin-RevId: 372266334 Change-Id: Ic52c3e9f13a38f54482d670907eda1688450862b --- tensorflow/core/kernels/ctc_loss_op.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/core/kernels/ctc_loss_op.cc b/tensorflow/core/kernels/ctc_loss_op.cc index 995d28a158cfa0..9debca01409c0f 100644 --- a/tensorflow/core/kernels/ctc_loss_op.cc +++ b/tensorflow/core/kernels/ctc_loss_op.cc @@ -67,6 +67,9 @@ class CTCLossOp : public OpKernel { const TensorShape& inputs_shape = inputs->shape(); const int64 max_time = inputs_shape.dim_size(0); + OP_REQUIRES(ctx, max_time != 0, + errors::InvalidArgument( + "Max time or first dimension of input cannot be 0.")); const int64 batch_size = inputs_shape.dim_size(1); const int64 num_classes_raw = inputs_shape.dim_size(2); OP_REQUIRES( From 67f1f7bbce498bdef38d7d3ec26da301e6310e53 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Wed, 5 May 2021 17:33:47 -0700 Subject: [PATCH 170/243] Fix OOB read issue with `tf.raw_ops.CTCLoss`. PiperOrigin-RevId: 372242187 Change-Id: I347228ed8c04e1d2eb9d2479ae52f51d1b512c6e --- tensorflow/core/kernels/ctc_loss_op.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/core/kernels/ctc_loss_op.cc b/tensorflow/core/kernels/ctc_loss_op.cc index 995d28a158cfa0..d31f58d5c1312c 100644 --- a/tensorflow/core/kernels/ctc_loss_op.cc +++ b/tensorflow/core/kernels/ctc_loss_op.cc @@ -62,6 +62,10 @@ class CTCLossOp : public OpKernel { errors::InvalidArgument("sequence_length is not a vector")); OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(labels_indices->shape()), errors::InvalidArgument("labels_indices is not a matrix")); + OP_REQUIRES(ctx, labels_indices->dim_size(1) > 1, + errors::InvalidArgument( + "labels_indices second dimension must be >= 1. Received ", + labels_indices->dim_size(1))); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(labels_values->shape()), errors::InvalidArgument("labels_values is not a vector")); From f6852e7ee8753180ee8d0812c646990079693ad0 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 6 May 2021 09:51:26 -0700 Subject: [PATCH 171/243] Fix assertion failure in pooling_ops_3d PiperOrigin-RevId: 372364504 Change-Id: Iecde4fe26b47a8fa935d6e2611b5585ed5777781 --- tensorflow/core/kernels/pooling_ops_3d.cc | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tensorflow/core/kernels/pooling_ops_3d.cc b/tensorflow/core/kernels/pooling_ops_3d.cc index 7345ccf69eeba9..e85f9bc9829c0f 100644 --- a/tensorflow/core/kernels/pooling_ops_3d.cc +++ b/tensorflow/core/kernels/pooling_ops_3d.cc @@ -387,6 +387,19 @@ struct LaunchAvgPooling3dGradOp { const std::array& output_shape, const std::array& padding, TensorFormat data_format, Tensor* output) { + OP_REQUIRES( + context, tensor_in_shape.dim_size(0) == out_backprop.dim_size(0), + errors::InvalidArgument( + "Expected first dimension of tensor_in_shape and " + "out_backprop to match, got ", + tensor_in_shape.dim_size(0), " and ", out_backprop.dim_size(0))); + OP_REQUIRES( + context, tensor_in_shape.dim_size(4) == out_backprop.dim_size(4), + errors::InvalidArgument( + "Expected last dimension of tensor_in_shape and " + "out_backprop to match, got ", + tensor_in_shape.dim_size(4), " and ", out_backprop.dim_size(4))); + output->flat().setZero(); std::array input_size = {{tensor_in_shape.dim_size(3), tensor_in_shape.dim_size(2), From dc9088ccf3ff8fb4edfcda68e2359ec2b6a81323 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 5 May 2021 22:39:29 -0700 Subject: [PATCH 172/243] Validate arguments of `FractionalMaxPoolGrad` PiperOrigin-RevId: 372274982 Change-Id: If46b0c442efa4eaef635ce6a476717060420122c --- tensorflow/core/kernels/fractional_max_pool_op.cc | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tensorflow/core/kernels/fractional_max_pool_op.cc b/tensorflow/core/kernels/fractional_max_pool_op.cc index 619a3507ce415f..1a2a783d135c54 100644 --- a/tensorflow/core/kernels/fractional_max_pool_op.cc +++ b/tensorflow/core/kernels/fractional_max_pool_op.cc @@ -235,6 +235,20 @@ class FractionalMaxPoolGradOp : public OpKernel { // Just to make it similar to FractionalMaxPoolOp. constexpr int tensor_in_and_out_dims = 4; + OP_REQUIRES( + context, tensor_in.dims() == tensor_in_and_out_dims, + errors::InvalidArgument("orig_input should be a tensor of rank 4, got ", + tensor_in.DebugString())); + OP_REQUIRES(context, tensor_in.NumElements() > 0, + errors::InvalidArgument("orig_input must not be empty, got ", + tensor_in.DebugString())); + OP_REQUIRES(context, tensor_out.dims() == tensor_in_and_out_dims, + errors::InvalidArgument( + "orig_output should be a tensor of rank 4, got ", + tensor_out.DebugString())); + OP_REQUIRES(context, tensor_out.NumElements() > 0, + errors::InvalidArgument("orig_output must not be empty, got ", + tensor_out.DebugString())); std::vector input_size(tensor_in_and_out_dims); std::vector output_size(tensor_in_and_out_dims); for (int i = 0; i < tensor_in_and_out_dims; ++i) { From 87c455ec4649f0bca8ebbb00ae377d7565aa30be Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 6 May 2021 14:02:47 -0700 Subject: [PATCH 173/243] Validate inputs of `FractionalAvgPoolGrad`. PiperOrigin-RevId: 372420640 Change-Id: Icc583928e6cdc3062e12498e4d2337a8fe3da016 --- tensorflow/core/kernels/fractional_avg_pool_op.cc | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tensorflow/core/kernels/fractional_avg_pool_op.cc b/tensorflow/core/kernels/fractional_avg_pool_op.cc index b8a5083e5340f1..0452638a066795 100644 --- a/tensorflow/core/kernels/fractional_avg_pool_op.cc +++ b/tensorflow/core/kernels/fractional_avg_pool_op.cc @@ -250,6 +250,19 @@ class FractionalAvgPoolGradOp : public OpKernel { const int64 out_cols = out_backprop.dim_size(2); const int64 out_depth = out_backprop.dim_size(3); + OP_REQUIRES(context, row_seq_tensor.NumElements() > out_rows, + errors::InvalidArgument("Given out_backprop shape ", + out_backprop.shape().DebugString(), + ", row_seq_tensor must have at least ", + out_rows + 1, " elements, but got ", + row_seq_tensor.NumElements())); + OP_REQUIRES(context, col_seq_tensor.NumElements() > out_cols, + errors::InvalidArgument("Given out_backprop shape ", + out_backprop.shape().DebugString(), + ", col_seq_tensor must have at least ", + out_cols + 1, " elements, but got ", + col_seq_tensor.NumElements())); + auto row_seq_tensor_flat = row_seq_tensor.flat(); auto col_seq_tensor_flat = col_seq_tensor.flat(); auto orig_input_tensor_shape_flat = orig_input_tensor_shape.flat(); From c29463adea245860e33fcfaccf600e176e8bf247 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 6 May 2021 14:24:09 -0700 Subject: [PATCH 174/243] Prevent heap OOB error in `MaxPoolGrad` PiperOrigin-RevId: 372424854 Change-Id: Idac0f23867ad8b0601cafbaaa52d5e64269e63a7 --- tensorflow/core/kernels/maxpooling_op.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/maxpooling_op.cc b/tensorflow/core/kernels/maxpooling_op.cc index 5652addd00a957..5b1f084d886516 100644 --- a/tensorflow/core/kernels/maxpooling_op.cc +++ b/tensorflow/core/kernels/maxpooling_op.cc @@ -192,7 +192,9 @@ static void SpatialMaxPoolWithArgMaxHelper( // CHECK(input_backprop_index >= in_start && input_backprop_index < // in_end) FastBoundsCheck(input_backprop_index - in_start, in_end - in_start); - input_backprop_flat(input_backprop_index) += out_backprop_flat(index); + if (index < out_backprop.NumElements()) { + input_backprop_flat(input_backprop_index) += out_backprop_flat(index); + } } } }; From 4ae20817579a464e13a5a28f485c1437d1e5ea50 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 6 May 2021 14:51:41 -0700 Subject: [PATCH 175/243] Fix SEGV in CTC ops PiperOrigin-RevId: 372430279 Change-Id: I7ec2ad9d6f4d0980c33de45d27c6b17df5c6e26f --- tensorflow/core/kernels/ctc_decoder_ops.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/core/kernels/ctc_decoder_ops.cc b/tensorflow/core/kernels/ctc_decoder_ops.cc index 1075b17694ddcf..60b10107537408 100644 --- a/tensorflow/core/kernels/ctc_decoder_ops.cc +++ b/tensorflow/core/kernels/ctc_decoder_ops.cc @@ -70,6 +70,9 @@ class CTCDecodeHelper { if (inputs_shape.dims() != 3) { return errors::InvalidArgument("inputs is not a 3-Tensor"); } + if (inputs_shape.num_elements() == 0) { + return errors::InvalidArgument("inputs must not be empty"); + } const int64 max_time = inputs_shape.dim_size(0); const int64 batch_size = inputs_shape.dim_size(1); From 55993172172f6eb42a89590c4ba9626e1599410e Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 6 May 2021 15:31:05 -0700 Subject: [PATCH 176/243] Fix heap OOB read in dequantize op. Also fixes SEGV in same op PiperOrigin-RevId: 372437896 Change-Id: I135e94d360c2a1ce374c10f7e0fed1af603dbc02 --- tensorflow/core/kernels/dequantize_op.cc | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tensorflow/core/kernels/dequantize_op.cc b/tensorflow/core/kernels/dequantize_op.cc index 481909e8420e76..b9d786ac7c30e1 100644 --- a/tensorflow/core/kernels/dequantize_op.cc +++ b/tensorflow/core/kernels/dequantize_op.cc @@ -69,6 +69,18 @@ class DequantizeOp : public OpKernel { if (axis_ > -1) { num_slices = input.dim_size(axis_); } + OP_REQUIRES(ctx, input_min_tensor.NumElements() == num_slices, + errors::InvalidArgument( + "input_min_tensor must have as many elements as input on " + "the dequantization axis (", + axis_, "), got ", input_min_tensor.NumElements(), + ", expected ", num_slices)); + OP_REQUIRES(ctx, input_max_tensor.NumElements() == num_slices, + errors::InvalidArgument( + "input_max_tensor must have as many elements as input on " + "the dequantization axis (", + axis_, "), got ", input_max_tensor.NumElements(), + ", expected ", num_slices)); Tensor* output = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, input.shape(), &output)); From 27279cd23c52585b0cdf28e26887074dc64d0823 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 6 May 2021 15:55:00 -0700 Subject: [PATCH 177/243] Prevent overflow in sparse op PiperOrigin-RevId: 372442006 Change-Id: I60fe31cd7e56fb3501e97c63500caf902ddeee96 --- tensorflow/core/kernels/sparse_split_op.cc | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/tensorflow/core/kernels/sparse_split_op.cc b/tensorflow/core/kernels/sparse_split_op.cc index 3d02be47cbbef5..ca3e77f76af7f7 100644 --- a/tensorflow/core/kernels/sparse_split_op.cc +++ b/tensorflow/core/kernels/sparse_split_op.cc @@ -63,11 +63,18 @@ class SparseSplitOp : public OpKernel { input_shape.vec()(split_dim), "), got ", num_split_)); + // Prevent overflow by constructing the dense shape separately + TensorShape dense_shape; + const auto input_shape_flat = input_shape.flat(); + for (int i = 0; i < input_shape.NumElements(); i++) { + OP_REQUIRES_OK(context, + dense_shape.AddDimWithStatus(input_shape_flat(i))); + } + sparse::SparseTensor sparse_tensor; OP_REQUIRES_OK(context, - sparse::SparseTensor::Create( - input_indices, input_values, - TensorShape(input_shape.vec()), &sparse_tensor)); + sparse::SparseTensor::Create(input_indices, input_values, + dense_shape, &sparse_tensor)); std::vector outputs; OP_REQUIRES_OK(context, From f76310aaa71e690ae359af3b4d7f59efa56aa7b9 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 27 Apr 2021 15:37:08 -0700 Subject: [PATCH 178/243] Fix division by zero in TFLite padding. PiperOrigin-RevId: 370777494 Change-Id: Ic1331e4a1603b9e4c8aa183012a6c8237410aa0f --- tensorflow/lite/kernels/padding.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/lite/kernels/padding.h b/tensorflow/lite/kernels/padding.h index 1116b1da852cf6..6b4ab7fa58d1aa 100644 --- a/tensorflow/lite/kernels/padding.h +++ b/tensorflow/lite/kernels/padding.h @@ -44,6 +44,11 @@ inline int ComputePaddingWithOffset(int stride, int dilation_rate, int in_size, inline int ComputeOutSize(TfLitePadding padding, int image_size, int filter_size, int stride, int dilation_rate = 1) { int effective_filter_size = (filter_size - 1) * dilation_rate + 1; + + // TODO(b/186448822): This uses 0 since the function has no other way to + // report error case + if (stride == 0) return 0; + switch (padding) { case kTfLitePaddingSame: return (image_size + stride - 1) / stride; From 5500bcc40f6d267eca67ea1c8099eca3bf458c2c Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 27 Apr 2021 17:45:43 -0700 Subject: [PATCH 179/243] Prevent another div by 0 in optimized pooling implementations TFLite PiperOrigin-RevId: 370800091 Change-Id: I2119352f57fb5ca4f2051e0e2d749403304a979b --- tensorflow/lite/kernels/pooling.cc | 4 ++++ tensorflow/lite/kernels/pooling_test.cc | 13 +++++++++++++ 2 files changed, 17 insertions(+) diff --git a/tensorflow/lite/kernels/pooling.cc b/tensorflow/lite/kernels/pooling.cc index 71dd349481c343..fabcb833ceab40 100644 --- a/tensorflow/lite/kernels/pooling.cc +++ b/tensorflow/lite/kernels/pooling.cc @@ -83,6 +83,10 @@ TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { auto padding = params->padding; int out_width, out_height; + // Prevent division by 0 in optimized pooling implementations + TF_LITE_ENSURE(context, params->stride_height > 0); + TF_LITE_ENSURE(context, params->stride_width > 0); + data->padding = ComputePaddingHeightWidth( params->stride_height, params->stride_width, 1, 1, height, width, params->filter_height, params->filter_width, padding, &out_height, diff --git a/tensorflow/lite/kernels/pooling_test.cc b/tensorflow/lite/kernels/pooling_test.cc index 1b371361a4d875..ec3ba072e7c9c3 100644 --- a/tensorflow/lite/kernels/pooling_test.cc +++ b/tensorflow/lite/kernels/pooling_test.cc @@ -1083,5 +1083,18 @@ TEST(FloatPoolingOpTest, L2PoolPaddingValidSlide1) { EXPECT_THAT(m.GetOutput(), ElementsAreArray({3.5, 6.0, 6.5})); } +#ifdef GTEST_HAS_DEATH_TEST +TEST(FloatPoolingOpTest, MaxPoolWithZeroStride) { + EXPECT_DEATH( + FloatPoolingOpModel m(BuiltinOperator_MAX_POOL_2D, + /*input=*/{TensorType_FLOAT32, {1, 2, 4, 1}}, + /*filter_width=*/2, /*filter_height=*/2, + /*output=*/{TensorType_FLOAT32, {}}, + /*padding=*/Padding_VALID, + /*stride_w=*/0, /*stride_h=*/0), + "Cannot allocate tensors"); +} +#endif + } // namespace } // namespace tflite From 32009c8b283ecdfe50c2ff4329447ffbf3bc1aa8 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 27 Apr 2021 17:45:57 -0700 Subject: [PATCH 180/243] Prevent one more div by 0 in TFLite PiperOrigin-RevId: 370800114 Change-Id: I6b956aeb8c458cc6f514408d2e89ffacfe249e57 --- tensorflow/lite/kernels/space_to_depth.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/lite/kernels/space_to_depth.cc b/tensorflow/lite/kernels/space_to_depth.cc index 573ffe66e50a6c..56cc1deb7b8a3e 100644 --- a/tensorflow/lite/kernels/space_to_depth.cc +++ b/tensorflow/lite/kernels/space_to_depth.cc @@ -55,6 +55,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, input->type, output->type); const int block_size = params->block_size; + TF_LITE_ENSURE(context, block_size > 0); const int input_height = input->dims->data[1]; const int input_width = input->dims->data[2]; int output_height = input_height / block_size; From b7beecbfdc49ccf34ba2f1539b53fc003c428fa9 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 27 Apr 2021 17:46:10 -0700 Subject: [PATCH 181/243] Handle one more division by 0 in TFLite. PiperOrigin-RevId: 370800140 Change-Id: I9ab42e5aaccf02f226d1282611490a54cf7d273e --- tensorflow/lite/kernels/gather_nd.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/lite/kernels/gather_nd.cc b/tensorflow/lite/kernels/gather_nd.cc index 20e98652ee57ec..b8698b3ea2021b 100644 --- a/tensorflow/lite/kernels/gather_nd.cc +++ b/tensorflow/lite/kernels/gather_nd.cc @@ -130,6 +130,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* indices = GetInput(context, node, kIndices); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + // Prevent division by 0 in the helper + TF_LITE_ENSURE(context, NumElements(params) > 0); + switch (indices->type) { case kTfLiteInt32: return EvalGatherNd(context, params, indices, output); From 5a473c58eceecb9fcf1f9cb5bb411ca3a20aa071 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 27 Apr 2021 17:46:25 -0700 Subject: [PATCH 182/243] Fix another division by 0 in TFLite PiperOrigin-RevId: 370800181 Change-Id: I924809166a6131f5075e6d45c455106538d755f9 --- tensorflow/lite/kernels/transpose_conv.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/lite/kernels/transpose_conv.cc b/tensorflow/lite/kernels/transpose_conv.cc index c4447b2a468e52..98348bb343a64b 100644 --- a/tensorflow/lite/kernels/transpose_conv.cc +++ b/tensorflow/lite/kernels/transpose_conv.cc @@ -392,6 +392,10 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const auto* params = reinterpret_cast(node->builtin_data); + // Prevent divisions by 0 + TF_LITE_ENSURE(context, params->stride_height > 0); + TF_LITE_ENSURE(context, params->stride_width > 0); + // Resize any deferred dynamic tensors if (IsDynamicTensor(output)) { TF_LITE_ENSURE_OK(context, ResizeTensor(context, output_shape, output)); From 2d53bd19504531374b560fbfc4eceb7815dc1b05 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 27 Apr 2021 17:46:38 -0700 Subject: [PATCH 183/243] [CherryPick]: Prevent a null pointer exception in TFLite --- tensorflow/lite/kernels/maximum_minimum.cc | 58 +++++++++++----------- 1 file changed, 30 insertions(+), 28 deletions(-) diff --git a/tensorflow/lite/kernels/maximum_minimum.cc b/tensorflow/lite/kernels/maximum_minimum.cc index 76e51d18741be5..6feac70f684b03 100644 --- a/tensorflow/lite/kernels/maximum_minimum.cc +++ b/tensorflow/lite/kernels/maximum_minimum.cc @@ -100,34 +100,36 @@ template TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { OpContext op_context(context, node); - if (kernel_type == kReference) { - switch (op_context.output->type) { - case kTfLiteFloat32: - TFLiteOperation(context, node, op_context); - break; - case kTfLiteUInt8: - TFLiteOperation(context, node, op_context); - break; - case kTfLiteInt8: - TFLiteOperation(context, node, op_context); - break; - case kTfLiteInt32: - TFLiteOperation(context, node, op_context); - break; - case kTfLiteInt64: - TFLiteOperation(context, node, op_context); - break; - default: - context->ReportError(context, - "Type %d is currently not supported by Maximum.", - op_context.output->type); - return kTfLiteError; - } - } else { - context->ReportError(context, - "Type %d is currently not supported by Maximum.", - op_context.output->type); - return kTfLiteError; + // If inputs have no element, shortcircuit. + if (NumElements(op_context.input1) == 0 || + NumElements(op_context.input2) == 0) { + return kTfLiteOk; + } + + switch (op_context.output->type) { + case kTfLiteFloat32: + TFLiteOperation(context, node, op_context); + break; + case kTfLiteUInt8: + TFLiteOperation(context, node, op_context); + break; + case kTfLiteInt8: + TFLiteOperation(context, node, op_context); + break; + case kTfLiteInt32: + TFLiteOperation(context, node, op_context); + break; + case kTfLiteInt64: + TFLiteOperation(context, node, op_context); + break; + case kTfLiteInt16: + TFLiteOperation(context, node, op_context); + break; + default: + context->ReportError(context, + "Type %d is currently not supported by Maximum.", + op_context.output->type); + return kTfLiteError; } return kTfLiteOk; } From dad0ed803834b0e3daae0f8246deafaec9576da5 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 27 Apr 2021 17:47:59 -0700 Subject: [PATCH 184/243] Prevent a null pointer dereference in TFLite. PiperOrigin-RevId: 370800353 Change-Id: Ic9c9712ce5c6e384c954dcd640a5bd9ff05c9a05 --- tensorflow/lite/core/subgraph.cc | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/tensorflow/lite/core/subgraph.cc b/tensorflow/lite/core/subgraph.cc index 483aa98a96de4d..4f4394243aef92 100644 --- a/tensorflow/lite/core/subgraph.cc +++ b/tensorflow/lite/core/subgraph.cc @@ -794,10 +794,17 @@ TfLiteStatus Subgraph::Invoke() { TF_LITE_ENSURE_STATUS(EnsureTensorDataIsReadable(tensor_index)); } if (tensor->data.raw == nullptr && tensor->bytes > 0) { - if (registration.builtin_code == kTfLiteBuiltinReshape && i == 1) { + if (registration.builtin_code == kTfLiteBuiltinReshape && i == 1 && + tensor->dims->size != 1) { // In general, having a tensor here with no buffer will be an error. - // However, for the reshape operator, the second input tensor is only - // used for the shape, not for the data. Thus, null buffer is ok. + // However, for the reshape operator, the second input tensor is + // sometimes only used for the shape, not for the data. Thus, null + // buffer is ok in this situation. + // The situation where null buffer is not ok for reshape operator is + // only when there are 2 inputs given to the node and the one + // corresponding to the shape (i == 1) is a vector that contains all + // dimensions. See `GetOutputShape()` function in + // `tensorflow/lite/kernels/reshape.cc` continue; } else { // In all other cases, we need to return an error as otherwise we will From 561ff0219ed12c9a4906ddb6bf94d145a1bab8d1 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 26 May 2021 10:50:14 -0700 Subject: [PATCH 185/243] Update tensorflow/lite/kernels/maximum_minimum.cc --- tensorflow/lite/kernels/maximum_minimum.cc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tensorflow/lite/kernels/maximum_minimum.cc b/tensorflow/lite/kernels/maximum_minimum.cc index 6feac70f684b03..006452b242973f 100644 --- a/tensorflow/lite/kernels/maximum_minimum.cc +++ b/tensorflow/lite/kernels/maximum_minimum.cc @@ -108,22 +108,22 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { switch (op_context.output->type) { case kTfLiteFloat32: - TFLiteOperation(context, node, op_context); + TFLiteOperation(context, node, op_context); break; case kTfLiteUInt8: - TFLiteOperation(context, node, op_context); + TFLiteOperation(context, node, op_context); break; case kTfLiteInt8: - TFLiteOperation(context, node, op_context); + TFLiteOperation(context, node, op_context); break; case kTfLiteInt32: - TFLiteOperation(context, node, op_context); + TFLiteOperation(context, node, op_context); break; case kTfLiteInt64: - TFLiteOperation(context, node, op_context); + TFLiteOperation(context, node, op_context); break; case kTfLiteInt16: - TFLiteOperation(context, node, op_context); + TFLiteOperation(context, node, op_context); break; default: context->ReportError(context, From bdb4eba8d7b79bf74b8393b9ef324eb8e40a96de Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 26 May 2021 10:52:17 -0700 Subject: [PATCH 186/243] Revert "[CherryPick]: Prevent a null pointer exception in TFLite" --- tensorflow/lite/kernels/maximum_minimum.cc | 58 +++++++++++----------- 1 file changed, 28 insertions(+), 30 deletions(-) diff --git a/tensorflow/lite/kernels/maximum_minimum.cc b/tensorflow/lite/kernels/maximum_minimum.cc index 006452b242973f..76e51d18741be5 100644 --- a/tensorflow/lite/kernels/maximum_minimum.cc +++ b/tensorflow/lite/kernels/maximum_minimum.cc @@ -100,36 +100,34 @@ template TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { OpContext op_context(context, node); - // If inputs have no element, shortcircuit. - if (NumElements(op_context.input1) == 0 || - NumElements(op_context.input2) == 0) { - return kTfLiteOk; - } - - switch (op_context.output->type) { - case kTfLiteFloat32: - TFLiteOperation(context, node, op_context); - break; - case kTfLiteUInt8: - TFLiteOperation(context, node, op_context); - break; - case kTfLiteInt8: - TFLiteOperation(context, node, op_context); - break; - case kTfLiteInt32: - TFLiteOperation(context, node, op_context); - break; - case kTfLiteInt64: - TFLiteOperation(context, node, op_context); - break; - case kTfLiteInt16: - TFLiteOperation(context, node, op_context); - break; - default: - context->ReportError(context, - "Type %d is currently not supported by Maximum.", - op_context.output->type); - return kTfLiteError; + if (kernel_type == kReference) { + switch (op_context.output->type) { + case kTfLiteFloat32: + TFLiteOperation(context, node, op_context); + break; + case kTfLiteUInt8: + TFLiteOperation(context, node, op_context); + break; + case kTfLiteInt8: + TFLiteOperation(context, node, op_context); + break; + case kTfLiteInt32: + TFLiteOperation(context, node, op_context); + break; + case kTfLiteInt64: + TFLiteOperation(context, node, op_context); + break; + default: + context->ReportError(context, + "Type %d is currently not supported by Maximum.", + op_context.output->type); + return kTfLiteError; + } + } else { + context->ReportError(context, + "Type %d is currently not supported by Maximum.", + op_context.output->type); + return kTfLiteError; } return kTfLiteOk; } From af82909d8209cceafbecdf7e8fc08504fd0f2e10 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 26 May 2021 10:52:32 -0700 Subject: [PATCH 187/243] Revert "Revert "[CherryPick]: Prevent a null pointer exception in TFLite"" --- tensorflow/lite/kernels/maximum_minimum.cc | 58 +++++++++++----------- 1 file changed, 30 insertions(+), 28 deletions(-) diff --git a/tensorflow/lite/kernels/maximum_minimum.cc b/tensorflow/lite/kernels/maximum_minimum.cc index 76e51d18741be5..006452b242973f 100644 --- a/tensorflow/lite/kernels/maximum_minimum.cc +++ b/tensorflow/lite/kernels/maximum_minimum.cc @@ -100,34 +100,36 @@ template TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { OpContext op_context(context, node); - if (kernel_type == kReference) { - switch (op_context.output->type) { - case kTfLiteFloat32: - TFLiteOperation(context, node, op_context); - break; - case kTfLiteUInt8: - TFLiteOperation(context, node, op_context); - break; - case kTfLiteInt8: - TFLiteOperation(context, node, op_context); - break; - case kTfLiteInt32: - TFLiteOperation(context, node, op_context); - break; - case kTfLiteInt64: - TFLiteOperation(context, node, op_context); - break; - default: - context->ReportError(context, - "Type %d is currently not supported by Maximum.", - op_context.output->type); - return kTfLiteError; - } - } else { - context->ReportError(context, - "Type %d is currently not supported by Maximum.", - op_context.output->type); - return kTfLiteError; + // If inputs have no element, shortcircuit. + if (NumElements(op_context.input1) == 0 || + NumElements(op_context.input2) == 0) { + return kTfLiteOk; + } + + switch (op_context.output->type) { + case kTfLiteFloat32: + TFLiteOperation(context, node, op_context); + break; + case kTfLiteUInt8: + TFLiteOperation(context, node, op_context); + break; + case kTfLiteInt8: + TFLiteOperation(context, node, op_context); + break; + case kTfLiteInt32: + TFLiteOperation(context, node, op_context); + break; + case kTfLiteInt64: + TFLiteOperation(context, node, op_context); + break; + case kTfLiteInt16: + TFLiteOperation(context, node, op_context); + break; + default: + context->ReportError(context, + "Type %d is currently not supported by Maximum.", + op_context.output->type); + return kTfLiteError; } return kTfLiteOk; } From 1450b34f46549ad83ed57f2ff2420dbed6b1a910 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 26 May 2021 10:54:38 -0700 Subject: [PATCH 188/243] Update tensorflow/lite/kernels/maximum_minimum.cc --- tensorflow/lite/kernels/maximum_minimum.cc | 55 ++++++++++++---------- 1 file changed, 31 insertions(+), 24 deletions(-) diff --git a/tensorflow/lite/kernels/maximum_minimum.cc b/tensorflow/lite/kernels/maximum_minimum.cc index 006452b242973f..ab7b37270c95ba 100644 --- a/tensorflow/lite/kernels/maximum_minimum.cc +++ b/tensorflow/lite/kernels/maximum_minimum.cc @@ -106,30 +106,37 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { return kTfLiteOk; } - switch (op_context.output->type) { - case kTfLiteFloat32: - TFLiteOperation(context, node, op_context); - break; - case kTfLiteUInt8: - TFLiteOperation(context, node, op_context); - break; - case kTfLiteInt8: - TFLiteOperation(context, node, op_context); - break; - case kTfLiteInt32: - TFLiteOperation(context, node, op_context); - break; - case kTfLiteInt64: - TFLiteOperation(context, node, op_context); - break; - case kTfLiteInt16: - TFLiteOperation(context, node, op_context); - break; - default: - context->ReportError(context, - "Type %d is currently not supported by Maximum.", - op_context.output->type); - return kTfLiteError; + if (kernel_type == kReference) { + switch (op_context.output->type) { + case kTfLiteFloat32: + TFLiteOperation(context, node, op_context); + break; + case kTfLiteUInt8: + TFLiteOperation(context, node, op_context); + break; + case kTfLiteInt8: + TFLiteOperation(context, node, op_context); + break; + case kTfLiteInt32: + TFLiteOperation(context, node, op_context); + break; + case kTfLiteInt64: + TFLiteOperation(context, node, op_context); + break; + case kTfLiteInt16: + TFLiteOperation(context, node, op_context); + break; + default: + context->ReportError(context, + "Type %d is currently not supported by Maximum.", + op_context.output->type); + return kTfLiteError; + } + } else { + context->ReportError(context, + "Type %d is currently not supported by Maximum.", + op_context.output->type); + return kTfLiteError; } return kTfLiteOk; } From 7a76d1d4e4bb5583383e52fc1a22ad4404688ee1 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 26 May 2021 10:55:23 -0700 Subject: [PATCH 189/243] Update tensorflow/lite/kernels/maximum_minimum.cc --- tensorflow/lite/kernels/maximum_minimum.cc | 3 --- 1 file changed, 3 deletions(-) diff --git a/tensorflow/lite/kernels/maximum_minimum.cc b/tensorflow/lite/kernels/maximum_minimum.cc index ab7b37270c95ba..7510c83db78857 100644 --- a/tensorflow/lite/kernels/maximum_minimum.cc +++ b/tensorflow/lite/kernels/maximum_minimum.cc @@ -123,9 +123,6 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { case kTfLiteInt64: TFLiteOperation(context, node, op_context); break; - case kTfLiteInt16: - TFLiteOperation(context, node, op_context); - break; default: context->ReportError(context, "Type %d is currently not supported by Maximum.", From 77c37ac28f32aaf5b9d790396f550f4845b41a58 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 26 May 2021 10:55:28 -0700 Subject: [PATCH 190/243] Update tensorflow/lite/kernels/maximum_minimum.cc --- tensorflow/lite/kernels/maximum_minimum.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/lite/kernels/maximum_minimum.cc b/tensorflow/lite/kernels/maximum_minimum.cc index 7510c83db78857..690d9568c43f38 100644 --- a/tensorflow/lite/kernels/maximum_minimum.cc +++ b/tensorflow/lite/kernels/maximum_minimum.cc @@ -133,7 +133,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { context->ReportError(context, "Type %d is currently not supported by Maximum.", op_context.output->type); - return kTfLiteError; + return kTfLiteError; } return kTfLiteOk; } From ac7465dee7c614791c4d9022ed9aec636badb68a Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 27 Apr 2021 17:47:46 -0700 Subject: [PATCH 191/243] Prevent infinite loop/stack overflow in TFLite `while` op. PiperOrigin-RevId: 370800333 Change-Id: I6a2e4ff849da339545c449db2af7e11ce6ff02c3 --- tensorflow/lite/kernels/while.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/lite/kernels/while.cc b/tensorflow/lite/kernels/while.cc index 8951647bf43bd7..b5902706d7cc3f 100644 --- a/tensorflow/lite/kernels/while.cc +++ b/tensorflow/lite/kernels/while.cc @@ -132,6 +132,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* subgraphs = this_subgraph->GetSubgraphs(); TF_LITE_ENSURE(context, op_data->cond_subgraph_index < subgraphs->size()); TF_LITE_ENSURE(context, op_data->body_subgraph_index < subgraphs->size()); + TF_LITE_ENSURE(context, + op_data->cond_subgraph_index != op_data->body_subgraph_index); Subgraph* cond_subgraph = (*subgraphs)[op_data->cond_subgraph_index].get(); Subgraph* body_subgraph = (*subgraphs)[op_data->body_subgraph_index].get(); From 37c13da53a00217ceab094c9eec0362c4234657e Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 28 Apr 2021 12:37:35 -0700 Subject: [PATCH 192/243] CherryPick]:Prevent division by 0. --- tensorflow/lite/kernels/conv.cc | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tensorflow/lite/kernels/conv.cc b/tensorflow/lite/kernels/conv.cc index 49090075626cff..191711bd40e6d7 100644 --- a/tensorflow/lite/kernels/conv.cc +++ b/tensorflow/lite/kernels/conv.cc @@ -427,6 +427,7 @@ TfLiteStatus Prepare(KernelType kernel_type, TfLiteContext* context, // Only one scale factor per batch is typically necessary. See optimized // implementation for why we need to allocate for the height of the inputs // flattened to 2D. + TF_LITE_ENSURE(context, channels_in != 0); const int height = NumElements(input) / channels_in; int scaling_dims[1] = {height}; if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { @@ -449,6 +450,7 @@ TfLiteStatus Prepare(KernelType kernel_type, TfLiteContext* context, input_offsets->type = kTfLiteInt32; input_offsets->allocation_type = kTfLiteArenaRw; // See above comment for the need to allocate for height of inputs. + TF_LITE_ENSURE(context, channels_in != 0); const int height = NumElements(input) / channels_in; int scaling_dims[1] = {height}; if (!TfLiteIntArrayEqualsArray(input_offsets->dims, 1, scaling_dims)) { @@ -656,8 +658,9 @@ void EvalHybridPerChannel(TfLiteContext* context, TfLiteNode* node, CalculateActivationRange(params->activation, &output_activation_min, &output_activation_max); - const int input_size = NumElements(input) / SizeOfDimension(input, 0); const int batch_size = SizeOfDimension(input, 0); + TF_LITE_ENSURE(context, batch_size != 0); + const int input_size = NumElements(input) / batch_size; const TfLiteTensor* input_quantized = GetTemporary(context, node, data->input_quantized_index); int8_t* quantized_input_ptr_batch = input_quantized->data.int8; @@ -726,8 +729,9 @@ void EvalHybrid(TfLiteContext* context, TfLiteNode* node, CalculateActivationRange(params->activation, &output_activation_min, &output_activation_max); - const int input_size = NumElements(input) / SizeOfDimension(input, 0); const int batch_size = SizeOfDimension(input, 0); + TF_LITE_ENSURE(context, batch_size != 0); + const int input_size = NumElements(input) / batch_size; float* input_ptr = GetTensorData(input); int8_t* quantized_input_ptr_batch = GetTensorData( From e2b12ce21914057a243685bb5024d9eaf3f8a1b8 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 28 Apr 2021 12:57:00 -0700 Subject: [PATCH 193/243] Prevent division by 0 PiperOrigin-RevId: 370966645 Change-Id: I831bfd96c7eb77b02d7ebb744335f59f6e5728cb --- tensorflow/lite/kernels/embedding_lookup.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/lite/kernels/embedding_lookup.cc b/tensorflow/lite/kernels/embedding_lookup.cc index c97cffe14e18eb..ca76e1068cd338 100644 --- a/tensorflow/lite/kernels/embedding_lookup.cc +++ b/tensorflow/lite/kernels/embedding_lookup.cc @@ -74,6 +74,10 @@ TfLiteStatus EvalSimple(TfLiteContext* context, TfLiteNode* node, const TfLiteTensor* lookup, const TfLiteTensor* value, TfLiteTensor* output) { const int row_size = SizeOfDimension(value, 0); + if (row_size == 0) { + // Propagate empty tensor if input is empty + return kTfLiteOk; + } const int row_bytes = value->bytes / row_size; char* output_raw = GetTensorData(output); From 06ed916d34e38b1c366ff1d32f6e8667751669a8 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 28 Apr 2021 14:22:30 -0700 Subject: [PATCH 194/243] Prevent division by 0 PiperOrigin-RevId: 370984990 Change-Id: Ib324955bbeb1cbd97c82fd5d61a00a2697c9a2de --- tensorflow/lite/kernels/space_to_batch_nd.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/lite/kernels/space_to_batch_nd.cc b/tensorflow/lite/kernels/space_to_batch_nd.cc index 2fb7198cd67e8b..c045e799ebfed4 100644 --- a/tensorflow/lite/kernels/space_to_batch_nd.cc +++ b/tensorflow/lite/kernels/space_to_batch_nd.cc @@ -73,6 +73,7 @@ TfLiteStatus ResizeOutputTensor(TfLiteContext* context, for (int dim = 0; dim < kSpatialDimensionNum; ++dim) { int final_dim_size = (input_size->data[dim + 1] + paddings_data[dim * 2] + paddings_data[dim * 2 + 1]); + TF_LITE_ENSURE(context, block_shape[dim] != 0); TF_LITE_ENSURE_EQ(context, final_dim_size % block_shape[dim], 0); output_size->data[dim + 1] = final_dim_size / block_shape[dim]; } From fc45a66f1b1b11d4a21354b6d06faf2f39413b10 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 28 Apr 2021 15:13:03 -0700 Subject: [PATCH 195/243] Prevent division by 0 PiperOrigin-RevId: 370995582 Change-Id: I670ffaf52d1ff8823ec31ea5f438f9125b402223 --- tensorflow/lite/kernels/svdf.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/lite/kernels/svdf.cc b/tensorflow/lite/kernels/svdf.cc index ae6d85d6e6cddb..16714a37426526 100644 --- a/tensorflow/lite/kernels/svdf.cc +++ b/tensorflow/lite/kernels/svdf.cc @@ -97,6 +97,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { const int rank = params->rank; const int batch_size = input->dims->data[0]; const int num_filters = weights_feature->dims->data[0]; + TF_LITE_ENSURE(context, rank != 0); TF_LITE_ENSURE_EQ(context, num_filters % rank, 0); const int num_units = num_filters / rank; const int memory_size = weights_time->dims->data[1]; From d7b2df1b18efe42d697093b9ea5525cbbd905d51 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 28 Apr 2021 15:31:26 -0700 Subject: [PATCH 196/243] Prevent division by 0 PiperOrigin-RevId: 370998952 Change-Id: I6b1d49079624ee1447d2d9b53a8976fb356cc8f5 --- tensorflow/lite/kernels/split.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/lite/kernels/split.cc b/tensorflow/lite/kernels/split.cc index ec3dd54ebf723a..f258d5b35327af 100644 --- a/tensorflow/lite/kernels/split.cc +++ b/tensorflow/lite/kernels/split.cc @@ -57,6 +57,7 @@ TfLiteStatus ResizeOutputTensors(TfLiteContext* context, TfLiteNode* node, TF_LITE_ENSURE(context, axis_value < NumDimensions(input)); const int input_size = SizeOfDimension(input, axis_value); + TF_LITE_ENSURE(context, num_splits != 0); TF_LITE_ENSURE_MSG(context, input_size % num_splits == 0, "Not an even split"); const int slice_size = input_size / num_splits; From 7d8787e1297f204b5d892c1fcc7874d668ff0917 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 28 Apr 2021 12:58:07 -0700 Subject: [PATCH 197/243] Prevent division by 0 in OneHot implementation If input indices is degenerate, the implementation would do a divide by zero. See https://github.com/tensorflow/tensorflow/blob/745d57df6d5e9bc568666a2a48ed8dd629c27241/tensorflow/lite/kernels/one_hot.cc#L68-L72 PiperOrigin-RevId: 370966870 Change-Id: Ie018337811c8016b5a1d3a277d00d5f2e19a2058 --- tensorflow/lite/kernels/one_hot.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/lite/kernels/one_hot.cc b/tensorflow/lite/kernels/one_hot.cc index 2ac12fe9308f38..fd4f67da0943be 100644 --- a/tensorflow/lite/kernels/one_hot.cc +++ b/tensorflow/lite/kernels/one_hot.cc @@ -67,6 +67,11 @@ void OneHotComputeImpl(const OneHotContext& op_context) { for (int i = 0; i < op_context.axis; ++i) { prefix_dim_size *= op_context.indices->dims->data[i]; } + if (prefix_dim_size == 0) { + // If indices tensor is degenerate, return a degenerate tensor, just like + // TensorFlow does. + return; + } const int suffix_dim_size = NumElements(op_context.indices) / prefix_dim_size; const int depth = *op_context.depth->data.i32; From 34c68a212b738dd7a938c759313f878d410f2eb5 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 28 Apr 2021 15:53:48 -0700 Subject: [PATCH 198/243] Prevent divisions by 0 --- tensorflow/lite/kernels/depthwise_conv.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/lite/kernels/depthwise_conv.cc b/tensorflow/lite/kernels/depthwise_conv.cc index 38a3f34709e5bc..659acdd9e020ac 100644 --- a/tensorflow/lite/kernels/depthwise_conv.cc +++ b/tensorflow/lite/kernels/depthwise_conv.cc @@ -175,8 +175,8 @@ TfLiteStatus ComputeDepthMultiplier(TfLiteContext* context, int16* depth_multiplier) { int num_filter_channels = SizeOfDimension(filter, 3); int num_input_channels = SizeOfDimension(input, 3); + TF_LITE_ENSURE(context, num_input_channels != 0); TF_LITE_ENSURE_EQ(context, num_filter_channels % num_input_channels, 0); - *depth_multiplier = num_filter_channels / num_input_channels; return kTfLiteOk; } From a223f98ff4309c549c194669d5a5a228f462bcf9 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 28 Apr 2021 16:16:56 -0700 Subject: [PATCH 199/243] Prevent a division by 0 PiperOrigin-RevId: 371007407 Change-Id: Iecf2718de48d6bf5a69b02a9df9deda8ec1b19d3 --- tensorflow/lite/kernels/hashtable_lookup.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/lite/kernels/hashtable_lookup.cc b/tensorflow/lite/kernels/hashtable_lookup.cc index da1116cf858667..19fde91e0a3ac9 100644 --- a/tensorflow/lite/kernels/hashtable_lookup.cc +++ b/tensorflow/lite/kernels/hashtable_lookup.cc @@ -106,6 +106,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* value = GetInput(context, node, 2); const int num_rows = SizeOfDimension(value, 0); + TF_LITE_ENSURE(context, num_rows != 0); const int row_bytes = value->bytes / num_rows; void* pointer = nullptr; DynamicBuffer buf; From 9475fc132227f42e53a49442d80257e8e17ba2f0 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 28 Apr 2021 16:50:55 -0700 Subject: [PATCH 200/243] Fix integer overflow in TFLite concat PiperOrigin-RevId: 371013841 Change-Id: I6a4782ce7ca753e23ff31e7fb6aeb7f9d412cd29 --- tensorflow/lite/kernels/concatenation.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tensorflow/lite/kernels/concatenation.cc b/tensorflow/lite/kernels/concatenation.cc index 870a10abfc4035..6ee31b248fe6ac 100644 --- a/tensorflow/lite/kernels/concatenation.cc +++ b/tensorflow/lite/kernels/concatenation.cc @@ -19,6 +19,8 @@ limitations under the License. #include #include +#include + #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/c_api_internal.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" @@ -69,6 +71,10 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, t->type, input_type); for (int d = 0; d < t0->dims->size; ++d) { if (d == axis) { + // Avoid integer overflow in sum_axis below + TF_LITE_ENSURE(context, t->dims->data[axis] >= 0); + TF_LITE_ENSURE(context, t->dims->data[axis] <= + std::numeric_limits::max() - sum_axis); sum_axis += t->dims->data[axis]; } else { TF_LITE_ENSURE_EQ(context, t->dims->data[d], t0->dims->data[d]); From bafe6745baae6421e7fa1c99cb1be99aa69ba035 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 29 Apr 2021 19:43:09 -0700 Subject: [PATCH 201/243] Fix a dangerous integer overflow and a malloc of negative size. --- tensorflow/lite/kernels/embedding_lookup_sparse.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/lite/kernels/embedding_lookup_sparse.cc b/tensorflow/lite/kernels/embedding_lookup_sparse.cc index 9546db7b7959b1..844cf30ce11311 100644 --- a/tensorflow/lite/kernels/embedding_lookup_sparse.cc +++ b/tensorflow/lite/kernels/embedding_lookup_sparse.cc @@ -160,6 +160,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { // Resize output tensor. TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_rank); + TF_LITE_ENSURE(context, output_shape != nullptr); int k = 0; int embedding_size = 1; int lookup_size = 1; From a61b528458f52801f20b0a9a5fe78297b95e1c3c Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 28 Apr 2021 17:50:10 -0700 Subject: [PATCH 202/243] Prevent array write out-of-bounds. If user passes an invalid axis, then we copy one too many dimensions to the output in the loop below these checks. Even if we didn't do that, there will be further issues with an invalid axis, so we check for that right now. PiperOrigin-RevId: 371023299 Change-Id: I9eca37ffc2b29e8e48710f500701270ef0790224 --- tensorflow/lite/kernels/arg_min_max.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/lite/kernels/arg_min_max.cc b/tensorflow/lite/kernels/arg_min_max.cc index 593e604aff64c9..de80be5509d776 100644 --- a/tensorflow/lite/kernels/arg_min_max.cc +++ b/tensorflow/lite/kernels/arg_min_max.cc @@ -36,6 +36,9 @@ TfLiteStatus ResizeOutput(TfLiteContext* context, const TfLiteTensor* input, axis_value += NumDimensions(input); } + TF_LITE_ENSURE(context, axis_value >= 0); + TF_LITE_ENSURE(context, axis_value < NumDimensions(input)); + // Copy the input dimensions to output except the axis dimension. TfLiteIntArray* output_dims = TfLiteIntArrayCreate(NumDimensions(input) - 1); int j = 0; From ef68faa9189a0b94ea601dcbc0cb46b4c37c73b8 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 28 Apr 2021 18:12:15 -0700 Subject: [PATCH 203/243] Prevent array OOB read/write PiperOrigin-RevId: 371026165 Change-Id: I26ac6372c87246e03c7eb8c94e84c84d86054b36 --- tensorflow/lite/kernels/split_v.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/lite/kernels/split_v.cc b/tensorflow/lite/kernels/split_v.cc index b888e8adfa6b12..a5fdf89ac199d8 100644 --- a/tensorflow/lite/kernels/split_v.cc +++ b/tensorflow/lite/kernels/split_v.cc @@ -90,6 +90,8 @@ TfLiteStatus ResizeOutputTensors(TfLiteContext* context, TfLiteNode* node, } } + TF_LITE_ENSURE(context, axis_value >= 0); + TF_LITE_ENSURE(context, axis_value < NumDimensions(input)); const int input_size = SizeOfDimension(input, axis_value); if (minus_one_index != -1) { From 1adf585e2b9f36f3991abd64e731df2422f940e9 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Tue, 13 Apr 2021 14:49:50 -0700 Subject: [PATCH 204/243] Fix invalid resize. --- tensorflow/core/kernels/ragged_tensor_to_variant_op.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc b/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc index 7a5ae1c6240b55..1457e5e2c73f7d 100644 --- a/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc +++ b/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc @@ -175,6 +175,11 @@ class RaggedTensorToVariantOp : public OpKernel { // Unbatch the Ragged Tensor and encode the components. std::vector ragged_components; + auto batched_splits_top_vec = + batched_ragged_input.splits(0).vec(); + int num_components = batched_splits_top_vec.size() - 1; + OP_REQUIRES(context, num_components >= 0, + errors::Internal("Invalid split argument.")); OP_REQUIRES_OK(context, UnbatchRaggedZerothDim( batched_ragged_input, &ragged_components)); std::vector encoded_components(ragged_components.size()); From 89d9c336b7d0f5031a4e15488a6f1ede918c6df3 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Thu, 15 Apr 2021 13:03:19 -0700 Subject: [PATCH 205/243] Fix failing CHECK. --- tensorflow/core/kernels/sparse_cross_op.cc | 38 +++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/sparse_cross_op.cc b/tensorflow/core/kernels/sparse_cross_op.cc index a16e34c7cb4424..7843e651c599c0 100644 --- a/tensorflow/core/kernels/sparse_cross_op.cc +++ b/tensorflow/core/kernels/sparse_cross_op.cc @@ -26,6 +26,7 @@ limitations under the License. #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.h" +#include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/lib/core/stringpiece.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/fingerprint.h" @@ -295,6 +296,7 @@ class SparseCrossOp : public OpKernel { int64 signed_hash_key_; OP_REQUIRES_OK(context, context->GetAttr("hash_key", &signed_hash_key_)); hash_key_ = static_cast(signed_hash_key_); + OP_REQUIRES_OK(context, context->GetAttr("internal_type", &internal_type_)); } void Compute(OpKernelContext* context) override { @@ -307,6 +309,10 @@ class SparseCrossOp : public OpKernel { OpInputList dense_list_in; OP_REQUIRES_OK(context, context->input_list("dense_inputs", &dense_list_in)); + DataType internal_type = internal_type_; + OP_REQUIRES_OK( + context, ValidateInput(indices_list_in, values_list_in, shapes_list_in, + dense_list_in, internal_type)); ValidateInput(context, indices_list_in, values_list_in, shapes_list_in, dense_list_in); @@ -352,10 +358,19 @@ class SparseCrossOp : public OpKernel { const OpInputList& indices_list_in, const OpInputList& values_list_in, const OpInputList& shapes_list_in, - const OpInputList& dense_list_in) { + const OpInputList& dense_list_in, + const DataType& internal_type) { const auto size = indices_list_in.size(); + // Only perform internal_type check for SparseCrossOp. + // Check if the internal_type is not invalid before doing so. + bool check_type = internal_type != DT_INVALID; // Validates indices_list_in OpInputList. for (int i = 0; i < size; i++) { + if (check_type && indices_list_in[i].dtype() != DT_INT64) { + return errors::InvalidArgument("Input indices should be of type ", + DT_INT64, " but received ", + indices_list_in[i].dtype()); + } OP_REQUIRES( context, TensorShapeUtils::IsMatrix(indices_list_in[i].shape()), errors::InvalidArgument( @@ -374,6 +389,14 @@ class SparseCrossOp : public OpKernel { errors::InvalidArgument("Expected ", size, " input values, got ", values_list_in.size())); for (int i = 0; i < size; i++) { + // Make sure to avoid the expected type to be string, but input values to be + // int64. + if (check_type && internal_type == DT_STRING && + values_list_in[i].dtype() == DT_INT64) { + return errors::InvalidArgument("Input values should be of internal type ", + internal_type, " but received ", + values_list_in[i].dtype()); + } OP_REQUIRES( context, TensorShapeUtils::IsVector(values_list_in[i].shape()), errors::InvalidArgument( @@ -396,6 +419,11 @@ class SparseCrossOp : public OpKernel { shapes_list_in.size())); const auto batch_size = CalculateBatchSize(shapes_list_in, dense_list_in); for (int i = 0; i < size; i++) { + if (check_type && shapes_list_in[i].dtype() != DT_INT64) { + return errors::InvalidArgument("Input shape should be of type ", DT_INT64, + " but received ", + shapes_list_in[i].dtype()); + } OP_REQUIRES( context, TensorShapeUtils::IsVector(shapes_list_in[i].shape()), errors::InvalidArgument( @@ -415,6 +443,14 @@ class SparseCrossOp : public OpKernel { // Validates dense_list_in OpInputList for (int i = 0; i < dense_list_in.size(); ++i) { + // Make sure to avoid the expected type to be string, but input values to be + // int64. + if (check_type && internal_type == DT_STRING && + dense_list_in[i].dtype() == DT_INT64) { + return errors::InvalidArgument("Dense inputs should be of internal type ", + internal_type, " but received ", + dense_list_in[i].dtype()); + } OP_REQUIRES( context, TensorShapeUtils::IsMatrix(dense_list_in[i].shape()), errors::InvalidArgument( From eceaa79647de4a98ee080457fd879f1ee69f5313 Mon Sep 17 00:00:00 2001 From: Geeta Chavan Date: Fri, 28 May 2021 14:18:40 -0700 Subject: [PATCH 206/243] Fix crash with tf.transpose when a is complex and conjugate is True --- tensorflow/core/kernels/transpose_functor.h | 2 +- tensorflow/python/kernel_tests/transpose_op_test.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/transpose_functor.h b/tensorflow/core/kernels/transpose_functor.h index a89fc40d772594..4b07a076296aa4 100644 --- a/tensorflow/core/kernels/transpose_functor.h +++ b/tensorflow/core/kernels/transpose_functor.h @@ -19,6 +19,7 @@ limitations under the License. #include #include #include + #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/platform/logging.h" @@ -166,7 +167,6 @@ template Status DoTransposeImpl(const Device& d, const Tensor& in, const gtl::ArraySlice perm, bool conjugate, Tensor* out) { - CHECK_GE(in.dims(), 2); CHECK_EQ(in.dims(), out->dims()); CHECK_EQ(in.dims(), perm.size()); CHECK_EQ(in.dtype(), out->dtype()); diff --git a/tensorflow/python/kernel_tests/transpose_op_test.py b/tensorflow/python/kernel_tests/transpose_op_test.py index 8d1fe388c55be1..6ae67bef8b86a1 100644 --- a/tensorflow/python/kernel_tests/transpose_op_test.py +++ b/tensorflow/python/kernel_tests/transpose_op_test.py @@ -373,6 +373,8 @@ def testDouble(self): @test_util.run_v1_only("b/120545219") def testComplex64(self): + self._testBoth(np.array(np.complex(1, 2)).astype(np.complex64)) + self._testBoth(np.complex(1, 2) * np.arange(0, 21).astype(np.complex64)) self._testBoth( np.complex(1, 2) * np.arange(0, 21).reshape([3, 7]).astype(np.complex64)) @@ -385,6 +387,8 @@ def testComplex64(self): @test_util.run_v1_only("b/120545219") def testComplex128(self): + self._testBoth(np.array(np.complex(1, 2)).astype(np.complex128)) + self._testBoth(np.complex(1, 2) * np.arange(0, 21).astype(np.complex128)) self._testBoth( np.complex(1, 2) * np.arange(0, 21).reshape([3, 7]).astype(np.complex128)) From a0a1b7b5dbe0fe060bec1142e57fd970fca65620 Mon Sep 17 00:00:00 2001 From: Geeta Chavan Date: Fri, 28 May 2021 16:10:08 -0700 Subject: [PATCH 207/243] PR #48739: Update jsoncpp to 1.9.4 --- tensorflow/workspace.bzl | 8 ++++---- third_party/jsoncpp.BUILD | 1 - 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl index b9a4136a3acd7f..420073e8606940 100755 --- a/tensorflow/workspace.bzl +++ b/tensorflow/workspace.bzl @@ -571,12 +571,12 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""): tf_http_archive( name = "jsoncpp_git", build_file = clean_dep("//third_party:jsoncpp.BUILD"), - sha256 = "c49deac9e0933bcb7044f08516861a2d560988540b23de2ac1ad443b219afdb6", - strip_prefix = "jsoncpp-1.8.4", + sha256 = "e34a628a8142643b976c7233ef381457efad79468c67cb1ae0b83a33d7493999", + strip_prefix = "jsoncpp-1.9.4", system_build_file = clean_dep("//third_party/systemlibs:jsoncpp.BUILD"), urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/github.com/open-source-parsers/jsoncpp/archive/1.8.4.tar.gz", - "https://github.com/open-source-parsers/jsoncpp/archive/1.8.4.tar.gz", + "https://storage.googleapis.com/mirror.tensorflow.org/github.com/open-source-parsers/jsoncpp/archive/1.9.4.tar.gz", + "https://github.com/open-source-parsers/jsoncpp/archive/1.9.4.tar.gz", ], ) diff --git a/third_party/jsoncpp.BUILD b/third_party/jsoncpp.BUILD index cf3cba05556a0b..f41964477ce963 100644 --- a/third_party/jsoncpp.BUILD +++ b/third_party/jsoncpp.BUILD @@ -12,7 +12,6 @@ cc_library( "src/lib_json/json_writer.cpp", ], hdrs = [ - "include/json/autolink.h", "include/json/config.h", "include/json/features.h", "include/json/forwards.h", From 74f34ad0731f9c6a2defe198c9904a00d5d93903 Mon Sep 17 00:00:00 2001 From: Laura Pak Date: Fri, 23 Apr 2021 10:33:00 -0700 Subject: [PATCH 208/243] Prevent memory overflow in ParseAttrValue from nested tensors. PiperOrigin-RevId: 370108442 Change-Id: I84d64a5e8895a6aeffbf4749841b4c54d51b5889 --- tensorflow/core/framework/attr_value_util.cc | 58 +++++++++++++++++++- 1 file changed, 57 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/framework/attr_value_util.cc b/tensorflow/core/framework/attr_value_util.cc index f911b5b7b6f6d5..1a332d1517ddc0 100644 --- a/tensorflow/core/framework/attr_value_util.cc +++ b/tensorflow/core/framework/attr_value_util.cc @@ -38,6 +38,9 @@ namespace { // Do not construct large tensors to compute their hash or compare for equality. constexpr int kMaxAttrValueTensorByteSize = 32 * 1024 * 1024; // 32mb +// Limit nesting of tensors to 100 deep to prevent memory overflow. +constexpr int kMaxTensorNestDepth = 100; + // Return the size of the tensor represented by this TensorProto. If shape is // not fully defined return -1. int64 TensorByteSize(const TensorProto& t) { @@ -224,6 +227,54 @@ string SummarizeFunc(const NameAttrList& func) { return strings::StrCat(func.name(), "[", absl::StrJoin(entries, ", "), "]"); } +bool ParseAttrValueHelper_TensorNestsUnderLimit(int limit, string to_parse) { + int nests = 0; + int maxed_out = to_parse.length(); + int open_curly = to_parse.find('{'); + int open_bracket = to_parse.find('<'); + int close_curly = to_parse.find('}'); + int close_bracket = to_parse.find('>'); + if (open_curly == -1) { + open_curly = maxed_out; + } + if (open_bracket == -1) { + open_bracket = maxed_out; + } + int min = std::min(open_curly, open_bracket); + do { + if (open_curly == maxed_out && open_bracket == maxed_out) { + return true; + } + if (min == open_curly) { + nests += 1; + open_curly = to_parse.find('{', open_curly + 1); + if (open_curly == -1) { + open_curly = maxed_out; + } + } else if (min == open_bracket) { + nests += 1; + open_bracket = to_parse.find('<', open_bracket + 1); + if (open_bracket == -1) { + open_bracket = maxed_out; + } + } else if (min == close_curly) { + nests -= 1; + close_curly = to_parse.find('}', close_curly + 1); + if (close_curly == -1) { + close_curly = maxed_out; + } + } else if (min == close_bracket) { + nests -= 1; + close_bracket = to_parse.find('>', close_bracket + 1); + if (close_bracket == -1) { + close_bracket = maxed_out; + } + } + min = std::min({open_curly, open_bracket, close_curly, close_bracket}); + } while (nests < 100); + return false; +} + } // namespace string SummarizeAttrValue(const AttrValue& attr_value) { @@ -448,7 +499,12 @@ bool ParseAttrValue(StringPiece type, StringPiece text, AttrValue* out) { } else { to_parse = strings::StrCat(field_name, ": ", text); } - + if (field_name == "tensor") { + if (!ParseAttrValueHelper_TensorNestsUnderLimit(kMaxTensorNestDepth, + to_parse)) { + return false; + } + } return ProtoParseFromString(to_parse, out); } From 3b002b9c7f5b1559707c0ab3d7baac4c6455a134 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 11 May 2021 15:22:49 -0700 Subject: [PATCH 209/243] Fix heap OOB / undefined behavior in `RaggedTensorToTensor` PiperOrigin-RevId: 373244623 Change-Id: I2d6cbbc8c67b238a8815bf58097f7586d87c54f2 --- .../kernels/ragged_tensor_to_tensor_op.cc | 55 ++++++++++++------- 1 file changed, 35 insertions(+), 20 deletions(-) diff --git a/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc b/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc index cf3efcd49b4886..b961468f274d0f 100644 --- a/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc +++ b/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc @@ -207,8 +207,8 @@ class RaggedTensorToTensorBaseOp : public OpKernel { DCHECK_EQ(result->size(), first_dimension); } - void CalculateOutputIndexRowSplit( - OpKernelContext* context, const RowPartitionTensor& row_split, + Status CalculateOutputIndexRowSplit( + const RowPartitionTensor& row_split, const vector& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector* result) { @@ -232,10 +232,11 @@ class RaggedTensorToTensorBaseOp : public OpKernel { result->push_back(-1); } } - if (row_split_size > 0) { - OP_REQUIRES(context, result->size() == row_split(row_split_size - 1), - errors::InvalidArgument("Invalid row split size.")); + if (row_split_size > 0 && result->size() != row_split(row_split_size - 1)) { + return errors::InvalidArgument("Invalid row split size."); } + + return Status::OK(); } // Calculate the output index of the first element of a list. @@ -259,20 +260,26 @@ class RaggedTensorToTensorBaseOp : public OpKernel { // result[6] = -1 because parent_output_index[value_rowids[6]] == -1 // result[7] = -1 because parent_output_index[value_rowids[6]] == -1 // result[8] = parent_output_index[value_rowids[7]] - void CalculateOutputIndexValueRowID( - OpKernelContext* context, const RowPartitionTensor& value_rowids, + Status CalculateOutputIndexValueRowID( + const RowPartitionTensor& value_rowids, const vector& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector* result) { const INDEX_TYPE index_size = value_rowids.size(); result->reserve(index_size); if (index_size == 0) { - return; + return Status::OK(); } INDEX_TYPE current_output_column = 0; INDEX_TYPE current_value_rowid = value_rowids(0); - DCHECK_LT(current_value_rowid, parent_output_index.size()); + + if (current_value_rowid >= parent_output_index.size()) { + return errors::InvalidArgument( + "Got current_value_rowid=", current_value_rowid, + " which is not less than ", parent_output_index.size()); + } + INDEX_TYPE current_output_index = parent_output_index[current_value_rowid]; result->push_back(current_output_index); for (INDEX_TYPE i = 1; i < index_size; ++i) { @@ -289,13 +296,23 @@ class RaggedTensorToTensorBaseOp : public OpKernel { } else { current_output_column = 0; current_value_rowid = next_value_rowid; - DCHECK_LT(next_value_rowid, parent_output_index.size()); + + if (next_value_rowid >= parent_output_index.size()) { + return errors::InvalidArgument( + "Got next_value_rowid=", next_value_rowid, + " which is not less than ", parent_output_index.size()); + } + current_output_index = parent_output_index[next_value_rowid]; } result->push_back(current_output_index); } - OP_REQUIRES(context, result->size() == value_rowids.size(), - errors::InvalidArgument("Invalid row ids.")); + + if (result->size() != value_rowids.size()) { + return errors::InvalidArgument("Invalid row ids."); + } + + return Status::OK(); } Status CalculateOutputIndex(OpKernelContext* context, int dimension, @@ -308,10 +325,9 @@ class RaggedTensorToTensorBaseOp : public OpKernel { auto partition_type = GetRowPartitionTypeByDimension(dimension); switch (partition_type) { case RowPartitionType::VALUE_ROWIDS: - CalculateOutputIndexValueRowID( - context, row_partition_tensor, parent_output_index, - output_index_multiplier, output_size, result); - return tensorflow::Status::OK(); + return CalculateOutputIndexValueRowID( + row_partition_tensor, parent_output_index, output_index_multiplier, + output_size, result); case RowPartitionType::ROW_SPLITS: if (row_partition_tensor.size() - 1 > parent_output_index.size()) { return errors::InvalidArgument( @@ -319,10 +335,9 @@ class RaggedTensorToTensorBaseOp : public OpKernel { row_partition_tensor.size() - 1, " > ", parent_output_index.size()); } - CalculateOutputIndexRowSplit( - context, row_partition_tensor, parent_output_index, - output_index_multiplier, output_size, result); - return tensorflow::Status::OK(); + return CalculateOutputIndexRowSplit( + row_partition_tensor, parent_output_index, output_index_multiplier, + output_size, result); default: return errors::InvalidArgument( "Unsupported partition type:", From 6aac000beef03b02efb43939c1a9984946b8de0e Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 11 May 2021 15:41:51 -0700 Subject: [PATCH 210/243] Validate that a and b are proper sparse tensors PiperOrigin-RevId: 373248068 Change-Id: I0a2041a0747901b3f00387a6a3bce9bca6b0b3b1 --- tensorflow/core/kernels/sparse_add_op.cc | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/tensorflow/core/kernels/sparse_add_op.cc b/tensorflow/core/kernels/sparse_add_op.cc index 87a17b08820be0..e2cec69b14c46b 100644 --- a/tensorflow/core/kernels/sparse_add_op.cc +++ b/tensorflow/core/kernels/sparse_add_op.cc @@ -44,6 +44,11 @@ class SparseAddOp : public OpKernel { b_indices->shape().DebugString())); const int64 a_nnz = a_indices->dim_size(0); const int64 b_nnz = b_indices->dim_size(0); + const int num_dims = a_indices->dim_size(1); + OP_REQUIRES(ctx, b_indices->dim_size(1) == num_dims, + errors::InvalidArgument( + "Input indices must have the same dimension, got ", + num_dims, " and ", b_indices->dim_size(1))); OP_REQUIRES_OK(ctx, ctx->input("a_values", &a_values_t)); OP_REQUIRES_OK(ctx, ctx->input("b_values", &b_values_t)); @@ -72,6 +77,13 @@ class SparseAddOp : public OpKernel { "Input shapes should be a vector but received shapes ", a_shape->shape().DebugString(), " and ", b_shape->shape().DebugString())); + OP_REQUIRES( + ctx, a_shape->NumElements() == num_dims, + errors::InvalidArgument("Second dimension of a_indices and length of " + "a_shape must match, got ", + num_dims, " and ", a_shape->NumElements())); + OP_REQUIRES(ctx, num_dims > 0, + errors::InvalidArgument("Tesors must not be empty")); OP_REQUIRES( ctx, a_shape->IsSameSize(*b_shape), errors::InvalidArgument( @@ -100,11 +112,6 @@ class SparseAddOp : public OpKernel { std::vector> entries_to_copy; // from_a?, idx entries_to_copy.reserve(a_nnz + b_nnz); std::vector out_values; - const int num_dims = a_shape->dim_size(0); - - OP_REQUIRES(ctx, num_dims > 0, - errors::InvalidArgument("Invalid input_a shape. Received: ", - a_shape->DebugString())); // The input and output sparse tensors are assumed to be ordered along // increasing dimension number. From 5995075f4399056fab992780d8771cae454cd446 Mon Sep 17 00:00:00 2001 From: Geeta Chavan Date: Tue, 1 Jun 2021 12:16:22 -0700 Subject: [PATCH 211/243] Prevent yet another division by zero --- tensorflow/core/kernels/conv_grad_input_ops.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/core/kernels/conv_grad_input_ops.cc b/tensorflow/core/kernels/conv_grad_input_ops.cc index 2f6200e5045b68..f4d1fa49e65436 100644 --- a/tensorflow/core/kernels/conv_grad_input_ops.cc +++ b/tensorflow/core/kernels/conv_grad_input_ops.cc @@ -673,6 +673,11 @@ class Conv2DCustomBackpropInputOp : public OpKernel { dims.batch_size == 1 || thread_work_unit_size >= min_thread_work_unit_size; + OP_REQUIRES( + context, work_unit_size > 0, + errors::InvalidArgument("input, filter_sizes and out_backprop tensors " + "must all have at least 1 element")); + const size_t shard_size = use_parallel_contraction ? 1 From 9ea660cb4d0ed82780912ca847c7207fbeaa5428 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Mon, 19 Apr 2021 18:32:56 -0700 Subject: [PATCH 212/243] Fix one more FPE. --- tensorflow/core/kernels/conv_ops.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensorflow/core/kernels/conv_ops.cc b/tensorflow/core/kernels/conv_ops.cc index d5ce7de1d2572a..1c5a6ec30cbc55 100644 --- a/tensorflow/core/kernels/conv_ops.cc +++ b/tensorflow/core/kernels/conv_ops.cc @@ -173,6 +173,7 @@ struct LaunchConv2DOp { LaunchGeneric()(ctx, input, filter, row_stride, col_stride, row_dilation, col_dilation, padding, explicit_paddings, output, data_format); + } }; @@ -425,6 +426,9 @@ Status ComputeConv2DDimension(const Conv2DParameters& params, errors::InvalidArgument("Patch depth too large")); const int in_depth = static_cast(in_depth_raw); const int patch_depth = static_cast(patch_depth_raw); + TF_REQUIRES(patch_depth > 0, + errors::InvalidArgument( + "filter depth must be stricly positive, got ", patch_depth)); TF_REQUIRES(in_depth % patch_depth == 0, errors::InvalidArgument( "input depth must be evenly divisible by filter depth: ", From 9a1b7410d66eaf5eac61d9b5633b5f7ce2f37abf Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Thu, 29 Apr 2021 15:30:30 -0700 Subject: [PATCH 213/243] Fix heap-buffer-overflow issue with . --- tensorflow/core/kernels/sparse_reshape_op.cc | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tensorflow/core/kernels/sparse_reshape_op.cc b/tensorflow/core/kernels/sparse_reshape_op.cc index 059519a913b7e7..23c595da354daa 100644 --- a/tensorflow/core/kernels/sparse_reshape_op.cc +++ b/tensorflow/core/kernels/sparse_reshape_op.cc @@ -26,6 +26,7 @@ limitations under the License. #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/reshape_util.h" #include "tensorflow/core/lib/gtl/inlined_vector.h" +#include "tensorflow/core/platform/errors.h" namespace tensorflow { @@ -34,6 +35,17 @@ class SparseReshapeOp : public OpKernel { explicit SparseReshapeOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { + const Tensor& input_indices_in = context->input(0); + const Tensor& input_shape_in = context->input(1); + + OP_REQUIRES(context, TensorShapeUtils::IsMatrix(input_indices_in.shape()), + errors::InvalidArgument("Input must be a matrix.")); + OP_REQUIRES(context, TensorShapeUtils::IsVector(input_shape_in.shape()), + errors::InvalidArgument("Input shape must be a vector.")); + OP_REQUIRES(context, + input_indices_in.dim_size(1) == input_shape_in.dim_size(0), + errors::InvalidArgument( + "Input tensor rank must match input shape length.")); Reshape(context, context->input(0), context->input(1), context->input(2), 0 /* output indices index */, 1 /* output shape index */); } From 20ff03a3e5975e5ea8821321acf9c62a4892d672 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 2 Jun 2021 10:07:35 -0700 Subject: [PATCH 214/243] Update tensorflow/core/kernels/conv_ops.cc --- tensorflow/core/kernels/conv_ops.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/tensorflow/core/kernels/conv_ops.cc b/tensorflow/core/kernels/conv_ops.cc index 1c5a6ec30cbc55..cb710975cc48be 100644 --- a/tensorflow/core/kernels/conv_ops.cc +++ b/tensorflow/core/kernels/conv_ops.cc @@ -173,7 +173,6 @@ struct LaunchConv2DOp { LaunchGeneric()(ctx, input, filter, row_stride, col_stride, row_dilation, col_dilation, padding, explicit_paddings, output, data_format); - } }; From c1e0e57f4c23d8531c3c9ffdf6a4eba6f2217418 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 2 Jun 2021 13:37:48 -0700 Subject: [PATCH 215/243] Fix the 2.1 branch after cherrypicks --- tensorflow/core/kernels/fft_ops.cc | 1 - .../core/kernels/quantize_and_dequantize_op.cc | 1 - tensorflow/core/kernels/quantized_conv_ops.cc | 2 -- tensorflow/core/kernels/sparse/kernels.cc | 1 - .../core/kernels/sparse/sparse_cholesky_op.cc | 2 -- tensorflow/core/kernels/sparse_add_op.cc | 1 - tensorflow/core/kernels/sparse_reshape_op.cc | 2 +- tensorflow/core/kernels/sparse_split_op.cc | 14 +++++++++----- 8 files changed, 10 insertions(+), 14 deletions(-) diff --git a/tensorflow/core/kernels/fft_ops.cc b/tensorflow/core/kernels/fft_ops.cc index 603f9383616399..d2f7124eba1de9 100644 --- a/tensorflow/core/kernels/fft_ops.cc +++ b/tensorflow/core/kernels/fft_ops.cc @@ -13,7 +13,6 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#include "tensorflow/core/platform/errors.h" #define EIGEN_USE_THREADS // See docs in ../ops/fft_ops.cc. diff --git a/tensorflow/core/kernels/quantize_and_dequantize_op.cc b/tensorflow/core/kernels/quantize_and_dequantize_op.cc index 28be8a9fa11822..790a54c639e159 100644 --- a/tensorflow/core/kernels/quantize_and_dequantize_op.cc +++ b/tensorflow/core/kernels/quantize_and_dequantize_op.cc @@ -13,7 +13,6 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#include "tensorflow/core/framework/op_requires.h" #define EIGEN_USE_THREADS #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \ diff --git a/tensorflow/core/kernels/quantized_conv_ops.cc b/tensorflow/core/kernels/quantized_conv_ops.cc index 1b83c09e1f7f18..893e68fa8acbcc 100644 --- a/tensorflow/core/kernels/quantized_conv_ops.cc +++ b/tensorflow/core/kernels/quantized_conv_ops.cc @@ -18,8 +18,6 @@ limitations under the License. #include #include -#include "tensorflow/core/platform/errors.h" - #define EIGEN_USE_THREADS #define GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK diff --git a/tensorflow/core/kernels/sparse/kernels.cc b/tensorflow/core/kernels/sparse/kernels.cc index dff9aeb83ccfec..d63b5233482cc4 100644 --- a/tensorflow/core/kernels/sparse/kernels.cc +++ b/tensorflow/core/kernels/sparse/kernels.cc @@ -22,7 +22,6 @@ limitations under the License. #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" -#include "tensorflow/core/platform/errors.h" namespace tensorflow { namespace functor { diff --git a/tensorflow/core/kernels/sparse/sparse_cholesky_op.cc b/tensorflow/core/kernels/sparse/sparse_cholesky_op.cc index ab328672702eff..223fd51f2a2ddf 100644 --- a/tensorflow/core/kernels/sparse/sparse_cholesky_op.cc +++ b/tensorflow/core/kernels/sparse/sparse_cholesky_op.cc @@ -17,8 +17,6 @@ limitations under the License. #include #include -#include "tensorflow/core/framework/op_requires.h" - #define EIGEN_USE_THREADS #include "third_party/eigen3/Eigen/Core" diff --git a/tensorflow/core/kernels/sparse_add_op.cc b/tensorflow/core/kernels/sparse_add_op.cc index e2cec69b14c46b..6418c0e50af8a5 100644 --- a/tensorflow/core/kernels/sparse_add_op.cc +++ b/tensorflow/core/kernels/sparse_add_op.cc @@ -14,7 +14,6 @@ limitations under the License. ==============================================================================*/ #include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_util.h" diff --git a/tensorflow/core/kernels/sparse_reshape_op.cc b/tensorflow/core/kernels/sparse_reshape_op.cc index 23c595da354daa..f6e94d190ec9fa 100644 --- a/tensorflow/core/kernels/sparse_reshape_op.cc +++ b/tensorflow/core/kernels/sparse_reshape_op.cc @@ -25,8 +25,8 @@ limitations under the License. #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/reshape_util.h" +#include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/inlined_vector.h" -#include "tensorflow/core/platform/errors.h" namespace tensorflow { diff --git a/tensorflow/core/kernels/sparse_split_op.cc b/tensorflow/core/kernels/sparse_split_op.cc index ca3e77f76af7f7..b0c147da8a8344 100644 --- a/tensorflow/core/kernels/sparse_split_op.cc +++ b/tensorflow/core/kernels/sparse_split_op.cc @@ -18,6 +18,7 @@ limitations under the License. #include #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" +#include "tensorflow/core/util/overflow.h" #include "tensorflow/core/util/sparse/sparse_tensor.h" namespace tensorflow { @@ -64,17 +65,20 @@ class SparseSplitOp : public OpKernel { num_split_)); // Prevent overflow by constructing the dense shape separately - TensorShape dense_shape; + int64 total_elements = 1; const auto input_shape_flat = input_shape.flat(); for (int i = 0; i < input_shape.NumElements(); i++) { - OP_REQUIRES_OK(context, - dense_shape.AddDimWithStatus(input_shape_flat(i))); + total_elements = + MultiplyWithoutOverflow(total_elements, input_shape_flat(i)); + OP_REQUIRES(context, total_elements >= 0, + errors::Internal("Encountered overflow in dense shape")); } sparse::SparseTensor sparse_tensor; OP_REQUIRES_OK(context, - sparse::SparseTensor::Create(input_indices, input_values, - dense_shape, &sparse_tensor)); + sparse::SparseTensor::Create( + input_indices, input_values, + TensorShape(input_shape.vec()), &sparse_tensor)); std::vector outputs; OP_REQUIRES_OK(context, From e2f3fe7df3ba4e59b63060341fb6eb4ea404935c Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 6 May 2021 17:45:51 -0700 Subject: [PATCH 216/243] Cherry pick 2.1 Add missing valuidation to FusedBatchNorm --- .../core/kernels/fused_batch_norm_op.cc | 29 ++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/fused_batch_norm_op.cc b/tensorflow/core/kernels/fused_batch_norm_op.cc index a4843ed527712e..1af8e1784f0bac 100644 --- a/tensorflow/core/kernels/fused_batch_norm_op.cc +++ b/tensorflow/core/kernels/fused_batch_norm_op.cc @@ -1156,6 +1156,33 @@ class FusedBatchNormOpBase : public OpKernel { context, estimated_variance.dims() == 1, errors::InvalidArgument("estimated_variance must be 1-dimensional", estimated_variance.shape().DebugString())); + + const auto num_channels = GetTensorDim(x, tensor_format_, 'C'); + OP_REQUIRES( + context, scale.NumElements() == num_channels, + errors::InvalidArgument("scale must have the same number of elements " + "as the channels of x, got ", + scale.NumElements(), " and ", num_channels)); + OP_REQUIRES( + context, offset.NumElements() == num_channels, + errors::InvalidArgument("offset must have the same number of elements " + "as the channels of x, got ", + offset.NumElements(), " and ", num_channels)); + if (estimated_mean.NumElements() != 0) { + OP_REQUIRES(context, estimated_mean.NumElements() == num_channels, + errors::InvalidArgument( + "mean must be empty or have the same number of " + "elements as the channels of x, got ", + estimated_mean.NumElements(), " and ", num_channels)); + } + if (estimated_variance.NumElements() != 0) { + OP_REQUIRES(context, estimated_variance.NumElements() == num_channels, + errors::InvalidArgument( + "variance must be empty or have the same number of " + "elements as the channels of x, got ", + estimated_variance.NumElements(), " and ", num_channels)); + } + if (has_side_input_) { OP_REQUIRES(context, side_input.shape() == x.shape(), errors::InvalidArgument( @@ -1168,7 +1195,7 @@ class FusedBatchNormOpBase : public OpKernel { // NOTE(ezhulenev): This requirement is coming from implementation // details of cudnnBatchNormalizationForwardTrainingEx. OP_REQUIRES( - context, !is_training_ || x.dim_size(3) % 4 == 0, + context, !is_training_ || num_channels % 4 == 0, errors::InvalidArgument("FusedBatchNorm with activation requires " "channel dimension to be a multiple of 4.")); } From 1003fb44af0a12f335a364f73de8b81ad5d9a324 Mon Sep 17 00:00:00 2001 From: Yu-Cheng Ling Date: Mon, 3 May 2021 09:07:42 -0700 Subject: [PATCH 217/243] Cherry pick 2.1 TFLite: Error out when the graph has a recurion. --- tensorflow/lite/BUILD | 1 + tensorflow/lite/core/subgraph.cc | 46 ++++++++++++++++++ tensorflow/lite/core/subgraph.h | 4 ++ tensorflow/lite/kernels/while.cc | 2 - tensorflow/lite/model_test.cc | 19 ++++++++ .../lite/testdata/unsupported_recursion.bin | Bin 0 -> 600 bytes 6 files changed, 70 insertions(+), 2 deletions(-) create mode 100644 tensorflow/lite/testdata/unsupported_recursion.bin diff --git a/tensorflow/lite/BUILD b/tensorflow/lite/BUILD index 05c83b3001e544..b9f1994d502e52 100644 --- a/tensorflow/lite/BUILD +++ b/tensorflow/lite/BUILD @@ -320,6 +320,7 @@ cc_test( "testdata/test_min_runtime.bin", "testdata/test_model.bin", "testdata/test_model_broken.bin", + "testdata/unsupported_recursion.bin", ], tags = [ "tflite_not_portable", diff --git a/tensorflow/lite/core/subgraph.cc b/tensorflow/lite/core/subgraph.cc index 4f4394243aef92..48b1a51f26e75a 100644 --- a/tensorflow/lite/core/subgraph.cc +++ b/tensorflow/lite/core/subgraph.cc @@ -117,6 +117,42 @@ TfLiteQuantizationParams GetLegacyQuantization( return legacy_quantization; } +// An utility test to detect if the subgraph is abused: +// 1. Detects if recursion exists in the graph (recursion is not currently +// supported. +// 2. Detects if the interpreter / subgraph is used in multiple subgraphs. +// Note: It's clearly documented that the interpreter / subgraph are not +// thread-safe. This serves as a check with possible false negatives +// unless we switch to atomic boolean flags. +class SubgraphGuard { + public: + SubgraphGuard(TfLiteContext* context, bool* is_subgraph_in_use) + : is_subgraph_in_use_(is_subgraph_in_use) { + if (*is_subgraph_in_use_) { + TF_LITE_KERNEL_LOG( + context, + "Subgraph is already in use. Using an interpreter or a subgraph in " + "multiple threads is not supported. Recursion in the graph is not " + "supported."); + status_ = kTfLiteError; + } else { + *is_subgraph_in_use_ = true; + } + } + ~SubgraphGuard() { + // If tht original status was OK, recover the boolean flag. + if (status_ == kTfLiteOk) { + *is_subgraph_in_use_ = false; + } + } + + TfLiteStatus status() const { return status_; } + + private: + TfLiteStatus status_ = kTfLiteOk; + bool* is_subgraph_in_use_; +}; + } // namespace // A trivial implementation of GraphInfo around the Interpreter. @@ -508,6 +544,7 @@ TfLiteStatus Subgraph::BytesRequired(TfLiteType type, const int* dims, TfLiteStatus Subgraph::AllocateTensors() { TFLITE_SCOPED_TAGGED_DEFAULT_PROFILE(profiler_.get(), "AllocateTensors"); + if (!consistent_) { ReportError("AllocateTensors() called on inconsistent model."); return kTfLiteError; @@ -524,6 +561,12 @@ TfLiteStatus Subgraph::AllocateTensors() { return kTfLiteOk; } + // Note `AllocateTensors` sometimes calls itself recursively above + // for delegates. Therefore only the logic below need to be guarded + // by `SubgraphGuard`. + SubgraphGuard guard(&context_, &is_subgraph_in_use_); + TF_LITE_ENSURE_OK(&context_, guard.status()); + next_execution_plan_index_to_prepare_ = 0; next_execution_plan_index_to_plan_allocation_ = 0; if (memory_planner_) { @@ -744,6 +787,9 @@ TfLiteStatus Subgraph::PrepareOpsAndTensors() { } TfLiteStatus Subgraph::Invoke() { + SubgraphGuard guard(&context_, &is_subgraph_in_use_); + TF_LITE_ENSURE_OK(&context_, guard.status()); + if (!consistent_) { ReportError("Invoke called on model that is not consistent."); return kTfLiteError; diff --git a/tensorflow/lite/core/subgraph.h b/tensorflow/lite/core/subgraph.h index 91e098b7059d1e..54ee2ba5200f82 100644 --- a/tensorflow/lite/core/subgraph.h +++ b/tensorflow/lite/core/subgraph.h @@ -642,6 +642,10 @@ class Subgraph { // A map of resource variables. Owned by interpreter and shared by multiple // subgraphs. ResourceVariableMap* resource_variables_ = nullptr; + +// Whether the subgraph is currently in use (e.g. running the `Invoke` + // or `AllocateTensors` functions). + bool is_subgraph_in_use_ = false; }; } // namespace tflite diff --git a/tensorflow/lite/kernels/while.cc b/tensorflow/lite/kernels/while.cc index b5902706d7cc3f..8951647bf43bd7 100644 --- a/tensorflow/lite/kernels/while.cc +++ b/tensorflow/lite/kernels/while.cc @@ -132,8 +132,6 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* subgraphs = this_subgraph->GetSubgraphs(); TF_LITE_ENSURE(context, op_data->cond_subgraph_index < subgraphs->size()); TF_LITE_ENSURE(context, op_data->body_subgraph_index < subgraphs->size()); - TF_LITE_ENSURE(context, - op_data->cond_subgraph_index != op_data->body_subgraph_index); Subgraph* cond_subgraph = (*subgraphs)[op_data->cond_subgraph_index].get(); Subgraph* body_subgraph = (*subgraphs)[op_data->body_subgraph_index].get(); diff --git a/tensorflow/lite/model_test.cc b/tensorflow/lite/model_test.cc index 7dc582b886289d..698055a8acfbe6 100644 --- a/tensorflow/lite/model_test.cc +++ b/tensorflow/lite/model_test.cc @@ -331,6 +331,25 @@ TEST(BasicFlatBufferModel, TestReadRuntimeVersionFromModel) { ASSERT_EQ(model2->GetMinimumRuntime(), "1.10.0"); } +// Recursion & reentrant are not supported in TFLite. +// The test ensures it fails gracefullly instead of crashing with +// a stack overflow. +TEST(BasicFlatBufferModel, TestUnsupportedRecursion) { + const auto model_path = + "third_party/tensorflow/lite/testdata/unsupported_recursion.bin"; + + std::unique_ptr model = + FlatBufferModel::BuildFromFile(model_path); + ASSERT_NE(model, nullptr); + + tflite::ops::builtin::BuiltinOpResolver resolver; + InterpreterBuilder builder(*model, resolver); + std::unique_ptr interpreter; + ASSERT_EQ(builder(&interpreter), kTfLiteOk); + ASSERT_NE(interpreter, nullptr); + ASSERT_NE(interpreter->AllocateTensors(), kTfLiteOk); +} + // TODO(aselle): Add tests for serialization of builtin op data types. // These tests will occur with the evaluation tests of individual operators, // not here. diff --git a/tensorflow/lite/testdata/unsupported_recursion.bin b/tensorflow/lite/testdata/unsupported_recursion.bin new file mode 100644 index 0000000000000000000000000000000000000000..525c5383ab4ef6283d687aeb4004b38a8981773a GIT binary patch literal 600 zcmZ9Ky-Nc@5XE0KBoSjwAt)rp6_yhG#1vKvijYXufSnM$!=S-~Aeu6$NGcVy43ffD zv=J10KN>shH2xi)@7-ZXCm}8pCn_Msmv&~WA#tZJ0cCz&tN{cxMIz@J9|!b*ReAQF{b^11cvA(nOQj# zCZ44>NsC+&i(EIS4_B9;x1Szw9@@qB(roJXt*H| z)K!b}Cy_?#d+(@U2g3_29YX)Bc3Md1?cfLgjkUf;)Q?N{Qaur!JZFqWInI}A1=p=W z{yUMC1Hqutn9*~iuSWZfs%Pq$ZsT{)bY7h?O~U>M)(#XZr46U-R&1w=9*%=M1n%g| fP6NS#(KVwVMlX!&zGdTq_ Date: Tue, 27 Apr 2021 17:47:36 -0700 Subject: [PATCH 218/243] CherryPick2.1:Add depth_to_space TFLite op --- tensorflow/lite/kernels/depth_to_space.cc | 1 + tensorflow/lite/kernels/depth_to_space_test.cc | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/tensorflow/lite/kernels/depth_to_space.cc b/tensorflow/lite/kernels/depth_to_space.cc index 561a4340698ab5..1d16804ccf61c2 100644 --- a/tensorflow/lite/kernels/depth_to_space.cc +++ b/tensorflow/lite/kernels/depth_to_space.cc @@ -55,6 +55,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, input->type, output->type); const int block_size = params->block_size; + TF_LITE_ENSURE(context, block_size > 0); const int input_height = input->dims->data[1]; const int input_width = input->dims->data[2]; const int input_channels = input->dims->data[3]; diff --git a/tensorflow/lite/kernels/depth_to_space_test.cc b/tensorflow/lite/kernels/depth_to_space_test.cc index 8d59a1ad82f418..8a73676fa1c6ad 100644 --- a/tensorflow/lite/kernels/depth_to_space_test.cc +++ b/tensorflow/lite/kernels/depth_to_space_test.cc @@ -55,6 +55,11 @@ TEST(DepthToSpaceOpModel, BadBlockSize) { EXPECT_DEATH(DepthToSpaceOpModel({TensorType_FLOAT32, {1, 1, 1, 4}}, 4), "Cannot allocate tensors"); } + +TEST(DepthToSpaceOpModel, NoBlockSize) { + EXPECT_DEATH(DepthToSpaceOpModel({TensorType_FLOAT32, {1, 1, 1, 4}}, 0), + "Cannot allocate tensors"); +} #endif TEST(DepthToSpaceOpModel, Float32) { From a034348f3431c75a6c3962629da2ebbd4e81cad4 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 29 Apr 2021 19:43:09 -0700 Subject: [PATCH 219/243] =?UTF-8?q?CherryPick2.1:Define=20TfLiteFloatArray?= =?UTF-8?q?GetSizeInBytes=20even=20when=20TF=5FLITE=5FSTATIC=5FMEMORY?= =?UTF-8?q?=E2=80=A6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- tensorflow/lite/c/c_api_internal.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tensorflow/lite/c/c_api_internal.c b/tensorflow/lite/c/c_api_internal.c index 28a430daf55096..9c2e2eac8214e9 100644 --- a/tensorflow/lite/c/c_api_internal.c +++ b/tensorflow/lite/c/c_api_internal.c @@ -43,8 +43,10 @@ int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size, #ifndef TF_LITE_STATIC_MEMORY TfLiteIntArray* TfLiteIntArrayCreate(int size) { - TfLiteIntArray* ret = - (TfLiteIntArray*)malloc(TfLiteIntArrayGetSizeInBytes(size)); + int alloc_size = TfLiteIntArrayGetSizeInBytes(size); + if (alloc_size <= 0) return NULL; + TfLiteIntArray* ret = (TfLiteIntArray*)malloc(alloc_size); + if (!ret) return ret; ret->size = size; return ret; } From 84f5c517d437c085020c6e21b86fd4cfea998eb2 Mon Sep 17 00:00:00 2001 From: Amit Patankar Date: Tue, 4 May 2021 15:35:39 -0700 Subject: [PATCH 220/243] CherryPick2.1: Fix heap-buffer-overflow issue with tf.raw_ops.SparseFillEmptyRows --- tensorflow/core/kernels/sparse_fill_empty_rows_op.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc b/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc index f674836cb8036f..7b27015265dfec 100644 --- a/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc +++ b/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc @@ -66,7 +66,9 @@ class SparseFillEmptyRowsOp : public OpKernel { default_value_t->shape().DebugString())); // TODO(ebrevdo): add shape checks between values, indices, // dense_shape. Also add check that dense rank > 0. - + OP_REQUIRES(context, dense_shape_t.NumElements() != 0, + errors::InvalidArgument("Dense shape cannot be empty."), + done); const T& default_value = default_value_t->scalar()(); const auto indices = indices_t->matrix(); const auto values = values_t->vec(); From 12cb728414cff2057212695086b4049701def23c Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 30 Apr 2021 06:36:59 -0700 Subject: [PATCH 221/243] CherryPick:2.1 Fix bugs --- .../core/kernels/decode_padded_raw_op.cc | 21 +++++++++++-------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/tensorflow/core/kernels/decode_padded_raw_op.cc b/tensorflow/core/kernels/decode_padded_raw_op.cc index 12e8ec6aff0d41..d3e830c06f209c 100644 --- a/tensorflow/core/kernels/decode_padded_raw_op.cc +++ b/tensorflow/core/kernels/decode_padded_raw_op.cc @@ -19,6 +19,7 @@ limitations under the License. #include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/shape_inference.h" namespace tensorflow { @@ -83,14 +84,13 @@ class DecodePaddedRawOp : public OpKernel { // can copy the memory directly. if (!convert_data_endianness_ || sizeof(T) == 1) { for (int64 i = 0; i < flat_in.size(); ++i) { - const T* in_data = reinterpret_cast(flat_in(i).data()); - - if (flat_in(i).size() > fixed_length) { - memcpy(out_data, in_data, fixed_length); - } else { - memcpy(out_data, in_data, flat_in(i).size()); - } - out_data += fixed_length; + const auto to_copy = + std::min(flat_in(i).size(), static_cast(fixed_length)); + memcpy(out_data, flat_in(i).data(), to_copy); + // Note: increase out_data by width since it's already of type T* so + // each shift amount is implicitly multiplied by sizeof(T) according to + // pointer arithmetic rules. + out_data += width; } } else { // Otherwise, the data is not in the host's byte order, and rather than a @@ -105,7 +105,10 @@ class DecodePaddedRawOp : public OpKernel { p_in += sizeof(T), p_out += sizeof(T)) { std::reverse_copy(p_in, p_in + sizeof(T), p_out); } - out_data += fixed_length; + // Note: increase out_data by width since it's already of type T* so + // each shift amount is implicitly multiplied by sizeof(T) according to + // pointer arithmetic rules. + out_data += width; } } } From 8cd9a826e9eeaca036379bd1178d33bd26658a44 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Wed, 2 Jun 2021 19:05:49 -0700 Subject: [PATCH 222/243] Update tensorflow/core/kernels/decode_padded_raw_op.cc --- tensorflow/core/kernels/decode_padded_raw_op.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/tensorflow/core/kernels/decode_padded_raw_op.cc b/tensorflow/core/kernels/decode_padded_raw_op.cc index d3e830c06f209c..ca7c7104b442d2 100644 --- a/tensorflow/core/kernels/decode_padded_raw_op.cc +++ b/tensorflow/core/kernels/decode_padded_raw_op.cc @@ -19,7 +19,6 @@ limitations under the License. #include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/shape_inference.h" namespace tensorflow { From 91eb42dc6dcd04c806a680c1db754e6a54048c2e Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 11 May 2021 18:32:03 -0700 Subject: [PATCH 223/243] Validate that a and b are proper sparse tensors PiperOrigin-RevId: 373274848 Change-Id: I3a665ac3a29dee9fb69bdf408a939330cb93ea75 --- .../kernels/sparse_sparse_binary_op_shared.cc | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/tensorflow/core/kernels/sparse_sparse_binary_op_shared.cc b/tensorflow/core/kernels/sparse_sparse_binary_op_shared.cc index 8f1d0e30a7648a..3ad87bb2e419d4 100644 --- a/tensorflow/core/kernels/sparse_sparse_binary_op_shared.cc +++ b/tensorflow/core/kernels/sparse_sparse_binary_op_shared.cc @@ -150,6 +150,7 @@ class SparseSparseBinaryOpShared : public OpKernel { const int64 a_nnz = a_indices_t->dim_size(0); const int64 b_nnz = b_indices_t->dim_size(0); + const auto a_values = a_values_t->vec(); const auto b_values = b_values_t->vec(); @@ -166,6 +167,14 @@ class SparseSparseBinaryOpShared : public OpKernel { "Input shapes should be a vector but received shapes ", a_shape_t->shape().DebugString(), " and ", b_shape_t->shape().DebugString())); + const int num_dims = a_indices_t->dim_size(1); + OP_REQUIRES( + ctx, a_shape_t->NumElements() == num_dims, + errors::InvalidArgument("Second dimension of a_indices and length of " + "a_shape must match, got ", + num_dims, " and ", a_shape_t->NumElements())); + OP_REQUIRES(ctx, num_dims > 0, + errors::InvalidArgument("Tensors must not be empty")); OP_REQUIRES(ctx, a_shape_t->IsSameSize(*b_shape_t), errors::InvalidArgument( "Operands do not have the same ranks; got shapes: ", @@ -180,12 +189,6 @@ class SparseSparseBinaryOpShared : public OpKernel { " for dimension ", i)); } - OP_REQUIRES( - ctx, a_indices_t->dim_size(1) == b_indices_t->dim_size(1), - errors::InvalidArgument( - "Indices' dimensions do not match: got ", a_indices_t->dim_size(1), - " and ", b_indices_t->dim_size(1), " for the second dimension.")); - const int num_dims = a_indices_t->dim_size(1); const auto a_indices_mat = a_indices_t->matrix(); const auto b_indices_mat = b_indices_t->matrix(); std::vector a_augmented_values, b_augmented_values; From 05ad84fae7a926672036e90ab84510b863365430 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 3 Jun 2021 08:04:04 -0700 Subject: [PATCH 224/243] Bump curl --- tensorflow/workspace.bzl | 8 +-- third_party/curl.BUILD | 149 +++++++++++++++++++++++++++++++-------- 2 files changed, 122 insertions(+), 35 deletions(-) diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl index 420073e8606940..41d6b1bae75133 100755 --- a/tensorflow/workspace.bzl +++ b/tensorflow/workspace.bzl @@ -502,12 +502,12 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""): tf_http_archive( name = "curl", build_file = clean_dep("//third_party:curl.BUILD"), - sha256 = "01ae0c123dee45b01bbaef94c0bc00ed2aec89cb2ee0fd598e0d302a6b5e0a98", - strip_prefix = "curl-7.69.1", + sha256 = "3b4378156ba09e224008e81dcce854b7ce4d182b1f9cfb97fe5ed9e9c18c6bd3", + strip_prefix = "curl-7.76.0", system_build_file = clean_dep("//third_party/systemlibs:curl.BUILD"), urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/curl.haxx.se/download/curl-7.69.1.tar.gz", - "https://curl.haxx.se/download/curl-7.69.1.tar.gz", + "https://storage.googleapis.com/mirror.tensorflow.org/curl.haxx.se/download/curl-7.76.0.tar.gz", + "https://curl.haxx.se/download/curl-7.76.0.tar.gz", ], ) diff --git a/third_party/curl.BUILD b/third_party/curl.BUILD index 2813b980d47ca4..a7a072631440c9 100644 --- a/third_party/curl.BUILD +++ b/third_party/curl.BUILD @@ -25,20 +25,33 @@ CURL_WIN_SRCS = [ "lib/asyn-thread.c", "lib/inet_ntop.c", "lib/system_win32.c", - "lib/vtls/schannel.c", - "lib/idn_win32.c", + "lib/setup-win32.h", ] cc_library( name = "curl", srcs = [ "include/curl_config.h", + "lib/altsvc.c", + "lib/altsvc.h", + "lib/amigaos.c", "lib/amigaos.h", "lib/arpa_telnet.h", - "lib/asyn.h", "lib/asyn-ares.c", + "lib/asyn.h", "lib/base64.c", + "lib/c-hyper.c", + "lib/c-hyper.h", + "lib/config-amigaos.h", + "lib/config-dos.h", + "lib/config-mac.h", + "lib/config-os400.h", + "lib/config-plan9.h", + "lib/config-riscos.h", + "lib/config-tpf.h", + "lib/config-vxworks.h", "lib/config-win32.h", + "lib/config-win32ce.h", "lib/conncache.c", "lib/conncache.h", "lib/connect.c", @@ -52,14 +65,20 @@ cc_library( "lib/curl_base64.h", "lib/curl_ctype.c", "lib/curl_ctype.h", + "lib/curl_des.c", "lib/curl_des.h", + "lib/curl_endian.c", "lib/curl_endian.h", "lib/curl_fnmatch.c", "lib/curl_fnmatch.h", + "lib/curl_get_line.c", + "lib/curl_get_line.h", "lib/curl_gethostname.c", "lib/curl_gethostname.h", + "lib/curl_gssapi.c", "lib/curl_gssapi.h", "lib/curl_hmac.h", + "lib/curl_krb5.h", "lib/curl_ldap.h", "lib/curl_md4.h", "lib/curl_md5.h", @@ -68,14 +87,19 @@ cc_library( "lib/curl_memrchr.h", "lib/curl_multibyte.c", "lib/curl_multibyte.h", + "lib/curl_ntlm_core.c", "lib/curl_ntlm_core.h", + "lib/curl_ntlm_wb.c", "lib/curl_ntlm_wb.h", + "lib/curl_path.c", + "lib/curl_path.h", "lib/curl_printf.h", + "lib/curl_range.c", + "lib/curl_range.h", "lib/curl_rtmp.c", "lib/curl_rtmp.h", "lib/curl_sasl.c", "lib/curl_sasl.h", - "lib/curl_sec.h", "lib/curl_setup.h", "lib/curl_setup_once.h", "lib/curl_sha256.h", @@ -84,23 +108,35 @@ cc_library( "lib/curl_threads.c", "lib/curl_threads.h", "lib/curlx.h", + "lib/dict.c", "lib/dict.h", + "lib/doh.c", + "lib/doh.h", "lib/dotdot.c", "lib/dotdot.h", + "lib/dynbuf.c", + "lib/dynbuf.h", "lib/easy.c", + "lib/easygetopt.c", "lib/easyif.h", + "lib/easyoptions.c", + "lib/easyoptions.h", "lib/escape.c", "lib/escape.h", + "lib/file.c", "lib/file.h", "lib/fileinfo.c", "lib/fileinfo.h", "lib/formdata.c", "lib/formdata.h", + "lib/ftp.c", "lib/ftp.h", + "lib/ftplistparser.c", "lib/ftplistparser.h", "lib/getenv.c", "lib/getinfo.c", "lib/getinfo.h", + "lib/gopher.c", "lib/gopher.h", "lib/hash.c", "lib/hash.h", @@ -113,6 +149,8 @@ cc_library( "lib/hostip4.c", "lib/hostip6.c", "lib/hostsyn.c", + "lib/hsts.c", + "lib/hsts.h", "lib/http.c", "lib/http.h", "lib/http2.c", @@ -121,17 +159,24 @@ cc_library( "lib/http_chunks.h", "lib/http_digest.c", "lib/http_digest.h", + "lib/http_negotiate.c", "lib/http_negotiate.h", + "lib/http_ntlm.c", "lib/http_ntlm.h", "lib/http_proxy.c", "lib/http_proxy.h", + "lib/http_aws_sigv4.c", + "lib/http_aws_sigv4.h", + "lib/idn_win32.c", "lib/if2ip.c", "lib/if2ip.h", + "lib/imap.c", "lib/imap.h", "lib/inet_ntop.h", "lib/inet_pton.c", "lib/inet_pton.h", "lib/krb5.c", + "lib/ldap.c", "lib/llist.c", "lib/llist.h", "lib/md4.c", @@ -141,38 +186,43 @@ cc_library( "lib/mime.c", "lib/mime.h", "lib/mprintf.c", + "lib/mqtt.c", + "lib/mqtt.h", "lib/multi.c", "lib/multihandle.h", "lib/multiif.h", "lib/netrc.c", "lib/netrc.h", + "lib/non-ascii.c", "lib/non-ascii.h", "lib/nonblock.c", "lib/nonblock.h", - "lib/nwlib.c", - "lib/nwos.c", + #"lib/nwlib.c", + #"lib/nwos.c", + "lib/openldap.c", "lib/parsedate.c", "lib/parsedate.h", - "lib/pingpong.h", "lib/pingpong.c", + "lib/pingpong.h", + "lib/pop3.c", "lib/pop3.h", "lib/progress.c", "lib/progress.h", + "lib/psl.c", + "lib/psl.h", "lib/quic.h", "lib/rand.c", "lib/rand.h", - "lib/rename.h", "lib/rename.c", + "lib/rename.h", "lib/rtsp.c", "lib/rtsp.h", - "lib/security.c", "lib/select.c", "lib/select.h", "lib/sendf.c", "lib/sendf.h", "lib/setopt.c", "lib/setopt.h", - "lib/setup-os400.h", "lib/setup-vms.h", "lib/sha256.c", "lib/share.c", @@ -180,13 +230,17 @@ cc_library( "lib/sigpipe.h", "lib/slist.c", "lib/slist.h", + "lib/smb.c", "lib/smb.h", + "lib/smtp.c", "lib/smtp.h", "lib/sockaddr.h", - "lib/socketpair.h", "lib/socketpair.c", + "lib/socketpair.h", "lib/socks.c", "lib/socks.h", + "lib/socks_gssapi.c", + "lib/socks_sspi.c", "lib/speedcheck.c", "lib/speedcheck.h", "lib/splay.c", @@ -202,7 +256,9 @@ cc_library( "lib/strtoofft.c", "lib/strtoofft.h", "lib/system_win32.h", + "lib/telnet.c", "lib/telnet.h", + "lib/tftp.c", "lib/tftp.h", "lib/timeval.c", "lib/timeval.h", @@ -211,44 +267,69 @@ cc_library( "lib/url.c", "lib/url.h", "lib/urldata.h", + "lib/urlapi-int.h", + "lib/urlapi.c", + "lib/version.c", + "lib/version_win32.c", + "lib/version_win32.h", + "lib/warnless.c", + "lib/warnless.h", + "lib/wildcard.c", + "lib/wildcard.h", + "lib/x509asn1.c", + "lib/x509asn1.h", "lib/vauth/cleartext.c", "lib/vauth/cram.c", "lib/vauth/digest.c", "lib/vauth/digest.h", + "lib/vauth/digest_sspi.c", + "lib/vauth/krb5_gssapi.c", + "lib/vauth/krb5_sspi.c", + "lib/vauth/ntlm.c", "lib/vauth/ntlm.h", + "lib/vauth/ntlm_sspi.c", "lib/vauth/oauth2.c", + "lib/vauth/spnego_sspi.c", "lib/vauth/vauth.c", "lib/vauth/vauth.h", - "lib/version.c", + "lib/vquic/ngtcp2.c", + "lib/vquic/ngtcp2.h", + "lib/vquic/quiche.c", + "lib/vquic/quiche.h", + "lib/vquic/vquic.c", + "lib/vquic/vquic.h", + "lib/vssh/libssh.c", + "lib/vssh/libssh2.c", "lib/vssh/ssh.h", + "lib/vssh/wolfssh.c", + "lib/vtls/bearssl.c", "lib/vtls/bearssl.h", + "lib/vtls/gskit.c", "lib/vtls/gskit.h", + "lib/vtls/gtls.c", "lib/vtls/gtls.h", + "lib/vtls/keylog.c", + "lib/vtls/keylog.h", + "lib/vtls/mbedtls.c", "lib/vtls/mbedtls.h", + "lib/vtls/mbedtls_threadlock.c", + "lib/vtls/mbedtls_threadlock.h", + "lib/vtls/mesalink.c", + "lib/vtls/mesalink.h", + "lib/vtls/nss.c", "lib/vtls/nssg.h", + "lib/vtls/openssl.c", "lib/vtls/openssl.h", + "lib/vtls/rustls.c", + "lib/vtls/rustls.h", + "lib/vtls/schannel.c", "lib/vtls/schannel.h", + "lib/vtls/schannel_verify.c", + "lib/vtls/sectransp.h", "lib/vtls/vtls.c", "lib/vtls/vtls.h", + "lib/vtls/wolfssl.c", "lib/vtls/wolfssl.h", - "lib/warnless.c", - "lib/warnless.h", - "lib/wildcard.c", - "lib/wildcard.h", - "lib/x509asn1.h", - "lib/psl.h", - "lib/psl.c", - "lib/vtls/sectransp.h", - "lib/vtls/mesalink.h", - "lib/vtls/mesalink.c", - "lib/curl_get_line.h", - "lib/curl_get_line.c", - "lib/urlapi-int.h", - "lib/urlapi.c", - "lib/altsvc.h", - "lib/altsvc.c", - "lib/doh.h", - "lib/doh.c", ] + select({ "@org_tensorflow//tensorflow:macos": [ "lib/vtls/sectransp.c", @@ -258,7 +339,6 @@ cc_library( ], "@org_tensorflow//tensorflow:windows": CURL_WIN_SRCS, "//conditions:default": [ - "lib/vtls/openssl.c", ], }), hdrs = [ @@ -267,6 +347,7 @@ cc_library( "include/curl/easy.h", "include/curl/mprintf.h", "include/curl/multi.h", + "include/curl/options.h", "include/curl/stdcheaders.h", "include/curl/system.h", "include/curl/typecheck-gcc.h", @@ -370,6 +451,8 @@ cc_binary( "src/tool_doswin.h", "src/tool_easysrc.c", "src/tool_easysrc.h", + "src/tool_filetime.c", + "src/tool_filetime.h", "src/tool_formparse.c", "src/tool_formparse.h", "src/tool_getparam.c", @@ -404,6 +487,8 @@ cc_binary( "src/tool_paramhlp.h", "src/tool_parsecfg.c", "src/tool_parsecfg.h", + "src/tool_progress.c", + "src/tool_progress.h", "src/tool_sdecls.h", "src/tool_setopt.c", "src/tool_setopt.h", @@ -423,6 +508,8 @@ cc_binary( "src/tool_writeenv.h", "src/tool_writeout.c", "src/tool_writeout.h", + "src/tool_writeout_json.c", + "src/tool_writeout_json.h", "src/tool_xattr.c", "src/tool_xattr.h", ], From 2ccf8d6b17bbf003ff807508d392b42124d1f339 Mon Sep 17 00:00:00 2001 From: TensorFlow Release Automation Date: Thu, 3 Jun 2021 10:42:54 -0700 Subject: [PATCH 225/243] Insert release notes place-fill --- RELEASE.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/RELEASE.md b/RELEASE.md index 2c5fdb45403c39..2a354424a6bc79 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,3 +1,7 @@ +# Release 2.1.4 + + + # Release 2.1.3 ## Bug Fixes and Other Changes From 6e152318b61f340510102ca9000e898507faedce Mon Sep 17 00:00:00 2001 From: TensorFlow Release Automation Date: Thu, 3 Jun 2021 11:52:23 -0700 Subject: [PATCH 226/243] Update version numbers to 2.1.4 --- tensorflow/core/public/version.h | 2 +- tensorflow/tensorflow.bzl | 2 +- tensorflow/tools/pip_package/setup.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tensorflow/core/public/version.h b/tensorflow/core/public/version.h index e360eae2f64fb6..0f1a7a6ebc3d23 100644 --- a/tensorflow/core/public/version.h +++ b/tensorflow/core/public/version.h @@ -22,7 +22,7 @@ limitations under the License. // tensorflow/tools/pip_package/setup.py #define TF_MAJOR_VERSION 2 #define TF_MINOR_VERSION 1 -#define TF_PATCH_VERSION 3 +#define TF_PATCH_VERSION 4 // TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1", // "-beta", "-rc", "-rc.1") diff --git a/tensorflow/tensorflow.bzl b/tensorflow/tensorflow.bzl index 1cb6af650a635c..60623302fb8550 100644 --- a/tensorflow/tensorflow.bzl +++ b/tensorflow/tensorflow.bzl @@ -54,7 +54,7 @@ def register_extension_info(**kwargs): # not contain rc or alpha, only numbers. # Also update tensorflow/core/public/version.h # and tensorflow/tools/pip_package/setup.py -VERSION = "2.1.3" +VERSION = "2.1.4" VERSION_MAJOR = VERSION.split(".")[0] def if_v2(a): diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py index 7fd04c9869746b..099a6a6c3ce9d6 100644 --- a/tensorflow/tools/pip_package/setup.py +++ b/tensorflow/tools/pip_package/setup.py @@ -47,7 +47,7 @@ # result for pip. # Also update tensorflow/tensorflow.bzl and # tensorflow/core/public/version.h -_VERSION = '2.1.3' +_VERSION = '2.1.4' REQUIRED_PACKAGES = [ 'absl-py >= 0.7.0', From 840afa56069879093f2623fa552753565e040893 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 3 Jun 2021 12:09:43 -0700 Subject: [PATCH 227/243] Update RELEASE.md --- RELEASE.md | 112 ++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 111 insertions(+), 1 deletion(-) diff --git a/RELEASE.md b/RELEASE.md index 2a354424a6bc79..c44de3a7286733 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,6 +1,116 @@ # Release 2.1.4 - +This release introduces several vulnerability fixes: + + * Fixes a heap buffer overflow in `RaggedBinCount` ([CVE-2021-29512](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29512)) + * Fixes a heap out of bounds write in `RaggedBinCount` ([CVE-2021-29514](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29514)) + * Fixes a type confusion during tensor casts which leads to dereferencing null pointers ([CVE-2021-29513](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29513)) + * Fixes a reference binding to null pointer in `MatrixDiag*` ops ([CVE-2021-29515](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29515)) + * Fixes a null pointer dereference via invalid Ragged Tensors ([CVE-2021-29516](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29516)) + * Fixes a division by zero in `Conv3D` ([CVE-2021-29517](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29517)) + * Fixes vulnerabilities where session operations in eager mode lead to null pointer dereferences ([CVE-2021-29518](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29518)) + * Fixes a `CHECK`-fail in `SparseCross` caused by type confusion ([CVE-2021-29519](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29519)) + * Fixes a segfault in `SparseCountSparseOutput` ([CVE-2021-29521](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29521)) + * Fixes a heap buffer overflow in `Conv3DBackprop*` ([CVE-2021-29520](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29520)) + * Fixes a division by 0 in `Conv3DBackprop*` ([CVE-2021-29522](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29522)) + * Fixes a `CHECK`-fail in `AddManySparseToTensorsMap` ([CVE-2021-29523](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29523)) + * Fixes a division by 0 in `Conv2DBackpropFilter` ([CVE-2021-29524](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29524)) + * Fixes a division by 0 in `Conv2DBackpropInput` ([CVE-2021-29525](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29525)) + * Fixes a division by 0 in `Conv2D` ([CVE-2021-29526](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29526)) + * Fixes a division by 0 in `QuantizedConv2D` ([CVE-2021-29527](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29527)) + * Fixes a division by 0 in `QuantizedMul` ([CVE-2021-29528](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29528)) + * Fixes vulnerabilities caused by invalid validation in `SparseMatrixSparseCholesky` ([CVE-2021-29530](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29530)) + * Fixes a heap buffer overflow caused by rounding ([CVE-2021-29529](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29529)) + * Fixes a `CHECK`-fail in `tf.raw_ops.EncodePng` ([CVE-2021-29531](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29531)) + * Fixes a heap out of bounds read in `RaggedCross` ([CVE-2021-29532](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29532)) + * Fixes a `CHECK`-fail in `DrawBoundingBoxes` ([CVE-2021-29533](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29533)) + * Fixes a heap buffer overflow in `QuantizedMul` ([CVE-2021-29535](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29535)) + * Fixes a `CHECK`-fail in `SparseConcat` ([CVE-2021-29534](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29534)) + * Fixes a heap buffer overflow in `QuantizedResizeBilinear` ([CVE-2021-29537](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29537)) + * Fixes a heap buffer overflow in `QuantizedReshape` ([CVE-2021-29536](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29536)) + * Fixes a division by zero in `Conv2DBackpropFilter` ([CVE-2021-29538](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29538)) + * Fixes a heap buffer overflow in `Conv2DBackpropFilter` ([CVE-2021-29540](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29540)) + * Fixes a heap buffer overflow in `StringNGrams` ([CVE-2021-29542](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29542)) + * Fixes a null pointer dereference in `StringNGrams` ([CVE-2021-29541](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29541)) + * Fixes a `CHECK`-fail in `QuantizeAndDequantizeV4Grad` ([CVE-2021-29544](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29544)) + * Fixes a `CHECK`-fail in `CTCGreedyDecoder` ([CVE-2021-29543](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29543)) + * Fixes a heap buffer overflow in `SparseTensorToCSRSparseMatrix` ([CVE-2021-29545](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29545)) + * Fixes a division by 0 in `QuantizedBiasAdd` ([CVE-2021-29546](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29546)) + * Fixes a heap out of bounds in `QuantizedBatchNormWithGlobalNormalization` ([CVE-2021-29547](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29547)) + * Fixes a division by 0 in `QuantizedBatchNormWithGlobalNormalization` ([CVE-2021-29548](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29548)) + * Fixes a division by 0 in `QuantizedAdd` ([CVE-2021-29549](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29549)) + * Fixes a division by 0 in `FractionalAvgPool` ([CVE-2021-29550](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29550)) + * Fixes an OOB read in `MatrixTriangularSolve` ([CVE-2021-29551](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29551)) + * Fixes a heap OOB in `QuantizeAndDequantizeV3` ([CVE-2021-29553](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29553)) + * Fixes a `CHECK`-failure in `UnsortedSegmentJoin` ([CVE-2021-29552](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29552)) + * Fixes a division by 0 in `DenseCountSparseOutput` ([CVE-2021-29554](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29554)) + * Fixes a division by 0 in `FusedBatchNorm` ([CVE-2021-29555](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29555)) + * Fixes a division by 0 in `SparseMatMul` ([CVE-2021-29557](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29557)) + * Fixes a division by 0 in `Reverse` ([CVE-2021-29556](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29556)) + * Fixes a heap buffer overflow in `SparseSplit` ([CVE-2021-29558](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29558)) + * Fixes a heap OOB access in unicode ops ([CVE-2021-29559](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29559)) + * Fixes a heap buffer overflow in `RaggedTensorToTensor` ([CVE-2021-29560](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29560)) + * Fixes a `CHECK`-fail in `LoadAndRemapMatrix` ([CVE-2021-29561](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29561)) + * Fixes a `CHECK`-fail in `tf.raw_ops.IRFFT` ([CVE-2021-29562](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29562)) + * Fixes a `CHECK`-fail in `tf.raw_ops.RFFT` ([CVE-2021-29563](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29563)) + * Fixes a null pointer dereference in `EditDistance` ([CVE-2021-29564](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29564)) + * Fixes a null pointer dereference in `SparseFillEmptyRows` ([CVE-2021-29565](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29565)) + * Fixes a heap OOB access in `Dilation2DBackpropInput` ([CVE-2021-29566](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29566)) + * Fixes a reference binding to null in `ParameterizedTruncatedNormal` ([CVE-2021-29568](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29568)) + * Fixes a set of vulnerabilities caused by lack of validation in `SparseDenseCwiseMul` ([CVE-2021-29567](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29567)) + * Fixes a heap out of bounds read in `MaxPoolGradWithArgmax` ([CVE-2021-29570](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29570)) + * Fixes a heap out of bounds read in `RequantizationRange` ([CVE-2021-29569](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29569)) + * Fixes a memory corruption in `DrawBoundingBoxesV2` ([CVE-2021-29571](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29571)) + * Fixes a reference binding to nullptr in `SdcaOptimizer` ([CVE-2021-29572](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29572)) + * Fixes an overflow and a denial of service in `tf.raw_ops.ReverseSequence` ([CVE-2021-29575](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29575)) + * Fixes a division by 0 in `MaxPoolGradWithArgmax` ([CVE-2021-29573](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29573)) + * Fixes an undefined behavior in `MaxPool3DGradGrad` ([CVE-2021-29574](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29574)) + * Fixes a heap buffer overflow in `MaxPool3DGradGrad` ([CVE-2021-29576](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29576)) + * Fixes a heap buffer overflow in `AvgPool3DGrad` ([CVE-2021-29577](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29577)) + * Fixes an undefined behavior and a `CHECK`-fail in `FractionalMaxPoolGrad` ([CVE-2021-29580](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29580)) + * Fixes a heap buffer overflow in `FractionalAvgPoolGrad` ([CVE-2021-29578](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29578)) + * Fixes a heap buffer overflow in `MaxPoolGrad` ([CVE-2021-29579](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29579)) + * Fixes a segfault in `CTCBeamSearchDecoder` ([CVE-2021-29581](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29581)) + * Fixes a heap OOB read in `tf.raw_ops.Dequantize` ([CVE-2021-29582](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29582)) + * Fixes a `CHECK`-fail due to integer overflow ([CVE-2021-29584](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29584)) + * Fixes a heap buffer overflow and undefined behavior in `FusedBatchNorm` ([CVE-2021-29583](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29583)) + * Fixes a division by zero in padding computation in TFLite ([CVE-2021-29585](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29585)) + * Fixes a division by zero in optimized pooling implementations in TFLite ([CVE-2021-29586](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29586)) + * Fixes a division by zero in TFLite's implementation of `SpaceToDepth` ([CVE-2021-29587](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29587)) + * Fixes a division by zero in TFLite's implementation of `GatherNd` ([CVE-2021-29589](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29589)) + * Fixes a division by zero in TFLite's implementation of `TransposeConv` ([CVE-2021-29588](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29588)) + * Fixes a heap OOB read in TFLite's implementation of `Minimum` or `Maximum` ([CVE-2021-29590](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29590)) + * Fixes a null pointer dereference in TFLite's `Reshape` operator ([CVE-2021-29592](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29592)) + * Fixes a stack overflow due to looping TFLite subgraph ([CVE-2021-29591](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29591)) + * Fixes a division by zero in TFLite's implementation of `DepthToSpace` ([CVE-2021-29595](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29595)) + * Fixes a division by zero in TFLite's convolution code ([CVE-2021-29594](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29594)) + * Fixes a division by zero in TFLite's implementation of `EmbeddingLookup` ([CVE-2021-29596](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29596)) + * Fixes a division by zero in TFLite's implementation of `BatchToSpaceNd` ([CVE-2021-29593](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29593)) + * Fixes a division by zero in TFLite's implementation of `SpaceToBatchNd` ([CVE-2021-29597](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29597)) + * Fixes a division by zero in TFLite's implementation of `SVDF` ([CVE-2021-29598](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29598)) + * Fixes a division by zero in TFLite's implementation of `Split` ([CVE-2021-29599](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29599)) + * Fixes a division by zero in TFLite's implementation of `OneHot` ([CVE-2021-29600](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29600)) + * Fixes a division by zero in TFLite's implementation of `DepthwiseConv` ([CVE-2021-29602](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29602)) + * Fixes a division by zero in TFLite's implementation of hashtable lookup ([CVE-2021-29604](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29604)) + * Fixes a integer overflow in TFLite concatentation ([CVE-2021-29601](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29601)) + * Fixes a integer overflow in TFLite memory allocation ([CVE-2021-29605](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29605)) + * Fixes a heap OOB write in TFLite ([CVE-2021-29603](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29603)) + * Fixes a heap OOB read in TFLite ([CVE-2021-29606](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29606)) + * Fixes a heap OOB and null pointer dereference in `RaggedTensorToTensor` ([CVE-2021-29608](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29608)) + * Fixes vulnerabilities caused by incomplete validation in `SparseAdd` ([CVE-2021-29609](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29609)) + * Fixes vulnerabilities caused by incomplete validation in `SparseSparseMinimum` ([CVE-2021-29607](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29607)) + * Fixes vulnerabilities caused by incomplete validation in `SparseReshape` ([CVE-2021-29611](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29611)) + * Fixes vulnerabilities caused by invalid validation in `QuantizeAndDequantizeV2` ([CVE-2021-29610](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29610)) + * Fixes a heap buffer overflow in `BandedTriangularSolve` ([CVE-2021-29612](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29612)) + * Fixes vulnerabilities caused by incomplete validation in `tf.raw_ops.CTCLoss` ([CVE-2021-29613](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29613)) + * Fixes an interpreter crash from vulnerabilities in `tf.io.decode_raw` ([CVE-2021-29614](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29614)) + * Fixes a stack overflow in `ParseAttrValue` with nested tensors ([CVE-2021-29615](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29615)) + * Fixes a null dereference in Grappler's `TrySimplify` ([CVE-2021-29616](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29616)) + * Fixes a crash in `tf.transpose` with complex inputs ([CVE-2021-29618](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29618)) + * Fixes a crash in `tf.strings.substr` due to `CHECK`-fail ([CVE-2021-29617](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29617)) + * Fixes a segfault in `tf.raw_ops.SparseCountSparseOutput` ([CVE-2021-29619](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29619)) + * Fixes a segfault in `tf.raw_ops.ImmutableConst` ([CVE-2021-29539](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29539)) + * Updates `curl` to `7.76.0` to handle [CVE-2020-8169](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8169), [CVE-2020-8177](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8177), [CVE-2020-8231](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8231), [CVE-2020-8284](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8284), [CVE-2020-8285](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8285) and [CVE-2020-8286](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-8286). # Release 2.1.3 From 5306d810ff146f355c2a527d435621133b51a714 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 3 Jun 2021 12:20:37 -0700 Subject: [PATCH 228/243] Fix broken cherrypick --- tensorflow/core/kernels/sparse_cross_op.cc | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/tensorflow/core/kernels/sparse_cross_op.cc b/tensorflow/core/kernels/sparse_cross_op.cc index 7843e651c599c0..a508bc76ba553f 100644 --- a/tensorflow/core/kernels/sparse_cross_op.cc +++ b/tensorflow/core/kernels/sparse_cross_op.cc @@ -311,11 +311,8 @@ class SparseCrossOp : public OpKernel { context->input_list("dense_inputs", &dense_list_in)); DataType internal_type = internal_type_; OP_REQUIRES_OK( - context, ValidateInput(indices_list_in, values_list_in, shapes_list_in, - dense_list_in, internal_type)); - - ValidateInput(context, indices_list_in, values_list_in, shapes_list_in, - dense_list_in); + context, ValidateInput(context, indices_list_in, values_list_in, + shapes_list_in, dense_list_in, internal_type)); std::vector>> columns = GenerateColumnsFromInput(indices_list_in, values_list_in, From 847bcccb086612b2eba631137e18c0d256dfdea4 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 3 Jun 2021 13:00:10 -0700 Subject: [PATCH 229/243] Fix broekn branch after cherrypick --- tensorflow/core/kernels/sparse_cross_op.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/core/kernels/sparse_cross_op.cc b/tensorflow/core/kernels/sparse_cross_op.cc index a508bc76ba553f..cf02e89b5ea48b 100644 --- a/tensorflow/core/kernels/sparse_cross_op.cc +++ b/tensorflow/core/kernels/sparse_cross_op.cc @@ -584,6 +584,7 @@ class SparseCrossOp : public OpKernel { } int64 num_buckets_; uint64 hash_key_; + DataType internal_type_; }; REGISTER_KERNEL_BUILDER(Name("SparseCross") From 70bee4c640cf7d475e65fa8f69b544409a7ea4bf Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 3 Jun 2021 14:49:24 -0700 Subject: [PATCH 230/243] Fix cherrypick breakage --- tensorflow/lite/core/subgraph.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tensorflow/lite/core/subgraph.cc b/tensorflow/lite/core/subgraph.cc index 48b1a51f26e75a..4ec8a6b0097454 100644 --- a/tensorflow/lite/core/subgraph.cc +++ b/tensorflow/lite/core/subgraph.cc @@ -129,8 +129,8 @@ class SubgraphGuard { SubgraphGuard(TfLiteContext* context, bool* is_subgraph_in_use) : is_subgraph_in_use_(is_subgraph_in_use) { if (*is_subgraph_in_use_) { - TF_LITE_KERNEL_LOG( - context, + TF_LITE_ENSURE_MSG( + context, false, "Subgraph is already in use. Using an interpreter or a subgraph in " "multiple threads is not supported. Recursion in the graph is not " "supported."); From 1feecbe60e6162f5a0d5dfc6d25aa797ffd49982 Mon Sep 17 00:00:00 2001 From: geetachavan1 <53313357+geetachavan1@users.noreply.github.com> Date: Thu, 3 Jun 2021 16:34:25 -0700 Subject: [PATCH 231/243] Update sparse_fill_empty_rows_op.cc --- tensorflow/core/kernels/sparse_fill_empty_rows_op.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc b/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc index 7b27015265dfec..55f0b577417785 100644 --- a/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc +++ b/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc @@ -67,8 +67,7 @@ class SparseFillEmptyRowsOp : public OpKernel { // TODO(ebrevdo): add shape checks between values, indices, // dense_shape. Also add check that dense rank > 0. OP_REQUIRES(context, dense_shape_t.NumElements() != 0, - errors::InvalidArgument("Dense shape cannot be empty."), - done); + errors::InvalidArgument("Dense shape cannot be empty.")); const T& default_value = default_value_t->scalar()(); const auto indices = indices_t->matrix(); const auto values = values_t->vec(); From 600aab4ffa8fed88b594bcd3b4abe95fe7b4f0ae Mon Sep 17 00:00:00 2001 From: geetachavan1 <53313357+geetachavan1@users.noreply.github.com> Date: Thu, 3 Jun 2021 17:43:25 -0700 Subject: [PATCH 232/243] Update jsoncpp.BUILD --- third_party/jsoncpp.BUILD | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/third_party/jsoncpp.BUILD b/third_party/jsoncpp.BUILD index f41964477ce963..ee3abd70502400 100644 --- a/third_party/jsoncpp.BUILD +++ b/third_party/jsoncpp.BUILD @@ -13,9 +13,9 @@ cc_library( ], hdrs = [ "include/json/config.h", - "include/json/features.h", "include/json/forwards.h", "include/json/json.h", + "include/json/json_features.h", "include/json/reader.h", "include/json/value.h", "include/json/version.h", From 52128d4dd0b3c9cf0fd9e9f49a49a20c0bebb772 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 4 Jun 2021 13:20:42 -0700 Subject: [PATCH 233/243] Update conv.cc --- tensorflow/lite/kernels/conv.cc | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/tensorflow/lite/kernels/conv.cc b/tensorflow/lite/kernels/conv.cc index 191711bd40e6d7..c882edcfb03a63 100644 --- a/tensorflow/lite/kernels/conv.cc +++ b/tensorflow/lite/kernels/conv.cc @@ -649,11 +649,12 @@ void EvalFloat(TfLiteContext* context, TfLiteNode* node, } template -void EvalHybridPerChannel(TfLiteContext* context, TfLiteNode* node, - TfLiteConvParams* params, OpData* data, - TfLiteTensor* input, TfLiteTensor* filter, - TfLiteTensor* bias, TfLiteTensor* im2col, - TfLiteTensor* hwcn_weights, TfLiteTensor* output) { +TfLiteStatus EvalHybridPerChannel(TfLiteContext* context, TfLiteNode* node, + TfLiteConvParams* params, OpData* data, + TfLiteTensor* input, TfLiteTensor* filter, + TfLiteTensor* bias, TfLiteTensor* im2col, + TfLiteTensor* hwcn_weights, + TfLiteTensor* output) { float output_activation_min, output_activation_max; CalculateActivationRange(params->activation, &output_activation_min, &output_activation_max); @@ -721,10 +722,11 @@ void EvalHybridPerChannel(TfLiteContext* context, TfLiteNode* node, } template -void EvalHybrid(TfLiteContext* context, TfLiteNode* node, - TfLiteConvParams* params, OpData* data, TfLiteTensor* input, - TfLiteTensor* filter, TfLiteTensor* bias, TfLiteTensor* im2col, - TfLiteTensor* hwcn_weights, TfLiteTensor* output) { +TfLiteStatus EvalHybrid(TfLiteContext* context, TfLiteNode* node, + TfLiteConvParams* params, OpData* data, + TfLiteTensor* input, TfLiteTensor* filter, + TfLiteTensor* bias, TfLiteTensor* im2col, + TfLiteTensor* hwcn_weights, TfLiteTensor* output) { float output_activation_min, output_activation_max; CalculateActivationRange(params->activation, &output_activation_min, &output_activation_max); @@ -811,12 +813,15 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { case kTfLiteFloat32: if (filter->type == kTfLiteUInt8 || filter->type == kTfLiteInt8) { if (is_hybrid_per_channel) { - EvalHybridPerChannel(context, node, params, data, input, - filter, bias, im2col, hwcn_weights, - output); + TF_LITE_ENSURE_OK(context, EvalHybridPerChannel( + context, node, params, data, input, + filter, bias, im2col, hwcn_weights, + output)); } else { - EvalHybrid(context, node, params, data, input, filter, - bias, im2col, hwcn_weights, output); + TF_LITE_ENSURE_OK(context, + EvalHybrid(context, node, params, data, + input, filter, bias, im2col, + hwcn_weights, output)); } } else { EvalFloat(context, node, params, data, input, filter, bias, From 51aa6a231a0978075edb849a43bd702eb15bbfaf Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 4 Jun 2021 14:03:59 -0700 Subject: [PATCH 234/243] Fix usage of TF_LITE_ENSURE_MSG in constructor --- tensorflow/lite/core/subgraph.cc | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tensorflow/lite/core/subgraph.cc b/tensorflow/lite/core/subgraph.cc index 4ec8a6b0097454..50d7e1cb724b2b 100644 --- a/tensorflow/lite/core/subgraph.cc +++ b/tensorflow/lite/core/subgraph.cc @@ -129,11 +129,7 @@ class SubgraphGuard { SubgraphGuard(TfLiteContext* context, bool* is_subgraph_in_use) : is_subgraph_in_use_(is_subgraph_in_use) { if (*is_subgraph_in_use_) { - TF_LITE_ENSURE_MSG( - context, false, - "Subgraph is already in use. Using an interpreter or a subgraph in " - "multiple threads is not supported. Recursion in the graph is not " - "supported."); + context->ReportError(context, "Subgraph is already in use."); status_ = kTfLiteError; } else { *is_subgraph_in_use_ = true; From 24db7c1902450c90ebf7a7df37ffd2b6712c4b4c Mon Sep 17 00:00:00 2001 From: geetachavan1 <53313357+geetachavan1@users.noreply.github.com> Date: Fri, 4 Jun 2021 15:25:58 -0700 Subject: [PATCH 235/243] Update ragged_tensor_to_variant_op.cc --- tensorflow/core/kernels/ragged_tensor_to_variant_op.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc b/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc index 1457e5e2c73f7d..49c4a2411b8c3c 100644 --- a/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc +++ b/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc @@ -176,7 +176,7 @@ class RaggedTensorToVariantOp : public OpKernel { // Unbatch the Ragged Tensor and encode the components. std::vector ragged_components; auto batched_splits_top_vec = - batched_ragged_input.splits(0).vec(); + batched_ragged_input.nested_splits[0].vec(); int num_components = batched_splits_top_vec.size() - 1; OP_REQUIRES(context, num_components >= 0, errors::Internal("Invalid split argument.")); From 5533d84fd691b3666df8bc97f3bff1d9ff5ff39e Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 4 Jun 2021 16:07:32 -0700 Subject: [PATCH 236/243] Fix branch after cherrypick --- tensorflow/core/kernels/sparse_cross_op.cc | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/tensorflow/core/kernels/sparse_cross_op.cc b/tensorflow/core/kernels/sparse_cross_op.cc index cf02e89b5ea48b..d1181f923ea914 100644 --- a/tensorflow/core/kernels/sparse_cross_op.cc +++ b/tensorflow/core/kernels/sparse_cross_op.cc @@ -313,6 +313,7 @@ class SparseCrossOp : public OpKernel { OP_REQUIRES_OK( context, ValidateInput(context, indices_list_in, values_list_in, shapes_list_in, dense_list_in, internal_type)); + OP_REQUIRES_OK(context, context->status()); std::vector>> columns = GenerateColumnsFromInput(indices_list_in, values_list_in, @@ -351,12 +352,12 @@ class SparseCrossOp : public OpKernel { private: // Validates input tensors. - void ValidateInput(OpKernelContext* context, - const OpInputList& indices_list_in, - const OpInputList& values_list_in, - const OpInputList& shapes_list_in, - const OpInputList& dense_list_in, - const DataType& internal_type) { + Status ValidateInput(OpKernelContext* context, + const OpInputList& indices_list_in, + const OpInputList& values_list_in, + const OpInputList& shapes_list_in, + const OpInputList& dense_list_in, + const DataType& internal_type) { const auto size = indices_list_in.size(); // Only perform internal_type check for SparseCrossOp. // Check if the internal_type is not invalid before doing so. @@ -458,6 +459,8 @@ class SparseCrossOp : public OpKernel { " got ", dense_list_in[i].dim_size(0), " at dense tensor ", i)); } + + return Status::OK(); } // Calculate the batch size from either the shapes input or the dense input. From f249577a5062151336739e8fcd471ac015ff1f07 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 4 Jun 2021 17:37:31 -0700 Subject: [PATCH 237/243] Another attempt at fixing branch --- tensorflow/core/kernels/sparse_cross_op.cc | 39 ++++++++++++---------- 1 file changed, 22 insertions(+), 17 deletions(-) diff --git a/tensorflow/core/kernels/sparse_cross_op.cc b/tensorflow/core/kernels/sparse_cross_op.cc index d1181f923ea914..745f07003c831c 100644 --- a/tensorflow/core/kernels/sparse_cross_op.cc +++ b/tensorflow/core/kernels/sparse_cross_op.cc @@ -310,9 +310,8 @@ class SparseCrossOp : public OpKernel { OP_REQUIRES_OK(context, context->input_list("dense_inputs", &dense_list_in)); DataType internal_type = internal_type_; - OP_REQUIRES_OK( - context, ValidateInput(context, indices_list_in, values_list_in, - shapes_list_in, dense_list_in, internal_type)); + ValidateInput(context, indices_list_in, values_list_in, + shapes_list_in, dense_list_in, internal_type)); OP_REQUIRES_OK(context, context->status()); std::vector>> columns = @@ -365,9 +364,11 @@ class SparseCrossOp : public OpKernel { // Validates indices_list_in OpInputList. for (int i = 0; i < size; i++) { if (check_type && indices_list_in[i].dtype() != DT_INT64) { - return errors::InvalidArgument("Input indices should be of type ", - DT_INT64, " but received ", - indices_list_in[i].dtype()); + OP_REQUIRES(context, false, + errors::InvalidArgument( + "Input indices should be of type ", + DT_INT64, " but received ", + indices_list_in[i].dtype())); } OP_REQUIRES( context, TensorShapeUtils::IsMatrix(indices_list_in[i].shape()), @@ -391,9 +392,11 @@ class SparseCrossOp : public OpKernel { // int64. if (check_type && internal_type == DT_STRING && values_list_in[i].dtype() == DT_INT64) { - return errors::InvalidArgument("Input values should be of internal type ", - internal_type, " but received ", - values_list_in[i].dtype()); + OP_REQUIRES(context, false, + errors::InvalidArgument( + "Input values should be of internal type ", + internal_type, " but received ", + values_list_in[i].dtype())); } OP_REQUIRES( context, TensorShapeUtils::IsVector(values_list_in[i].shape()), @@ -418,9 +421,11 @@ class SparseCrossOp : public OpKernel { const auto batch_size = CalculateBatchSize(shapes_list_in, dense_list_in); for (int i = 0; i < size; i++) { if (check_type && shapes_list_in[i].dtype() != DT_INT64) { - return errors::InvalidArgument("Input shape should be of type ", DT_INT64, - " but received ", - shapes_list_in[i].dtype()); + OP_REQUIRES(context, false, + errors::InvalidArgument( + "Input shape should be of type ", + DT_INT64, " but received ", + shapes_list_in[i].dtype())); } OP_REQUIRES( context, TensorShapeUtils::IsVector(shapes_list_in[i].shape()), @@ -445,9 +450,11 @@ class SparseCrossOp : public OpKernel { // int64. if (check_type && internal_type == DT_STRING && dense_list_in[i].dtype() == DT_INT64) { - return errors::InvalidArgument("Dense inputs should be of internal type ", - internal_type, " but received ", - dense_list_in[i].dtype()); + OP_REQUIRES(context, false, + errors::InvalidArgument( + "Dense inputs should be of internal type ", + internal_type, " but received ", + dense_list_in[i].dtype())); } OP_REQUIRES( context, TensorShapeUtils::IsMatrix(dense_list_in[i].shape()), @@ -459,8 +466,6 @@ class SparseCrossOp : public OpKernel { " got ", dense_list_in[i].dim_size(0), " at dense tensor ", i)); } - - return Status::OK(); } // Calculate the batch size from either the shapes input or the dense input. From ccb067f6907d217413ad00e523c9b73456106b48 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 4 Jun 2021 17:38:26 -0700 Subject: [PATCH 238/243] Update sparse_cross_op.cc --- tensorflow/core/kernels/sparse_cross_op.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tensorflow/core/kernels/sparse_cross_op.cc b/tensorflow/core/kernels/sparse_cross_op.cc index 745f07003c831c..f9428c7711adf2 100644 --- a/tensorflow/core/kernels/sparse_cross_op.cc +++ b/tensorflow/core/kernels/sparse_cross_op.cc @@ -351,8 +351,8 @@ class SparseCrossOp : public OpKernel { private: // Validates input tensors. - Status ValidateInput(OpKernelContext* context, - const OpInputList& indices_list_in, + void ValidateInput(OpKernelContext* context, + const OpInputList& indices_list_in, const OpInputList& values_list_in, const OpInputList& shapes_list_in, const OpInputList& dense_list_in, From a89066c3b5ac9e728597fb6215d92cb931309d94 Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Fri, 4 Jun 2021 17:39:06 -0700 Subject: [PATCH 239/243] Update sparse_cross_op.cc --- tensorflow/core/kernels/sparse_cross_op.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tensorflow/core/kernels/sparse_cross_op.cc b/tensorflow/core/kernels/sparse_cross_op.cc index f9428c7711adf2..9533f282782c55 100644 --- a/tensorflow/core/kernels/sparse_cross_op.cc +++ b/tensorflow/core/kernels/sparse_cross_op.cc @@ -353,10 +353,10 @@ class SparseCrossOp : public OpKernel { // Validates input tensors. void ValidateInput(OpKernelContext* context, const OpInputList& indices_list_in, - const OpInputList& values_list_in, - const OpInputList& shapes_list_in, - const OpInputList& dense_list_in, - const DataType& internal_type) { + const OpInputList& values_list_in, + const OpInputList& shapes_list_in, + const OpInputList& dense_list_in, + const DataType& internal_type) { const auto size = indices_list_in.size(); // Only perform internal_type check for SparseCrossOp. // Check if the internal_type is not invalid before doing so. From acba599cdb2cb504b3faf3f292be7fc7896679f6 Mon Sep 17 00:00:00 2001 From: geetachavan1 <53313357+geetachavan1@users.noreply.github.com> Date: Mon, 7 Jun 2021 12:08:33 -0700 Subject: [PATCH 240/243] Update sparse_cross_op.cc --- tensorflow/core/kernels/sparse_cross_op.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/sparse_cross_op.cc b/tensorflow/core/kernels/sparse_cross_op.cc index 9533f282782c55..1e68a6bcadcdc8 100644 --- a/tensorflow/core/kernels/sparse_cross_op.cc +++ b/tensorflow/core/kernels/sparse_cross_op.cc @@ -311,7 +311,7 @@ class SparseCrossOp : public OpKernel { context->input_list("dense_inputs", &dense_list_in)); DataType internal_type = internal_type_; ValidateInput(context, indices_list_in, values_list_in, - shapes_list_in, dense_list_in, internal_type)); + shapes_list_in, dense_list_in, internal_type); OP_REQUIRES_OK(context, context->status()); std::vector>> columns = From a6e8a8843776adf38c0c0bed4488b97cd8a78f5d Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Tue, 8 Jun 2021 08:14:02 -0700 Subject: [PATCH 241/243] Update sparse_fill_empty_rows_op.cc --- tensorflow/core/kernels/sparse_fill_empty_rows_op.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc b/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc index 55f0b577417785..c30adb7a137176 100644 --- a/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc +++ b/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc @@ -66,7 +66,7 @@ class SparseFillEmptyRowsOp : public OpKernel { default_value_t->shape().DebugString())); // TODO(ebrevdo): add shape checks between values, indices, // dense_shape. Also add check that dense rank > 0. - OP_REQUIRES(context, dense_shape_t.NumElements() != 0, + OP_REQUIRES(context, dense_shape_t->NumElements() != 0, errors::InvalidArgument("Dense shape cannot be empty.")); const T& default_value = default_value_t->scalar()(); const auto indices = indices_t->matrix(); From eb18be899ddae6e54d8cd35ce7d833703fb7639d Mon Sep 17 00:00:00 2001 From: geetachavan1 <53313357+geetachavan1@users.noreply.github.com> Date: Tue, 8 Jun 2021 14:24:36 -0700 Subject: [PATCH 242/243] Update raw_ops_test.py --- tensorflow/python/ops/raw_ops_test.py | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/tensorflow/python/ops/raw_ops_test.py b/tensorflow/python/ops/raw_ops_test.py index 089db57576c00e..814ab470929800 100644 --- a/tensorflow/python/ops/raw_ops_test.py +++ b/tensorflow/python/ops/raw_ops_test.py @@ -63,22 +63,6 @@ def testDefaults(self): gen_math_ops.Any(input=x, axis=0), gen_math_ops.Any(input=x, axis=0, keep_dims=False)) - @parameterized.parameters([[0, 8]], [[-1, 6]]) - def testStringNGramsBadDataSplits(self, splits): - data = ["aa", "bb", "cc", "dd", "ee", "ff"] - with self.assertRaisesRegex(errors.InvalidArgumentError, - "Invalid split value"): - self.evaluate( - gen_string_ops.string_n_grams( - data=data, - data_splits=splits, - separator="", - ngram_widths=[2], - left_pad="", - right_pad="", - pad_width=0, - preserve_short_sequences=False)) - def testGetSessionHandle(self): if context.executing_eagerly(): with self.assertRaisesRegex( From e2bc66d8f0481bc70135684b273c388fd92a8c10 Mon Sep 17 00:00:00 2001 From: geetachavan1 <53313357+geetachavan1@users.noreply.github.com> Date: Tue, 8 Jun 2021 15:58:44 -0700 Subject: [PATCH 243/243] Update raw_ops_test.py --- tensorflow/python/ops/raw_ops_test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tensorflow/python/ops/raw_ops_test.py b/tensorflow/python/ops/raw_ops_test.py index 814ab470929800..76250102245180 100644 --- a/tensorflow/python/ops/raw_ops_test.py +++ b/tensorflow/python/ops/raw_ops_test.py @@ -27,7 +27,6 @@ from tensorflow.python.framework import test_util from tensorflow.python.ops import gen_data_flow_ops from tensorflow.python.ops import gen_math_ops -from tensorflow.python.ops import gen_string_ops from tensorflow.python.platform import test