diff --git a/RELEASE.md b/RELEASE.md
index 1a037ce595a88a..e491cc33c5fc94 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -1,3 +1,13 @@
+# Release 1.6.1
+
+## Bug Fixes and Other Changes
+* Fixes the following potential security vulnerabilities:
+  * The TensorFlow Lite TOCO compiler did not perform correct boundary checks when reading from some fields within TFLite files.
+  * The block size in the meta file could contain a large int64 value which causes an integer overflow upon addition. Subsequent code using the overflowed value as an index may cause an out-of-bounds read.
+  * TensorFlow checkpoint meta files use Google's [snappy](https://github.com/google/snappy) compression/decompression library. There is a memcpy-param-overlap issue in the version of snappy currently used by TensorFlow.
+  * A maliciously crafted configuration file passed into the TensorFlow XLA compiler could cause an invalid memory access and/or a heap buffer overflow.
+
+
 # Release 1.6.0
 
 ## Breaking Changes
diff --git a/tensorflow/compiler/tf2xla/tf2xla_test.cc b/tensorflow/compiler/tf2xla/tf2xla_test.cc
index a9978e697b0917..b813668a9edd3a 100644
--- a/tensorflow/compiler/tf2xla/tf2xla_test.cc
+++ b/tensorflow/compiler/tf2xla/tf2xla_test.cc
@@ -90,6 +90,11 @@ TEST(ConvertGraphDefToXla, Sum) {
   TF_EXPECT_OK(result_or.status());
   std::unique_ptr<xla::Literal> result = std::move(result_or.ValueOrDie());
   EXPECT_EQ("(s32[]) (\n42\n)", result->ToString());
+
+  config.mutable_feed(0)->mutable_id()->set_output_index(
+      123); /* invalid output_index */
+  EXPECT_TRUE(errors::IsInvalidArgument(
+      ConvertGraphDefToXla(graph_def, config, client, &computation)));
 }
 
 }  // namespace
diff --git a/tensorflow/compiler/tf2xla/tf2xla_util.cc b/tensorflow/compiler/tf2xla/tf2xla_util.cc
index f428a194328935..2fc77cc4bc536a 100644
--- a/tensorflow/compiler/tf2xla/tf2xla_util.cc
+++ b/tensorflow/compiler/tf2xla/tf2xla_util.cc
@@ -151,8 +151,15 @@ Status AddPlaceholdersForFeeds(
       Status status;
       Node* feed_node = g.AddNode(gd.node(0), &status);
       TF_RETURN_IF_ERROR(status);
-      info.data_type =
-          BaseType(feed_node->output_type(info.feed->id().output_index()));
+
+      if (info.feed->id().output_index() < feed_node->num_outputs()) {
+        info.data_type =
+            BaseType(feed_node->output_type(info.feed->id().output_index()));
+      } else {
+        return errors::InvalidArgument(
+            "Invalid output_index ", info.feed->id().output_index(),
+            " for feed node ", info.feed->id().node_name());
+      }
     }
   }
 
diff --git a/tensorflow/core/kernels/decode_bmp_op.cc b/tensorflow/core/kernels/decode_bmp_op.cc
index b7d120a617849b..b4dcf0a74b336e 100644
--- a/tensorflow/core/kernels/decode_bmp_op.cc
+++ b/tensorflow/core/kernels/decode_bmp_op.cc
@@ -91,15 +91,32 @@ class DecodeBmpOp : public OpKernel {
                 errors::InvalidArgument(
                     "Number of channels must be 1, 3 or 4, was ", channels_));
 
+    OP_REQUIRES(context, width > 0 && header_size >= 0,
+                errors::InvalidArgument("Width must be positive"));
+    OP_REQUIRES(context, header_size >= 0,
+                errors::InvalidArgument("header size must be nonnegative"));
+
+    // The real requirement is < 2^31 minus some headers and channel data,
+    // so rounding down to something that's still ridiculously big.
+    OP_REQUIRES(
+        context,
+        (static_cast<int64>(width) * std::abs(static_cast<int64>(height))) <
+            static_cast<int64>(std::numeric_limits<int32>::max() / 8),
+        errors::InvalidArgument(
+            "Total possible pixel bytes must be less than 2^30"));
+
+    const int32 abs_height = abs(height);
+
     // there may be padding bytes when the width is not a multiple of 4 bytes
     // 8 * channels == bits per pixel
     const int row_size = (8 * channels_ * width + 31) / 32 * 4;
 
-    const int last_pixel_offset =
-        header_size + (abs(height) - 1) * row_size + (width - 1) * channels_;
+    const int64 last_pixel_offset = static_cast<int64>(header_size) +
+                                    (abs_height - 1) * row_size +
+                                    (width - 1) * channels_;
 
     // [expected file size] = [last pixel offset] + [last pixel size=channels]
-    const int expected_file_size = last_pixel_offset + channels_;
+    const int64 expected_file_size = last_pixel_offset + channels_;
 
     OP_REQUIRES(
         context, (expected_file_size <= input.size()),
@@ -115,12 +132,12 @@ class DecodeBmpOp : public OpKernel {
     Tensor* output = nullptr;
     OP_REQUIRES_OK(
         context, context->allocate_output(
-                     0, TensorShape({abs(height), width, channels_}), &output));
+                     0, TensorShape({abs_height, width, channels_}), &output));
 
     const uint8* bmp_pixels = &img_bytes[header_size];
 
     Decode(bmp_pixels, row_size, output->flat<uint8>().data(), width,
-           abs(height), channels_, top_down);
+           abs_height, channels_, top_down);
   }
 
   uint8* Decode(const uint8* input, const int row_size, uint8* const output,
diff --git a/tensorflow/core/kernels/fuzzing/BUILD b/tensorflow/core/kernels/fuzzing/BUILD
index 41af950d7deca5..9a7eca03ce276d 100644
--- a/tensorflow/core/kernels/fuzzing/BUILD
+++ b/tensorflow/core/kernels/fuzzing/BUILD
@@ -43,6 +43,8 @@ tf_ops_fuzz_target_lib("decode_base64")
 
 tf_ops_fuzz_target_lib("encode_jpeg")
 
+tf_ops_fuzz_target_lib("decode_bmp")
+
 tf_ops_fuzz_target_lib("decode_png")
 
 tf_ops_fuzz_target_lib("decode_jpeg")
diff --git a/tensorflow/core/kernels/fuzzing/decode_bmp_fuzz.cc b/tensorflow/core/kernels/fuzzing/decode_bmp_fuzz.cc
new file mode 100644
index 00000000000000..01c56ac6f67223
--- /dev/null
+++ b/tensorflow/core/kernels/fuzzing/decode_bmp_fuzz.cc
@@ -0,0 +1,29 @@
+/* Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/cc/ops/standard_ops.h"
+#include "tensorflow/core/kernels/fuzzing/fuzz_session.h"
+
+namespace tensorflow {
+namespace fuzzing {
+
+class FuzzDecodeBmp : public FuzzStringInputOp {
+  SINGLE_INPUT_OP_BUILDER(DT_STRING, DecodeBmp);
+};
+
+STANDARD_TF_FUZZ_FUNCTION(FuzzDecodeBmp);
+
+}  // end namespace fuzzing
+}  // end namespace tensorflow
diff --git a/tensorflow/core/platform/macros.h b/tensorflow/core/platform/macros.h
index 6119edfd5a63d1..1b1faed7033ff5 100644
--- a/tensorflow/core/platform/macros.h
+++ b/tensorflow/core/platform/macros.h
@@ -67,11 +67,18 @@ limitations under the License.
#define TF_EXPORT __attribute__((visibility("default"))) #endif // COMPILER_MSVC -// GCC can be told that a certain branch is not likely to be taken (for -// instance, a CHECK failure), and use that information in static analysis. -// Giving it this information can help it optimize for the common case in -// the absence of better information (ie. -fprofile-arcs). -#if defined(COMPILER_GCC3) +#ifdef __has_builtin +#define TF_HAS_BUILTIN(x) __has_builtin(x) +#else +#define TF_HAS_BUILTIN(x) 0 +#endif + +// Compilers can be told that a certain branch is not likely to be taken +// (for instance, a CHECK failure), and use that information in static +// analysis. Giving it this information can help it optimize for the +// common case in the absence of better information (ie. +// -fprofile-arcs). +#if TF_HAS_BUILTIN(__builtin_expect) || (defined(__GNUC__) && __GNUC__ >= 3) #define TF_PREDICT_FALSE(x) (__builtin_expect(x, 0)) #define TF_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1)) #else diff --git a/tensorflow/core/public/version.h b/tensorflow/core/public/version.h index 22f2c02b78b0b0..349f797a4581c5 100644 --- a/tensorflow/core/public/version.h +++ b/tensorflow/core/public/version.h @@ -20,7 +20,7 @@ limitations under the License. #define TF_MAJOR_VERSION 1 #define TF_MINOR_VERSION 6 -#define TF_PATCH_VERSION 0 +#define TF_PATCH_VERSION 1 // TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1", // "-beta", "-rc", "-rc.1") diff --git a/tensorflow/docs_src/get_started/get_started_for_beginners.md b/tensorflow/docs_src/get_started/get_started_for_beginners.md index 446bae4b896b3e..4cb0fb952fcd23 100644 --- a/tensorflow/docs_src/get_started/get_started_for_beginners.md +++ b/tensorflow/docs_src/get_started/get_started_for_beginners.md @@ -333,7 +333,7 @@ interpret data is such a rich topic that we devote an entire From a code perspective, you build a list of `feature_column` objects by calling functions from the @{tf.feature_column} module. Each object describes an input to the model. To tell the model to interpret data as a floating-point value, -call @{tf.feature_column.numeric_column). In `premade_estimator.py`, all +call @{tf.feature_column.numeric_column}. 
In `premade_estimator.py`, all four features should be interpreted as literal floating-point values, so the code to create a feature column looks as follows: diff --git a/tensorflow/docs_src/install/install_c.md b/tensorflow/docs_src/install/install_c.md index 1a151ec75822a4..6957969e906bf6 100644 --- a/tensorflow/docs_src/install/install_c.md +++ b/tensorflow/docs_src/install/install_c.md @@ -38,7 +38,7 @@ enable TensorFlow for C: OS="linux" # Change to "darwin" for macOS TARGET_DIRECTORY="/usr/local" curl -L \ - "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-${TF_TYPE}-${OS}-x86_64-1.6.0.tar.gz" | + "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-${TF_TYPE}-${OS}-x86_64-1.6.1.tar.gz" | sudo tar -C $TARGET_DIRECTORY -xz The `tar` command extracts the TensorFlow C library into the `lib` diff --git a/tensorflow/docs_src/install/install_go.md b/tensorflow/docs_src/install/install_go.md index bc874c034dc492..1ea6604e1e8c56 100644 --- a/tensorflow/docs_src/install/install_go.md +++ b/tensorflow/docs_src/install/install_go.md @@ -38,7 +38,7 @@ steps to install this library and enable TensorFlow for Go: TF_TYPE="cpu" # Change to "gpu" for GPU support TARGET_DIRECTORY='/usr/local' curl -L \ - "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-${TF_TYPE}-$(go env GOOS)-x86_64-1.6.0.tar.gz" | + "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-${TF_TYPE}-$(go env GOOS)-x86_64-1.6.1.tar.gz" | sudo tar -C $TARGET_DIRECTORY -xz The `tar` command extracts the TensorFlow C library into the `lib` diff --git a/tensorflow/docs_src/install/install_java.md b/tensorflow/docs_src/install/install_java.md index 313de2049a3131..69b56ed7108377 100644 --- a/tensorflow/docs_src/install/install_java.md +++ b/tensorflow/docs_src/install/install_java.md @@ -36,7 +36,7 @@ following to the project's `pom.xml` to use the TensorFlow Java APIs: org.tensorflow tensorflow - 1.6.0 + 1.6.1 ``` @@ -65,7 +65,7 @@ As an example, these steps will create a Maven project that uses TensorFlow: org.tensorflow tensorflow - 1.6.0 + 1.6.1 @@ -123,12 +123,12 @@ instead: org.tensorflow libtensorflow - 1.6.0 + 1.6.1 org.tensorflow libtensorflow_jni_gpu - 1.6.0 + 1.6.1 ``` @@ -147,7 +147,7 @@ refer to the simpler instructions above instead. Take the following steps to install TensorFlow for Java on Linux or macOS: 1. Download - [libtensorflow.jar](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-1.6.0.jar), + [libtensorflow.jar](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-1.6.1.jar), which is the TensorFlow Java Archive (JAR). 2. Decide whether you will run TensorFlow for Java on CPU(s) only or with @@ -166,7 +166,7 @@ Take the following steps to install TensorFlow for Java on Linux or macOS: OS=$(uname -s | tr '[:upper:]' '[:lower:]') mkdir -p ./jni curl -L \ - "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow_jni-${TF_TYPE}-${OS}-x86_64-1.6.0.tar.gz" | + "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow_jni-${TF_TYPE}-${OS}-x86_64-1.6.1.tar.gz" | tar -xz -C ./jni ### Install on Windows @@ -174,10 +174,10 @@ Take the following steps to install TensorFlow for Java on Linux or macOS: Take the following steps to install TensorFlow for Java on Windows: 1. 
Download - [libtensorflow.jar](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-1.6.0.jar), + [libtensorflow.jar](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-1.6.1.jar), which is the TensorFlow Java Archive (JAR). 2. Download the following Java Native Interface (JNI) file appropriate for - [TensorFlow for Java on Windows](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow_jni-cpu-windows-x86_64-1.6.0.zip). + [TensorFlow for Java on Windows](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow_jni-cpu-windows-x86_64-1.6.1.zip). 3. Extract this .zip file. @@ -225,7 +225,7 @@ must be part of your `classpath`. For example, you can include the downloaded `.jar` in your `classpath` by using the `-cp` compilation flag as follows: -
javac -cp libtensorflow-1.6.0.jar HelloTF.java
+javac -cp libtensorflow-1.6.1.jar HelloTF.java
### Running @@ -239,11 +239,11 @@ two files are available to the JVM: For example, the following command line executes the `HelloTF` program on Linux and macOS X: -
java -cp libtensorflow-1.6.0.jar:. -Djava.library.path=./jni HelloTF
+java -cp libtensorflow-1.6.1.jar:. -Djava.library.path=./jni HelloTF
And the following command line executes the `HelloTF` program on Windows: -
java -cp libtensorflow-1.6.0.jar;. -Djava.library.path=jni HelloTF
+java -cp libtensorflow-1.6.1.jar;. -Djava.library.path=jni HelloTF
If the program prints Hello from version, you've successfully installed TensorFlow for Java and are ready to use the API. If the program diff --git a/tensorflow/docs_src/install/install_linux.md b/tensorflow/docs_src/install/install_linux.md index 5382c9db3164b8..4659b702b205a5 100644 --- a/tensorflow/docs_src/install/install_linux.md +++ b/tensorflow/docs_src/install/install_linux.md @@ -41,7 +41,8 @@ must be installed on your system: [NVIDIA's documentation](https://developer.nvidia.com/cudnn). Ensure that you create the `CUDA_HOME` environment variable as described in the NVIDIA documentation. - * GPU card with CUDA Compute Capability 3.0 or higher. See + * GPU card with CUDA Compute Capability 3.0 or higher for building + from source and 3.5 or higher for our binaries. See [NVIDIA documentation](https://developer.nvidia.com/cuda-gpus) for a list of supported GPU cards. * The libcupti-dev library, which is the NVIDIA CUDA Profile Tools Interface. @@ -188,7 +189,7 @@ Take the following steps to install TensorFlow with Virtualenv: Virtualenv environment:
(tensorflow)$ pip3 install --upgrade \
-     https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.6.0-cp34-cp34m-linux_x86_64.whl
+ https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.6.1-cp34-cp34m-linux_x86_64.whl If you encounter installation problems, see [Common Installation Problems](#common_installation_problems). @@ -293,7 +294,7 @@ take the following steps:
      $ sudo pip3 install --upgrade \
-     https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.6.0-cp34-cp34m-linux_x86_64.whl
+     https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.6.1-cp34-cp34m-linux_x86_64.whl
      
If this step fails, see @@ -480,7 +481,7 @@ Take the following steps to install TensorFlow in an Anaconda environment:
      (tensorflow)$ pip install --ignore-installed --upgrade \
-     https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.6.0-cp34-cp34m-linux_x86_64.whl
+ https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.6.1-cp34-cp34m-linux_x86_64.whl @@ -648,14 +649,14 @@ This section documents the relevant values for Linux installations. CPU only:
-https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.6.0-cp27-none-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.6.1-cp27-none-linux_x86_64.whl
 
GPU support:
-https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.6.0-cp27-none-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.6.1-cp27-none-linux_x86_64.whl
 
Note that GPU support requires the NVIDIA hardware and software described in @@ -667,14 +668,14 @@ Note that GPU support requires the NVIDIA hardware and software described in CPU only:
-https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.6.0-cp34-cp34m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.6.1-cp34-cp34m-linux_x86_64.whl
 
GPU support:
-https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.6.0-cp34-cp34m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.6.1-cp34-cp34m-linux_x86_64.whl
 
Note that GPU support requires the NVIDIA hardware and software described in @@ -686,14 +687,14 @@ Note that GPU support requires the NVIDIA hardware and software described in CPU only:
-https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.6.0-cp35-cp35m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.6.1-cp35-cp35m-linux_x86_64.whl
 
GPU support:
-https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.6.0-cp35-cp35m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.6.1-cp35-cp35m-linux_x86_64.whl
 
@@ -705,14 +706,14 @@ Note that GPU support requires the NVIDIA hardware and software described in CPU only:
-https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.6.0-cp36-cp36m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.6.1-cp36-cp36m-linux_x86_64.whl
 
GPU support:
-https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.6.0-cp36-cp36m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.6.1-cp36-cp36m-linux_x86_64.whl
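Note on the CUDA Compute Capability wording above (3.0 or higher to build from source, 3.5 or higher for the prebuilt binaries): the snippet below is an illustrative sketch, not part of this patch. It assumes a GPU-enabled TensorFlow 1.x installation and uses `tensorflow.python.client.device_lib` to print the compute capability each local GPU reports.

```python
# Illustrative sketch (not part of this patch): list local devices and print
# the compute capability string TensorFlow reports for each GPU. Per the note
# above, the prebuilt 1.6.1 binaries expect compute capability 3.5 or higher.
from tensorflow.python.client import device_lib

for device in device_lib.list_local_devices():
    if device.device_type == "GPU":
        # physical_device_desc looks like:
        # "device: 0, name: <gpu name>, pci bus id: ..., compute capability: 6.1"
        print(device.name, "->", device.physical_device_desc)
```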
 
diff --git a/tensorflow/docs_src/install/install_mac.md b/tensorflow/docs_src/install/install_mac.md index 62f896375faa43..3e6220969dd4f1 100644 --- a/tensorflow/docs_src/install/install_mac.md +++ b/tensorflow/docs_src/install/install_mac.md @@ -115,7 +115,7 @@ Take the following steps to install TensorFlow with Virtualenv: TensorFlow in the active Virtualenv is as follows:
 $ pip3 install --upgrade \
-     https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.6.0-py3-none-any.whl
+ https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.6.1-py3-none-any.whl If you encounter installation problems, see [Common Installation Problems](#common-installation-problems). @@ -238,7 +238,7 @@ take the following steps: issue the following command:
 $ sudo pip3 install --upgrade \
-     https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.6.0-py3-none-any.whl 
+ https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.6.1-py3-none-any.whl If the preceding command fails, see [installation problems](#common-installation-problems). @@ -347,7 +347,7 @@ Take the following steps to install TensorFlow in an Anaconda environment: TensorFlow for Python 2.7:
 (targetDirectory)$ pip install --ignore-installed --upgrade \
-     https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.6.0-py2-none-any.whl
+ https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.6.1-py2-none-any.whl @@ -520,7 +520,7 @@ This section documents the relevant values for Mac OS installations.
-https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.6.0-py2-none-any.whl
+https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.6.1-py2-none-any.whl
 
@@ -528,5 +528,5 @@ https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.6.0-py2-none-any.
-https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.6.0-py3-none-any.whl
+https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.6.1-py3-none-any.whl
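Note on the wheel URLs above: every link moves from 1.6.0 to 1.6.1, matching the `TF_PATCH_VERSION` bump in `tensorflow/core/public/version.h`. The sketch below is illustrative only, not part of this patch; it is a quick way to confirm which build is actually imported after installing one of these wheels.

```python
# Illustrative sketch (not part of this patch): confirm the patched wheel is
# the one actually being imported.
import tensorflow as tf

print(tf.VERSION)      # expected to print 1.6.1 once the new wheel is active
print(tf.GIT_VERSION)  # the git label the wheel was built from
```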
 
diff --git a/tensorflow/docs_src/install/install_sources.md b/tensorflow/docs_src/install/install_sources.md index 638a64cc15f1e3..d2d04e97cee885 100644 --- a/tensorflow/docs_src/install/install_sources.md +++ b/tensorflow/docs_src/install/install_sources.md @@ -359,10 +359,10 @@ Invoke `pip install` to install that pip package. The filename of the `.whl` file depends on your platform. For example, the following command will install the pip package -for TensorFlow 1.6.0 on Linux: +for TensorFlow 1.6.1 on Linux:
-$ sudo pip install /tmp/tensorflow_pkg/tensorflow-1.6.0-py2-none-any.whl
+$ sudo pip install /tmp/tensorflow_pkg/tensorflow-1.6.1-py2-none-any.whl
 
## Validate your installation diff --git a/tensorflow/docs_src/install/install_windows.md b/tensorflow/docs_src/install/install_windows.md index 87e1a715aa0e80..a837c7dac40dd0 100644 --- a/tensorflow/docs_src/install/install_windows.md +++ b/tensorflow/docs_src/install/install_windows.md @@ -17,7 +17,7 @@ You must choose one of the following types of TensorFlow to install: NVIDIA® GPU, you must install this version. Note that this version of TensorFlow is typically much easier to install (typically, in 5 or 10 minutes), so even if you have an NVIDIA GPU, we recommend - installing this version first. + installing this version first. Prebuilt binaries will use AVX instructions. * **TensorFlow with GPU support**. TensorFlow programs typically run significantly faster on a GPU than on a CPU. Therefore, if your system has a NVIDIA® GPU meeting the prerequisites shown below @@ -41,7 +41,8 @@ installed on your system: Note that cuDNN is typically installed in a different location from the other CUDA DLLs. Ensure that you add the directory where you installed the cuDNN DLL to your `%PATH%` environment variable. - * GPU card with CUDA Compute Capability 3.0 or higher. See + * GPU card with CUDA Compute Capability 3.0 or higher for building + from source and 3.5 or higher for our binaries. See [NVIDIA documentation](https://developer.nvidia.com/cuda-gpus) for a list of supported GPU cards. diff --git a/tensorflow/python/framework/tensor_util.py b/tensorflow/python/framework/tensor_util.py index 0e5f696111ae7f..cbba112841c0fb 100644 --- a/tensorflow/python/framework/tensor_util.py +++ b/tensorflow/python/framework/tensor_util.py @@ -557,7 +557,7 @@ def MakeNdarray(tensor): dtype = tensor_dtype.as_numpy_dtype if tensor.tensor_content: - return np.fromstring(tensor.tensor_content, dtype=dtype).reshape(shape) + return np.frombuffer(tensor.tensor_content, dtype=dtype).reshape(shape) elif tensor_dtype == dtypes.float16: # the half_val field of the TensorProto stores the binary representation # of the fp16: we need to reinterpret this as a proper float16 diff --git a/tensorflow/tools/ci_build/install/install_pip_packages.sh b/tensorflow/tools/ci_build/install/install_pip_packages.sh index 71744c04f2f432..0c4cde5b0f2d5d 100755 --- a/tensorflow/tools/ci_build/install/install_pip_packages.sh +++ b/tensorflow/tools/ci_build/install/install_pip_packages.sh @@ -21,6 +21,9 @@ set -e easy_install -U pip easy_install3 -U pip +pip2 install --upgrade setuptools==39.1.0 +pip3 install --upgrade setuptools==39.1.0 + # Install pip packages from whl files to avoid the time-consuming process of # building from source. diff --git a/tensorflow/tools/ci_build/install/install_python3.5_pip_packages.sh b/tensorflow/tools/ci_build/install/install_python3.5_pip_packages.sh index aefc49f6048214..0844c489806fe9 100755 --- a/tensorflow/tools/ci_build/install/install_python3.5_pip_packages.sh +++ b/tensorflow/tools/ci_build/install/install_python3.5_pip_packages.sh @@ -39,6 +39,9 @@ if [[ -z $pip35_version ]]; then fi set -e +pip3.5 install --upgrade setuptools==39.1.0 +pip3.5 install --upgrade pip + pip3.5 install --upgrade virtualenv # Install six. 
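Note on the `MakeNdarray` change in `tensorflow/python/framework/tensor_util.py` above: `np.fromstring` on binary input is deprecated in recent NumPy releases and always copies, while `np.frombuffer` reinterprets the existing buffer and returns a read-only view. The sketch below is illustrative only and not part of this patch; the sample values are made up for the example.

```python
# Illustrative sketch (not part of this patch): np.frombuffer decodes the same
# bytes as np.fromstring but without copying; the result is a read-only view,
# so callers that need to mutate it must copy explicitly.
import numpy as np

raw = np.arange(6, dtype=np.float32).tobytes()  # stand-in for tensor_content

view = np.frombuffer(raw, dtype=np.float32).reshape(2, 3)
copy = np.frombuffer(raw, dtype=np.float32).reshape(2, 3).copy()

print(view)                                        # [[0. 1. 2.] [3. 4. 5.]]
print(view.flags.writeable, copy.flags.writeable)  # False True
```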
diff --git a/tensorflow/tools/ci_build/install/install_python3.6_pip_packages.sh b/tensorflow/tools/ci_build/install/install_python3.6_pip_packages.sh index bfaa044c82887b..387fbde0a60ecd 100755 --- a/tensorflow/tools/ci_build/install/install_python3.6_pip_packages.sh +++ b/tensorflow/tools/ci_build/install/install_python3.6_pip_packages.sh @@ -49,6 +49,9 @@ cd Python-3.6.1 make altinstall ln -s /usr/local/bin/pip3.6 /usr/local/bin/pip3 +pip3 install --upgrade setuptools==39.1.0 +pip3 install --upgrade pip + pip3 install --upgrade virtualenv set -e diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py index 8510a4260ea95e..2a0d0456108c7b 100644 --- a/tensorflow/tools/pip_package/setup.py +++ b/tensorflow/tools/pip_package/setup.py @@ -29,7 +29,7 @@ # This version string is semver compatible, but incompatible with pip. # For pip, we will remove all '-' characters from this string, and use the # result for pip. -_VERSION = '1.6.0' +_VERSION = '1.6.1' REQUIRED_PACKAGES = [ 'absl-py >= 0.1.6', diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl index 70cb65f3e7716b..8eae1706bd7cf3 100644 --- a/tensorflow/workspace.bzl +++ b/tensorflow/workspace.bzl @@ -233,11 +233,11 @@ def tf_workspace(path_prefix="", tf_repo_name=""): tf_http_archive( name = "png_archive", urls = [ - "https://mirror.bazel.build/github.com/glennrp/libpng/archive/v1.2.53.tar.gz", - "https://github.com/glennrp/libpng/archive/v1.2.53.tar.gz", + "https://mirror.bazel.build/github.com/glennrp/libpng/archive/v1.6.34.tar.gz", + "https://github.com/glennrp/libpng/archive/v1.6.34.tar.gz", ], - sha256 = "716c59c7dfc808a4c368f8ada526932be72b2fcea11dd85dc9d88b1df1dfe9c2", - strip_prefix = "libpng-1.2.53", + sha256 = "e45ce5f68b1d80e2cb9a2b601605b374bdf51e1798ef1c2c2bd62131dfcf9eef", + strip_prefix = "libpng-1.6.34", build_file = str(Label("//third_party:png.BUILD")), ) @@ -516,11 +516,11 @@ def tf_workspace(path_prefix="", tf_repo_name=""): tf_http_archive( name = "zlib_archive", urls = [ - "https://mirror.bazel.build/zlib.net/zlib-1.2.8.tar.gz", - "http://zlib.net/fossils/zlib-1.2.8.tar.gz", + "https://mirror.bazel.build/zlib.net/zlib-1.2.11.tar.gz", + "https://zlib.net/zlib-1.2.11.tar.gz", ], - sha256 = "36658cb768a54c1d4dec43c3116c27ed893e88b02ecfcb44f2166f9c0b7f2a0d", - strip_prefix = "zlib-1.2.8", + sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1", + strip_prefix = "zlib-1.2.11", build_file = str(Label("//third_party:zlib.BUILD")), ) @@ -537,11 +537,11 @@ def tf_workspace(path_prefix="", tf_repo_name=""): tf_http_archive( name = "snappy", urls = [ - "https://mirror.bazel.build/github.com/google/snappy/archive/1.1.4.tar.gz", - "https://github.com/google/snappy/archive/1.1.4.tar.gz", + "https://mirror.bazel.build/github.com/google/snappy/archive/1.1.7.tar.gz", + "https://github.com/google/snappy/archive/1.1.7.tar.gz", ], - sha256 = "2f7504c73d85bac842e893340333be8cb8561710642fc9562fccdd9d2c3fcc94", - strip_prefix = "snappy-1.1.4", + sha256 = "3dfa02e873ff51a11ee02b9ca391807f0c8ea0529a4924afa645fbf97163f9d4", + strip_prefix = "snappy-1.1.7", build_file = str(Label("//third_party:snappy.BUILD")), ) @@ -687,11 +687,11 @@ def tf_workspace(path_prefix="", tf_repo_name=""): tf_http_archive( name = "bazel_toolchains", urls = [ - "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/f3b09700fae5d7b6e659d7cefe0dcc6e8498504c.tar.gz", - "https://github.com/bazelbuild/bazel-toolchains/archive/f3b09700fae5d7b6e659d7cefe0dcc6e8498504c.tar.gz", + 
"https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/44200e0c026d86c53470d107b3697a3e46469c43.tar.gz", + "https://github.com/bazelbuild/bazel-toolchains/archive/44200e0c026d86c53470d107b3697a3e46469c43.tar.gz", ], - sha256 = "ed829b5eea8af1f405f4cc3d6ecfc3b1365bb7843171036030a31b5127002311", - strip_prefix = "bazel-toolchains-f3b09700fae5d7b6e659d7cefe0dcc6e8498504c", + strip_prefix = "bazel-toolchains-44200e0c026d86c53470d107b3697a3e46469c43", + sha256 = "699b55a6916c687f4b7dc092dbbf5f64672cde0dc965f79717735ec4e5416556", ) tf_http_archive( diff --git a/third_party/jpeg/jpeg.BUILD b/third_party/jpeg/jpeg.BUILD index ca2d38d6878cee..931bdc5991f42e 100644 --- a/third_party/jpeg/jpeg.BUILD +++ b/third_party/jpeg/jpeg.BUILD @@ -526,12 +526,12 @@ config_setting( config_setting( name = "armeabi-v7a", - values = {"android_cpu": "armeabi-v7a"}, + values = {"cpu": "armeabi-v7a"}, ) config_setting( name = "arm64-v8a", - values = {"android_cpu": "arm64-v8a"}, + values = {"cpu": "arm64-v8a"}, ) config_setting( diff --git a/third_party/png.BUILD b/third_party/png.BUILD index 6a7ad719aa7d02..76ab32d69c3505 100644 --- a/third_party/png.BUILD +++ b/third_party/png.BUILD @@ -9,15 +9,20 @@ cc_library( name = "png", srcs = [ "png.c", + "pngdebug.h", "pngerror.c", "pngget.c", + "pnginfo.h", + "pnglibconf.h", "pngmem.c", "pngpread.c", + "pngpriv.h", "pngread.c", "pngrio.c", "pngrtran.c", "pngrutil.c", "pngset.c", + "pngstruct.h", "pngtrans.c", "pngwio.c", "pngwrite.c", @@ -33,3 +38,10 @@ cc_library( visibility = ["//visibility:public"], deps = ["@zlib_archive//:zlib"], ) + +genrule( + name = "snappy_stubs_public_h", + srcs = ["scripts/pnglibconf.h.prebuilt"], + outs = ["pnglibconf.h"], + cmd = "sed -e 's/PNG_ZLIB_VERNUM 0/PNG_ZLIB_VERNUM 0x12b0/' $< >$@", +) diff --git a/third_party/snappy.BUILD b/third_party/snappy.BUILD index fd48ed8941e159..cc11f52d0eb3e0 100644 --- a/third_party/snappy.BUILD +++ b/third_party/snappy.BUILD @@ -4,25 +4,12 @@ licenses(["notice"]) # BSD 3-Clause exports_files(["COPYING"]) -config_setting( - name = "windows", - values = {"cpu": "x64_windows"}, - visibility = ["//visibility:public"], -) - -config_setting( - name = "windows_msvc", - values = {"cpu": "x64_windows_msvc"}, - visibility = ["//visibility:public"], -) - cc_library( name = "snappy", srcs = [ + "config.h", "snappy.cc", "snappy.h", - "snappy-c.cc", - "snappy-c.h", "snappy-internal.h", "snappy-sinksource.cc", "snappy-sinksource.h", @@ -32,30 +19,85 @@ cc_library( ], hdrs = ["snappy.h"], copts = select({ - ":windows": [], - ":windows_msvc": [], + "@org_tensorflow//tensorflow:windows": [ + "/DHAVE_CONFIG_H", + "/EHsc", + ], + "@org_tensorflow//tensorflow:windows_msvc": [ + "/DHAVE_CONFIG_H", + "/EHsc", + ], "//conditions:default": [ + "-DHAVE_CONFIG_H", + "-fno-exceptions", + "-Wno-sign-compare", "-Wno-shift-negative-value", "-Wno-implicit-function-declaration", ], }), ) +genrule( + name = "config_h", + outs = ["config.h"], + cmd = "\n".join([ + "cat <<'EOF' >$@", + "#define HAVE_STDDEF_H 1", + "#define HAVE_STDINT_H 1", + "", + "#ifdef __has_builtin", + "# if !defined(HAVE_BUILTIN_EXPECT) && __has_builtin(__builtin_expect)", + "# define HAVE_BUILTIN_EXPECT 1", + "# endif", + "# if !defined(HAVE_BUILTIN_CTZ) && __has_builtin(__builtin_ctzll)", + "# define HAVE_BUILTIN_CTZ 1", + "# endif", + "#elif defined(__GNUC__) && (__GNUC__ > 3 || __GNUC__ == 3 && __GNUC_MINOR__ >= 4)", + "# ifndef HAVE_BUILTIN_EXPECT", + "# define HAVE_BUILTIN_EXPECT 1", + "# endif", + "# ifndef HAVE_BUILTIN_CTZ", + "# define 
HAVE_BUILTIN_CTZ 1", + "# endif", + "#endif", + "", + "#ifdef __has_include", + "# if !defined(HAVE_BYTESWAP_H) && __has_include()", + "# define HAVE_BYTESWAP_H 1", + "# endif", + "# if !defined(HAVE_UNISTD_H) && __has_include()", + "# define HAVE_UNISTD_H 1", + "# endif", + "# if !defined(HAVE_SYS_ENDIAN_H) && __has_include()", + "# define HAVE_SYS_ENDIAN_H 1", + "# endif", + "# if !defined(HAVE_SYS_MMAN_H) && __has_include()", + "# define HAVE_SYS_MMAN_H 1", + "# endif", + "# if !defined(HAVE_SYS_UIO_H) && __has_include()", + "# define HAVE_SYS_UIO_H 1", + "# endif", + "#endif", + "", + "#ifndef SNAPPY_IS_BIG_ENDIAN", + "# ifdef __s390x__", + "# define SNAPPY_IS_BIG_ENDIAN 1", + "# elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__", + "# define SNAPPY_IS_BIG_ENDIAN 1", + "# endif", + "#endif", + "EOF", + ]), +) + genrule( name = "snappy_stubs_public_h", srcs = ["snappy-stubs-public.h.in"], outs = ["snappy-stubs-public.h"], cmd = ("sed " + - "-e 's/@ac_cv_have_stdint_h@/1/g' " + - "-e 's/@ac_cv_have_stddef_h@/1/g' " + - "-e 's/@ac_cv_have_stdint_h@/1/g' " + - select({ - "@org_tensorflow//tensorflow:windows": "-e 's/@ac_cv_have_sys_uio_h@/0/g' ", - "@org_tensorflow//tensorflow:windows_msvc": "-e 's/@ac_cv_have_sys_uio_h@/0/g' ", - "//conditions:default": "-e 's/@ac_cv_have_sys_uio_h@/1/g' ", - }) + - "-e 's/@SNAPPY_MAJOR@/1/g' " + - "-e 's/@SNAPPY_MINOR@/1/g' " + - "-e 's/@SNAPPY_PATCHLEVEL@/4/g' " + + "-e 's/$${\\(.*\\)_01}/\\1/g' " + + "-e 's/$${SNAPPY_MAJOR}/1/g' " + + "-e 's/$${SNAPPY_MINOR}/1/g' " + + "-e 's/$${SNAPPY_PATCHLEVEL}/4/g' " + "$< >$@"), ) diff --git a/third_party/zlib.BUILD b/third_party/zlib.BUILD index d164ee719c1fa4..e8048dd98adcca 100644 --- a/third_party/zlib.BUILD +++ b/third_party/zlib.BUILD @@ -2,18 +2,6 @@ package(default_visibility = ["//visibility:public"]) licenses(["notice"]) # BSD/MIT-like license (for zlib) -config_setting( - name = "windows", - values = {"cpu": "x64_windows"}, - visibility = ["//visibility:public"], -) - -config_setting( - name = "windows_msvc", - values = {"cpu": "x64_windows_msvc"}, - visibility = ["//visibility:public"], -) - cc_library( name = "zlib", srcs = [ @@ -45,8 +33,8 @@ cc_library( ], hdrs = ["zlib.h"], copts = select({ - ":windows": [], - ":windows_msvc": [], + "@org_tensorflow//tensorflow:windows": [], + "@org_tensorflow//tensorflow:windows_msvc": [], "//conditions:default": [ "-Wno-shift-negative-value", "-DZ_HAVE_UNISTD_H",