#!/usr/bin/env bash
set -e
set -o pipefail
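# Each interactive prompt below first checks whether its variable is already set
# in the environment, so answers can be supplied ahead of time. For example
# (values are illustrative only, not a recommended configuration):
#   PYTHON_BIN_PATH=/usr/bin/python USE_DEFAULT_PYTHON_LIB_PATH=1 TF_NEED_MKL=0 \
#   TF_NEED_JEMALLOC=1 TF_NEED_GCP=0 TF_NEED_HDFS=0 TF_ENABLE_XLA=0 \
#   TF_NEED_VERBS=0 TF_NEED_OPENCL=0 TF_NEED_CUDA=0 CC_OPT_FLAGS="-march=native" ./configure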
# Find out the absolute path to where ./configure resides
pushd `dirname $0` > /dev/null
SOURCE_BASE_DIR=`pwd -P`
popd > /dev/null
PLATFORM="$(uname -s | tr 'A-Z' 'a-z')"
function is_linux() {
if [[ "${PLATFORM}" == "linux" ]]; then
true
else
false
fi
}
function is_macos() {
if [[ "${PLATFORM}" == "darwin" ]]; then
true
else
false
fi
}
function is_windows() {
# On Windows, the shell script actually runs under MSYS
if [[ "${PLATFORM}" =~ msys_nt*|mingw*|cygwin*|uwin* ]]; then
true
else
false
fi
}
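# Portable in-place sed: write the edited output to a temporary "$2.bak" file and
# move it back over the original, avoiding the GNU/BSD differences of "sed -i".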
function sed_in_place() {
sed -e "$1" "$2" > "$2.bak"
mv "$2.bak" "$2"
}
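# Append a line to the generated .tf_configure.bazelrc.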
function write_to_bazelrc() {
echo "$1" >> .tf_configure.bazelrc
}
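# Record NAME=VALUE as a "build --action_env" entry in .tf_configure.bazelrc.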
function write_action_env_to_bazelrc() {
write_to_bazelrc "build --action_env $1=\"$2\""
}
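# Ask the selected Python interpreter for its library directories (PYTHONPATH
# entries plus site-packages) and print the existing ones as a comma-separated list.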
function python_path {
"$PYTHON_BIN_PATH" - <<END
from __future__ import print_function
import site
import os
try:
input = raw_input
except NameError:
pass
python_paths = []
if os.getenv('PYTHONPATH') is not None:
python_paths = os.getenv('PYTHONPATH').split(':')
try:
library_paths = site.getsitepackages()
except AttributeError:
from distutils.sysconfig import get_python_lib
library_paths = [get_python_lib()]
all_paths = set(python_paths + library_paths)
paths = []
for path in all_paths:
if os.path.isdir(path):
paths.append(path)
print(",".join(paths))
END
}
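# Prompt for (or validate) PYTHON_BIN_PATH and PYTHON_LIB_PATH, then persist the
# Python settings to .tf_configure.bazelrc and tools/python_bin_path.sh.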
function setup_python {
## Set up python-related environment settings:
while true; do
fromuser=""
if [ -z "$PYTHON_BIN_PATH" ]; then
default_python_bin_path=$(which python || which python3 || true)
read -p "Please specify the location of python. [Default is $default_python_bin_path]: " PYTHON_BIN_PATH
fromuser="1"
if [ -z "$PYTHON_BIN_PATH" ]; then
PYTHON_BIN_PATH=$default_python_bin_path
fi
fi
if [ -e "$PYTHON_BIN_PATH" ]; then
break
fi
echo "Invalid python path. ${PYTHON_BIN_PATH} cannot be found" 1>&2
if [ -z "$fromuser" ]; then
exit 1
fi
PYTHON_BIN_PATH=""
# Retry
done
if [ -z "$PYTHON_LIB_PATH" ]; then
# Split python_path into an array of paths; this allows paths containing spaces
IFS=','
python_lib_path=($(python_path))
unset IFS
if [ 1 = "$USE_DEFAULT_PYTHON_LIB_PATH" ]; then
PYTHON_LIB_PATH=${python_lib_path[0]}
echo "Using python library path: $PYTHON_LIB_PATH"
else
echo "Found possible Python library paths:"
for x in "${python_lib_path[@]}"; do
echo " $x"
done
set -- "${python_lib_path[@]}"
echo "Please input the desired Python library path to use. Default is ["$1"]"
read b || true
if [ "$b" == "" ]; then
PYTHON_LIB_PATH=${python_lib_path[0]}
echo "Using python library path: $PYTHON_LIB_PATH"
else
PYTHON_LIB_PATH="$b"
fi
fi
fi
if [ ! -x "$PYTHON_BIN_PATH" ] || [ -d "$PYTHON_BIN_PATH" ]; then
echo "PYTHON_BIN_PATH is not executable. Is it the python binary?"
exit 1
fi
local python_major_version=$("${PYTHON_BIN_PATH}" -c 'from __future__ import print_function; import sys; print(sys.version_info[0]);')
if [ "$python_major_version" == "" ]; then
echo -e "\n\nERROR: Problem getting python version. Is $PYTHON_BIN_PATH the correct python binary?"
exit 1
fi
# Convert the python path to Windows style before writing it into the bazelrc
if is_windows; then
PYTHON_BIN_PATH="$(cygpath -m "$PYTHON_BIN_PATH")"
fi
# Set-up env variables used by python_configure.bzl
write_action_env_to_bazelrc "PYTHON_BIN_PATH" "$PYTHON_BIN_PATH"
write_action_env_to_bazelrc "PYTHON_LIB_PATH" "$PYTHON_LIB_PATH"
write_to_bazelrc "build --define PYTHON_BIN_PATH=\"$PYTHON_BIN_PATH\""
write_to_bazelrc "build --define PYTHON_LIB_PATH=\"$PYTHON_LIB_PATH\""
write_to_bazelrc "build --force_python=py$python_major_version"
write_to_bazelrc "build --host_force_python=py$python_major_version"
write_to_bazelrc "build --python${python_major_version}_path=\"$PYTHON_BIN_PATH\""
write_to_bazelrc "test --force_python=py$python_major_version"
write_to_bazelrc "test --host_force_python=py$python_major_version"
write_to_bazelrc "test --define PYTHON_BIN_PATH=\"$PYTHON_BIN_PATH\""
write_to_bazelrc "test --define PYTHON_LIB_PATH=\"$PYTHON_LIB_PATH\""
write_to_bazelrc "run --define PYTHON_BIN_PATH=\"$PYTHON_BIN_PATH\""
write_to_bazelrc "run --define PYTHON_LIB_PATH=\"$PYTHON_LIB_PATH\""
# Write tools/python_bin_path.sh
echo "export PYTHON_BIN_PATH=\"$PYTHON_BIN_PATH\"" > tools/python_bin_path.sh
}
# Regenerate .tf_configure.bazelrc, which contains the customized config settings.
rm -f .tf_configure.bazelrc
touch .tf_configure.bazelrc
touch .bazelrc
sed_in_place "/tf_configure/d" .bazelrc
echo "import %workspace%/.tf_configure.bazelrc" >> .bazelrc
# Delete any leftover BUILD files from the Makefile build, which would interfere
# with Bazel parsing.
MAKEFILE_DOWNLOAD_DIR=tensorflow/contrib/makefile/downloads
if [ -d "${MAKEFILE_DOWNLOAD_DIR}" ]; then
find ${MAKEFILE_DOWNLOAD_DIR} -type f -name '*BUILD' -delete
fi
setup_python
## Set up MKL related environment settings
while [ "$TF_NEED_MKL" == "" ]; do
fromuser=""
read -p "Do you wish to build TensorFlow with MKL support? [y/N] " INPUT
fromuser="1"
case $INPUT in
[Yy]* ) echo "MKL support will be enabled for TensorFlow"; TF_NEED_MKL=1;;
[Nn]* ) echo "No MKL support will be enabled for TensorFlow"; TF_NEED_MKL=0;;
"" ) echo "No MKL support will be enabled for TensorFlow"; TF_NEED_MKL=0;;
* ) echo "Invalid selection: " $INPUT;;
esac
done
OSNAME=`uname -s`
if [ "$TF_NEED_MKL" == "1" ]; then # TF_NEED_MKL
while [ "$TF_DOWNLOAD_MKL" == "" ]; do
fromuser=""
read -p "Do you wish to download MKL LIB from the web? [Y/n] " INPUT
fromuser="1"
case $INPUT in
[Yy]* ) TF_DOWNLOAD_MKL=1;;
[Nn]* ) TF_DOWNLOAD_MKL=0;;
"" ) TF_DOWNLOAD_MKL=1;;
* ) echo "Invalid selection: " $INPUT; exit 1;;
esac
done
if [[ "$TF_DOWNLOAD_MKL" == "1" ]]; then
DST=`dirname $0`
ARCHIVE_BASENAME=mklml_lnx_2018.0.20170425.tgz
GITHUB_RELEASE_TAG=v0.7
MKLURL="https://github.com/01org/mkl-dnn/releases/download/$GITHUB_RELEASE_TAG/$ARCHIVE_BASENAME"
if ! [ -e "${DST}/third_party/mkl/${ARCHIVE_BASENAME}" ]; then
curl -fSsL -o "${DST}/third_party/mkl/${ARCHIVE_BASENAME}" "${MKLURL}"
fi
tar -xzf $DST/third_party/mkl/$ARCHIVE_BASENAME -C $DST/third_party/mkl/
extracted_dir_name="${ARCHIVE_BASENAME%.*}"
MKL_INSTALL_PATH=$DST/third_party/mkl/$extracted_dir_name
MKL_INSTALL_PATH=`${PYTHON_BIN_PATH} -c "import os; print(os.path.realpath(os.path.expanduser('${MKL_INSTALL_PATH}')))"`
else
default_mkl_path=/opt/intel/mklml
fromuser=""
read -p "Please specify the location where MKL is installed. [Default is $default_mkl_path]: " MKL_INSTALL_PATH
fromuser="1"
if [ -z "$MKL_INSTALL_PATH" ]; then
MKL_INSTALL_PATH=$default_mkl_path
fi
# The result returned from "read" is used unexpanded, which makes "~" unusable.
# Go through one more level of expansion to handle that.
MKL_INSTALL_PATH=`${PYTHON_BIN_PATH} -c "import os; print(os.path.realpath(os.path.expanduser('${MKL_INSTALL_PATH}')))"`
fi
if [ "$OSNAME" == "Linux" ]; then
# Full MKL configuration
MKL_RT_LIB_PATH="lib/intel64/libmkl_rt.so" #${TF_MKL_EXT}#TODO version?
MKL_RT_OMP_LIB_PATH="../compiler/lib/intel64/libiomp5.so" #TODO VERSION?
# MKL-ML configuration
MKL_ML_LIB_PATH="lib/libmklml_intel.so" #${TF_MKL_EXT}#TODO version?
MKL_ML_OMP_LIB_PATH="lib/libiomp5.so" #TODO VERSION?
elif [ "$OSNAME" == "Darwin" ]; then
echo "Darwin is unsupported yet";
exit 1
fi
if [ -e "$MKL_INSTALL_PATH/${MKL_ML_LIB_PATH}" ]; then
ln -sf $MKL_INSTALL_PATH/${MKL_ML_LIB_PATH} third_party/mkl/
ln -sf $MKL_INSTALL_PATH/${MKL_ML_OMP_LIB_PATH} third_party/mkl/
ln -sf $MKL_INSTALL_PATH/include third_party/mkl/
ln -sf $MKL_INSTALL_PATH/include third_party/eigen3/mkl_include
loc=$(locate -e libdl.so.2 | sed -n 1p)
ln -sf $loc third_party/mkl/libdl.so.2
elif [ -e "$MKL_INSTALL_PATH/${MKL_RT_LIB_PATH}" ]; then
ln -sf $MKL_INSTALL_PATH/${MKL_RT_LIB_PATH} third_party/mkl/
ln -sf $MKL_INSTALL_PATH/${MKL_RT_OMP_LIB_PATH} third_party/mkl/
ln -sf $MKL_INSTALL_PATH/include third_party/mkl/
ln -sf $MKL_INSTALL_PATH/include third_party/eigen3/mkl_include
loc=$(locate -e libdl.so.2 | sed -n 1p)
ln -sf $loc third_party/mkl/libdl.so.2
else
echo "ERROR: $MKL_INSTALL_PATH/${MKL_ML_LIB_PATH} nor $MKL_INSTALL_PATH/${MKL_RT_LIB_PATH} exists";
exit 1
fi
cat > third_party/mkl/mkl.config <<EOF
# MKL_INSTALL_PATH refers to the location of the MKL root folder. The MKL header and library
# files can be either in this directory or under include/ and lib64/.
MKL_INSTALL_PATH=$MKL_INSTALL_PATH
EOF
fi # TF_NEED_MKL
## End MKL setup
## Set up architecture-dependent optimization flags.
if [ -z "$CC_OPT_FLAGS" ]; then
default_cc_opt_flags="-march=native"
read -p "Please specify optimization flags to use during compilation when bazel option "\
"\"--config=opt\" is specified [Default is $default_cc_opt_flags]: " CC_OPT_FLAGS
if [ -z "$CC_OPT_FLAGS" ]; then
CC_OPT_FLAGS=$default_cc_opt_flags
fi
fi
if is_windows; then
TF_NEED_GCP=0
TF_NEED_HDFS=0
TF_NEED_JEMALLOC=0
TF_NEED_OPENCL=0
TF_CUDA_CLANG=0
fi
if is_linux; then
while [ "$TF_NEED_JEMALLOC" == "" ]; do
read -p "Do you wish to use jemalloc as the malloc implementation? [Y/n] "\
INPUT
case $INPUT in
[Yy]* ) echo "jemalloc enabled"; TF_NEED_JEMALLOC=1;;
[Nn]* ) echo "jemalloc disabled"; TF_NEED_JEMALLOC=0;;
"" ) echo "jemalloc enabled"; TF_NEED_JEMALLOC=1;;
* ) echo "Invalid selection: " $INPUT;;
esac
done
else
TF_NEED_JEMALLOC=0
fi
if [[ "$TF_NEED_JEMALLOC" == "1" ]]; then
write_to_bazelrc 'build --define with_jemalloc=true'
fi
while [[ "$TF_NEED_GCP" == "" ]]; do
read -p "Do you wish to build TensorFlow with "\
"Google Cloud Platform support? [y/N] " INPUT
case $INPUT in
[Yy]* ) echo "Google Cloud Platform support will be enabled for "\
"TensorFlow"; TF_NEED_GCP=1;;
[Nn]* ) echo "No Google Cloud Platform support will be enabled for "\
"TensorFlow"; TF_NEED_GCP=0;;
"" ) echo "No Google Cloud Platform support will be enabled for "\
"TensorFlow"; TF_NEED_GCP=0;;
* ) echo "Invalid selection: " $INPUT;;
esac
done
if [[ "$TF_NEED_GCP" == "1" ]]; then
write_to_bazelrc 'build --define with_gcp_support=true'
fi
while [[ "$TF_NEED_HDFS" == "" ]]; do
read -p "Do you wish to build TensorFlow with "\
"Hadoop File System support? [y/N] " INPUT
case $INPUT in
[Yy]* ) echo "Hadoop File System support will be enabled for "\
"TensorFlow"; TF_NEED_HDFS=1;;
[Nn]* ) echo "No Hadoop File System support will be enabled for "\
"TensorFlow"; TF_NEED_HDFS=0;;
"" ) echo "No Hadoop File System support will be enabled for "\
"TensorFlow"; TF_NEED_HDFS=0;;
* ) echo "Invalid selection: " $INPUT;;
esac
done
if [[ "$TF_NEED_HDFS" == "1" ]]; then
write_to_bazelrc 'build --define with_hdfs_support=true'
fi
## Enable XLA.
while [[ "$TF_ENABLE_XLA" == "" ]]; do
read -p "Do you wish to build TensorFlow with the XLA just-in-time compiler (experimental)? [y/N] " INPUT
case $INPUT in
[Yy]* ) echo "XLA JIT support will be enabled for TensorFlow"; TF_ENABLE_XLA=1;;
[Nn]* ) echo "No XLA JIT support will be enabled for TensorFlow"; TF_ENABLE_XLA=0;;
"" ) echo "No XLA support will be enabled for TensorFlow"; TF_ENABLE_XLA=0;;
* ) echo "Invalid selection: " $INPUT;;
esac
done
if [[ "$TF_ENABLE_XLA" == "1" ]]; then
write_to_bazelrc 'build --define with_xla_support=true'
fi
# Verbs configuration
while [ "$TF_NEED_VERBS" == "" ]; do
read -p "Do you wish to build TensorFlow with "\
"VERBS support? [y/N] " INPUT
case $INPUT in
[Yy]* ) echo "VERBS support will be enabled for "\
"TensorFlow"; TF_NEED_VERBS=1;;
[Nn]* ) echo "No VERBS support will be enabled for "\
"TensorFlow"; TF_NEED_VERBS=0;;
"" ) echo "No VERBS support will be enabled for "\
"TensorFlow"; TF_NEED_VERBS=0;;
* ) echo "Invalid selection: " $INPUT;;
esac
done
if [[ "$TF_NEED_VERBS" == "1" ]]; then
write_to_bazelrc 'build --define with_verbs_support=true'
fi
# Append CC optimization flags to .tf_configure.bazelrc
for opt in $CC_OPT_FLAGS; do
write_to_bazelrc "build:opt --cxxopt=$opt --copt=$opt"
done
# Run the gen_git_source to create links where bazel can track dependencies for
# git hash propagation
GEN_GIT_SOURCE=tensorflow/tools/git/gen_git_source.py
chmod a+x ${GEN_GIT_SOURCE}
"${PYTHON_BIN_PATH}" ${GEN_GIT_SOURCE} --configure "${SOURCE_BASE_DIR}"
## Set up SYCL-related environment settings
while [ "$TF_NEED_OPENCL" == "" ]; do
read -p "Do you wish to build TensorFlow with OpenCL support? [y/N] " INPUT
case $INPUT in
[Yy]* ) echo "OpenCL support will be enabled for TensorFlow"; TF_NEED_OPENCL=1;;
[Nn]* ) echo "No OpenCL support will be enabled for TensorFlow"; TF_NEED_OPENCL=0;;
"" ) echo "No OpenCL support will be enabled for TensorFlow"; TF_NEED_OPENCL=0;;
* ) echo "Invalid selection: " $INPUT;;
esac
done
## Set up CUDA-related environment settings
while [ "$TF_NEED_CUDA" == "" ]; do
read -p "Do you wish to build TensorFlow with CUDA support? [y/N] " INPUT
case $INPUT in
[Yy]* ) echo "CUDA support will be enabled for TensorFlow"; TF_NEED_CUDA=1;;
[Nn]* ) echo "No CUDA support will be enabled for TensorFlow"; TF_NEED_CUDA=0;;
"" ) echo "No CUDA support will be enabled for TensorFlow"; TF_NEED_CUDA=0;;
* ) echo "Invalid selection: " $INPUT;;
esac
done
export TF_NEED_CUDA
write_action_env_to_bazelrc "TF_NEED_CUDA" "$TF_NEED_CUDA"
export TF_NEED_OPENCL
write_action_env_to_bazelrc "TF_NEED_OPENCL" "$TF_NEED_OPENCL"
if [ "$TF_NEED_CUDA" == "1" ]; then
while [[ "$TF_CUDA_CLANG" == "" ]]; do
read -p "Do you want to use clang as CUDA compiler? [y/N] " INPUT
case $INPUT in
[Yy]* ) echo "Clang will be used as CUDA compiler"; TF_CUDA_CLANG=1;;
[Nn]* ) echo "nvcc will be used as CUDA compiler"; TF_CUDA_CLANG=0;;
"" ) echo "nvcc will be used as CUDA compiler"; TF_CUDA_CLANG=0;;
* ) echo "Invalid selection: " $INPUT;;
esac
done
export TF_CUDA_CLANG
write_action_env_to_bazelrc "TF_CUDA_CLANG" "$TF_CUDA_CLANG"
# Set up which clang we should use as the cuda / host compiler.
while [[ "$TF_CUDA_CLANG" == "1" ]] && true; do
fromuser=""
if [ -z "$CLANG_CUDA_COMPILER_PATH" ]; then
default_clang_host_compiler_path=$(which clang || true)
read -p "Please specify which clang should be used as device and host compiler. [Default is $default_clang_host_compiler_path]: " CLANG_CUDA_COMPILER_PATH
fromuser="1"
if [ -z "$CLANG_CUDA_COMPILER_PATH" ]; then
CLANG_CUDA_COMPILER_PATH="$default_clang_host_compiler_path"
fi
fi
if [ -e "$CLANG_CUDA_COMPILER_PATH" ]; then
export CLANG_CUDA_COMPILER_PATH
write_action_env_to_bazelrc "CLANG_CUDA_COMPILER_PATH" "$CLANG_CUDA_COMPILER_PATH"
break
fi
echo "Invalid clang path. ${CLANG_CUDA_COMPILER_PATH} cannot be found" 1>&2
if [ -z "$fromuser" ]; then
exit 1
fi
CLANG_CUDA_COMPILER_PATH=""
# Retry
done
# Find out where the CUDA toolkit is installed
while true; do
# Configure the Cuda SDK version to use.
if [ -z "$TF_CUDA_VERSION" ]; then
read -p "Please specify the CUDA SDK version you want to use, e.g. 7.0. [Leave empty to use system default]: " TF_CUDA_VERSION
fi
fromuser=""
if [ -z "$CUDA_TOOLKIT_PATH" ]; then
default_cuda_path=/usr/local/cuda
if is_windows; then
if [ -z "$CUDA_PATH" ]; then
default_cuda_path="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v8.0"
else
default_cuda_path="$(cygpath -m "$CUDA_PATH")"
fi
elif is_linux; then
# If the default doesn't exist, try an alternative default.
if [ ! -d $default_cuda_path ] && [ -d /opt/cuda ]; then
default_cuda_path=/opt/cuda
fi
fi
read -p "Please specify the location where CUDA $TF_CUDA_VERSION toolkit is installed. Refer to README.md for more details. [Default is $default_cuda_path]: " CUDA_TOOLKIT_PATH
fromuser="1"
if [ -z "$CUDA_TOOLKIT_PATH" ]; then
CUDA_TOOLKIT_PATH="$default_cuda_path"
fi
fi
if [[ -z "$TF_CUDA_VERSION" ]]; then
TF_CUDA_EXT=""
else
TF_CUDA_EXT=".$TF_CUDA_VERSION"
fi
if is_windows; then
CUDA_RT_LIB_PATH="lib/x64/cudart.lib"
elif is_linux; then
CUDA_RT_LIB_PATH="lib64/libcudart.so${TF_CUDA_EXT}"
elif is_macos; then
CUDA_RT_LIB_PATH="lib/libcudart${TF_CUDA_EXT}.dylib"
fi
if [ -e "${CUDA_TOOLKIT_PATH}/${CUDA_RT_LIB_PATH}" ]; then
export CUDA_TOOLKIT_PATH
write_action_env_to_bazelrc "CUDA_TOOLKIT_PATH" "$CUDA_TOOLKIT_PATH"
export TF_CUDA_VERSION
write_action_env_to_bazelrc "TF_CUDA_VERSION" "$TF_CUDA_VERSION"
break
fi
echo "Invalid path to CUDA $TF_CUDA_VERSION toolkit. ${CUDA_TOOLKIT_PATH}/${CUDA_RT_LIB_PATH} cannot be found"
if [ -z "$fromuser" ]; then
exit 1
fi
# Retry
TF_CUDA_VERSION=""
CUDA_TOOLKIT_PATH=""
done
# Set up which gcc nvcc should use as the host compiler
# No need to set this on Windows
while [[ "$TF_CUDA_CLANG" != "1" ]] && ! is_windows && true; do
fromuser=""
if [ -z "$GCC_HOST_COMPILER_PATH" ]; then
default_gcc_host_compiler_path=$(which gcc || true)
cuda_bin_symlink="$CUDA_TOOLKIT_PATH/bin/gcc"
if [ -L "$cuda_bin_symlink" ]; then
default_gcc_host_compiler_path=$(readlink $cuda_bin_symlink)
fi
read -p "Please specify which gcc should be used by nvcc as the host compiler. [Default is $default_gcc_host_compiler_path]: " GCC_HOST_COMPILER_PATH
fromuser="1"
if [ -z "$GCC_HOST_COMPILER_PATH" ]; then
GCC_HOST_COMPILER_PATH="$default_gcc_host_compiler_path"
fi
fi
if [ -e "$GCC_HOST_COMPILER_PATH" ]; then
export GCC_HOST_COMPILER_PATH
write_action_env_to_bazelrc "GCC_HOST_COMPILER_PATH" "$GCC_HOST_COMPILER_PATH"
break
fi
echo "Invalid gcc path. ${GCC_HOST_COMPILER_PATH} cannot be found" 1>&2
if [ -z "$fromuser" ]; then
exit 1
fi
GCC_HOST_COMPILER_PATH=""
# Retry
done
# Find out where the cuDNN library is installed
while true; do
# Configure the cuDNN version to use.
if [ -z "$TF_CUDNN_VERSION" ]; then
read -p "Please specify the cuDNN version you want to use. [Leave empty to use system default]: " TF_CUDNN_VERSION
fi
fromuser=""
if [ -z "$CUDNN_INSTALL_PATH" ]; then
default_cudnn_path=${CUDA_TOOLKIT_PATH}
read -p "Please specify the location where cuDNN $TF_CUDNN_VERSION library is installed. Refer to README.md for more details. [Default is $default_cudnn_path]: " CUDNN_INSTALL_PATH
fromuser="1"
if [ -z "$CUDNN_INSTALL_PATH" ]; then
CUDNN_INSTALL_PATH=$default_cudnn_path
fi
# The result returned from "read" is used unexpanded, which makes "~" unusable.
# Go through one more level of expansion to handle that.
CUDNN_INSTALL_PATH=`"${PYTHON_BIN_PATH}" -c "import os; print(os.path.realpath(os.path.expanduser('${CUDNN_INSTALL_PATH}')))"`
fi
if [[ -z "$TF_CUDNN_VERSION" ]]; then
TF_CUDNN_EXT=""
else
TF_CUDNN_EXT=".$TF_CUDNN_VERSION"
fi
if is_windows; then
CUDA_DNN_LIB_PATH="lib/x64/cudnn.lib"
CUDA_DNN_LIB_ALT_PATH="lib/x64/cudnn.lib"
elif is_linux; then
CUDA_DNN_LIB_PATH="lib64/libcudnn.so${TF_CUDNN_EXT}"
CUDA_DNN_LIB_ALT_PATH="libcudnn.so${TF_CUDNN_EXT}"
elif is_macos; then
CUDA_DNN_LIB_PATH="lib/libcudnn${TF_CUDNN_EXT}.dylib"
CUDA_DNN_LIB_ALT_PATH="libcudnn${TF_CUDNN_EXT}.dylib"
fi
if [ -e "$CUDNN_INSTALL_PATH/${CUDA_DNN_LIB_ALT_PATH}" -o -e "$CUDNN_INSTALL_PATH/${CUDA_DNN_LIB_PATH}" ]; then
export TF_CUDNN_VERSION
write_action_env_to_bazelrc "TF_CUDNN_VERSION" "$TF_CUDNN_VERSION"
export CUDNN_INSTALL_PATH
write_action_env_to_bazelrc "CUDNN_INSTALL_PATH" "$CUDNN_INSTALL_PATH"
break
fi
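# Fall back to the ldconfig cache to locate libcudnn when it is not found under
# the specified install path (Linux only).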
if is_linux; then
if ! type ldconfig > /dev/null 2>&1; then
LDCONFIG_BIN=/sbin/ldconfig
else
LDCONFIG_BIN=ldconfig
fi
CUDNN_PATH_FROM_LDCONFIG="$($LDCONFIG_BIN -p | sed -n 's/.*libcudnn.so .* => \(.*\)/\1/p')"
if [ -e "${CUDNN_PATH_FROM_LDCONFIG}${TF_CUDNN_EXT}" ]; then
export TF_CUDNN_VERSION
write_action_env_to_bazelrc "TF_CUDNN_VERSION" "$TF_CUDNN_VERSION"
export CUDNN_INSTALL_PATH="$(dirname ${CUDNN_PATH_FROM_LDCONFIG})"
write_action_env_to_bazelrc "CUDNN_INSTALL_PATH" "$CUDNN_INSTALL_PATH"
break
fi
fi
echo "Invalid path to cuDNN ${CUDNN_VERSION} toolkit. Neither of the following two files can be found:"
echo "${CUDNN_INSTALL_PATH}/${CUDA_DNN_LIB_PATH}"
echo "${CUDNN_INSTALL_PATH}/${CUDA_DNN_LIB_ALT_PATH}"
if is_linux; then
echo "${CUDNN_PATH_FROM_LDCONFIG}${TF_CUDNN_EXT}"
fi
if [ -z "$fromuser" ]; then
exit 1
fi
# Retry
TF_CUDNN_VERSION=""
CUDNN_INSTALL_PATH=""
done
# Configure the compute capabilities that TensorFlow builds for.
# Since the CUDA toolkit is not backward compatible, this is not guaranteed to work.
while true; do
fromuser=""
default_cuda_compute_capabilities="3.5,5.2"
if [ -z "$TF_CUDA_COMPUTE_CAPABILITIES" ]; then
cat << EOF
Please specify a list of comma-separated Cuda compute capabilities you want to build with.
You can find the compute capability of your device at: https://developer.nvidia.com/cuda-gpus.
Please note that each additional compute capability significantly increases your build time and binary size.
EOF
read -p "[Default is: \"3.5,5.2\"]: " TF_CUDA_COMPUTE_CAPABILITIES
fromuser=1
fi
if [ -z "$TF_CUDA_COMPUTE_CAPABILITIES" ]; then
TF_CUDA_COMPUTE_CAPABILITIES=$default_cuda_compute_capabilities
fi
# Check whether all capabilities from the input are valid
COMPUTE_CAPABILITIES=${TF_CUDA_COMPUTE_CAPABILITIES//,/ }
ALL_VALID=1
for CAPABILITY in $COMPUTE_CAPABILITIES; do
if [[ ! "$CAPABILITY" =~ [0-9]+.[0-9]+ ]]; then
echo "Invalid compute capability: " $CAPABILITY
ALL_VALID=0
break
fi
done
if [ "$ALL_VALID" == "0" ]; then
if [ -z "$fromuser" ]; then
exit 1
fi
else
export TF_CUDA_COMPUTE_CAPABILITIES
write_action_env_to_bazelrc "TF_CUDA_COMPUTE_CAPABILITIES" "$TF_CUDA_COMPUTE_CAPABILITIES"
break
fi
TF_CUDA_COMPUTE_CAPABILITIES=""
done
if is_windows; then
# The following three variables are needed for MSVC toolchain configuration in Bazel
export CUDA_PATH="$CUDA_TOOLKIT_PATH"
export CUDA_COMPUTE_CAPABILITIES="$TF_CUDA_COMPUTE_CAPABILITIES"
export NO_WHOLE_ARCHIVE_OPTION=1
write_action_env_to_bazelrc "CUDA_PATH" "$CUDA_PATH"
write_action_env_to_bazelrc "CUDA_COMPUTE_CAPABILITIES" "$CUDA_COMPUTE_CAPABILITIES"
write_action_env_to_bazelrc "NO_WHOLE_ARCHIVE_OPTION" "1"
fi
# end of if "$TF_NEED_CUDA" == "1"
fi
# OpenCL configuration
if [ "$TF_NEED_OPENCL" == "1" ]; then
# Determine which C++ compiler should be used as the host compiler
while true; do
fromuser=""
if [ -z "$HOST_CXX_COMPILER" ]; then
default_cxx_host_compiler=$(which clang++-3.6 || true)
read -p "Please specify which C++ compiler should be used as the host C++ compiler. [Default is $default_cxx_host_compiler]: " HOST_CXX_COMPILER
fromuser="1"
if [ -z "$HOST_CXX_COMPILER" ]; then
HOST_CXX_COMPILER=$default_cxx_host_compiler
fi
fi
if [ -e "$HOST_CXX_COMPILER" ]; then
export HOST_CXX_COMPILER
write_action_env_to_bazelrc "HOST_CXX_COMPILER" "$HOST_CXX_COMPILER"
break
fi
echo "Invalid C++ compiler path. ${HOST_CXX_COMPILER} cannot be found" 1>&2
if [ -z "$fromuser" ]; then
exit 1
fi
HOST_CXX_COMPILER=""
# Retry
done
# Determine which C compiler should be used as the host compiler
while true; do
fromuser=""
if [ -z "$HOST_C_COMPILER" ]; then
default_c_host_compiler=$(which clang-3.6 || true)
read -p "Please specify which C compiler should be used as the host C compiler. [Default is $default_c_host_compiler]: " HOST_C_COMPILER
fromuser="1"
if [ -z "$HOST_C_COMPILER" ]; then
HOST_C_COMPILER=$default_c_host_compiler
fi
fi
if [ -e "$HOST_C_COMPILER" ]; then
export HOST_C_COMPILER
write_action_env_to_bazelrc "HOST_C_COMPILER" "$HOST_C_COMPILER"
break
fi
echo "Invalid C compiler path. ${HOST_C_COMPILER} cannot be found" 1>&2
if [ -z "$fromuser" ]; then
exit 1
fi
HOST_C_COMPILER=""
# Retry
done
while true; do
# Configure the OPENCL version to use.
TF_OPENCL_VERSION="1.2"
# Point to ComputeCpp root
if [ -z "$COMPUTECPP_TOOLKIT_PATH" ]; then
default_computecpp_toolkit_path=/usr/local/computecpp
read -p "Please specify the location where ComputeCpp for SYCL $TF_OPENCL_VERSION is installed. [Default is $default_computecpp_toolkit_path]: " COMPUTECPP_TOOLKIT_PATH
fromuser="1"
if [ -z "$COMPUTECPP_TOOLKIT_PATH" ]; then
COMPUTECPP_TOOLKIT_PATH=$default_computecpp_toolkit_path
fi
fi
if is_linux; then
SYCL_RT_LIB_PATH="lib/libComputeCpp.so"
fi
if [ -e "${COMPUTECPP_TOOLKIT_PATH}/${SYCL_RT_LIB_PATH}" ]; then
export COMPUTECPP_TOOLKIT_PATH
write_action_env_to_bazelrc "COMPUTECPP_TOOLKIT_PATH" "$COMPUTECPP_TOOLKIT_PATH"
break
fi
echo "Invalid SYCL $TF_OPENCL_VERSION library path. ${COMPUTECPP_TOOLKIT_PATH}/${SYCL_RT_LIB_PATH} cannot be found"
if [ -z "$fromuser" ]; then
exit 1
fi
# Retry
TF_OPENCL_VERSION=""
COMPUTECPP_TOOLKIT_PATH=""
done
# end of if "$TF_NEED_OPENCL" == "1"
fi
# TODO(gunan): Remove once bazel correctly handles changes in remote repositories.
bazel clean
echo "Configuration finished"