Update artifactId for TensorFlow Hadoop and spark-connector jars

This commit is contained in:
Soila Kavulya 2018-05-30 19:23:08 -07:00
parent 4a4eb47e61
commit b0ec8d2c46
5 changed files with 14 additions and 12 deletions

View File

@@ -53,10 +53,10 @@ There are seven artifacts and thus `pom.xml`s involved in this release:
7. [`parentpom`](https://maven.apache.org/pom/index.html): Common settings 7. [`parentpom`](https://maven.apache.org/pom/index.html): Common settings
shared by all of the above. shared by all of the above.
8. `tensorflow-hadoop`: The TensorFlow TFRecord InputFormat/OutputFormat for Apache Hadoop. 8. `hadoop`: The TensorFlow TFRecord InputFormat/OutputFormat for Apache Hadoop.
The source code for this package is available in the [TensorFlow Ecosystem](https://github.com/tensorflow/ecosystem/tree/master/hadoop) The source code for this package is available in the [TensorFlow Ecosystem](https://github.com/tensorflow/ecosystem/tree/master/hadoop)
9. `spark-tensorflow-connector`: A Scala library for loading and storing TensorFlow TFRecord 9. `spark-connector`: A Scala library for loading and storing TensorFlow TFRecord
using Apache Spark DataFrames. The source code for this package is available using Apache Spark DataFrames. The source code for this package is available
in the [TensorFlow Ecosystem](https://github.com/tensorflow/ecosystem/tree/master/spark/spark-tensorflow-connector) in the [TensorFlow Ecosystem](https://github.com/tensorflow/ecosystem/tree/master/spark/spark-tensorflow-connector)

View File

@@ -5,7 +5,7 @@
<!-- Placeholder pom which is replaced by TensorFlow ecosystem Hadoop pom during build --> <!-- Placeholder pom which is replaced by TensorFlow ecosystem Hadoop pom during build -->
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<description>TensorFlow TFRecord InputFormat/OutputFormat for Apache Hadoop</description> <description>TensorFlow TFRecord InputFormat/OutputFormat for Apache Hadoop</description>
<artifactId>tensorflow-hadoop</artifactId> <artifactId>hadoop</artifactId>
<packaging>jar</packaging> <packaging>jar</packaging>
<scm> <scm>
@@ -21,4 +21,4 @@
<version>1.8.0</version> <version>1.8.0</version>
<relativePath>../</relativePath> <relativePath>../</relativePath>
</parent> </parent>
</project> </project>

View File

@@ -32,8 +32,8 @@
<module>libtensorflow_jni_gpu</module> <module>libtensorflow_jni_gpu</module>
<module>tensorflow</module> <module>tensorflow</module>
<module>proto</module> <module>proto</module>
<module>tensorflow-hadoop</module> <module>hadoop</module>
<module>spark-tensorflow-connector</module> <module>spark-connector</module>
</modules> </modules>
<!-- Two profiles are used: <!-- Two profiles are used:

View File

@@ -48,7 +48,7 @@ clean() {
mvn -q clean mvn -q clean
rm -rf libtensorflow_jni/src libtensorflow_jni/target libtensorflow_jni_gpu/src libtensorflow_jni_gpu/target \ rm -rf libtensorflow_jni/src libtensorflow_jni/target libtensorflow_jni_gpu/src libtensorflow_jni_gpu/target \
libtensorflow/src libtensorflow/target tensorflow-android/target \ libtensorflow/src libtensorflow/target tensorflow-android/target \
tensorflow-hadoop/src spark-tensorflow-connector/src hadoop/src spark-connector/src
} }
update_version_in_pom() { update_version_in_pom() {
@@ -193,8 +193,8 @@ generate_java_protos() {
# is updated for each module. # is updated for each module.
download_tf_ecosystem() { download_tf_ecosystem() {
ECOSYSTEM_DIR="/tmp/tensorflow-ecosystem" ECOSYSTEM_DIR="/tmp/tensorflow-ecosystem"
HADOOP_DIR="${DIR}/tensorflow-hadoop" HADOOP_DIR="${DIR}/hadoop"
SPARK_DIR="${DIR}/spark-tensorflow-connector" SPARK_DIR="${DIR}/spark-connector"
# Clean any previous attempts # Clean any previous attempts
rm -rf "${ECOSYSTEM_DIR}" rm -rf "${ECOSYSTEM_DIR}"
@@ -203,6 +203,8 @@ download_tf_ecosystem() {
mkdir -p "${ECOSYSTEM_DIR}" mkdir -p "${ECOSYSTEM_DIR}"
cd "${ECOSYSTEM_DIR}" cd "${ECOSYSTEM_DIR}"
git clone "${TF_ECOSYSTEM_URL}" git clone "${TF_ECOSYSTEM_URL}"
cd ecosystem
git checkout r${TF_VERSION}
# Copy the TensorFlow Hadoop source # Copy the TensorFlow Hadoop source
cp -r "${ECOSYSTEM_DIR}/ecosystem/hadoop/src" "${HADOOP_DIR}" cp -r "${ECOSYSTEM_DIR}/ecosystem/hadoop/src" "${HADOOP_DIR}"
@@ -279,7 +281,7 @@ cd "${DIR}"
# Comment lines out appropriately if debugging/tinkering with the release # Comment lines out appropriately if debugging/tinkering with the release
# process. # process.
# gnupg2 is required for signing # gnupg2 is required for signing
apt-get -qq update && apt-get -qqq install -y gnupg2 && apt-get -qqq install -y git apt-get -qq update && apt-get -qqq install -y gnupg2 git
clean clean
update_version_in_pom update_version_in_pom

View File

@@ -5,7 +5,7 @@
<!-- Placeholder pom which is replaced by TensorFlow ecosystem Spark pom during build --> <!-- Placeholder pom which is replaced by TensorFlow ecosystem Spark pom during build -->
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<description>TensorFlow TFRecord connector for Apache Spark DataFrames</description> <description>TensorFlow TFRecord connector for Apache Spark DataFrames</description>
<artifactId>spark-tensorflow-connector</artifactId> <artifactId>spark-connector</artifactId>
<packaging>jar</packaging> <packaging>jar</packaging>
<scm> <scm>
@@ -21,4 +21,4 @@
<version>1.8.0</version> <version>1.8.0</version>
<relativePath>../</relativePath> <relativePath>../</relativePath>
</parent> </parent>
</project> </project>