diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 7efee124df9..a56d64d08ce 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -17,15 +17,17 @@ env: jobs: check-format: if: github.event_name == 'pull_request' - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: - name: Configure Java - uses: actions/setup-java@v2 + uses: actions/setup-java@v5 with: distribution: 'adopt' java-version: '17' - name: Checkout repository - uses: actions/checkout@v1 + uses: actions/checkout@v6 + with: + fetch-depth: 0 - name: Build project run: | gcc --version @@ -35,7 +37,7 @@ jobs: run: | mvn spotless:check -Pjdk17 -B -U -e prepare: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 outputs: repositoryUrl: ${{ steps.repository.outputs.repositoryUrl }} steps: @@ -49,32 +51,38 @@ jobs: https://oss.sonatype.org/service/local/staging/profiles/$STAGING_PROFILE_ID/start export STAGING_REPOSITORY_ID=`awk -F'[<>]' '/stagedRepositoryId/{print $3}' response.xml` echo "Staging repository created: $STAGING_REPOSITORY_ID" + echo "::set-output name=stagingRepositoryId::$STAGING_REPOSITORY_ID" - name: Checkout repository - uses: actions/checkout@v1 + uses: actions/checkout@v6 - name: Extract distribution repository URL id: repository run: | if [[ "${{ env.DEPLOY_RELEASE }}" = "true" ]]; then - export REPOSITORY_URL=`mvn exec:exec -q -N -Dexec.executable='echo' -Dexec.args="\\${project.distributionManagement.repository.url}" -DstagingRepositoryId=$STAGING_REPOSITORY_ID` + export REPOSITORY_URL=`mvn exec:exec -q -N -Dexec.executable='echo' -Dexec.args="\\${project.distributionManagement.repository.url}" -DstagingRepositoryId=${{ steps.staging.outputs.stagingRepositoryId }}` else export REPOSITORY_URL=`mvn exec:exec -q -N -Dexec.executable='echo' -Dexec.args="\\${project.distributionManagement.snapshotRepository.url}"` fi echo "Repository URL: $REPOSITORY_URL" echo "::set-output name=repositoryUrl::$REPOSITORY_URL" - linux-x86_64: - runs-on: 
ubuntu-20.04 + linux-arm64: + runs-on: ubuntu-2204-arm64-2c needs: prepare strategy: matrix: - ext: ["", -gpu] + ext: [""] steps: + - name: Install environment + run: | + sudo apt update + sudo apt install -y curl wget unzip tar git gcc g++ - name: Configure Java - uses: actions/setup-java@v2 + uses: actions/setup-java@v5 with: - distribution: 'adopt' - java-version: '11' + distribution: 'zulu' + java-version: '17' + architecture: 'aarch64' - name: Checkout repository - uses: actions/checkout@v1 + uses: actions/checkout@v6 - name: Build project run: | gcc --version @@ -84,44 +92,44 @@ jobs: - name: Deploy native artifact if: env.DEPLOY_RELEASE == 'true' || env.DEPLOY_SNAPSHOT == 'true' run: mvn -f tensorflow-core/tensorflow-core-native/pom.xml deploy:deploy-file@native-only -B -e -Djavacpp.platform=${{ github.job }} -Djavacpp.platform.extension=${{ matrix.ext }} -Durl=${{ needs.prepare.outputs.repositoryUrl }} - macosx-arm64: - runs-on: macos-14 + linux-x86_64: + runs-on: ubuntu-22.04 needs: prepare strategy: matrix: - ext: [""] + ext: ["", -gpu] steps: - name: Configure Java - uses: actions/setup-java@v2 + uses: actions/setup-java@v5 with: - distribution: 'zulu' - java-version: '17' - architecture: 'arm64' + distribution: 'adopt' + java-version: '11' - name: Checkout repository - uses: actions/checkout@v1 + uses: actions/checkout@v6 - name: Build project run: | - clang --version + gcc --version mvn -version echo "ossrh${{ secrets.CI_DEPLOY_USERNAME }}${{ secrets.CI_DEPLOY_PASSWORD }}" > $HOME/.m2/settings.xml mvn clean install -pl '!tensorflow-framework' -B -U -e -Djavacpp.platform=${{ github.job }} -Djavacpp.platform.extension=${{ matrix.ext }} - name: Deploy native artifact if: env.DEPLOY_RELEASE == 'true' || env.DEPLOY_SNAPSHOT == 'true' run: mvn -f tensorflow-core/tensorflow-core-native/pom.xml deploy:deploy-file@native-only -B -e -Djavacpp.platform=${{ github.job }} -Djavacpp.platform.extension=${{ matrix.ext }} -Durl=${{ needs.prepare.outputs.repositoryUrl 
}} - macosx-x86_64: - runs-on: macos-11 + macosx-arm64: + runs-on: macos-14 needs: prepare strategy: matrix: ext: [""] steps: - name: Configure Java - uses: actions/setup-java@v2 + uses: actions/setup-java@v5 with: - distribution: 'adopt' - java-version: '11' + distribution: 'zulu' + java-version: '17' + architecture: 'arm64' - name: Checkout repository - uses: actions/checkout@v1 + uses: actions/checkout@v6 - name: Build project run: | clang --version @@ -132,7 +140,7 @@ jobs: if: env.DEPLOY_RELEASE == 'true' || env.DEPLOY_SNAPSHOT == 'true' run: mvn -f tensorflow-core/tensorflow-core-native/pom.xml deploy:deploy-file@native-only -B -e -Djavacpp.platform=${{ github.job }} -Djavacpp.platform.extension=${{ matrix.ext }} -Durl=${{ needs.prepare.outputs.repositoryUrl }} windows-x86_64: - runs-on: windows-2019 + runs-on: windows-2022 needs: prepare strategy: matrix: @@ -146,16 +154,16 @@ jobs: set "EXT=${{ matrix.ext }}" echo %JAVA_HOME% - name: Configure Java - uses: actions/setup-java@v2 + uses: actions/setup-java@v5 with: distribution: 'adopt' java-version: '11' - name: Checkout repository - uses: actions/checkout@v1 + uses: actions/checkout@v6 - name: Build project shell: cmd run: | - call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" amd64 + call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" amd64 set "PATH=C:\msys64\usr\bin;%PATH%" echo Shorten work paths to prevent Bazel from reaching MAX_PATH limit mkdir C:\tmp @@ -177,19 +185,18 @@ jobs: run: | call mvn -f tensorflow-core/tensorflow-core-native/pom.xml deploy:deploy-file@native-only -B -e -Djavacpp.platform=${{ github.job }} -Djavacpp.platform.extension=${{ matrix.ext }} -Durl=${{ needs.prepare.outputs.repositoryUrl }} if ERRORLEVEL 1 exit /b - deploy: if: ${{ github.event_name == 'push' && (github.ref == 'refs/heads/master' || github.ref == 'refs/heads/staging') }} # DEPLOY_SNAPSHOT (releases should be signed and 
deployed manually from local machine) - needs: [linux-x86_64, macosx-x86_64, windows-x86_64, macosx-arm64] - runs-on: ubuntu-20.04 + needs: [linux-x86_64, windows-x86_64, macosx-arm64, linux-arm64] + runs-on: ubuntu-22.04 steps: - name: Configure Java - uses: actions/setup-java@v2 + uses: actions/setup-java@v5 with: distribution: 'adopt' java-version: '11' - name: Checkout repository - uses: actions/checkout@v1 + uses: actions/checkout@v6 - name: Build project run: | java -version diff --git a/.gitignore b/.gitignore index cb95fc014f9..d9e902d7d9e 100644 --- a/.gitignore +++ b/.gitignore @@ -64,3 +64,6 @@ gradleBuild # Deployment Files settings.xml pom.xml.asc + +# Docs +docs/docs/apidocs/ \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 90f248c4a3c..9da8b9603aa 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -15,8 +15,7 @@ For dependencies, we can use anything compliant with [this list](https://opensou ## Building -To build all the artifacts locally, simply invoke the command `mvn install` at the root of this repository (or the Maven command of your choice). It is also -possible to build artifacts with support for CUDA® by adding the `-Djavacpp.platform.extension=-gpu` argument to the Maven command. +To build all the artifacts locally, simply invoke the command `mvn install` at the root of this repository (or the Maven command of your choice). ### JDK 16+ @@ -35,7 +34,7 @@ This can be done in `.mvn/jvm.config` or `MAVEN_OPTS`. ### Native Builds By default, the build will attempt to download the existing TensorFlow binaries from the web for the platform it is running on (so you need to have an active internet connection). 
-If such binaries are not available for your platform, you will need to build the TensorFlow runtime library from sources, by appending the `-Dnative.build` argument to your Maven +If such binaries are not available for your platform, you will need to build the TensorFlow runtime library from sources, by appending the `-Pnative-build` argument to your Maven command. This requires a valid environment for building TensorFlow, including the [bazel](https://bazel.build/) build tool and a few Python dependencies (please read [TensorFlow documentation](https://www.tensorflow.org/install/source) for more details). Note that building from sources can take multiple hours on a regular laptop. @@ -79,14 +78,15 @@ To upgrade the version of TensorFlow that is embedded within TensorFlow Java, pl ### Upgrading TensorFlow Native Library 1. Download locally the archive of the tensorflow release at https://github.com/tensorflow/tensorflow/archive/refs/tags/vX.X.X.tar.gz -2. Compute the SHA sum using the shell command `sha256sum ` +2. Compute the SHA sum using the shell command `shasum -a 256 ` 3. Update `urls`, `sha256` and `strip_prefix` fields of the `org_tensorflow` archive rule in Bazel [workspace](https://github.com/tensorflow/java/blob/master/tensorflow-core/tensorflow-core-native/WORKSPACE#L19) 4. Extract the archive in a temporary folder 5. Copy the content of `tensorflow-x.x.x/.bazelrc` file to `tensorflow-core/tensorflow-core-native/tensorflow.bazelrc` under TensorFlow Java source tree -6. Copy the content of `tensorflow-x.x.x/WORKSPACE` after the "###### Copy content of..." notice if `tensorflow-core/tensorflow-core-native/WORKSPACE`, read notice for more details +6. Copy the content of `tensorflow-x.x.x/WORKSPACE` after the "###### Copy content of..." notice to `tensorflow-core/tensorflow-core-native/WORKSPACE`, read notice for more details 7. Copy the content of `tensorflow-x.x.x/.bazelversion` file to `tensorflow-core/tensorflow-core-native/.bazelversion` 8. 
Validate that options in `tensorflow-core/tensorflow-core-native/.bazelrc` are still accurate or update them accordingly 9. Update URLs of existing TensorFlow binaries in the `tensorflow-core/tensorflow-core-native/scripts/dist_download` script +10. Update URLs of TensorFlow-Text binaries used for testing in the `tensorflow-core/tensorflow-core-api/scripts/test_download` script #### Patching TensorFlow Sources diff --git a/MIGRATING.md b/MIGRATING.md index 542f3a25e0d..ac7276eba99 100644 --- a/MIGRATING.md +++ b/MIGRATING.md @@ -3,6 +3,83 @@ TensorFlow Java is still in an alpha stage, therefore is subject to contain breaking changes between the different releases. This guide explain in detail how to migrate your code from a previous version to a new one that includes some changes that are not backward compatible. +## Migrating to 1.0.0 + +TensorFlow-Java 1.0.0 requires Java 11 or later. + +### Native Artifact Renaming + +The native artifacts, that used to be distributed as `tensorflow-core-api`, are now distributed under `tensorflow-core-native`. If you still add +`tensorflow-core-platform` in your project, that won't affect you. But if you were adding dependencies to specific native runtimes, you need to update +them to reflect the new artifact name. 
+ +For example, +```xml + + org.tensorflow + tensorflow-core-api + 0.5.0 + + + org.tensorflow + tensorflow-core-api + 0.5.0 + linux-x86_64 + +``` +will now be +```xml + + org.tensorflow + tensorflow-core-api + 1.0.0 + + + org.tensorflow + tensorflow-core-native + 1.0.0 + linux-x86_64 + +``` +### Java Module Renaming + +The Java Module (jigsaw) names has been updated to drop the leading `org.`, as follow: +- `tensorflow-core-api` : `tensorflow` (was `org.tensorflow` before) +- `tensorflow-core-generator` : `tensorflow.generator` (was `org.tensorflow-generator` before) +- `tensorflow-core-native` : `tensorflow.nativelib` +- `tensorflow-framework` : `tensorflow.framework` (was `org.tensorflow.framework` before) + +### GPU Support + +Previous versions of TF Java were building a `tensorflow-core-platform-gpu` artifact upon which application could depend +on to include any TensorFlow native library that GPU support enabled. Since TensorFlow has removed its support of GPU +on all platforms other than Linux, we removed our platform JAR in favour of simply adding a dependency on the +`linux-x86_64-gpu` native artifact. +```xml + + org.tensorflow + tensorflow-core-native + 1.0.0 + linux-x86_64-gpu + +``` +Please note that including this dependency won't work if your application also depends on `tensorflow-core-platform`. If +you need to support more platforms than Linux, you should include the other `tensorflow-core-native` dependencies +separately (see the [README](README.md) file). + +### Session Run Result + +In versions before 0.4.0 `Session.Runner.run` and `TensorFunction.call` returned a `List`. In newer versions +they return a `Result` class which is `AutoCloseable` to make management of the tensor lifetime simpler. To migrate +users should wrap the `run` invocation in a try-with-resources statement rather than closing the output tensors +individually. 
+ +### Proto Definitions Moved + +Some proto definitions under `org.tensorflow.proto` have been moved to a different location under the same (`org.tensorflow.proto`) package. +Certain classes have moved packages, for example, `org.tensorflow.proto.example.Feature` to `org.tensorflow.proto.Feature`. +You will need to reimport these proto bindings to match the new location. Your IDE should easily be able to do this for you. + ## Migrating to 0.3.0 ### Non-parameterized Typed Tensors diff --git a/README.md b/README.md index efb6e31be97..e1d1e080bcb 100644 --- a/README.md +++ b/README.md @@ -44,23 +44,26 @@ See [CONTRIBUTING.md](CONTRIBUTING.md#building). ## Using Maven Artifacts There are two options for adding TensorFlow Java as a dependency to your Maven project: with individual dependencies -for each targeted platforms or with a single dependency that target them all. +for each targeted platform or with a single dependency that targets them all. ### Individual dependencies With this option, you must first add a dependency to `tensorflow-core-api` and then one or multiple dependencies to `tensorflow-core-native` with a classifier targeting a specific platform. This option is preferred as -it minimize the size of your application by only including the TensorFlow builds you need, at the cost of being more +it minimizes the size of your application by only including the TensorFlow builds you need, at the cost of being more restrictive. 
While TensorFlow Java can be compiled for [multiple platforms](https://github.com/tensorflow/java/blob/master/tensorflow-core/pom.xml#L54), -only binaries for the followings are being **supported and distributed** by this project: +only binaries for the following are being **supported and distributed** by this project: -- `linux-x86_64`: Linux platforms on Intel chips -- `linux-x86_64-gpu`: Linux platforms on Intel chips with Cuda GPU support -- `macosx-x86_64`: MacOS X platforms on Intel chips +- `linux-x86_64`: Linux platforms on Intel/AMD chips +- `linux-x86_64-gpu`: Linux platforms on Intel/AMD chips with Cuda GPU support +- `linux-arm64`: Linux platforms on Arm chips - `macosx-arm64`: MacOS X platforms on Apple Silicon chips -- `windows-x86_64`: Windows platforms on Intel chips +- `windows-x86_64`: Windows platforms on Intel/AMD chips + +Binaries for `macosx-x86_64` are available for TF-Java 1.0 series releases and earlier, they were dropped from +TF-Java 1.1 and newer as they are no longer supported or released by Google. 
For example, for building a JAR that uses TensorFlow and is targeted to be deployed only on Linux systems with no GPU support, you should add the following dependencies: @@ -68,15 +71,21 @@ systems with no GPU support, you should add the following dependencies: org.tensorflow tensorflow-core-api - 0.5.0 + 1.1.0 org.tensorflow tensorflow-core-native - 0.5.0 + 1.1.0 linux-x86_64 ``` +Or Gradle: +```groovy +def tfVersion = '1.1.0' +implementation "org.tensorflow:tensorflow-core-api:$tfVersion" +implementation "org.tensorflow:tensorflow-core-native:$tfVersion:linux-x86_64" +``` On the other hand, if you plan to deploy your JAR on more platforms, you need additional native dependencies as follows: @@ -84,47 +93,63 @@ native dependencies as follows: org.tensorflow tensorflow-core-api - 0.5.0 + 1.1.0 org.tensorflow tensorflow-core-native - 0.5.0 + 1.1.0 linux-x86_64-gpu org.tensorflow tensorflow-core-native - 0.5.0 + 1.1.0 macosx-arm64 org.tensorflow tensorflow-core-native - 0.5.0 + 1.1.0 windows-x86_64 ``` +Or Gradle: +```groovy +def tfVersion = '1.1.0' +implementation "org.tensorflow:tensorflow-core-api:$tfVersion" +implementation "org.tensorflow:tensorflow-core-native:$tfVersion:linux-x86_64-gpu" +implementation "org.tensorflow:tensorflow-core-native:$tfVersion:macosx-arm64" +implementation "org.tensorflow:tensorflow-core-native:$tfVersion:windows-x86_64" +``` Only one dependency can be added per platform, meaning that you cannot add native dependencies to both `linux-x86_64` and `linux-x86_64-gpu` within the same project. +To use an NVIDIA GPU, you need to install the NVIDIA device driver, CUDA Toolkit, and cuDNN. 
+For Ubuntu 24.04, you can install them with the following command: +```sudo apt install -y nvidia-driver-550 nvidia-cuda-toolkit nvidia-cudnn``` + ### Single dependency In some cases, it might be preferable to add a single dependency that includes transitively all the artifacts required to run TensorFlow Java on any [supported platforms](README.md#individual-dependencies) -- `tensorflow-core-platform`: Includes `tensorflow-core-api`, plus native artifacts for `linux-x86_64`, `macosx-arm64`, `macosx-x86_64` and `windows-x86_64` +- `tensorflow-core-platform`: Includes `tensorflow-core-api`, plus native artifacts for `linux-x86_64`, `linux-x86_64-arm64`, `macosx-arm64` and `windows-x86_64` -For example, to run TensorFlow Java on any platform for which a binary is being distributed by this project, you can +For example, to run TensorFlow Java on any CPU platform for which a binary is being distributed by this project, you can simply add this dependency to your application: ```xml org.tensorflow tensorflow-core-platform - 0.5.0 + 1.1.0 ``` +Or Gradle: +```groovy +implementation "org.tensorflow:tensorflow-core-platform:1.1.0" +``` Be aware though that the builds of TensorFlow are quite voluminous and including too many native dependencies may significantly increase the size of your application. So it is good practice to limit your dependencies to @@ -135,7 +160,7 @@ the conventions established on this page: ### Snapshots Snapshots of TensorFlow Java artifacts are automatically distributed after each update in the code. 
To use them, you need -to add Sonatype OSS repository in your pom.xml, like the following +to add Sonatype OSS repository in your `pom.xml`, like the following ```xml @@ -152,28 +177,45 @@ to add Sonatype OSS repository in your pom.xml, like the following org.tensorflow tensorflow-core-platform - 1.0.0-SNAPSHOT + 1.2.0-SNAPSHOT ``` +Or Gradle: +```groovy +repositories { + mavenCentral() + maven { + url = uri("https://oss.sonatype.org/content/repositories/snapshots") + } +} + +dependencies { + // Example of dependency, see section above for more options + implementation "org.tensorflow:tensorflow-core-platform:1.2.0-SNAPSHOT" +} +``` ## TensorFlow/Java Version Support This table shows the mapping between TensorFlow, TensorFlow Java and minimum supported Java versions. | TensorFlow Java Version | TensorFlow Version | Minimum Java Version | -|-------------------------|--------------------| --------------- | -| 0.2.0 | 2.3.1 | 8 | -| 0.3.0 | 2.4.1 | 8 | -| 0.3.1 | 2.4.1 | 8 | -| 0.3.2 | 2.4.1 | 8 | -| 0.3.3 | 2.4.1 | 8 | -| 0.4.0 | 2.7.0 | 8 | -| 0.4.1 | 2.7.1 | 8 | -| 0.4.2 | 2.7.4 | 8 | -| 0.5.0 | 2.10.1 | 11 | -| 0.6.0-SNAPSHOT | 2.10.1 | 11 | -| 1.0.0-SNAPSHOT | 2.15.0 | 11 | +|-------------------------|--------------------|----------------------| +| 0.2.0 | 2.3.1 | 8 | +| 0.3.0 | 2.4.1 | 8 | +| 0.3.1 | 2.4.1 | 8 | +| 0.3.2 | 2.4.1 | 8 | +| 0.3.3 | 2.4.1 | 8 | +| 0.4.0 | 2.7.0 | 8 | +| 0.4.1 | 2.7.1 | 8 | +| 0.4.2 | 2.7.4 | 8 | +| 0.5.0 | 2.10.1 | 11 | +| 1.0.0-rc.1 | 2.16.1 | 11 | +| 1.0.0-rc.2 | 2.16.2 | 11 | +| 1.0.0 | 2.16.2 | 11 | +| 1.1.0 | 2.18.0 | 11 | +| 1.2.0-SNAPSHOT | 2.18.0 | 11 | ## How to Contribute? 
diff --git a/docs/_toc.yaml b/docs/_toc.yaml old mode 100644 new mode 100755 diff --git a/docs/docs/assets/tensorflow.svg b/docs/docs/assets/tensorflow.svg new file mode 100644 index 00000000000..c0778626d66 --- /dev/null +++ b/docs/docs/assets/tensorflow.svg @@ -0,0 +1 @@ + diff --git a/docs/index.md b/docs/docs/index.md old mode 100644 new mode 100755 similarity index 59% rename from docs/index.md rename to docs/docs/index.md index 47ad1385a1e..c9fcbf53e7e --- a/docs/index.md +++ b/docs/docs/index.md @@ -1,14 +1,5 @@ # TensorFlow for Java - - - -
- View on TensorFlow.org - - View GitHub repository -
- TensorFlow Java can run on any JVM for building, training and running machine learning models. It comes with a series of utilities and frameworks that help achieve most of the tasks common to data scientists and developers working in this domain. Java and other JVM languages, such as Scala or Kotlin, are @@ -26,21 +17,19 @@ migrated from Bazel to Maven, which is more familiar for most Java developers. The following describes the layout of the repository and its different artifacts: -* [tensorflow-core](https://github.com/tensorflow/java/tree/master/tensorflow-core) - * All artifacts that build up the core language bindings of TensorFlow for Java - * Intended audience: projects that provide their own APIs or frameworks on top of - TensorFlow and just want a thin layer to access the TensorFlow runtime from the JVM - -* [tensorflow-framework](https://github.com/tensorflow/java/tree/master/tensorflow-framework) - * Primary API for building and training neural networks with TensorFlow - * Intended audience: neural network developers +### [tensorflow-core](https://github.com/tensorflow/java/tree/master/tensorflow-core) + * **Intended audience**: developers who wants to deploy a TensorFlow model on a JVM for inference. Also for projects + that provide their own APIs or frameworks on top of TensorFlow and just want a thin layer to access the TensorFlow runtime from the JVM. + * All artifacts that make up the core language bindings of TensorFlow for Java. -* [ndarray](https://github.com/tensorflow/java-ndarray) - * Generic utility library for n-dimensional data I/O operations - * Used by TensorFlow but does not depend on TensorFlow - * Intended audience: any developer who needs a Java n-dimensional array implementation, whether or not they - use it with TensorFlow +### [tensorflow-framework](https://github.com/tensorflow/java/tree/master/tensorflow-framework) + * **Intended audience**: neural network developers. 
+ * Primary API for building and training neural networks with TensorFlow. +### [ndarray](https://github.com/tensorflow/java-ndarray) + * **Intended audience**: any developer who needs a Java n-dimensional array implementation, whether or not they use it with TensorFlow. + * Generic utility library for n-dimensional data I/O operations. + * Used by TensorFlow but does not depend on TensorFlow. ## Communication diff --git a/docs/install.md b/docs/docs/install.md old mode 100644 new mode 100755 similarity index 85% rename from docs/install.md rename to docs/docs/install.md index 091bd1b4c01..2fe676e956a --- a/docs/install.md +++ b/docs/docs/install.md @@ -8,24 +8,30 @@ Kotlin, are frequently used in large and small enterprises all over the world, which makes TensorFlow Java a strategic choice for adopting machine learning at a large scale. -Caution: The TensorFlow Java API is *not* covered by the TensorFlow -[API stability guarantees](https://www.tensorflow.org/guide/versions). +Note: Starting from version 1.0.0, the TensorFlow Java project follows the +[TensorFlow API stability guarantees](https://www.tensorflow.org/guide/versions#api_stability). +However, as these bindings are downstream of the TensorFlow C API, users should +be aware that stability is subject to the evolution of the upstream TensorFlow core. 
## Requirements -TensorFlow Java runs on Java 8 and above, and supports out-of-the-box the +TensorFlow Java runs on Java 11 and above, and supports out-of-the-box the following platforms: -* Ubuntu 16.04 or higher; 64-bit, x86 -* macOS 10.12.6 (Sierra) or higher; 64-bit, x86 -* Windows 7 or higher; 64-bit, x86 +* Ubuntu 20.04 or higher; 64-bit, x86 +* Ubuntu 22.04 or higher; 64-bit, arm +* macOS 14 or higher; 64-bit, arm +* Windows 10 or higher; 64-bit, x86 -*Note: To use TensorFlow on Android, see -[TensorFlow Lite](https://tensorflow.org/lite)* +TensorFlow Java 1.0 series and earlier releases also have binaries for: + +* macOS 12 or higher; 64-bit, x86 + +*Note: To use TensorFlow on Android, see [LiteRT](https://tensorflow.org/lite)* ## Versions -TensorFlow Java has its own release cycle, independent from the +TensorFlow Java has its own release cycle, independent of the [TensorFlow runtime](https://github.com/tensorflow/tensorflow). Consequently, its version does not match the version of TensorFlow runtime it runs on. Consult the TensorFlow Java @@ -41,8 +47,7 @@ TensorFlow Java to your project. The easiest one is to add a dependency on the Core API and the native dependencies it requires to run on all supported platforms. -You can also select the `tensorflow-core-platform-gpu` extension instead, which -supports CUDA® on Linux and Windows platforms. +To include CUDA® support for Linux x86, select the `tensorflow-core-native:linux-x86_64-gpu` artifact. In addition, a separate dependency on the `tensorflow-framework` library can be added to benefit from a rich set of utilities for TensorFlow-based machine @@ -58,7 +63,7 @@ For example, org.tensorflow tensorflow-core-platform - 0.5.0 + 1.1.0 ``` @@ -101,7 +106,7 @@ snapshots repository in your `pom.xml`. 
org.tensorflow tensorflow-core-platform - 1.0.0-SNAPSHOT + 1.2.0-SNAPSHOT ``` @@ -118,7 +123,7 @@ repositories { } dependencies { - compile group: 'org.tensorflow', name: 'tensorflow-core-platform', version: '0.5.0' + compile group: 'org.tensorflow', name: 'tensorflow-core-platform', version: '1.0.0' } ``` @@ -164,7 +169,7 @@ add the TensorFlow dependency to the project's `pom.xml` file: org.tensorflow tensorflow-core-platform - 0.5.0 + 1.1.0 diff --git a/docs/docs/references.md b/docs/docs/references.md new file mode 100644 index 00000000000..524b23dc675 --- /dev/null +++ b/docs/docs/references.md @@ -0,0 +1,8 @@ +--- +hide: + - navigation + - toc + - title +--- +# + \ No newline at end of file diff --git a/docs/docs/stylesheets/extra.css b/docs/docs/stylesheets/extra.css new file mode 100644 index 00000000000..70aefe6843e --- /dev/null +++ b/docs/docs/stylesheets/extra.css @@ -0,0 +1,14 @@ +:root > * { + /*--md-primary-fg-color: #EE782F;*/ + /*--md-primary-fg-color--light: #455960;*/ + /*--md-primary-fg-color--dark: #90030C;*/ +} + +.md-typeset h1, .md-typeset h2 { + font-weight: 800; + letter-spacing: -.01em; +} + +.md-sidebar--primary { + display: none; +} \ No newline at end of file diff --git a/docs/legacy_tools/build_java_api_docs.py b/docs/legacy_tools/build_java_api_docs.py new file mode 100644 index 00000000000..77d3ba80f31 --- /dev/null +++ b/docs/legacy_tools/build_java_api_docs.py @@ -0,0 +1,132 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +###################################################################################################################### +# IMPORTANT: Files in legacy_tools are no longer used to generate the TensorFlow-Java API docs as there are unfixed issues +# when using DocLava outside of the Google environment. We are keeping these for reference in case they are useful later. +###################################################################################################################### + + +"""Generate TensorFlow Java reference docs for TensorFlow.org.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import pathlib +import shutil +import tempfile +import io +import requests +import zipfile +from git import Repo + +from absl import app +from absl import flags + +from tensorflow_docs.api_generator import gen_java + +FLAGS = flags.FLAGS + +NDARRAY_VERSION = 'v1.0.0' +JAVACPP_VERSION = '1.5.11' +PROTOBUF_VERSION = 'v3.21.9' + +# __file__ is the path to this file +TOOLS_DIR = pathlib.Path(__file__).resolve().parent +DOCS_DIR = TOOLS_DIR.parent +REPO_ROOT = DOCS_DIR.parent +DOC_OUTPUT_DIR = DOCS_DIR.joinpath("output") + +SECTION_LABELS = { + 'org.tensorflow': 'Core', + 'org.tensorflow.ndarray': 'NdArray', + 'org.tensorflow.framework': 'Framework', +} + +# These flags are required by infrastructure, not all of them are used. 
+flags.DEFINE_string('output_dir', f"{DOC_OUTPUT_DIR}", + ("Use this branch as the root version and don't" + ' create in version directory')) + +flags.DEFINE_string('site_path', 'api_docs/', + 'Path prefix in the _toc.yaml') + +flags.DEFINE_string('code_url_prefix', None, + '[UNUSED] The url prefix for links to code.') + +flags.DEFINE_bool( + 'search_hints', True, + '[UNUSED] Include metadata search hints in the generated files') + + +def checkout_repo(repo_url: str, target_dir_name: str, version: str): + local_repo_path = f"{REPO_ROOT}/{target_dir_name}" + if not pathlib.Path(local_repo_path).exists(): + local_repo = Repo.clone_from(repo_url, local_repo_path) + else: + local_repo = Repo(local_repo_path) + local_repo.remotes['origin'].fetch() + local_repo.git.checkout(version) + + +def overlay(from_root, to_root): + for from_path in pathlib.Path(from_root).rglob('*'): + relpath = from_path.relative_to(from_root) + to_path = to_root/relpath + if from_path.is_file(): + assert not to_path.exists() + shutil.copyfile(from_path, to_path) + else: + to_path.mkdir(exist_ok=True) + + +def main(unused_argv): + checkout_repo('https://github.com/tensorflow/java-ndarray', 'ndarray', NDARRAY_VERSION) + checkout_repo('https://github.com/bytedeco/javacpp', 'javacpp', JAVACPP_VERSION) + response = requests.get('https://repo1.maven.org/maven2/com/google/protobuf/protobuf-java/3.21.9/protobuf-java-3.21.9-sources.jar') + with zipfile.ZipFile(io.BytesIO(response.content)) as z: + z.extractall(f"{REPO_ROOT}/protobuf") + response = requests.get('https://repo1.maven.org/maven2/org/osgi/osgi.annotation/8.1.0/osgi.annotation-8.1.0-sources.jar') + with zipfile.ZipFile(io.BytesIO(response.content)) as z: + z.extractall(f"{REPO_ROOT}/osgi") + + merged_source = pathlib.Path(tempfile.mkdtemp()) + (merged_source / 'java/org').mkdir(parents=True) + shutil.copytree(REPO_ROOT/'tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/', merged_source/'java/org/tensorflow') + 
overlay(REPO_ROOT/'tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow', merged_source/'java/org/tensorflow') + overlay(REPO_ROOT/'tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow', merged_source/'java/org/tensorflow') + overlay(REPO_ROOT/'tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/', merged_source/'java/org/tensorflow/') + overlay(REPO_ROOT/'tensorflow-core/tensorflow-core-native/src/main/java/org/tensorflow/', merged_source/'java/org/tensorflow/') + shutil.copytree(REPO_ROOT/'tensorflow-framework/src/main/java/org/tensorflow/framework', merged_source/'java/org/tensorflow/framework') + shutil.copytree(REPO_ROOT/'ndarray/ndarray/src/main/java/org/tensorflow/ndarray', merged_source/'java/org/tensorflow/ndarray') + shutil.copytree(REPO_ROOT/'javacpp/src/main/java/org/bytedeco', merged_source/'java/org/bytedeco') + shutil.copytree(REPO_ROOT/'protobuf/com/', merged_source/'java/com') + shutil.copytree(REPO_ROOT/'osgi/org/osgi', merged_source/'java/org/osgi') + + gen_java.gen_java_docs( + package='org.tensorflow', + source_path=merged_source / 'java', + output_dir=pathlib.Path(FLAGS.output_dir), + site_path=pathlib.Path(FLAGS.site_path), + section_labels=SECTION_LABELS, + # Uncomment for local testing: + script_path=pathlib.Path(TOOLS_DIR, 'run-javadoc-for-tf-local.sh'), + ) + + +if __name__ == '__main__': + flags.mark_flags_as_required(['output_dir']) + app.run(main) diff --git a/docs/legacy_tools/requirements.txt b/docs/legacy_tools/requirements.txt new file mode 100644 index 00000000000..4435ca4d4a9 --- /dev/null +++ b/docs/legacy_tools/requirements.txt @@ -0,0 +1,8 @@ +###################################################################################################################### +# IMPORTANT: Files in legacy_tools are no longer used to generate the TensorFlow-Java API docs as there are unfixed issues +# when using DocLava outside of the Google environment. 
We are keeping these for reference in case they are useful later. +###################################################################################################################### + +GitPython +requests +tensorflow-docs \ No newline at end of file diff --git a/docs/legacy_tools/run-javadoc-for-tf-local.sh b/docs/legacy_tools/run-javadoc-for-tf-local.sh new file mode 100644 index 00000000000..97d59ddfd6e --- /dev/null +++ b/docs/legacy_tools/run-javadoc-for-tf-local.sh @@ -0,0 +1,104 @@ +#!/bin/bash + +###################################################################################################################### +# IMPORTANT: Files in legacy_tools are no longer used to generate the TensorFlow-Java API docs as there are unfixed issues +# when using DocLava outside of the Google environment. We are keeping these for reference in case they are useful later. +###################################################################################################################### + +set -ex + +export JAVA_HOME=/Library/Java/JavaVirtualMachines/zulu-11.jdk/Contents/Home # Or change to any JDK 11 home path + +# https://android.googlesource.com/platform/external/doclava/ +# There's a debian package: +# https://packages.debian.org/unstable/doclava-aosp +# Install with: +# +# $ sudo apt install doclava-aosp #v 6.0.1+r55-1+build1 +# +# https://unix.stackexchange.com/questions/594841/how-do-i-assign-a-value-to-a-bash-variable-iff-that-variable-is-null-unassigned +DOCLAVA_JAR=${DOCLAVA_JAR:-'tools/lib/doclava.jar'} # Build lib locally + + +# Install java clear silver templates with: +# +# $ sudo apt install libjsilver-aosp-java #v 6.0.1+r55-1+build1 +JSILVER_JAR=${JSILVER_JAR:-'tools/lib/jsilver.jar'} # Build lib locally + + +######### DELETE OUTPUT_DIR ################# + +# Empty the output directory in case a class has been deleted +rm -rf "${OUTPUT_DIR:?}"/* +############ RUN DOCLAVA ################### + +# $FEDERATED_DOCS is a space-separated string of url,file 
pairs. +read -a api_pairs <<< "${FEDERATED_DOCS}" +FEDERATED_PARAMS="" +for i in "${!api_pairs[@]}"; do + api_pair_str="${api_pairs[$i]}" # "url,api.txt" + read -a api_pair <<< "${api_pair_str//,/ }" + # Using the index as the API "name", build the federation params. Note that + # using 0 as an API name will evaluate to false and cause rendering bugs, + # so we preface with "api_". + FEDERATED_PARAMS+=" -federate api_${i} ${api_pair[0]}" + FEDERATED_PARAMS+=" -federationapi api_${i} ${api_pair[1]}" +done + +# To install javadoc, for example, use +# +# sudo apt install openjdk-11-jdk +# +# doclava doesn't work with openjdk-13 +# ``` +# javadoc: error - Class com.google.doclava.Doclava is not a valid doclet. +# Note: As of JDK 13, the com.sun.javadoc API is no longer supported. +# ``` +# It's used here: https://android.googlesource.com/platform/external/doclava/+/refs/heads/master/src/com/google/doclava/Doclava.java + +# Each package in $PACKAGE needs to prefaced with -subpackages, so do that. +SUBPACKAGES="" +read -r -a packages <<< "${PACKAGE}" +for pkg in "${packages[@]}"; do + SUBPACKAGES+=" -subpackages ${pkg}" +done +( # Capture the return code. it may be non-zero for minor errors. 
+ /Library/Java/JavaVirtualMachines/zulu-11.jdk/Contents/Home/bin/javadoc \ + -sourcepath "${SOURCE_PATH}" \ + -docletpath "${DOCLAVA_JAR}:${JSILVER_JAR}" \ + -doclet com.google.doclava.Doclava \ + -toroot "${SITE_PATH}"/ \ + -yaml _toc.yaml \ + -templatedir "${TEMPLATES}" \ + -public \ + -d "${OUTPUT_DIR}" \ + ${FEDERATED_PARAMS} \ + ${SUBPACKAGES} +) + + +mv "${OUTPUT_DIR}"/reference/* "${OUTPUT_DIR}" + +################################################################### +################### START OF POST-PROCESSING ###################### +################################################################### +rm "${OUTPUT_DIR}/navtree_data.js" || true +rm "${OUTPUT_DIR}/hierarchy.html" || true + +find ${OUTPUT_DIR} -name "*.html" | xargs sed -i '' "s|${SITE_PATH}/reference|${SITE_PATH}|g" +find ${OUTPUT_DIR} -name "*.yaml" | xargs sed -i '' "s|${SITE_PATH}/reference|${SITE_PATH}|g" +find ${OUTPUT_DIR} -name "*.html" | xargs sed -i '' "s|a href=\"reference/org/tensorflow|a href=\"${SITE_PATH}/org/tensorflow|g" +find ${OUTPUT_DIR} -name "*.html" | xargs sed -i '' "s|a href=\"reference/com/google|a href=\"${SITE_PATH}/com/google|g" + +JAVA_LANG=https://docs.oracle.com/javase/8/docs/api +find ${OUTPUT_DIR} -name "*.html" | xargs sed -i '' "s|a href=\"reference/java/lang|a href=\"${JAVA_LANG}/java/lang|g" + +find ${OUTPUT_DIR} -name "*.html" | xargs sed -i '' 's|
|
|g'
+
+rm ${OUTPUT_DIR}/timestamp.js || true
+rm ${OUTPUT_DIR}/lists.js || true
+rm ${OUTPUT_DIR}/index.html || true
+
+cp ${TEMPLATES}/screen.css ${OUTPUT_DIR}/
+
+
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
new file mode 100644
index 00000000000..8729bca5af5
--- /dev/null
+++ b/docs/mkdocs.yml
@@ -0,0 +1,49 @@
+site_name: ''
+site_url: https://tensorflow.org
+repo_url: https://github.com/tensorflow/java
+site_description: Documentation of TensorFlow Java API and tools.
+copyright: "© TensorFlow Authors 2025"
+
+theme:
+  name: material
+  logo: assets/tensorflow.svg
+  features:
+    - navigation.indexes
+    - navigation.instant
+    - navigation.sections
+    - navigation.tabs
+    - navigation.tabs.sticky
+    - toc.follow
+  palette:
+    # Palette toggle for automatic mode
+    - media: "(prefers-color-scheme)"
+      toggle:
+        icon: material/brightness-auto
+        name: Switch to light mode
+    # Palette toggle for light mode
+    - media: "(prefers-color-scheme: light)"
+      scheme: default
+      primary: white
+      accent: orange
+      toggle:
+        icon: material/brightness-7
+        name: Switch to dark mode
+    # Palette toggle for dark mode
+    - media: "(prefers-color-scheme: dark)"
+      scheme: slate
+      primary: black
+      accent: orange
+      toggle:
+        icon: material/brightness-4
+        name: Switch to system preference
+
+extra_css:
+  - stylesheets/extra.css
+
+nav:
+  - Home:
+      - index.md
+  - Install:
+      - install.md
+  - References:
+      - apidocs/index.html
diff --git a/pom.xml b/pom.xml
index f86e92bc69a..f7f90275778 100644
--- a/pom.xml
+++ b/pom.xml
@@ -7,7 +7,7 @@
 
   org.tensorflow
   tensorflow-java
-  1.0.0-SNAPSHOT
+  1.2.0-SNAPSHOT
   pom
 
   TensorFlow Java Parent
@@ -26,7 +26,7 @@
 
   
     https://github.com/tensorflow/java.git
-    git@github.com:tensorflow/java.git
+    scm:git@github.com:tensorflow/java.git
     scm:git:https://github.com/tensorflow/java.git
   
 
@@ -36,6 +36,8 @@
   
 
   
+    ${os.name}-${os.arch}
+
     UTF8
     11
     11
@@ -43,11 +45,11 @@
     5.10.0
     1.37
     2.7
-    2.10.0
+    2.25.0
     true
     true
     true
-    2.43.0
+    2.46.1
   
 
   
@@ -210,7 +212,7 @@
                  
                 -Xlint:all
                 -XDcompilePolicy=simple
-                -Xplugin:ErrorProne
+                
                 -J--add-opens=jdk.compiler/com.sun.tools.javac.code=ALL-UNNAMED
                 -J--add-opens=jdk.compiler/com.sun.tools.javac.comp=ALL-UNNAMED
 
@@ -327,6 +329,18 @@
         
       
     
+    
+      linux-arm64
+      
+        
+          linux
+          aarch64
+        
+        
+          !javacpp.platform.extension
+        
+      
+    
     
       macosx
       
@@ -532,16 +546,38 @@
       
       
         maven-javadoc-plugin
-        3.6.0
+        3.12.0
+      
+          ./docs/overview.md
+          
+          Copyright 2015, 2025 The TensorFlow Authors. All Rights Reserved.
+          
+              -Xmaxerrs
+              65536
+              -Xmaxwarns
+              65536
+          
+          false
+          256m
+          2048m
+          
+              https://tensorflow.github.io/java/javadoc-ndarray/v1.0.0/
+              https://protobuf.dev/reference/java/api-docs
+              https://bytedeco.org/javacpp/apidocs
+          
+      
         
+            
+                javadoc-site
+                
+                    javadoc
+                
+            
           
             attach-javadocs
             
               jar
             
-            
-              true
-            
           
         
       
diff --git a/release.sh b/release.sh
index 01f99386a71..acd1041d766 100755
--- a/release.sh
+++ b/release.sh
@@ -34,7 +34,7 @@ fi
 # To get a shell to poke around the maven artifacts with.
 if [[ -z "${CMD}" ]]
 then
-  CMD="mvn clean deploy -B -e --settings ./settings.xml -Pdeploying -Preleasing -DstagingRepositoryId=${STAGING_SEQ}"
+  CMD="mvn clean deploy -B -e --settings ./settings.xml -Pdeploying -Preleasing -DstagingRepositoryId=orgtensorflow-${STAGING_SEQ}"
 fi
 
 export GPG_TTY=$(tty)
diff --git a/tensorflow-core/pom.xml b/tensorflow-core/pom.xml
index 89b4b06a2d3..14d155dd901 100644
--- a/tensorflow-core/pom.xml
+++ b/tensorflow-core/pom.xml
@@ -22,7 +22,7 @@
   
     org.tensorflow
     tensorflow-java
-    1.0.0-SNAPSHOT
+    1.2.0-SNAPSHOT
   
   tensorflow-core
   pom
@@ -34,7 +34,6 @@
     tensorflow-core-native
     tensorflow-core-generator
     tensorflow-core-api
-    tensorflow-core-platform
   
 
   
@@ -60,8 +59,7 @@
     macosx-arm64${javacpp.platform.extension}
     macosx-x86_64${javacpp.platform.extension}
     windows-x86_64${javacpp.platform.extension}
-    1.5.10
-
+    1.5.12
   
 
   
@@ -71,601 +69,9 @@
         must have already been built, tested and deployed priorly from their respective platform.
       -->
       deploying
-      
-        ${os.name}-${os.arch}
-      
-    
-
-    
-      
-      javacpp-platform-host
-      
-        true
-        
-          javacpp.platform.host
-        
-      
-      
-        ${os.name}-${os.arch}
-        ${os.name}-${os.arch}
-        ${os.name}-${os.arch}
-        ${os.name}-${os.arch}
-        ${os.name}-${os.arch}
-        ${os.name}-${os.arch}
-        ${os.name}-${os.arch}
-        ${os.name}-${os.arch}${javacpp.platform.extension}
-        ${os.name}-${os.arch}${javacpp.platform.extension}
-        ${os.name}-${os.arch}${javacpp.platform.extension}
-        ${os.name}-${os.arch}${javacpp.platform.extension}
-        ${os.name}-${os.arch}${javacpp.platform.extension}
-        ${os.name}-${os.arch}${javacpp.platform.extension}
-      
-    
-
-    
-      javacpp-platform-custom
-      
-        
-          javacpp.platform
-        
-      
-      
-        ${javacpp.platform}
-        ${javacpp.platform}
-        ${javacpp.platform}
-        ${javacpp.platform}
-        ${javacpp.platform}
-        ${javacpp.platform}
-        ${javacpp.platform}
-        ${javacpp.platform}
-        ${javacpp.platform}
-        ${javacpp.platform}${javacpp.platform.extension}
-        ${javacpp.platform}${javacpp.platform.extension}
-        ${javacpp.platform}${javacpp.platform.extension}
-        ${javacpp.platform}${javacpp.platform.extension}
-        ${javacpp.platform}${javacpp.platform.extension}
-        ${javacpp.platform}${javacpp.platform.extension}
-        ${javacpp.platform}${javacpp.platform.extension}
-        ${javacpp.platform}${javacpp.platform.extension}
-        ${javacpp.platform}${javacpp.platform.extension}
-      
-    
-
-    
-      javacpp.platform.custom-true
-      
-        
-          javacpp.platform.custom
-        
-      
-      
-        
-        
-        
-        
-        
-        
-        
-        
-        
-        
-        
-        
-        
-        
-        
-        
-        
-        
-        
-      
-    
-
-    
-      javacpp-platform-none
-      
-        
-          javacpp.platform.none
-        
-      
-      
-        
-        
-        
-        
-        
-        
-        
-        
-        
-        
-        
-        
-        
-        
-        
-        
-        
-        
-        
-      
-    
-
-    
-      javacpp-platform-linux-armhf
-      
-        
-          javacpp.platform
-          linux-armhf
-        
-      
-      
-        ${javacpp.platform}
-        
-        
-        
-        
-        
-        
-        
-        
-        ${javacpp.platform}${javacpp.platform.extension}
-        
-        
-        
-        
-        
-        
-        
-        
-      
-    
-
-    
-      javacpp-platform-linux-arm64
-      
-        
-          javacpp.platform
-          linux-arm64
-        
-      
-      
-        
-        ${javacpp.platform}
-        
-        
-        
-        
-        
-        
-        
-        
-        ${javacpp.platform}${javacpp.platform.extension}
-        
-        
-        
-        
-        
-        
-        
-      
-    
-
-    
-      javacpp-platform-linux-x86_64
-      
-        
-          javacpp.platform
-          linux-x86_64
-        
-      
-      
-        
-        
-        
-        
-        ${javacpp.platform}
-        
-        
-        
-        
-        
-        
-        
-        
-        ${javacpp.platform}${javacpp.platform.extension}
-        
-        
-        
-        
-      
-    
-
-    
-      javacpp-platform-macosx-arm64
-      
-        
-          javacpp.platform
-          macosx-arm64
-        
-      
-      
-        
-        
-        
-        
-        
-        ${javacpp.platform}
-        
-        
-        
-        
-        
-        
-        
-        
-        ${javacpp.platform}${javacpp.platform.extension}
-        
-        
-        
-      
+      
+        tensorflow-core-platform
+      
     
-
-    
-      javacpp-platform-macosx-x86_64
-      
-        
-          javacpp.platform
-          macosx-x86_64
-        
-      
-      
-        
-        
-        
-        
-        
-        
-        ${javacpp.platform}
-        
-        
-        
-        
-        
-        
-        
-        
-        ${javacpp.platform}${javacpp.platform.extension}
-        
-        
-      
-    
-
-    
-      javacpp-platform-windows-x86_64
-      
-        
-          javacpp.platform
-          windows-x86_64
-        
-      
-      
-        
-        
-        
-        
-        
-        
-        
-        
-        ${javacpp.platform}
-        
-        
-        
-        
-        
-        
-        
-        
-        ${javacpp.platform}${javacpp.platform.extension}
-      
-    
-
-    
-    
-      javacpp.platform.linux-armhf-true
-      
-        
-          javacpp.platform.linux-armhf
-        
-      
-      
-        linux-armhf
-        linux-armhf${javacpp.platform.extension}
-      
-    
-
-    
-      javacpp.platform.linux-arm64-true
-      
-        
-          javacpp.platform.linux-arm64
-        
-      
-      
-        linux-arm64
-        linux-arm64${javacpp.platform.extension}
-      
-    
-
-    
-      javacpp.platform.linux-x86_64-true
-      
-        
-          javacpp.platform.linux-x86_64
-        
-      
-      
-        linux-x86_64
-        linux-x86_64${javacpp.platform.extension}
-      
-    
-
-    
-      javacpp.platform.macosx-arm64-true
-      
-        
-          javacpp.platform.macosx-arm64
-        
-      
-      
-        macosx-arm64
-        macosx-arm64${javacpp.platform.extension}
-      
-    
-
-    
-      javacpp.platform.macosx-x86_64-true
-      
-        
-          javacpp.platform.macosx-x86_64
-        
-      
-      
-        macosx-x86_64
-        macosx-x86_64${javacpp.platform.extension}
-      
-    
-
-    
-      javacpp.platform.windows-x86_64-true
-      
-        
-          javacpp.platform.windows-x86_64
-        
-      
-      
-        windows-x86_64
-        windows-x86_64${javacpp.platform.extension}
-      
-    
-
-    
-      javacpp.platform.custom-linux-arm
-      
-        
-          javacpp.platform.host
-        
-        linuxarm
-      
-      
-        linux-armhf
-        linux-armhf${javacpp.platform.extension}
-      
-    
-
-    
-      javacpp.platform.custom-linux-armhf
-      
-        
-          javacpp.platform.host
-        
-        linuxarmhf
-      
-      
-        linux-armhf
-        linux-armhf${javacpp.platform.extension}
-      
-    
-
-    
-      javacpp.platform.custom-linux-aarch64
-      
-        
-          javacpp.platform.host
-        
-        linuxaarch64
-      
-      
-        linux-arm64
-        linux-arm64${javacpp.platform.extension}
-      
-    
-
-    
-      javacpp.platform.custom-linux-armv8
-      
-        
-          javacpp.platform.host
-        
-        linuxarmv8
-      
-      
-        linux-arm64
-        linux-arm64${javacpp.platform.extension}
-      
-    
-
-    
-      javacpp.platform.custom-linux-arm64
-      
-        
-          javacpp.platform.host
-        
-        linuxarm64
-      
-      
-        linux-arm64
-        linux-arm64${javacpp.platform.extension}
-      
-    
-
-    
-      javacpp.platform.custom-linux-amd64
-      
-        
-          javacpp.platform.host
-        
-        linuxamd64
-      
-      
-        linux-x86_64
-        linux-x86_64${javacpp.platform.extension}
-      
-    
-
-    
-      javacpp.platform.custom-linux-x86-64
-      
-        
-          javacpp.platform.host
-        
-        linuxx86-64
-      
-      
-        linux-x86_64
-        linux-x86_64${javacpp.platform.extension}
-      
-    
-
-    
-      javacpp.platform.custom-linux-x86_64
-      
-        
-          javacpp.platform.host
-        
-        linuxx86_64
-      
-      
-        linux-x86_64
-        linux-x86_64${javacpp.platform.extension}
-      
-    
-
-    
-      javacpp.platform.custom-macosx-amd64
-      
-        
-          javacpp.platform.host
-        
-        mac os xamd64
-      
-      
-        macosx-x86_64
-        macosx-x86_64${javacpp.platform.extension}
-      
-    
-
-    
-      javacpp.platform.custom-macosx-x86-64
-      
-        
-          javacpp.platform.host
-        
-        mac os xx86-64
-      
-      
-        macosx-x86_64
-        macosx-x86_64${javacpp.platform.extension}
-      
-    
-
-    
-      javacpp.platform.custom-macosx-aarch64
-      
-        
-          javacpp.platform.host
-        
-        mac os xaarch64
-      
-      
-        macosx-arm64
-        macosx-arm64${javacpp.platform.extension}
-      
-    
-
-    
-      javacpp.platform.custom-macosx-x86_64
-      
-        
-          javacpp.platform.host
-        
-        mac os xx86_64
-      
-      
-        macosx-x86_64
-        macosx-x86_64${javacpp.platform.extension}
-      
-    
-
-    
-      javacpp.platform.custom-windows-amd64
-      
-        
-          javacpp.platform.host
-        
-        windowsamd64
-      
-      
-        windows-x86_64
-        windows-x86_64${javacpp.platform.extension}
-      
-    
-
-    
-      javacpp.platform.custom-windows-x86-64
-      
-        
-          javacpp.platform.host
-        
-        windowsx86-64
-      
-      
-        windows-x86_64
-        windows-x86_64${javacpp.platform.extension}
-      
-    
-
-    
-      javacpp.platform.custom-windows-x86_64
-      
-        
-          javacpp.platform.host
-        
-        windowsx86_64
-      
-      
-        windows-x86_64
-        windows-x86_64${javacpp.platform.extension}
-      
-    
-
   
-
 
-
diff --git a/tensorflow-core/tensorflow-core-api/pom.xml b/tensorflow-core/tensorflow-core-api/pom.xml
index 1d43bd43454..59e1703d355 100644
--- a/tensorflow-core/tensorflow-core-api/pom.xml
+++ b/tensorflow-core/tensorflow-core-api/pom.xml
@@ -6,7 +6,7 @@
   
     org.tensorflow
     tensorflow-core
-    1.0.0-SNAPSHOT
+    1.2.0-SNAPSHOT
   
   tensorflow-core-api
   jar
@@ -15,7 +15,7 @@
   Platform-dependent native code and pure-Java code for the TensorFlow machine intelligence library.
 
   
-    1.0.0-rc.1
+    1.0.0
     1.1.5
     false
     ${project.build.directory}/tf-text-download/
@@ -220,24 +220,6 @@
         
       
 
-      
-        maven-javadoc-plugin
-        3.6.0
-        
-          
-            attach-javadocs
-            
-              jar
-            
-            
-              false
-              256m
-              2048m
-            
-          
-        
-      
-
       
         org.codehaus.mojo
         exec-maven-plugin
diff --git a/tensorflow-core/tensorflow-core-api/scripts/test_download.sh b/tensorflow-core/tensorflow-core-api/scripts/test_download.sh
index 146868f26d6..5d1c2988d7e 100755
--- a/tensorflow-core/tensorflow-core-api/scripts/test_download.sh
+++ b/tensorflow-core/tensorflow-core-api/scripts/test_download.sh
@@ -5,10 +5,13 @@ DOWNLOAD_FOLDER="$1"
 
 case ${PLATFORM:-} in
   'linux-x86_64')
-    TEXT_WHEEL_URL='https://files.pythonhosted.org/packages/20/a0/bdbf2a11141f1c93e572364d13c42537cfe811b747a0bbb58fdd904f3960/tensorflow_text-2.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl'
+    TEXT_WHEEL_URL='https://files.pythonhosted.org/packages/f3/73/3a906feb0d71d9353c6fb2363d4052856cc6eff5a78a097b1a6002d4e908/tensorflow_text-2.18.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl'
     ;;
-  'macosx-x86_64')
-    TEXT_WHEEL_URL='https://files.pythonhosted.org/packages/8a/fe/a2f19d3d3ab834c3fa1007c970b0b86573beb929c86ca6c85cd13e86e4b2/tensorflow_text-2.15.0-cp311-cp311-macosx_10_9_x86_64.whl'
+  'linux-arm64')
+    TEXT_WHEEL_URL='https://files.pythonhosted.org/packages/8a/9a/ebba9f6274f8b51e5fe1ac2411b8b6bf680a32d10bd6e9c54be1faeec062/tensorflow_text-2.18.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl'
+    ;;
+  'macosx-arm64')
+    TEXT_WHEEL_URL='https://files.pythonhosted.org/packages/18/b6/8ad233edb0732847db1da538cea941dcccc42f59304ff6fb449676e6dd5a/tensorflow_text-2.18.1-cp311-cp311-macosx_11_0_arm64.whl'
     ;;
   *)
     echo "TensorFlow Text distribution for ${PLATFORM} is not supported for download"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Abort.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Abort.pbtxt
index 58448c2d17b..7d90f6d9fc7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Abort.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Abort.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Abort"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Abs.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Abs.pbtxt
index ece45cf73f3..5ae7934e3cf 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Abs.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Abs.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Abs"
   endpoint {
     name: "math.Abs"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AccumulateNV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AccumulateNV2.pbtxt
index 0c7a080c115..ae2d6e0c7fd 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AccumulateNV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AccumulateNV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AccumulateNV2"
   endpoint {
     name: "math.AccumulateN"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AccumulatorApplyGradient.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AccumulatorApplyGradient.pbtxt
index 49b7acad7d8..ecf18bfde4d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AccumulatorApplyGradient.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AccumulatorApplyGradient.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AccumulatorApplyGradient"
   endpoint {
     name: "train.AccumulatorApplyGradient"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AccumulatorNumAccumulated.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AccumulatorNumAccumulated.pbtxt
index 1c42e819bf4..c9f5db313ee 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AccumulatorNumAccumulated.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AccumulatorNumAccumulated.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AccumulatorNumAccumulated"
   endpoint {
     name: "train.AccumulatorNumAccumulated"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AccumulatorSetGlobalStep.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AccumulatorSetGlobalStep.pbtxt
index ca85302cdb4..53dbca3a28a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AccumulatorSetGlobalStep.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AccumulatorSetGlobalStep.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AccumulatorSetGlobalStep"
   endpoint {
     name: "train.AccumulatorSetGlobalStep"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AccumulatorTakeGradient.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AccumulatorTakeGradient.pbtxt
index 4883802c637..d8482bfef55 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AccumulatorTakeGradient.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AccumulatorTakeGradient.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AccumulatorTakeGradient"
   endpoint {
     name: "train.AccumulatorTakeGradient"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Acos.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Acos.pbtxt
index 847986b429d..d730005b322 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Acos.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Acos.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Acos"
   endpoint {
     name: "math.Acos"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Acosh.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Acosh.pbtxt
index 76d8f5fad05..7f880491eae 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Acosh.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Acosh.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Acosh"
   endpoint {
     name: "math.Acosh"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Add.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Add.pbtxt
index 4f78ccc9ea6..b213eb8dd32 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Add.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Add.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Add"
   endpoint {
     name: "math.Add"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AddManySparseToTensorsMap.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AddManySparseToTensorsMap.pbtxt
index e009ba19d34..8dcebf4c82b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AddManySparseToTensorsMap.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AddManySparseToTensorsMap.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AddManySparseToTensorsMap"
   endpoint {
     name: "sparse.AddManySparseToTensorsMap"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AddN.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AddN.pbtxt
index 20d469ae731..8807e161276 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AddN.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AddN.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AddN"
   endpoint {
     name: "math.AddN"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AddSparseToTensorsMap.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AddSparseToTensorsMap.pbtxt
index 0bb20186de3..d46dc06cd51 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AddSparseToTensorsMap.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AddSparseToTensorsMap.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AddSparseToTensorsMap"
   endpoint {
     name: "sparse.AddSparseToTensorsMap"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AdjustContrastv2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AdjustContrastv2.pbtxt
index 81f565c1d59..bbf539a05de 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AdjustContrastv2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AdjustContrastv2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AdjustContrastv2"
   endpoint {
     name: "image.AdjustContrast"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AdjustHue.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AdjustHue.pbtxt
index 0847cad4031..9cfca205fb5 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AdjustHue.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AdjustHue.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AdjustHue"
   endpoint {
     name: "image.AdjustHue"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AdjustSaturation.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AdjustSaturation.pbtxt
index d685636eb12..679b1d48ab9 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AdjustSaturation.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AdjustSaturation.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AdjustSaturation"
   endpoint {
     name: "image.AdjustSaturation"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_All.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_All.pbtxt
index a6459c56b71..89ab8929419 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_All.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_All.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "All"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AllCandidateSampler.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AllCandidateSampler.pbtxt
index 607c208a460..2a260b630af 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AllCandidateSampler.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AllCandidateSampler.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AllCandidateSampler"
   endpoint {
     name: "random.AllCandidateSampler"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AllToAll.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AllToAll.pbtxt
index fd03c0df632..1ce77f7d74a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AllToAll.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AllToAll.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AllToAll"
   endpoint {
     name: "tpu.AllToAll"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Angle.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Angle.pbtxt
index a92ccf357db..fd3770221f8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Angle.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Angle.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Angle"
   endpoint {
     name: "math.Angle"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousHashTable.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousHashTable.pbtxt
index 5508431f163..5b60d123270 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousHashTable.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousHashTable.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AnonymousHashTable"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousMemoryCache.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousMemoryCache.pbtxt
index 030b1a92bfc..fcde7026956 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousMemoryCache.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousMemoryCache.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AnonymousMemoryCache"
   endpoint {
     name: "data.AnonymousMemoryCache"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousMutableDenseHashTable.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousMutableDenseHashTable.pbtxt
index 1774d42d57f..fe75322c561 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousMutableDenseHashTable.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousMutableDenseHashTable.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AnonymousMutableDenseHashTable"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousMutableHashTable.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousMutableHashTable.pbtxt
index 80aec142155..69f531da488 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousMutableHashTable.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousMutableHashTable.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AnonymousMutableHashTable"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousMutableHashTableOfTensors.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousMutableHashTableOfTensors.pbtxt
index 069a81cbac7..409abc6f6d0 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousMutableHashTableOfTensors.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousMutableHashTableOfTensors.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AnonymousMutableHashTableOfTensors"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousRandomSeedGenerator.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousRandomSeedGenerator.pbtxt
index 50621a2ac4c..4c3c3cd98a6 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousRandomSeedGenerator.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousRandomSeedGenerator.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AnonymousRandomSeedGenerator"
   endpoint {
     name: "random.AnonymousRandomSeedGenerator"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousSeedGenerator.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousSeedGenerator.pbtxt
index a3fb16094ef..cf4c8f4f339 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousSeedGenerator.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AnonymousSeedGenerator.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AnonymousSeedGenerator"
   endpoint {
     name: "random.AnonymousSeedGenerator"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Any.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Any.pbtxt
index 20b36eda3f8..c96baa7525d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Any.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Any.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Any"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAdaMax.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAdaMax.pbtxt
index 583f164e06c..b552249c876 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAdaMax.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAdaMax.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ApplyAdaMax"
   endpoint {
     name: "train.ApplyAdaMax"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAdadelta.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAdadelta.pbtxt
index e672a8ef03b..e16875bc976 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAdadelta.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAdadelta.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ApplyAdadelta"
   endpoint {
     name: "train.ApplyAdadelta"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAdagrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAdagrad.pbtxt
index 980c57c5fec..3de2b67d1b5 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAdagrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAdagrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ApplyAdagrad"
   endpoint {
     name: "train.ApplyAdagrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAdagradDA.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAdagradDA.pbtxt
index 815df985ef9..e51c4bd8155 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAdagradDA.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAdagradDA.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ApplyAdagradDA"
   endpoint {
     name: "train.ApplyAdagradDa"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAdagradV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAdagradV2.pbtxt
index 34cfdb57ab8..cfa90ac82c2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAdagradV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAdagradV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ApplyAdagradV2"
   endpoint {
     name: "train.ApplyAdagradV2"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAdam.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAdam.pbtxt
index 56461b1d3d5..85ff2d1bad3 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAdam.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAdam.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ApplyAdam"
   endpoint {
     name: "train.ApplyAdam"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAddSign.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAddSign.pbtxt
index b54ff6eca44..21a5f40a078 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAddSign.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyAddSign.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ApplyAddSign"
   endpoint {
     name: "train.ApplyAddSign"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyCenteredRMSProp.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyCenteredRMSProp.pbtxt
index 1b831bca436..ec1b6380779 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyCenteredRMSProp.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyCenteredRMSProp.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ApplyCenteredRMSProp"
   endpoint {
     name: "train.ApplyCenteredRmsProp"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyFtrlV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyFtrlV2.pbtxt
index da0fc8fcbf7..08a86347aef 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyFtrlV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyFtrlV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ApplyFtrlV2"
   endpoint {
     name: "train.ApplyFtrl"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyGradientDescent.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyGradientDescent.pbtxt
index 1fa569ed329..335095ef520 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyGradientDescent.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyGradientDescent.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ApplyGradientDescent"
   endpoint {
     name: "train.ApplyGradientDescent"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyMomentum.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyMomentum.pbtxt
index 96c21199f09..4a7079316b4 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyMomentum.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyMomentum.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ApplyMomentum"
   endpoint {
     name: "train.ApplyMomentum"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyPowerSign.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyPowerSign.pbtxt
index e5c22347556..0a816803266 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyPowerSign.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyPowerSign.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ApplyPowerSign"
   endpoint {
     name: "train.ApplyPowerSign"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyProximalAdagrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyProximalAdagrad.pbtxt
index a52d8c3591c..774d00e707c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyProximalAdagrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyProximalAdagrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ApplyProximalAdagrad"
   endpoint {
     name: "train.ApplyProximalAdagrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyProximalGradientDescent.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyProximalGradientDescent.pbtxt
index 74ea29cf888..3458df77763 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyProximalGradientDescent.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyProximalGradientDescent.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ApplyProximalGradientDescent"
   endpoint {
     name: "train.ApplyProximalGradientDescent"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyRMSProp.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyRMSProp.pbtxt
index 90171ccc759..259b5512e16 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyRMSProp.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApplyRMSProp.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ApplyRMSProp"
   endpoint {
     name: "train.ApplyRmsProp"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApproxTopK.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApproxTopK.pbtxt
index da26057b420..51b0cc7c01f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApproxTopK.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApproxTopK.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ApproxTopK"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApproximateEqual.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApproximateEqual.pbtxt
index 029dc6d2930..d392987d60a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ApproximateEqual.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ApproximateEqual.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ApproximateEqual"
   endpoint {
     name: "math.ApproximateEqual"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ArgMax.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ArgMax.pbtxt
index f9effd49c4a..5627186359b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ArgMax.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ArgMax.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ArgMax"
   endpoint {
     name: "math.ArgMax"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ArgMin.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ArgMin.pbtxt
index 5ff04c0d1ab..e01e5f2e72b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ArgMin.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ArgMin.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ArgMin"
   endpoint {
     name: "math.ArgMin"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AsString.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AsString.pbtxt
index e8c875ea814..a020c7aef85 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AsString.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AsString.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AsString"
   endpoint {
     name: "dtypes.AsString"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Asin.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Asin.pbtxt
index 8ffc8e3e570..7b71c08eede 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Asin.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Asin.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Asin"
   endpoint {
     name: "math.Asin"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Asinh.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Asinh.pbtxt
index e3b30dd5125..2a371a10071 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Asinh.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Asinh.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Asinh"
   endpoint {
     name: "math.Asinh"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Assert.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Assert.pbtxt
index a9e107b4780..44d1ce33dd7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Assert.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Assert.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Assert"
   endpoint {
     name: "AssertThat"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AssertPrevDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AssertPrevDataset.pbtxt
index 610d5f8216e..246fdd58a4a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AssertPrevDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AssertPrevDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AssertPrevDataset"
   endpoint {
     name: "data.AssertPrevDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Assign.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Assign.pbtxt
index 15d778f61e8..51c43e54d2e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Assign.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Assign.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Assign"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AssignAdd.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AssignAdd.pbtxt
index a4118b64afd..9f29218e945 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AssignAdd.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AssignAdd.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AssignAdd"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AssignAddVariableOp.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AssignAddVariableOp.pbtxt
index 05fecb191bf..f724f706878 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AssignAddVariableOp.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AssignAddVariableOp.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AssignAddVariableOp"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AssignSub.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AssignSub.pbtxt
index aaf9246a6ac..a492c335154 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AssignSub.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AssignSub.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AssignSub"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AssignSubVariableOp.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AssignSubVariableOp.pbtxt
index 6e8791aed2d..768f4c47169 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AssignSubVariableOp.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AssignSubVariableOp.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AssignSubVariableOp"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AssignVariableOp.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AssignVariableOp.pbtxt
index 980e6968269..9e61072ca68 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AssignVariableOp.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AssignVariableOp.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AssignVariableOp"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AssignVariableXlaConcatND.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AssignVariableXlaConcatND.pbtxt
index f45351cddeb..9bf3d7734a6 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AssignVariableXlaConcatND.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AssignVariableXlaConcatND.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AssignVariableXlaConcatND"
   endpoint {
     name: "xla.AssignVariableConcatND"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Atan.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Atan.pbtxt
index e51aee9abc4..bb00076b52d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Atan.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Atan.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Atan"
   endpoint {
     name: "math.Atan"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Atan2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Atan2.pbtxt
index 302b05f9dce..f313a44b032 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Atan2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Atan2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Atan2"
   endpoint {
     name: "math.Atan2"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Atanh.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Atanh.pbtxt
index b9c4a411544..59e98471ce1 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Atanh.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Atanh.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Atanh"
   endpoint {
     name: "math.Atanh"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AudioSpectrogram.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AudioSpectrogram.pbtxt
index bd8f3a5e335..8731927d50c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AudioSpectrogram.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AudioSpectrogram.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AudioSpectrogram"
   endpoint {
     name: "audio.AudioSpectrogram"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AudioSummaryV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AudioSummaryV2.pbtxt
index e4eda8b09ab..954dbf9bb50 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AudioSummaryV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AudioSummaryV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AudioSummaryV2"
   endpoint {
     name: "summary.AudioSummary"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AvgPool.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AvgPool.pbtxt
index 10d87802f0d..970557d9c96 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AvgPool.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AvgPool.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AvgPool"
   endpoint {
     name: "nn.AvgPool"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AvgPool3D.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AvgPool3D.pbtxt
index 1ae2794f48b..be8667cf31c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AvgPool3D.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AvgPool3D.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AvgPool3D"
   endpoint {
     name: "nn.AvgPool3d"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AvgPool3DGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AvgPool3DGrad.pbtxt
index 09aba78ca20..6bc2df28667 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AvgPool3DGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AvgPool3DGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AvgPool3DGrad"
   endpoint {
     name: "nn.AvgPool3dGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_AvgPoolGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_AvgPoolGrad.pbtxt
index fc8fec89b99..097ba7213f1 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_AvgPoolGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_AvgPoolGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "AvgPoolGrad"
   endpoint {
     name: "nn.AvgPoolGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BandedTriangularSolve.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BandedTriangularSolve.pbtxt
index 9be002004ce..9cf217624c6 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BandedTriangularSolve.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BandedTriangularSolve.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BandedTriangularSolve"
   endpoint {
     name: "linalg.BandedTriangularSolve"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Barrier.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Barrier.pbtxt
index 6e282ca7b39..7aada11ec00 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Barrier.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Barrier.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Barrier"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BarrierClose.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BarrierClose.pbtxt
index 0307318763b..75d923401c4 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BarrierClose.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BarrierClose.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BarrierClose"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BarrierIncompleteSize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BarrierIncompleteSize.pbtxt
index a2fed2a43de..53729fe5652 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BarrierIncompleteSize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BarrierIncompleteSize.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BarrierIncompleteSize"
   out_arg {
     name: "size"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BarrierInsertMany.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BarrierInsertMany.pbtxt
index 32e29f00158..163cfbeae5b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BarrierInsertMany.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BarrierInsertMany.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BarrierInsertMany"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BarrierReadySize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BarrierReadySize.pbtxt
index 0f768476610..f648bb15560 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BarrierReadySize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BarrierReadySize.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BarrierReadySize"
   out_arg {
     name: "size"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BarrierTakeMany.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BarrierTakeMany.pbtxt
index 21f08878c6d..5c6508a6963 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BarrierTakeMany.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BarrierTakeMany.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BarrierTakeMany"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchCholesky.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchCholesky.pbtxt
index 15048109fd6..c1cdb6b892e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchCholesky.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchCholesky.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchCholesky"
   endpoint {
     name: "linalg.BatchCholesky"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchCholeskyGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchCholeskyGrad.pbtxt
index eb0e2c6bc83..c8e9b4060e7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchCholeskyGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchCholeskyGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchCholeskyGrad"
   endpoint {
     name: "linalg.BatchCholeskyGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchFFT.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchFFT.pbtxt
index 4dda7c1fb61..cf02316b08c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchFFT.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchFFT.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchFFT"
   endpoint {
     name: "signal.BatchFft"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchFFT2D.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchFFT2D.pbtxt
index e11860138a2..4b09c73a82b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchFFT2D.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchFFT2D.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchFFT2D"
   endpoint {
     name: "signal.BatchFft2d"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchFFT3D.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchFFT3D.pbtxt
index 3be0b516d0e..0b4cdfac071 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchFFT3D.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchFFT3D.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchFFT3D"
   endpoint {
     name: "signal.BatchFft3d"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchFunction.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchFunction.pbtxt
index 8789dc6acb6..2160e9f7b8a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchFunction.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchFunction.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchFunction"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchIFFT.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchIFFT.pbtxt
index de37ada148a..491d21ad4c4 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchIFFT.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchIFFT.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchIFFT"
   endpoint {
     name: "signal.BatchIfft"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchIFFT2D.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchIFFT2D.pbtxt
index 4ae7fb4cb0a..61a773b3f76 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchIFFT2D.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchIFFT2D.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchIFFT2D"
   endpoint {
     name: "signal.BatchIfft2d"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchIFFT3D.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchIFFT3D.pbtxt
index 0ecb52714b5..6111f4c6006 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchIFFT3D.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchIFFT3D.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchIFFT3D"
   endpoint {
     name: "signal.BatchIfft3d"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatMulV3.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatMulV3.pbtxt
index 463513824d1..8a70e8e6e55 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatMulV3.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatMulV3.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchMatMulV3"
   endpoint {
     name: "train.BatchMatMul"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixBandPart.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixBandPart.pbtxt
index de989c6d527..af80b346df8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixBandPart.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixBandPart.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchMatrixBandPart"
   endpoint {
     name: "linalg.BatchMatrixBandPart"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixDeterminant.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixDeterminant.pbtxt
index a45fe25d10c..ac3c9b2a5ec 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixDeterminant.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixDeterminant.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchMatrixDeterminant"
   endpoint {
     name: "linalg.BatchMatrixDeterminant"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixDiag.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixDiag.pbtxt
index d85d76f02f0..c30ccfb3e28 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixDiag.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixDiag.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchMatrixDiag"
   endpoint {
     name: "linalg.BatchMatrixDiag"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixDiagPart.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixDiagPart.pbtxt
index 4b5350b11ee..cf215430e8e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixDiagPart.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixDiagPart.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchMatrixDiagPart"
   endpoint {
     name: "linalg.BatchMatrixDiagPart"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixInverse.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixInverse.pbtxt
index f40ea50d4bc..113f9e268d7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixInverse.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixInverse.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchMatrixInverse"
   endpoint {
     name: "linalg.BatchMatrixInverse"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixSetDiag.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixSetDiag.pbtxt
index ac4cd6889b6..4d402f61466 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixSetDiag.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixSetDiag.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchMatrixSetDiag"
   endpoint {
     name: "linalg.BatchMatrixSetDiag"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixSolve.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixSolve.pbtxt
index 97435acb4e4..2b5a9c70205 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixSolve.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixSolve.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchMatrixSolve"
   endpoint {
     name: "linalg.BatchMatrixSolve"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixSolveLs.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixSolveLs.pbtxt
index aee0b4add35..b95a4b7f1aa 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixSolveLs.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixSolveLs.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchMatrixSolveLs"
   endpoint {
     name: "linalg.BatchMatrixSolveLs"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixTriangularSolve.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixTriangularSolve.pbtxt
index 554eff15747..39f614c58a2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixTriangularSolve.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchMatrixTriangularSolve.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchMatrixTriangularSolve"
   endpoint {
     name: "linalg.BatchMatrixTriangularSolve"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchNormWithGlobalNormalization.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchNormWithGlobalNormalization.pbtxt
index 8285ac284d8..0b8ed84a609 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchNormWithGlobalNormalization.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchNormWithGlobalNormalization.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchNormWithGlobalNormalization"
   endpoint {
     name: "nn.BatchNormWithGlobalNormalization"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchNormWithGlobalNormalizationGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchNormWithGlobalNormalizationGrad.pbtxt
index 7b18bf52acc..4aa3b421147 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchNormWithGlobalNormalizationGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchNormWithGlobalNormalizationGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchNormWithGlobalNormalizationGrad"
   endpoint {
     name: "nn.BatchNormWithGlobalNormalizationGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchSelfAdjointEigV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchSelfAdjointEigV2.pbtxt
index 9c973443902..4137098cf32 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchSelfAdjointEigV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchSelfAdjointEigV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchSelfAdjointEigV2"
   endpoint {
     name: "linalg.BatchSelfAdjointEig"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchSvd.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchSvd.pbtxt
index 8696359df8d..73f619b157c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchSvd.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchSvd.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchSvd"
   endpoint {
     name: "linalg.BatchSvd"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchToSpace.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchToSpace.pbtxt
index affbc519e51..2cd926bf567 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchToSpace.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchToSpace.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchToSpace"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchToSpaceND.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchToSpaceND.pbtxt
index 6c7d2fbdb9f..93d4335ac31 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchToSpaceND.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BatchToSpaceND.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BatchToSpaceND"
   endpoint {
     name: "BatchToSpaceNd"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselI0.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselI0.pbtxt
index 71b2007c630..88301e94ba7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselI0.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselI0.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BesselI0"
   endpoint {
     name: "math.BesselI0"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselI0e.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselI0e.pbtxt
index 84eb3b5e71d..f80adf8b7e6 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselI0e.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselI0e.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BesselI0e"
   endpoint {
     name: "math.BesselI0e"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselI1.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselI1.pbtxt
index 9bb74686ef3..bbba9f7549f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselI1.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselI1.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BesselI1"
   endpoint {
     name: "math.BesselI1"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselI1e.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselI1e.pbtxt
index 43f9113b0bb..e91b37684b8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselI1e.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselI1e.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BesselI1e"
   endpoint {
     name: "math.BesselI1e"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselJ0.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselJ0.pbtxt
index c9e7d5fad2e..1898e526094 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselJ0.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselJ0.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BesselJ0"
   endpoint {
     name: "math.special.BesselJ0"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselJ1.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselJ1.pbtxt
index 39f7cc0513c..cbe95c525cc 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselJ1.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselJ1.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BesselJ1"
   endpoint {
     name: "math.special.BesselJ1"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselK0.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselK0.pbtxt
index 670d0ce8ff3..ba380554645 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselK0.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselK0.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BesselK0"
   endpoint {
     name: "math.special.BesselK0"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselK0e.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselK0e.pbtxt
index 3cfac3d9c1b..09659504093 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselK0e.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselK0e.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BesselK0e"
   endpoint {
     name: "math.special.BesselK0e"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselK1.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselK1.pbtxt
index 6672d29a76d..91c3f998864 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselK1.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselK1.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BesselK1"
   endpoint {
     name: "math.special.BesselK1"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselK1e.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselK1e.pbtxt
index 64c89fc43d4..334c1025b5f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselK1e.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselK1e.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BesselK1e"
   endpoint {
     name: "math.special.BesselK1e"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselY0.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselY0.pbtxt
index d55b57a5ccc..a813593994b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselY0.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselY0.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BesselY0"
   endpoint {
     name: "math.special.BesselY0"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselY1.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselY1.pbtxt
index 76b7c0afff8..cb7a004e1a2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselY1.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BesselY1.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BesselY1"
   endpoint {
     name: "math.special.BesselY1"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Betainc.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Betainc.pbtxt
index 6e9956d9ec7..1931537fa76 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Betainc.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Betainc.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Betainc"
   endpoint {
     name: "math.Betainc"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BiasAdd.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BiasAdd.pbtxt
index eb3be23bd9a..fa509206f83 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BiasAdd.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BiasAdd.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BiasAdd"
   endpoint {
     name: "nn.BiasAdd"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BiasAddGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BiasAddGrad.pbtxt
index 4e040bf6df8..f36f4d41ca5 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BiasAddGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BiasAddGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BiasAddGrad"
   endpoint {
     name: "nn.BiasAddGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BigQueryReader.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BigQueryReader.pbtxt
index 5b6e11687a2..b98f8304793 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BigQueryReader.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BigQueryReader.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BigQueryReader"
   endpoint {
     name: "io.BigQueryReader"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Bincount.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Bincount.pbtxt
index b894fd6ec5e..d16999a510b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Bincount.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Bincount.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Bincount"
   endpoint {
     name: "math.Bincount"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Bitcast.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Bitcast.pbtxt
index 9d2db26851d..0b55c90620a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Bitcast.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Bitcast.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Bitcast"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BitwiseAnd.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BitwiseAnd.pbtxt
index db5fada2461..0b791ac5dda 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BitwiseAnd.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BitwiseAnd.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BitwiseAnd"
   endpoint {
     name: "bitwise.BitwiseAnd"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BitwiseOr.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BitwiseOr.pbtxt
index 8f9d1bc2fe4..45796b0bf30 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BitwiseOr.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BitwiseOr.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BitwiseOr"
   endpoint {
     name: "bitwise.BitwiseOr"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BitwiseXor.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BitwiseXor.pbtxt
index 28f405b8ada..c83fee544c6 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BitwiseXor.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BitwiseXor.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BitwiseXor"
   endpoint {
     name: "bitwise.BitwiseXor"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BlockLSTMGradV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BlockLSTMGradV2.pbtxt
index ed3ee5ce114..d88c6c62f86 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BlockLSTMGradV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BlockLSTMGradV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BlockLSTMGradV2"
   endpoint {
     name: "nn.BlockLSTMGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BlockLSTMV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BlockLSTMV2.pbtxt
index b28283c1332..f20e824d7dc 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BlockLSTMV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BlockLSTMV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BlockLSTMV2"
   endpoint {
     name: "nn.BlockLSTM"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesAggregateStats.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesAggregateStats.pbtxt
index 3d48e24f535..58978e6b6ba 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesAggregateStats.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesAggregateStats.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesAggregateStats"
   endpoint {
     name: "estimator.BoostedTreesAggregateStats"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesBucketize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesBucketize.pbtxt
index 9580285ea0e..d55fffeb182 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesBucketize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesBucketize.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesBucketize"
   endpoint {
     name: "estimator.BoostedTreesBucketize"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesCalculateBestFeatureSplit.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesCalculateBestFeatureSplit.pbtxt
index d175e607580..43ce3d8a8b8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesCalculateBestFeatureSplit.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesCalculateBestFeatureSplit.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesCalculateBestFeatureSplit"
   endpoint {
     name: "estimator.BoostedTreesCalculateBestFeatureSplit"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesCalculateBestFeatureSplitV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesCalculateBestFeatureSplitV2.pbtxt
index 3af78f46d8d..d920e9bf6b5 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesCalculateBestFeatureSplitV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesCalculateBestFeatureSplitV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesCalculateBestFeatureSplitV2"
   endpoint {
     name: "estimator.BoostedTreesCalculateBestFeatureSplitV2"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesCalculateBestGainsPerFeature.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesCalculateBestGainsPerFeature.pbtxt
index 87685cceff3..cab624efd61 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesCalculateBestGainsPerFeature.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesCalculateBestGainsPerFeature.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesCalculateBestGainsPerFeature"
   endpoint {
     name: "estimator.BoostedTreesCalculateBestGainsPerFeature"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesCenterBias.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesCenterBias.pbtxt
index d4c4e63ffb6..055cb5b067d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesCenterBias.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesCenterBias.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesCenterBias"
   endpoint {
     name: "estimator.BoostedTreesCenterBias"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesCreateEnsemble.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesCreateEnsemble.pbtxt
index ecd5599e03d..01e25eb2270 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesCreateEnsemble.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesCreateEnsemble.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesCreateEnsemble"
   endpoint {
     name: "estimator.BoostedTreesCreateEnsemble"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesCreateQuantileStreamResource.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesCreateQuantileStreamResource.pbtxt
index 42091b1f414..7105d2a13ca 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesCreateQuantileStreamResource.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesCreateQuantileStreamResource.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesCreateQuantileStreamResource"
   endpoint {
     name: "estimator.BoostedTreesCreateQuantileStreamResource"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesDeserializeEnsemble.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesDeserializeEnsemble.pbtxt
index b01487ac55c..7dbb508bad1 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesDeserializeEnsemble.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesDeserializeEnsemble.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesDeserializeEnsemble"
   endpoint {
     name: "estimator.BoostedTreesDeserializeEnsemble"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesEnsembleResourceHandleOp.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesEnsembleResourceHandleOp.pbtxt
index 36437e93985..43f0f618a9d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesEnsembleResourceHandleOp.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesEnsembleResourceHandleOp.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesEnsembleResourceHandleOp"
   endpoint {
     name: "estimator.BoostedTreesEnsembleResourceHandleOp"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesExampleDebugOutputs.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesExampleDebugOutputs.pbtxt
index 83c98a3ed17..0768f7ea464 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesExampleDebugOutputs.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesExampleDebugOutputs.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesExampleDebugOutputs"
   endpoint {
     name: "estimator.BoostedTreesExampleDebugOutputs"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesFlushQuantileSummaries.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesFlushQuantileSummaries.pbtxt
index 2b42aaa4487..c5949350c42 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesFlushQuantileSummaries.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesFlushQuantileSummaries.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesFlushQuantileSummaries"
   endpoint {
     name: "estimator.BoostedTreesFlushQuantileSummaries"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesGetEnsembleStates.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesGetEnsembleStates.pbtxt
index f02b68487ac..1973e3ce0b6 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesGetEnsembleStates.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesGetEnsembleStates.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesGetEnsembleStates"
   endpoint {
     name: "estimator.BoostedTreesGetEnsembleStates"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesMakeQuantileSummaries.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesMakeQuantileSummaries.pbtxt
index c89c68d527d..f4de8855e9a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesMakeQuantileSummaries.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesMakeQuantileSummaries.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesMakeQuantileSummaries"
   endpoint {
     name: "estimator.BoostedTreesMakeQuantileSummaries"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesMakeStatsSummary.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesMakeStatsSummary.pbtxt
index e39d6a2495c..5414e2aae97 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesMakeStatsSummary.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesMakeStatsSummary.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesMakeStatsSummary"
   endpoint {
     name: "estimator.BoostedTreesMakeStatsSummary"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesPredict.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesPredict.pbtxt
index 59aab04d53a..7c93fcfdfc2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesPredict.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesPredict.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesPredict"
   endpoint {
     name: "estimator.BoostedTreesPredict"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesQuantileStreamResourceAddSummaries.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesQuantileStreamResourceAddSummaries.pbtxt
index 656360418a9..ab449a57d5c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesQuantileStreamResourceAddSummaries.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesQuantileStreamResourceAddSummaries.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesQuantileStreamResourceAddSummaries"
   endpoint {
     name: "estimator.BoostedTreesQuantileStreamResourceAddSummaries"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesQuantileStreamResourceDeserialize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesQuantileStreamResourceDeserialize.pbtxt
index 5fde27fd5b6..45103ae088a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesQuantileStreamResourceDeserialize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesQuantileStreamResourceDeserialize.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesQuantileStreamResourceDeserialize"
   endpoint {
     name: "estimator.BoostedTreesQuantileStreamResourceDeserialize"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesQuantileStreamResourceFlush.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesQuantileStreamResourceFlush.pbtxt
index 2c9b8936d74..16b68e4ac83 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesQuantileStreamResourceFlush.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesQuantileStreamResourceFlush.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesQuantileStreamResourceFlush"
   endpoint {
     name: "estimator.BoostedTreesQuantileStreamResourceFlush"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesQuantileStreamResourceGetBucketBoundaries.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesQuantileStreamResourceGetBucketBoundaries.pbtxt
index 1d4ff1aeece..990abb4effe 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesQuantileStreamResourceGetBucketBoundaries.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesQuantileStreamResourceGetBucketBoundaries.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesQuantileStreamResourceGetBucketBoundaries"
   endpoint {
     name: "estimator.BoostedTreesQuantileStreamResourceGetBucketBoundaries"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesQuantileStreamResourceHandleOp.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesQuantileStreamResourceHandleOp.pbtxt
index 599bf156074..12600896ec9 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesQuantileStreamResourceHandleOp.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesQuantileStreamResourceHandleOp.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesQuantileStreamResourceHandleOp"
   endpoint {
     name: "estimator.BoostedTreesQuantileStreamResourceHandleOp"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesSerializeEnsemble.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesSerializeEnsemble.pbtxt
index 106a8a98309..5880c132063 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesSerializeEnsemble.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesSerializeEnsemble.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesSerializeEnsemble"
   endpoint {
     name: "estimator.BoostedTreesSerializeEnsemble"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesSparseAggregateStats.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesSparseAggregateStats.pbtxt
index 0493060496b..109f3bae4e2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesSparseAggregateStats.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesSparseAggregateStats.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesSparseAggregateStats"
   endpoint {
     name: "estimator.BoostedTreesSparseAggregateStats"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesSparseCalculateBestFeatureSplit.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesSparseCalculateBestFeatureSplit.pbtxt
index 81067bb6cbd..aae4c225f7e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesSparseCalculateBestFeatureSplit.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesSparseCalculateBestFeatureSplit.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesSparseCalculateBestFeatureSplit"
   endpoint {
     name: "estimator.BoostedTreesSparseCalculateBestFeatureSplit"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesTrainingPredict.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesTrainingPredict.pbtxt
index 4d1fa91560e..d4696dc6182 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesTrainingPredict.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesTrainingPredict.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesTrainingPredict"
   endpoint {
     name: "estimator.BoostedTreesTrainingPredict"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesUpdateEnsemble.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesUpdateEnsemble.pbtxt
index 7a330f04e2e..77f30bc409f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesUpdateEnsemble.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesUpdateEnsemble.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesUpdateEnsemble"
   endpoint {
     name: "estimator.BoostedTreesUpdateEnsemble"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesUpdateEnsembleV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesUpdateEnsembleV2.pbtxt
index f4e1665b61c..df4e978b422 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesUpdateEnsembleV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BoostedTreesUpdateEnsembleV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "BoostedTreesUpdateEnsembleV2"
   endpoint {
     name: "estimator.BoostedTreesUpdateEnsembleV2"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BroadcastArgs.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BroadcastArgs.pbtxt
index 484742a2d02..ebc44eacd85 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BroadcastArgs.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BroadcastArgs.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BroadcastArgs"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BroadcastGradientArgs.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BroadcastGradientArgs.pbtxt
index 50f631b2a69..6e6f0d1b9b7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BroadcastGradientArgs.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BroadcastGradientArgs.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BroadcastGradientArgs"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_BroadcastTo.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_BroadcastTo.pbtxt
index 127458816ce..c5b07af0a18 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_BroadcastTo.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_BroadcastTo.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "BroadcastTo"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Bucketize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Bucketize.pbtxt
index 5a99712fd6c..a600ac3634d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Bucketize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Bucketize.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Bucketize"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CSRSparseMatrixComponents.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CSRSparseMatrixComponents.pbtxt
index 552f52a21a9..24b7e34e16b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CSRSparseMatrixComponents.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CSRSparseMatrixComponents.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CSRSparseMatrixComponents"
   endpoint {
     name: "linalg.sparse.CSRSparseMatrixComponents"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CSRSparseMatrixToDense.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CSRSparseMatrixToDense.pbtxt
index d85bf95da9c..62baeff7b47 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CSRSparseMatrixToDense.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CSRSparseMatrixToDense.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CSRSparseMatrixToDense"
   endpoint {
     name: "linalg.sparse.CSRSparseMatrixToDense"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CSRSparseMatrixToSparseTensor.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CSRSparseMatrixToSparseTensor.pbtxt
index 7dc0ac62652..6be3fd9219b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CSRSparseMatrixToSparseTensor.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CSRSparseMatrixToSparseTensor.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CSRSparseMatrixToSparseTensor"
   endpoint {
     name: "linalg.sparse.CSRSparseMatrixToSparseTensor"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CTCBeamSearchDecoder.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CTCBeamSearchDecoder.pbtxt
index 39739f03a31..113d683f6be 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CTCBeamSearchDecoder.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CTCBeamSearchDecoder.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CTCBeamSearchDecoder"
   endpoint {
     name: "nn.CtcBeamSearchDecoder"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CTCGreedyDecoder.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CTCGreedyDecoder.pbtxt
index 009742f0973..f82f1789f23 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CTCGreedyDecoder.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CTCGreedyDecoder.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CTCGreedyDecoder"
   endpoint {
     name: "nn.CtcGreedyDecoder"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CTCLoss.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CTCLoss.pbtxt
index dbeefa40171..0c4d2f7843a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CTCLoss.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CTCLoss.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CTCLoss"
   endpoint {
     name: "nn.CtcLoss"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CTCLossV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CTCLossV2.pbtxt
index 97df8d7a826..4ea107e1445 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CTCLossV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CTCLossV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CTCLossV2"
   endpoint {
     name: "nn.CTCLossV2"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Case.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Case.pbtxt
index f1da05d19f2..eb371486f04 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Case.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Case.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Case"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Cast.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Cast.pbtxt
index ea9f812e2a1..bd6b1b27204 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Cast.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Cast.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Cast"
   endpoint {
     name: "dtypes.Cast"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Ceil.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Ceil.pbtxt
index d1a75f27d9a..41c23c44712 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Ceil.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Ceil.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Ceil"
   endpoint {
     name: "math.Ceil"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CheckNumericsV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CheckNumericsV2.pbtxt
index cfae1f1cd11..3085f985715 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CheckNumericsV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CheckNumericsV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CheckNumericsV2"
   endpoint {
     name: "debugging.CheckNumerics"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CheckPinned.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CheckPinned.pbtxt
new file mode 100644
index 00000000000..fff873c9bbf
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CheckPinned.pbtxt
@@ -0,0 +1,6 @@
+op {
+  graph_op_name: "CheckPinned"
+  endpoint {
+    name: "CheckPinned"
+  }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Cholesky.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Cholesky.pbtxt
index a60c4e3663e..0c1f48317d1 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Cholesky.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Cholesky.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Cholesky"
   endpoint {
     name: "linalg.Cholesky"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CholeskyGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CholeskyGrad.pbtxt
index 2601d415542..22e4aa89a6f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CholeskyGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CholeskyGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CholeskyGrad"
   endpoint {
     name: "linalg.CholeskyGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ClipByValue.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ClipByValue.pbtxt
index c4e055c117c..b6c8fae964f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ClipByValue.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ClipByValue.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ClipByValue"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CloseSummaryWriter.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CloseSummaryWriter.pbtxt
index d5fbe557db0..2d1ca9631d3 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CloseSummaryWriter.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CloseSummaryWriter.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CloseSummaryWriter"
   endpoint {
     name: "summary.CloseSummaryWriter"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CollateTPUEmbeddingMemory.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CollateTPUEmbeddingMemory.pbtxt
index 9821c816ce5..7e2b1aef93b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CollateTPUEmbeddingMemory.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CollateTPUEmbeddingMemory.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CollateTPUEmbeddingMemory"
   endpoint {
     name: "tpu.CollateTPUEmbeddingMemory"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveAllToAllV3.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveAllToAllV3.pbtxt
index 7b45fb28bc9..b2356ee5b36 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveAllToAllV3.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveAllToAllV3.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CollectiveAllToAllV3"
   endpoint {
     name: "collective.CollectiveAllToAll"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveAssignGroupV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveAssignGroupV2.pbtxt
index 03e7c97aeb5..d414cd66079 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveAssignGroupV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveAssignGroupV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CollectiveAssignGroupV2"
   endpoint {
     name: "collective.CollectiveAssignGroup"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveBcastRecvV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveBcastRecvV2.pbtxt
index c7d78492cab..be74a35b7f9 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveBcastRecvV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveBcastRecvV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CollectiveBcastRecvV2"
   endpoint {
     name: "collective.CollectiveBcastRecv"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveBcastSendV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveBcastSendV2.pbtxt
index 9eb747f9a4b..1fb22afed54 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveBcastSendV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveBcastSendV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CollectiveBcastSendV2"
   endpoint {
     name: "collective.CollectiveBcastSend"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveGatherV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveGatherV2.pbtxt
index a9179d98926..d220f2ab11f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveGatherV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveGatherV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CollectiveGatherV2"
   endpoint: {
     name: "collective.CollectiveGather"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveInitializeCommunicator.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveInitializeCommunicator.pbtxt
index de44c83cdf0..fba9e620843 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveInitializeCommunicator.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveInitializeCommunicator.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CollectiveInitializeCommunicator"
   endpoint {
     name: "collective.CollectiveInitializeCommunicator"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectivePermute.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectivePermute.pbtxt
index 490242ba9c5..5fa5a659df4 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectivePermute.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectivePermute.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CollectivePermute"
   endpoint {
     name: "collective.CollectivePermute"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveReduceScatterV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveReduceScatterV2.pbtxt
index 07cf7f4a7c0..b36c3830ca1 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveReduceScatterV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveReduceScatterV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CollectiveReduceScatterV2"
   endpoint {
     name: "collective.CollectiveReduceScatter"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveReduceV3.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveReduceV3.pbtxt
index c7234159624..3a2779461d2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveReduceV3.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CollectiveReduceV3.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CollectiveReduceV3"
   endpoint {
     name: "collective.CollectiveReduce"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CombinedNonMaxSuppression.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CombinedNonMaxSuppression.pbtxt
index 44baf4d4b3e..836a46a42b2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CombinedNonMaxSuppression.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CombinedNonMaxSuppression.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CombinedNonMaxSuppression"
   endpoint {
     name: "image.CombinedNonMaxSuppression"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CompareAndBitpack.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CompareAndBitpack.pbtxt
index d744fbbc90f..4e5a5e1a2af 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CompareAndBitpack.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CompareAndBitpack.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CompareAndBitpack"
   endpoint {
     name: "math.CompareAndBitpack"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Complex.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Complex.pbtxt
index 4889360a96a..f649707afb8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Complex.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Complex.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Complex"
   endpoint {
     name: "dtypes.Complex"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ComplexAbs.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ComplexAbs.pbtxt
index 42a6a3c6a1c..be6aa59c92e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ComplexAbs.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ComplexAbs.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ComplexAbs"
   endpoint {
     name: "math.ComplexAbs"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CompositeTensorVariantFromComponents.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CompositeTensorVariantFromComponents.pbtxt
index 2a115714525..adb638940d8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CompositeTensorVariantFromComponents.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CompositeTensorVariantFromComponents.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CompositeTensorVariantFromComponents"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CompositeTensorVariantToComponents.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CompositeTensorVariantToComponents.pbtxt
index 49f121eef4a..b34054ead77 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CompositeTensorVariantToComponents.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CompositeTensorVariantToComponents.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CompositeTensorVariantToComponents"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CompressElement.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CompressElement.pbtxt
index 08158de13a5..09a543581d2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CompressElement.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CompressElement.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CompressElement"
   endpoint {
     name: "data.CompressElement"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ComputeAccidentalHits.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ComputeAccidentalHits.pbtxt
index ca9e590fbce..8c4d834016b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ComputeAccidentalHits.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ComputeAccidentalHits.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ComputeAccidentalHits"
   endpoint {
     name: "nn.ComputeAccidentalHits"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ComputeBatchSize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ComputeBatchSize.pbtxt
index fd383570c1a..826f51ac87d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ComputeBatchSize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ComputeBatchSize.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ComputeBatchSize"
   endpoint {
     name: "train.ComputeBatchSize"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ComputeDedupDataSize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ComputeDedupDataSize.pbtxt
new file mode 100644
index 00000000000..3bedfe49d78
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ComputeDedupDataSize.pbtxt
@@ -0,0 +1,7 @@
+op {
+  graph_op_name: "ComputeDedupDataSize"
+  visibility: SKIP
+  endpoint {
+    name: "tpu.ComputeDedupDataSize"
+  }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ComputeDedupDataSizeV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ComputeDedupDataSizeV2.pbtxt
new file mode 100644
index 00000000000..af5bdc31f13
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ComputeDedupDataSizeV2.pbtxt
@@ -0,0 +1,6 @@
+op {
+  graph_op_name: "ComputeDedupDataSizeV2"
+  endpoint {
+    name: "tpu.ComputeDedupDataSize"
+  }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ComputeDedupDataTupleMask.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ComputeDedupDataTupleMask.pbtxt
index 933b66ddf4e..cb0cd71c3f3 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ComputeDedupDataTupleMask.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ComputeDedupDataTupleMask.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "ComputeDedupDataTupleMask"
   endpoint {
     name: "tpu.ComputeDedupDataTupleMask"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ComputeDedupDataTupleMaskV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ComputeDedupDataTupleMaskV2.pbtxt
new file mode 100644
index 00000000000..75e34703b13
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ComputeDedupDataTupleMaskV2.pbtxt
@@ -0,0 +1,6 @@
+op {
+  graph_op_name: "ComputeDedupDataTupleMaskV2"
+  endpoint {
+    name: "tpu.ComputeDedupDataTupleMask"
+  }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConcatOffset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConcatOffset.pbtxt
index e8e23cf5593..876db502770 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConcatOffset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConcatOffset.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ConcatOffset"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConcatV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConcatV2.pbtxt
index 7035796981f..9bf9a9b8648 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConcatV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConcatV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ConcatV2"
   endpoint {
     name: "Concat"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConditionalAccumulator.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConditionalAccumulator.pbtxt
index 08431982daa..3e8dd5299a1 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConditionalAccumulator.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConditionalAccumulator.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ConditionalAccumulator"
   endpoint {
     name: "train.ConditionalAccumulator"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConfigureAndInitializeGlobalTPU.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConfigureAndInitializeGlobalTPU.pbtxt
index 9e41e0fad15..5ee6c848dee 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConfigureAndInitializeGlobalTPU.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConfigureAndInitializeGlobalTPU.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ConfigureAndInitializeGlobalTPU"
   endpoint {
     name: "tpu.ConfigureAndInitializeGlobalTPU"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConfigureDistributedTPU.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConfigureDistributedTPU.pbtxt
index 2722e41b916..1dc468d8666 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConfigureDistributedTPU.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConfigureDistributedTPU.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ConfigureDistributedTPU"
   endpoint {
     name: "tpu.ConfigureDistributedTPU"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConfigureTPUEmbedding.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConfigureTPUEmbedding.pbtxt
index 74bf19cfa9d..1cd8caf6d34 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConfigureTPUEmbedding.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConfigureTPUEmbedding.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ConfigureTPUEmbedding"
   endpoint {
     name: "tpu.ConfigureTPUEmbedding"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConfigureTPUEmbeddingHost.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConfigureTPUEmbeddingHost.pbtxt
index 7095f59d3ae..aa4265b80ba 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConfigureTPUEmbeddingHost.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConfigureTPUEmbeddingHost.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ConfigureTPUEmbeddingHost"
   endpoint {
     name: "tpu.ConfigureTPUEmbeddingHost"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConfigureTPUEmbeddingMemory.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConfigureTPUEmbeddingMemory.pbtxt
index f72a9a9804d..51b142d5c15 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConfigureTPUEmbeddingMemory.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConfigureTPUEmbeddingMemory.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ConfigureTPUEmbeddingMemory"
   endpoint {
     name: "tpu.ConfigureTPUEmbeddingMemory"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Conj.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Conj.pbtxt
index 7de199b55fa..0fb1ddc5788 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Conj.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Conj.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Conj"
   endpoint {
     name: "math.Conj"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConjugateTranspose.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConjugateTranspose.pbtxt
index 42173088ae0..42fad3b7ee6 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConjugateTranspose.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConjugateTranspose.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ConjugateTranspose"
   endpoint {
     name: "linalg.ConjugateTranspose"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConnectTPUEmbeddingHosts.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConnectTPUEmbeddingHosts.pbtxt
index a59a9718d1f..030cd71468e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConnectTPUEmbeddingHosts.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConnectTPUEmbeddingHosts.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ConnectTPUEmbeddingHosts"
   endpoint {
     name: "tpu.ConnectTPUEmbeddingHosts"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConsumeMutexLock.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConsumeMutexLock.pbtxt
index 1e0d136bc2f..78c8099b9ac 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConsumeMutexLock.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConsumeMutexLock.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ConsumeMutexLock"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ControlTrigger.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ControlTrigger.pbtxt
index 4517b4373f3..8dc64a98773 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ControlTrigger.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ControlTrigger.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ControlTrigger"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Conv.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Conv.pbtxt
index 17ce54871b6..cdc59f52e68 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Conv.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Conv.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Conv"
   endpoint {
     name: "nn.Conv"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Conv2D.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Conv2D.pbtxt
index 21d1398e098..1752f424f38 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Conv2D.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Conv2D.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Conv2D"
   endpoint {
     name: "nn.Conv2d"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Conv3D.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Conv3D.pbtxt
index 6ee1befcff1..abafc5a703d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Conv3D.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Conv3D.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Conv3D"
   endpoint {
     name: "nn.Conv3d"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Conv3DBackpropFilterV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Conv3DBackpropFilterV2.pbtxt
index 0643cc14a9a..257a0e6f7fe 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Conv3DBackpropFilterV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Conv3DBackpropFilterV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Conv3DBackpropFilterV2"
   endpoint {
     name: "nn.Conv3dBackpropFilter"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Conv3DBackpropInputV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Conv3DBackpropInputV2.pbtxt
index 33c8f5a3ce0..e192e5feedc 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Conv3DBackpropInputV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Conv3DBackpropInputV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Conv3DBackpropInputV2"
   endpoint {
     name: "nn.Conv3dBackpropInput"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConvertToCooTensor.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConvertToCooTensor.pbtxt
new file mode 100644
index 00000000000..3047ada98b7
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConvertToCooTensor.pbtxt
@@ -0,0 +1,7 @@
+op {
+  graph_op_name: "ConvertToCooTensor"
+  visibility: VISIBLE
+  endpoint {
+    name: "tpu.ConvertToCooTensor"
+  }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConvertToListOfSparseCoreCooTensors.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConvertToListOfSparseCoreCooTensors.pbtxt
new file mode 100644
index 00000000000..99d2ebea438
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConvertToListOfSparseCoreCooTensors.pbtxt
@@ -0,0 +1,7 @@
+op {
+  graph_op_name: "ConvertToListOfSparseCoreCooTensors"
+  visibility: VISIBLE
+  endpoint {
+    name: "sparse.ConvertToListOfSparseCoreCooTensors"
+  }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ConvertToSparseCoreCsrWrappedCooTensor.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConvertToSparseCoreCsrWrappedCooTensor.pbtxt
new file mode 100644
index 00000000000..6b78c0b216c
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ConvertToSparseCoreCsrWrappedCooTensor.pbtxt
@@ -0,0 +1,7 @@
+op {
+  graph_op_name: "ConvertToSparseCoreCsrWrappedCooTensor"
+  visibility: VISIBLE
+  endpoint {
+    name: "sparse.ConvertToSparseCoreCsrWrappedCooTensor"
+  }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CopyToMesh.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CopyToMesh.pbtxt
index 4228903ebde..e70bf4ade58 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CopyToMesh.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CopyToMesh.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CopyToMesh"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CopyToMeshGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CopyToMeshGrad.pbtxt
index 0780d5e0dcc..5e3d38dd349 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CopyToMeshGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CopyToMeshGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CopyToMeshGrad"
   endpoint {
     name: "CopyToMeshGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Cos.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Cos.pbtxt
index db1f62806e2..a8006cadd6b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Cos.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Cos.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Cos"
   endpoint {
     name: "math.Cos"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Cosh.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Cosh.pbtxt
index a4b5e752bf5..6f08a1b1862 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Cosh.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Cosh.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Cosh"
   endpoint {
     name: "math.Cosh"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CountUpTo.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CountUpTo.pbtxt
index eb9f328ce0c..bdc63ba1e04 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CountUpTo.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CountUpTo.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CountUpTo"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CreateSummaryDbWriter.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CreateSummaryDbWriter.pbtxt
index 299f881dd44..0c9840034b5 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CreateSummaryDbWriter.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CreateSummaryDbWriter.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CreateSummaryDbWriter"
   endpoint {
     name: "summary.CreateSummaryDbWriter"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CreateSummaryFileWriter.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CreateSummaryFileWriter.pbtxt
index 26c7941ce57..b85f13b6de4 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CreateSummaryFileWriter.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CreateSummaryFileWriter.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CreateSummaryFileWriter"
   endpoint {
     name: "summary.CreateSummaryFileWriter"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CropAndResize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CropAndResize.pbtxt
index cbf9aa8f996..b41932cf5ab 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CropAndResize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CropAndResize.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CropAndResize"
   endpoint {
     name: "image.CropAndResize"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CropAndResizeGradBoxes.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CropAndResizeGradBoxes.pbtxt
index 44354bdfa03..8b29c975468 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CropAndResizeGradBoxes.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CropAndResizeGradBoxes.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CropAndResizeGradBoxes"
   endpoint {
     name: "image.CropAndResizeGradBoxes"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CropAndResizeGradImage.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CropAndResizeGradImage.pbtxt
index 0618db9a8d7..85607c39878 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CropAndResizeGradImage.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CropAndResizeGradImage.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CropAndResizeGradImage"
   endpoint {
     name: "image.CropAndResizeGradImage"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Cross.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Cross.pbtxt
index c027884250e..a9717d3bc7d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Cross.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Cross.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Cross"
   endpoint {
     name: "linalg.Cross"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CrossReplicaSum.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CrossReplicaSum.pbtxt
index 5ef9b3b586e..f83642ef04a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CrossReplicaSum.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CrossReplicaSum.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CrossReplicaSum"
   endpoint {
     name: "tpu.CrossReplicaSum"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CudnnRNNBackpropV3.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CudnnRNNBackpropV3.pbtxt
index 03875605e8a..eb7800c71df 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CudnnRNNBackpropV3.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CudnnRNNBackpropV3.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CudnnRNNBackpropV3"
   endpoint {
     name: "nn.CudnnRNNBackprop"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CudnnRNNCanonicalToParamsV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CudnnRNNCanonicalToParamsV2.pbtxt
index 1c2cd136bf8..99b144ed11c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CudnnRNNCanonicalToParamsV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CudnnRNNCanonicalToParamsV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CudnnRNNCanonicalToParamsV2"
   endpoint {
     name: "nn.CudnnRNNCanonicalToParams"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CudnnRNNParamsSize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CudnnRNNParamsSize.pbtxt
index 3f1193fe606..e0b34db1680 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CudnnRNNParamsSize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CudnnRNNParamsSize.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CudnnRNNParamsSize"
   endpoint {
     name: "nn.CudnnRnnParamsSize"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CudnnRNNParamsToCanonicalV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CudnnRNNParamsToCanonicalV2.pbtxt
index fa2fd5ffbca..4542b63afcc 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CudnnRNNParamsToCanonicalV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CudnnRNNParamsToCanonicalV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CudnnRNNParamsToCanonicalV2"
   endpoint {
     name: "nn.CudnnRNNParamsToCanonical"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CudnnRNNV3.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CudnnRNNV3.pbtxt
index 5e2ddba15d1..0e07477c874 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CudnnRNNV3.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CudnnRNNV3.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CudnnRNNV3"
   endpoint {
     name: "nn.CudnnRNN"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Cumprod.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Cumprod.pbtxt
index 0cb7862413d..b49217a6d13 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Cumprod.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Cumprod.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Cumprod"
   endpoint {
     name: "math.Cumprod"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Cumsum.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Cumsum.pbtxt
index e7d90765326..30db71c3b58 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Cumsum.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Cumsum.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Cumsum"
   endpoint {
     name: "math.Cumsum"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_CumulativeLogsumexp.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_CumulativeLogsumexp.pbtxt
index 5d68409a4cb..5e815bd9dab 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_CumulativeLogsumexp.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_CumulativeLogsumexp.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "CumulativeLogsumexp"
   endpoint {
     name: "math.CumulativeLogsumexp"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DTensorRestoreV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DTensorRestoreV2.pbtxt
index 23d09b99663..c494af28b78 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DTensorRestoreV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DTensorRestoreV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DTensorRestoreV2"
   endpoint {
     name: "tpu.DTensorRestore"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DTensorSetGlobalTPUArray.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DTensorSetGlobalTPUArray.pbtxt
index 512ac74139d..9eb54c892bb 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DTensorSetGlobalTPUArray.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DTensorSetGlobalTPUArray.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DTensorSetGlobalTPUArray"
   endpoint {
-    name: "tpu.ExecuteTPUEmbeddingPartitioner"
+    name: "tpu.DTensorSetGlobalTPUArray"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DTensorShardedPrefix.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DTensorShardedPrefix.pbtxt
index d3eb775b2d7..28a477a0351 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DTensorShardedPrefix.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DTensorShardedPrefix.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DTensorShardedPrefix"
   endpoint {
     name: "tpu.DTensorShardedPrefix"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DataFormatDimMap.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DataFormatDimMap.pbtxt
index 36ea17793fd..8d1015ddf8a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DataFormatDimMap.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DataFormatDimMap.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DataFormatDimMap"
   endpoint {
     name: "nn.DataFormatDimMap"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DataFormatVecPermute.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DataFormatVecPermute.pbtxt
index b6b7e2dc767..61766b93905 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DataFormatVecPermute.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DataFormatVecPermute.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DataFormatVecPermute"
   endpoint {
     name: "nn.DataFormatVecPermute"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DatasetFingerprint.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DatasetFingerprint.pbtxt
new file mode 100644
index 00000000000..61e0086729b
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DatasetFingerprint.pbtxt
@@ -0,0 +1,7 @@
+op {
+  graph_op_name: "DatasetFingerprint"
+  visibility: VISIBLE
+  endpoint {
+    name: "data.DatasetFingerprint"
+  }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Dawsn.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Dawsn.pbtxt
index 253b06bcd4a..8cd2717a601 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Dawsn.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Dawsn.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Dawsn"
   endpoint {
     name: "math.special.Dawsn"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeAndCropJpeg.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeAndCropJpeg.pbtxt
index c07bb7a1bdf..13ffab4d225 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeAndCropJpeg.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeAndCropJpeg.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DecodeAndCropJpeg"
   endpoint {
     name: "image.DecodeAndCropJpeg"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeBase64.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeBase64.pbtxt
index 49c93453f7b..6d091e3a52e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeBase64.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeBase64.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DecodeBase64"
   endpoint {
     name: "io.DecodeBase64"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeBmp.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeBmp.pbtxt
index 049cfa153d1..03f5e2d7aa0 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeBmp.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeBmp.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DecodeBmp"
   endpoint {
     name: "image.DecodeBmp"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeCSV.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeCSV.pbtxt
index 1d60107adab..f8c881d807f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeCSV.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeCSV.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DecodeCSV"
   endpoint {
     name: "io.DecodeCsv"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeCompressed.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeCompressed.pbtxt
index 91327a92ecb..e688002e944 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeCompressed.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeCompressed.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DecodeCompressed"
   endpoint {
     name: "io.DecodeCompressed"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeGif.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeGif.pbtxt
index 355643ff77c..ac36d9bc1f2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeGif.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeGif.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DecodeGif"
   endpoint {
     name: "image.DecodeGif"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeImage.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeImage.pbtxt
index 2cc6d31d1d1..80516c0e1b1 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeImage.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeImage.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DecodeImage"
   endpoint {
     name: "image.DecodeImage"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeJSONExample.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeJSONExample.pbtxt
index 6ecba5ab053..d78f8891a22 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeJSONExample.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeJSONExample.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DecodeJSONExample"
   endpoint {
     name: "io.DecodeJsonExample"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeJpeg.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeJpeg.pbtxt
index c0ebf2e315f..f1d5b1238d9 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeJpeg.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeJpeg.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DecodeJpeg"
   endpoint {
     name: "image.DecodeJpeg"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodePaddedRaw.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodePaddedRaw.pbtxt
index ed6dae2be3c..daaabcd76c4 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodePaddedRaw.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodePaddedRaw.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DecodePaddedRaw"
   endpoint {
     name: "io.DecodePaddedRaw"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodePng.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodePng.pbtxt
index d94537dc928..aed9c898a29 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodePng.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodePng.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DecodePng"
   endpoint {
     name: "image.DecodePng"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeProtoV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeProtoV2.pbtxt
index 4ba118cb0e6..b831161f690 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeProtoV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeProtoV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DecodeProtoV2"
   endpoint {
     name: "DecodeProto"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeRaw.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeRaw.pbtxt
index 73067173edd..d91490cc854 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeRaw.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeRaw.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DecodeRaw"
   endpoint {
     name: "io.DecodeRaw"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeWav.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeWav.pbtxt
index 9b249cc6e95..f63a147de11 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeWav.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DecodeWav.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DecodeWav"
   endpoint {
     name: "audio.DecodeWav"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DeepCopy.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DeepCopy.pbtxt
index 88a87c92918..e55a4c21ffe 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DeepCopy.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DeepCopy.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DeepCopy"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DeleteMemoryCache.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DeleteMemoryCache.pbtxt
index c7ab9d3c90a..e9ddbda3ed9 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DeleteMemoryCache.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DeleteMemoryCache.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DeleteMemoryCache"
   endpoint {
     name: "data.DeleteMemoryCache"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DeleteMultiDeviceIterator.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DeleteMultiDeviceIterator.pbtxt
index b788df4402e..b93b8c3541e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DeleteMultiDeviceIterator.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DeleteMultiDeviceIterator.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DeleteMultiDeviceIterator"
   endpoint {
     name: "data.DeleteMultiDeviceIterator"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DeleteRandomSeedGenerator.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DeleteRandomSeedGenerator.pbtxt
index fc61e02eba8..f1d06eccdbb 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DeleteRandomSeedGenerator.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DeleteRandomSeedGenerator.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DeleteRandomSeedGenerator"
   endpoint {
     name: "random.DeleteRandomSeedGenerator"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DeleteSeedGenerator.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DeleteSeedGenerator.pbtxt
index 48a546f3b83..24e5394bf3f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DeleteSeedGenerator.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DeleteSeedGenerator.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DeleteSeedGenerator"
   endpoint {
     name: "random.DeleteSeedGenerator"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DeleteSessionTensor.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DeleteSessionTensor.pbtxt
index 1865b461de7..a7e2ca5bfed 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DeleteSessionTensor.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DeleteSessionTensor.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DeleteSessionTensor"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DenseBincount.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DenseBincount.pbtxt
index aeacb17d512..38af1580e36 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DenseBincount.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DenseBincount.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DenseBincount"
   endpoint {
     name: "math.DenseBincount"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DenseCountSparseOutput.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DenseCountSparseOutput.pbtxt
index fca17ecfa79..6496cb1c446 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DenseCountSparseOutput.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DenseCountSparseOutput.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DenseCountSparseOutput"
   endpoint {
     name: "sparse.DenseCountSparseOutput"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DenseToCSRSparseMatrix.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DenseToCSRSparseMatrix.pbtxt
index a8186dc4957..dc7ecd1a204 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DenseToCSRSparseMatrix.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DenseToCSRSparseMatrix.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DenseToCSRSparseMatrix"
   endpoint {
     name: "linalg.sparse.DenseToCSRSparseMatrix"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DenseToDenseSetOperation.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DenseToDenseSetOperation.pbtxt
index f85def92ee3..8772c2c0e3a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DenseToDenseSetOperation.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DenseToDenseSetOperation.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DenseToDenseSetOperation"
   endpoint {
     name: "sparse.DenseToDenseSetOperation"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DenseToSparseSetOperation.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DenseToSparseSetOperation.pbtxt
index 11fbef8ff1f..80455026338 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DenseToSparseSetOperation.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DenseToSparseSetOperation.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DenseToSparseSetOperation"
   endpoint {
     name: "sparse.DenseToSparseSetOperation"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DepthToSpace.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DepthToSpace.pbtxt
index 0d2cbd2b904..da338027869 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DepthToSpace.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DepthToSpace.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DepthToSpace"
   endpoint {
     name: "nn.DepthToSpace"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DepthwiseConv2dNative.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DepthwiseConv2dNative.pbtxt
index 1aaa480fefd..eb20bbab725 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DepthwiseConv2dNative.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DepthwiseConv2dNative.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DepthwiseConv2dNative"
   endpoint {
     name: "nn.DepthwiseConv2dNative"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DepthwiseConv2dNativeBackpropFilter.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DepthwiseConv2dNativeBackpropFilter.pbtxt
index 1a62d8cf632..e534f662ea2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DepthwiseConv2dNativeBackpropFilter.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DepthwiseConv2dNativeBackpropFilter.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DepthwiseConv2dNativeBackpropFilter"
   endpoint {
     name: "nn.DepthwiseConv2dNativeBackpropFilter"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DepthwiseConv2dNativeBackpropInput.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DepthwiseConv2dNativeBackpropInput.pbtxt
index 9106dd2f8fe..892160034cd 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DepthwiseConv2dNativeBackpropInput.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DepthwiseConv2dNativeBackpropInput.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DepthwiseConv2dNativeBackpropInput"
   endpoint {
     name: "nn.DepthwiseConv2dNativeBackpropInput"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Dequantize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Dequantize.pbtxt
index 8ee4daa2f7e..7b32cd14882 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Dequantize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Dequantize.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Dequantize"
   endpoint {
     name: "quantization.Dequantize"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DeserializeIterator.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DeserializeIterator.pbtxt
index bdd03f5dc67..cb296d27127 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DeserializeIterator.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DeserializeIterator.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DeserializeIterator"
   endpoint {
     name: "data.DeserializeIterator"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DeserializeManySparse.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DeserializeManySparse.pbtxt
index 826d49f5465..b57141ed844 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DeserializeManySparse.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DeserializeManySparse.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DeserializeManySparse"
   endpoint {
     name: "io.DeserializeManySparse"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DeserializeSparse.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DeserializeSparse.pbtxt
index e6f24bb6257..8b46d1060b8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DeserializeSparse.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DeserializeSparse.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DeserializeSparse"
   endpoint {
     name: "sparse.DeserializeSparse"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DestroyResourceOp.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DestroyResourceOp.pbtxt
index 733e5e5029c..dbdcf2f0cea 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DestroyResourceOp.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DestroyResourceOp.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DestroyResourceOp"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DestroyTemporaryVariable.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DestroyTemporaryVariable.pbtxt
index bd416eb68fb..e9f167bd1fc 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DestroyTemporaryVariable.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DestroyTemporaryVariable.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DestroyTemporaryVariable"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DeviceIndex.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DeviceIndex.pbtxt
index 1a656ad1290..de7b5bc2b58 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DeviceIndex.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DeviceIndex.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DeviceIndex"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Diag.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Diag.pbtxt
index 374b3c97e17..de116a55651 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Diag.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Diag.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Diag"
   endpoint {
     name: "linalg.TensorDiag"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DiagPart.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DiagPart.pbtxt
index 70db2357d06..b9ef4010d99 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DiagPart.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DiagPart.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DiagPart"
   endpoint {
     name: "linalg.TensorDiagPart"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Digamma.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Digamma.pbtxt
index 68dc74c64ea..fafcf4cc8bc 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Digamma.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Digamma.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Digamma"
   endpoint {
     name: "math.Digamma"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Dilation2D.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Dilation2D.pbtxt
index 914ea29812c..523cf20b08d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Dilation2D.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Dilation2D.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Dilation2D"
   endpoint {
     name: "nn.Dilation2d"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Dilation2DBackpropFilter.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Dilation2DBackpropFilter.pbtxt
index db3c68e088e..0b7b84c8b5d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Dilation2DBackpropFilter.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Dilation2DBackpropFilter.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Dilation2DBackpropFilter"
   endpoint {
     name: "nn.Dilation2dBackpropFilter"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Dilation2DBackpropInput.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Dilation2DBackpropInput.pbtxt
index c935144f7af..c8d15a56c8b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Dilation2DBackpropInput.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Dilation2DBackpropInput.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Dilation2DBackpropInput"
   endpoint {
     name: "nn.Dilation2dBackpropInput"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DisableCopyOnRead.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DisableCopyOnRead.pbtxt
index aec65bf4c08..4e6dae43607 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DisableCopyOnRead.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DisableCopyOnRead.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DisableCopyOnRead"
   endpoint {
     name: "io.DisableCopyOnRead"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DistributedSave.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DistributedSave.pbtxt
index 06d244d83bc..74a8b4ddfc1 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DistributedSave.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DistributedSave.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DistributedSave"
   endpoint {
     name: "train.DistributedSave"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Div.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Div.pbtxt
index 2abba7f05f3..70007de3ae0 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Div.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Div.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Div"
   endpoint {
     name: "math.Div"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DivNoNan.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DivNoNan.pbtxt
index c124044604b..c8dcc9f80aa 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DivNoNan.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DivNoNan.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DivNoNan"
   endpoint {
     name: "math.DivNoNan"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DrawBoundingBoxesV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DrawBoundingBoxesV2.pbtxt
index 76108a4085b..1a1bcc3c284 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DrawBoundingBoxesV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DrawBoundingBoxesV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DrawBoundingBoxesV2"
   endpoint {
     name: "image.DrawBoundingBoxes"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DummyIterationCounter.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DummyIterationCounter.pbtxt
index 12b6ffbf5fa..837647279de 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DummyIterationCounter.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DummyIterationCounter.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DummyIterationCounter"
   endpoint {
     name: "data.DummyIterationCounter"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DummyMemoryCache.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DummyMemoryCache.pbtxt
index 87fe4495514..ac86013215f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DummyMemoryCache.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DummyMemoryCache.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DummyMemoryCache"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DummySeedGenerator.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DummySeedGenerator.pbtxt
index 4d550787d03..3d2cf2618ff 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DummySeedGenerator.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DummySeedGenerator.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DummySeedGenerator"
   endpoint {
     name: "random.DummySeedGenerator"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.pbtxt
index 16882e5f1d3..935786de8fa 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DynamicEnqueueTPUEmbeddingArbitraryTensorBatch"
   endpoint {
     name: "tpu.DynamicEnqueueTPUEmbeddingArbitraryTensorBatch"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DynamicEnqueueTPUEmbeddingRaggedTensorBatch.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DynamicEnqueueTPUEmbeddingRaggedTensorBatch.pbtxt
index 01a1c9189d2..2f59cca069b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DynamicEnqueueTPUEmbeddingRaggedTensorBatch.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DynamicEnqueueTPUEmbeddingRaggedTensorBatch.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DynamicEnqueueTPUEmbeddingRaggedTensorBatch"
   endpoint {
     name: "tpu.DynamicEnqueueTPUEmbeddingRaggedTensorBatch"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DynamicPartition.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DynamicPartition.pbtxt
index cc585676e48..4550ff6fbbc 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DynamicPartition.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DynamicPartition.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DynamicPartition"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_DynamicStitch.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_DynamicStitch.pbtxt
index ac1fef4b6af..609515974a7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_DynamicStitch.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_DynamicStitch.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "DynamicStitch"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_EditDistance.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_EditDistance.pbtxt
index ca65c2c6e58..8e6dabb659f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_EditDistance.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_EditDistance.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "EditDistance"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Eig.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Eig.pbtxt
index b0aa6f99414..fb0d5c4b045 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Eig.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Eig.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Eig"
   endpoint {
     name: "linalg.Eig"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Einsum.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Einsum.pbtxt
index 07e5db98ae8..fbfc95e1380 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Einsum.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Einsum.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Einsum"
   endpoint {
     name: "linalg.Einsum"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Elu.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Elu.pbtxt
index bfe8d972cf6..432d2a70692 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Elu.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Elu.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Elu"
   endpoint {
     name: "nn.Elu"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_EluGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_EluGrad.pbtxt
index 3757357c000..e8722cc7d24 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_EluGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_EluGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "EluGrad"
   endpoint {
     name: "nn.EluGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Empty.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Empty.pbtxt
index 6522f51d9dc..e2dfb53ab7b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Empty.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Empty.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Empty"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_EmptyTensorList.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_EmptyTensorList.pbtxt
index ef3f533964c..df92f263af1 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_EmptyTensorList.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_EmptyTensorList.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "EmptyTensorList"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_EmptyTensorMap.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_EmptyTensorMap.pbtxt
index cf5cf52c931..a2141d3fbd3 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_EmptyTensorMap.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_EmptyTensorMap.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "EmptyTensorMap"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_EncodeBase64.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_EncodeBase64.pbtxt
index 66f19def9ae..a060a92104d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_EncodeBase64.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_EncodeBase64.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "EncodeBase64"
   endpoint {
     name: "io.EncodeBase64"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_EncodeJpeg.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_EncodeJpeg.pbtxt
index 1e151665f87..af995121608 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_EncodeJpeg.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_EncodeJpeg.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "EncodeJpeg"
   endpoint {
     name: "image.EncodeJpeg"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_EncodeJpegVariableQuality.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_EncodeJpegVariableQuality.pbtxt
index f18b353938c..bb8eeba21b3 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_EncodeJpegVariableQuality.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_EncodeJpegVariableQuality.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "EncodeJpegVariableQuality"
   endpoint {
     name: "image.EncodeJpegVariableQuality"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_EncodePng.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_EncodePng.pbtxt
index 7a8d713c865..b806e4917ff 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_EncodePng.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_EncodePng.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "EncodePng"
   endpoint {
     name: "image.EncodePng"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_EncodeProto.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_EncodeProto.pbtxt
index ac6a04b4bc2..87b2c6ac4bc 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_EncodeProto.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_EncodeProto.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "EncodeProto"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_EncodeWav.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_EncodeWav.pbtxt
index f3b22fde666..96ed73270da 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_EncodeWav.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_EncodeWav.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "EncodeWav"
   endpoint {
     name: "audio.EncodeWav"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_EnqueueTPUEmbeddingArbitraryTensorBatch.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_EnqueueTPUEmbeddingArbitraryTensorBatch.pbtxt
index 90dc3c898d0..7335cf4e1cc 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_EnqueueTPUEmbeddingArbitraryTensorBatch.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_EnqueueTPUEmbeddingArbitraryTensorBatch.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "EnqueueTPUEmbeddingArbitraryTensorBatch"
   endpoint {
     name: "tpu.EnqueueTPUEmbeddingArbitraryTensorBatch"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_EnqueueTPUEmbeddingBatch.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_EnqueueTPUEmbeddingBatch.pbtxt
index 6b698b2800b..a14d72b4a72 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_EnqueueTPUEmbeddingBatch.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_EnqueueTPUEmbeddingBatch.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "EnqueueTPUEmbeddingBatch"
   endpoint {
     name: "tpu.EnqueueTPUEmbeddingBatch"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_EnqueueTPUEmbeddingIntegerBatch.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_EnqueueTPUEmbeddingIntegerBatch.pbtxt
index f4a6a104d5a..97b471f0ddd 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_EnqueueTPUEmbeddingIntegerBatch.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_EnqueueTPUEmbeddingIntegerBatch.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "EnqueueTPUEmbeddingIntegerBatch"
   endpoint {
     name: "tpu.EnqueueTPUEmbeddingIntegerBatch"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_EnqueueTPUEmbeddingRaggedTensorBatch.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_EnqueueTPUEmbeddingRaggedTensorBatch.pbtxt
index 971fed264c9..d1d250dd27a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_EnqueueTPUEmbeddingRaggedTensorBatch.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_EnqueueTPUEmbeddingRaggedTensorBatch.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "EnqueueTPUEmbeddingRaggedTensorBatch"
   endpoint {
     name: "tpu.EnqueueTPUEmbeddingRaggedTensorBatch"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_EnqueueTPUEmbeddingSparseBatch.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_EnqueueTPUEmbeddingSparseBatch.pbtxt
index 16366328074..b346dd636a9 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_EnqueueTPUEmbeddingSparseBatch.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_EnqueueTPUEmbeddingSparseBatch.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "EnqueueTPUEmbeddingSparseBatch"
   endpoint {
     name: "tpu.EnqueueTPUEmbeddingSparseBatch"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_EnqueueTPUEmbeddingSparseTensorBatch.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_EnqueueTPUEmbeddingSparseTensorBatch.pbtxt
index 2f8dd050421..56864f899be 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_EnqueueTPUEmbeddingSparseTensorBatch.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_EnqueueTPUEmbeddingSparseTensorBatch.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "EnqueueTPUEmbeddingSparseTensorBatch"
   endpoint {
     name: "tpu.EnqueueTPUEmbeddingSparseTensorBatch"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_EnsureShape.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_EnsureShape.pbtxt
index 6238947598f..4e7d8ac0a55 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_EnsureShape.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_EnsureShape.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "EnsureShape"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Enter.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Enter.pbtxt
index ffc10c91beb..07abdf23784 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Enter.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Enter.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Enter"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Equal.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Equal.pbtxt
index c2256c24337..afd1c9fcf85 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Equal.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Equal.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Equal"
   endpoint {
     name: "math.Equal"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Erf.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Erf.pbtxt
index 9efcc3983c4..0f3d2e6dc03 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Erf.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Erf.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Erf"
   endpoint {
     name: "math.Erf"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Erfc.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Erfc.pbtxt
index c0f4db61ff4..b1da0c02862 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Erfc.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Erfc.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Erfc"
   endpoint {
     name: "math.Erfc"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Erfinv.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Erfinv.pbtxt
index 7937108c423..68358ebb137 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Erfinv.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Erfinv.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Erfinv"
   endpoint {
     name: "math.erfinv"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_EuclideanNorm.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_EuclideanNorm.pbtxt
index f6a75752dfb..f4afae29cd7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_EuclideanNorm.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_EuclideanNorm.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "EuclideanNorm"
   endpoint {
     name: "linalg.EuclideanNorm"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExecuteTPUEmbeddingPartitioner.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExecuteTPUEmbeddingPartitioner.pbtxt
index 465325a17a0..125aeb61d93 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExecuteTPUEmbeddingPartitioner.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExecuteTPUEmbeddingPartitioner.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExecuteTPUEmbeddingPartitioner"
   endpoint {
     name: "tpu.ExecuteTPUEmbeddingPartitioner"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Exit.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Exit.pbtxt
index 6215cd22299..0ca26a5aa7d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Exit.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Exit.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Exit"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Exp.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Exp.pbtxt
index b2790c8306f..7947019a666 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Exp.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Exp.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Exp"
   endpoint {
     name: "math.Exp"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExpandDims.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExpandDims.pbtxt
index 66902ccb5b0..01c82186792 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExpandDims.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExpandDims.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExpandDims"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalAssertNextDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalAssertNextDataset.pbtxt
index efdfebb08b8..28e46dce87d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalAssertNextDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalAssertNextDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalAssertNextDataset"
   endpoint {
     name: "data.experimental.AssertNextDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalAutoShardDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalAutoShardDataset.pbtxt
index 1784a6b9a07..df08aef09b3 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalAutoShardDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalAutoShardDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalAutoShardDataset"
   endpoint {
     name: "data.experimental.AutoShardDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalBytesProducedStatsDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalBytesProducedStatsDataset.pbtxt
index 8e589b0d674..272f9e1eaae 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalBytesProducedStatsDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalBytesProducedStatsDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalBytesProducedStatsDataset"
   endpoint {
     name: "data.experimental.BytesProducedStatsDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalCSVDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalCSVDataset.pbtxt
index 217088d589c..d548e72a07a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalCSVDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalCSVDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalCSVDataset"
   endpoint {
     name:  "data.experimental.CSVDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalChooseFastestDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalChooseFastestDataset.pbtxt
index ea9dbd5ca43..d818de9d33e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalChooseFastestDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalChooseFastestDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalChooseFastestDataset"
   endpoint {
     name: "data.experimental.ChooseFastestDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalDatasetCardinality.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalDatasetCardinality.pbtxt
index cb6c9048eeb..743bc536a0f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalDatasetCardinality.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalDatasetCardinality.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalDatasetCardinality"
   endpoint {
     name: "data.experimental.DatasetCardinality"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalDatasetToTFRecord.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalDatasetToTFRecord.pbtxt
index 837258d5733..45ca0a09034 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalDatasetToTFRecord.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalDatasetToTFRecord.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalDatasetToTFRecord"
   endpoint {
     name: "data.experimental.DatasetToTFRecord"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalDenseToSparseBatchDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalDenseToSparseBatchDataset.pbtxt
index c45c43efe6f..492eeee03a2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalDenseToSparseBatchDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalDenseToSparseBatchDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalDenseToSparseBatchDataset"
   endpoint {
     name: "data.experimental.DenseToSparseBatchDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalDirectedInterleaveDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalDirectedInterleaveDataset.pbtxt
index 9d8795e599b..d0acd7ea288 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalDirectedInterleaveDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalDirectedInterleaveDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalDirectedInterleaveDataset"
   endpoint {
     name: "data.experimental.DirectedInterleaveDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalFunctionBufferingResource.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalFunctionBufferingResource.pbtxt
index fef2a0fd2f2..f35eca43ca4 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalFunctionBufferingResource.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalFunctionBufferingResource.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalFunctionBufferingResource"
   endpoint {
     name: "data.experimental.FunctionBufferingResource"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalFunctionBufferingResourceGetNext.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalFunctionBufferingResourceGetNext.pbtxt
index 4c614345d59..e1b5ae6fac6 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalFunctionBufferingResourceGetNext.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalFunctionBufferingResourceGetNext.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalFunctionBufferingResourceGetNext"
   endpoint {
     name: "data.experimental.FunctionBufferingResourceGetNext"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalFunctionBufferingResourceReset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalFunctionBufferingResourceReset.pbtxt
index b819eeab663..8c9bdb4de26 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalFunctionBufferingResourceReset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalFunctionBufferingResourceReset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalFunctionBufferingResourceReset"
   endpoint {
     name: "data.experimental.FunctionBufferingResourceReset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalGroupByReducerDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalGroupByReducerDataset.pbtxt
index eeda8ab8552..8cf62d85942 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalGroupByReducerDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalGroupByReducerDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalGroupByReducerDataset"
   endpoint {
     name: "data.experimental.GroupByReducerDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalGroupByWindowDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalGroupByWindowDataset.pbtxt
index 9d1210c4a60..875aaa78dd8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalGroupByWindowDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalGroupByWindowDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalGroupByWindowDataset"
   endpoint {
     name: "data.experimental.GroupByWindowDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalIgnoreErrorsDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalIgnoreErrorsDataset.pbtxt
index 3ece1655397..ad31e9738d7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalIgnoreErrorsDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalIgnoreErrorsDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalIgnoreErrorsDataset"
   endpoint {
     name: "data.experimental.IgnoreErrorsDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalIteratorGetDevice.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalIteratorGetDevice.pbtxt
index 2f46541bf3d..b1f2dfcf5c9 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalIteratorGetDevice.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalIteratorGetDevice.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalIteratorGetDevice"
   endpoint {
     name: "data.experimental.IteratorGetDevice"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalLMDBDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalLMDBDataset.pbtxt
index 32345d2200e..a427a85e631 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalLMDBDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalLMDBDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalLMDBDataset"
   endpoint {
     name: "data.experimental.LmdbDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalLatencyStatsDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalLatencyStatsDataset.pbtxt
index f02a5653e4f..21ed0bfde64 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalLatencyStatsDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalLatencyStatsDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalLatencyStatsDataset"
   endpoint {
     name: "data.experimental.LatencyStatsDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalMapAndBatchDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalMapAndBatchDataset.pbtxt
index 762c1b49602..fa88e18887b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalMapAndBatchDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalMapAndBatchDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalMapAndBatchDataset"
   endpoint {
     name: "data.experimental.MapAndBatchDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalMapDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalMapDataset.pbtxt
index 9406cffef9a..cdfa66a022e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalMapDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalMapDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalMapDataset"
   endpoint {
     name: "data.experimental.MapDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalMatchingFilesDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalMatchingFilesDataset.pbtxt
index bcc19749d12..ae0210b3f3e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalMatchingFilesDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalMatchingFilesDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalMatchingFilesDataset"
   endpoint {
     name: "data.experimental.MatchingFilesDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalMaxIntraOpParallelismDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalMaxIntraOpParallelismDataset.pbtxt
index 5336b380310..afe63cd09fe 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalMaxIntraOpParallelismDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalMaxIntraOpParallelismDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalMaxIntraOpParallelismDataset"
   endpoint {
     name: "data.experimental.MaxIntraOpParallelismDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalNonSerializableDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalNonSerializableDataset.pbtxt
index 61fe8ae0ce9..5e3386e8483 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalNonSerializableDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalNonSerializableDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalNonSerializableDataset"
   endpoint {
     name: "data.experimental.NonSerializableDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalParallelInterleaveDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalParallelInterleaveDataset.pbtxt
index def35c75c91..ad33fe82aa8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalParallelInterleaveDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalParallelInterleaveDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalParallelInterleaveDataset"
   endpoint {
     name: "data.experimental.ParallelInterleaveDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalParseExampleDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalParseExampleDataset.pbtxt
index 8a1a5286033..741b6b1a96a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalParseExampleDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalParseExampleDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalParseExampleDataset"
   endpoint {
     name: "data.experimental.ParseExampleDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalPrivateThreadPoolDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalPrivateThreadPoolDataset.pbtxt
index 0f035999282..667d9f53047 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalPrivateThreadPoolDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalPrivateThreadPoolDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalPrivateThreadPoolDataset"
   endpoint {
     name: "data.experimental.PrivateThreadPoolDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalRandomDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalRandomDataset.pbtxt
index a109386b43c..687e7c2782a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalRandomDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalRandomDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalRandomDataset"
   endpoint {
     name: "data.experimental.RandomDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalRebatchDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalRebatchDataset.pbtxt
index 2c6b6ccbfe6..8012dbae620 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalRebatchDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalRebatchDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalRebatchDataset"
   endpoint {
     name: "data.experimental.RebatchDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalScanDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalScanDataset.pbtxt
index bea58e5b1b5..910fb561988 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalScanDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalScanDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalScanDataset"
   endpoint {
     name: "data.experimental.ScanDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalSetStatsAggregatorDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalSetStatsAggregatorDataset.pbtxt
index bd3129ae938..d3039499942 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalSetStatsAggregatorDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalSetStatsAggregatorDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalSetStatsAggregatorDataset"
   endpoint {
     name: "data.experimental.SetStatsAggregatorDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalSleepDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalSleepDataset.pbtxt
index 4ab827669ba..7c160528fcc 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalSleepDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalSleepDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalSleepDataset"
   endpoint {
     name: "data.experimental.SleepDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalSlidingWindowDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalSlidingWindowDataset.pbtxt
index 045a2bc8a6a..aa0fe454722 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalSlidingWindowDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalSlidingWindowDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalSlidingWindowDataset"
   endpoint {
     name: "data.experimental.SlidingWindowDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalSqlDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalSqlDataset.pbtxt
index b0937039065..f827b21b8b7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalSqlDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalSqlDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalSqlDataset"
   endpoint {
     name: "data.experimental.SqlDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalStatsAggregatorHandle.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalStatsAggregatorHandle.pbtxt
index 1910ed3140c..ec2f2aff7ca 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalStatsAggregatorHandle.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalStatsAggregatorHandle.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalStatsAggregatorHandle"
   endpoint {
     name: "data.experimental.StatsAggregatorHandle"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalStatsAggregatorSummary.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalStatsAggregatorSummary.pbtxt
index 9773131e228..6f9b79ac777 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalStatsAggregatorSummary.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalStatsAggregatorSummary.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalStatsAggregatorSummary"
   endpoint {
     name: "data.experimental.StatsAggregatorSummary"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalTakeWhileDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalTakeWhileDataset.pbtxt
index 207875b0588..2b494bd04c4 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalTakeWhileDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalTakeWhileDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalTakeWhileDataset"
   endpoint {
     name: "data.experimental.TakeWhileDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalThreadPoolDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalThreadPoolDataset.pbtxt
index e9825ac34c7..55fc4665fd9 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalThreadPoolDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalThreadPoolDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalThreadPoolDataset"
   endpoint {
     name: "data.experimental.ThreadPoolDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalThreadPoolHandle.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalThreadPoolHandle.pbtxt
index 55d81428408..ecaa0ceb2c9 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalThreadPoolHandle.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalThreadPoolHandle.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalThreadPoolHandle"
   endpoint {
     name: "data.experimental.ThreadPoolHandle"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalUnbatchDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalUnbatchDataset.pbtxt
index 137e99a60ad..c08a60749be 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalUnbatchDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalUnbatchDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalUnbatchDataset"
   endpoint {
     name: "data.experimental.UnbatchDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalUniqueDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalUniqueDataset.pbtxt
index 108ad10a98f..d644078b402 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalUniqueDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExperimentalUniqueDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExperimentalUniqueDataset"
   endpoint {
     name: "data.experimental.UniqueDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Expint.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Expint.pbtxt
index b2cd45a605e..64b09ca6ab7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Expint.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Expint.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Expint"
   endpoint {
     name: "math.special.Expint"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Expm1.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Expm1.pbtxt
index 71a8fcf0225..df2ece3b9e8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Expm1.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Expm1.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Expm1"
   endpoint {
     name: "math.Expm1"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExtractGlimpseV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExtractGlimpseV2.pbtxt
index 8fd84593339..e0491472fc4 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExtractGlimpseV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExtractGlimpseV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExtractGlimpseV2"
   endpoint {
     name: "image.ExtractGlimpse"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExtractImagePatches.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExtractImagePatches.pbtxt
index 7cdcfd75285..ab6177b5247 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExtractImagePatches.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExtractImagePatches.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExtractImagePatches"
   endpoint {
     name: "image.ExtractImagePatches"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExtractJpegShape.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExtractJpegShape.pbtxt
index c95fcc9cef4..da8258cc5b2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExtractJpegShape.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExtractJpegShape.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExtractJpegShape"
   endpoint {
     name: "image.ExtractJpegShape"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExtractVolumePatches.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExtractVolumePatches.pbtxt
index 6f61c832109..5d5d80ce08f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ExtractVolumePatches.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ExtractVolumePatches.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ExtractVolumePatches"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FFT.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FFT.pbtxt
index d9655903086..a50549a383e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FFT.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FFT.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FFT"
   endpoint {
     name: "signal.Fft"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FFT2D.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FFT2D.pbtxt
index 474103076b9..ffbf0a00050 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FFT2D.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FFT2D.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FFT2D"
   endpoint {
     name: "signal.Fft2d"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FFT3D.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FFT3D.pbtxt
index 8e1606b8f9d..a7415cc5d03 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FFT3D.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FFT3D.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FFT3D"
   endpoint {
     name: "signal.Fft3d"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FFTND.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FFTND.pbtxt
index 6c40faf3436..753cdcb1997 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FFTND.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FFTND.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FFTND"
   endpoint {
     name: "signal.FftNd"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FIFOQueueV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FIFOQueueV2.pbtxt
index c0861a6e8dd..797fe75a0b5 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FIFOQueueV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FIFOQueueV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FIFOQueueV2"
   endpoint {
     name: "io.FifoQueue"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Fact.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Fact.pbtxt
index 436664e554b..f60455d31a5 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Fact.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Fact.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Fact"
   endpoint {
     name: "math.Fact"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeParam.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeParam.pbtxt
index ac8f751442c..3310fb8af02 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeParam.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeParam.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FakeParam"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQuantWithMinMaxArgs.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQuantWithMinMaxArgs.pbtxt
index 809d231a55c..61723a6b616 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQuantWithMinMaxArgs.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQuantWithMinMaxArgs.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FakeQuantWithMinMaxArgs"
   endpoint {
     name: "quantization.FakeQuantWithMinMaxArgs"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQuantWithMinMaxArgsGradient.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQuantWithMinMaxArgsGradient.pbtxt
index 50d0f51a140..a995fff37e4 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQuantWithMinMaxArgsGradient.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQuantWithMinMaxArgsGradient.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FakeQuantWithMinMaxArgsGradient"
   endpoint {
     name: "quantization.FakeQuantWithMinMaxArgsGradient"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQuantWithMinMaxVars.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQuantWithMinMaxVars.pbtxt
index b86258aab2b..7318899ee3f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQuantWithMinMaxVars.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQuantWithMinMaxVars.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FakeQuantWithMinMaxVars"
   endpoint {
     name: "quantization.FakeQuantWithMinMaxVars"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQuantWithMinMaxVarsGradient.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQuantWithMinMaxVarsGradient.pbtxt
index 3c1343423c1..7738b510c20 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQuantWithMinMaxVarsGradient.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQuantWithMinMaxVarsGradient.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FakeQuantWithMinMaxVarsGradient"
   endpoint {
     name: "quantization.FakeQuantWithMinMaxVarsGradient"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQuantWithMinMaxVarsPerChannel.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQuantWithMinMaxVarsPerChannel.pbtxt
index afe45a290d3..270c2644610 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQuantWithMinMaxVarsPerChannel.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQuantWithMinMaxVarsPerChannel.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FakeQuantWithMinMaxVarsPerChannel"
   endpoint {
     name: "quantization.FakeQuantWithMinMaxVarsPerChannel"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQuantWithMinMaxVarsPerChannelGradient.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQuantWithMinMaxVarsPerChannelGradient.pbtxt
index 9dd62fdffd0..0cd372b0162 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQuantWithMinMaxVarsPerChannelGradient.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQuantWithMinMaxVarsPerChannelGradient.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FakeQuantWithMinMaxVarsPerChannelGradient"
   endpoint {
     name: "quantization.FakeQuantWithMinMaxVarsPerChannelGradient"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQueue.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQueue.pbtxt
index 8960966f084..b49719e8142 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQueue.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FakeQueue.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FakeQueue"
   endpoint {
     name: "io.FakeQueue"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FileSystemSetConfiguration.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FileSystemSetConfiguration.pbtxt
index a4699e1a487..48a9f01c087 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FileSystemSetConfiguration.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FileSystemSetConfiguration.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FileSystemSetConfiguration"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Fill.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Fill.pbtxt
index 3997328ed31..b0883b54e03 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Fill.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Fill.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Fill"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FinalizeTPUEmbedding.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FinalizeTPUEmbedding.pbtxt
index 991b3c66b12..5a5262fbe5a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FinalizeTPUEmbedding.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FinalizeTPUEmbedding.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "FinalizeTPUEmbedding"
   endpoint {
     name: "tpu.FinalizeTPUEmbedding"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FinalizeTPUEmbeddingV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FinalizeTPUEmbeddingV2.pbtxt
new file mode 100644
index 00000000000..7a8840309e4
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FinalizeTPUEmbeddingV2.pbtxt
@@ -0,0 +1,6 @@
+op {
+  graph_op_name: "FinalizeTPUEmbeddingV2"
+  endpoint {
+    name: "tpu.FinalizeTPUEmbedding"
+  }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Fingerprint.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Fingerprint.pbtxt
index 3a8379e516c..42f780314bb 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Fingerprint.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Fingerprint.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Fingerprint"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FixedLengthRecordReaderV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FixedLengthRecordReaderV2.pbtxt
index f897c21365b..c6acb018dc2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FixedLengthRecordReaderV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FixedLengthRecordReaderV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FixedLengthRecordReaderV2"
   endpoint {
     name: "io.FixedLengthRecordReader"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FixedUnigramCandidateSampler.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FixedUnigramCandidateSampler.pbtxt
index eb9c68d4dba..b4e26238201 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FixedUnigramCandidateSampler.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FixedUnigramCandidateSampler.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FixedUnigramCandidateSampler"
   endpoint {
     name: "nn.FixedUnigramCandidateSampler"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Floor.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Floor.pbtxt
index a2b80f97e05..9cbf0eb0e4e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Floor.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Floor.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Floor"
   endpoint {
     name: "math.Floor"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FloorDiv.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FloorDiv.pbtxt
index 054d85f55c9..693eed27e08 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FloorDiv.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FloorDiv.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FloorDiv"
   endpoint {
     name: "math.FloorDiv"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FloorMod.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FloorMod.pbtxt
index ff2216a9357..c6c7ea42659 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FloorMod.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FloorMod.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FloorMod"
   endpoint {
     name: "math.FloorMod"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FlushSummaryWriter.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FlushSummaryWriter.pbtxt
index feaa3a6dc22..5731ce679d7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FlushSummaryWriter.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FlushSummaryWriter.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FlushSummaryWriter"
   endpoint {
     name: "summary.FlushSummaryWriter"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_For.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_For.pbtxt
index 30363d1e963..4d01b94bd26 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_For.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_For.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "For"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FractionalAvgPool.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FractionalAvgPool.pbtxt
index fc2e6ca54b8..1e2afb0ca3b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FractionalAvgPool.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FractionalAvgPool.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FractionalAvgPool"
   endpoint {
     name: "nn.FractionalAvgPool"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FractionalAvgPoolGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FractionalAvgPoolGrad.pbtxt
index 4e11d5e3950..f51859f903e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FractionalAvgPoolGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FractionalAvgPoolGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FractionalAvgPoolGrad"
   endpoint {
     name: "nn.FractionalAvgPoolGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FractionalMaxPool.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FractionalMaxPool.pbtxt
index 061b358ec27..ad0fddc2bc6 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FractionalMaxPool.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FractionalMaxPool.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FractionalMaxPool"
   endpoint {
     name: "nn.FractionalMaxPool"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FractionalMaxPoolGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FractionalMaxPoolGrad.pbtxt
index c70e6d721e5..00bf30c2b68 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FractionalMaxPoolGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FractionalMaxPoolGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FractionalMaxPoolGrad"
   endpoint {
     name: "nn.FractionalMaxPoolGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FresnelCos.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FresnelCos.pbtxt
index 88373aebe7c..239ea452c59 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FresnelCos.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FresnelCos.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FresnelCos"
   endpoint {
     name: "math.special.FresnelCos"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FresnelSin.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FresnelSin.pbtxt
index 294d07b18ce..01e64aa2368 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FresnelSin.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FresnelSin.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FresnelSin"
   endpoint {
     name: "math.special.FresnelSin"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FusedBatchNormGradV3.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FusedBatchNormGradV3.pbtxt
index ed5f0ebb827..bf2ae00fd7f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FusedBatchNormGradV3.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FusedBatchNormGradV3.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FusedBatchNormGradV3"
   endpoint {
     name: "nn.FusedBatchNormGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FusedBatchNormV3.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FusedBatchNormV3.pbtxt
index 1fe73e286a0..e3cc882ca7d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FusedBatchNormV3.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FusedBatchNormV3.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FusedBatchNormV3"
   endpoint {
     name: "nn.FusedBatchNorm"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FusedPadConv2D.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FusedPadConv2D.pbtxt
index e7ee10e0c58..7e0d6eb913d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FusedPadConv2D.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FusedPadConv2D.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FusedPadConv2D"
   endpoint {
     name: "nn.FusedPadConv2d"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_FusedResizeAndPadConv2D.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_FusedResizeAndPadConv2D.pbtxt
index 6948fc1b87d..fc92f057104 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_FusedResizeAndPadConv2D.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_FusedResizeAndPadConv2D.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "FusedResizeAndPadConv2D"
   endpoint {
     name: "nn.FusedResizeAndPadConv2d"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_GRUBlockCell.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_GRUBlockCell.pbtxt
index 1dffbe38475..0b5ab9c8b0b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_GRUBlockCell.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_GRUBlockCell.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "GRUBlockCell"
   endpoint {
     name: "nn.GRUBlockCell"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_GRUBlockCellGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_GRUBlockCellGrad.pbtxt
index 33e7f4077f0..642a35b7945 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_GRUBlockCellGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_GRUBlockCellGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "GRUBlockCellGrad"
   endpoint {
     name: "nn.GRUBlockCellGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_GatherNd.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_GatherNd.pbtxt
index 257c0316ea0..80ed9a514c5 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_GatherNd.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_GatherNd.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "GatherNd"
   endpoint {
     name: "GatherNd"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_GatherV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_GatherV2.pbtxt
index 0927e77a968..d27fd30efa0 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_GatherV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_GatherV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "GatherV2"
   endpoint {
     name: "Gather"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_GcsConfigureBlockCache.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_GcsConfigureBlockCache.pbtxt
index 1ba3044d4c3..0878563c93b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_GcsConfigureBlockCache.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_GcsConfigureBlockCache.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "GcsConfigureBlockCache"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_GcsConfigureCredentials.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_GcsConfigureCredentials.pbtxt
index 98bd555fb87..1653b16c4ee 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_GcsConfigureCredentials.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_GcsConfigureCredentials.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "GcsConfigureCredentials"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_GenerateBigQueryReaderPartitions.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_GenerateBigQueryReaderPartitions.pbtxt
index 956f40762d7..3b037ef31c6 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_GenerateBigQueryReaderPartitions.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_GenerateBigQueryReaderPartitions.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "GenerateBigQueryReaderPartitions"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_GenerateBoundingBoxProposals.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_GenerateBoundingBoxProposals.pbtxt
index 2a830eb572e..069e9b74fff 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_GenerateBoundingBoxProposals.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_GenerateBoundingBoxProposals.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "GenerateBoundingBoxProposals"
   endpoint {
     name: "image.GenerateBoundingBoxProposals"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_GenerateVocabRemapping.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_GenerateVocabRemapping.pbtxt
index 9aac3b17f3c..02c132223ec 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_GenerateVocabRemapping.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_GenerateVocabRemapping.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "GenerateVocabRemapping"
   endpoint {
     name: "train.GenerateVocabRemapping"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_GetElementAtIndex.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_GetElementAtIndex.pbtxt
index 041f46e450e..9fac3335954 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_GetElementAtIndex.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_GetElementAtIndex.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "GetElementAtIndex"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_GetMinibatchSplitsWithPhysicalReplica.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_GetMinibatchSplitsWithPhysicalReplica.pbtxt
new file mode 100644
index 00000000000..a9a8710fdb5
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_GetMinibatchSplitsWithPhysicalReplica.pbtxt
@@ -0,0 +1,7 @@
+op {
+  graph_op_name: "GetMinibatchSplitsWithPhysicalReplica"
+  visibility: VISIBLE
+  endpoint {
+    name: "tpu.GetMinibatchSplitsWithPhysicalReplica"
+  }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_GetMinibatchesInCsrWithPhysicalReplica.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_GetMinibatchesInCsrWithPhysicalReplica.pbtxt
new file mode 100644
index 00000000000..9ee5d7e2e5b
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_GetMinibatchesInCsrWithPhysicalReplica.pbtxt
@@ -0,0 +1,7 @@
+op {
+  graph_op_name: "GetMinibatchesInCsrWithPhysicalReplica"
+  visibility: VISIBLE
+  endpoint {
+    name: "tpu.GetMinibatchesInCsrWithPhysicalReplica"
+  }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_GetOptions.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_GetOptions.pbtxt
index 382e395959f..eeb6d4c91d8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_GetOptions.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_GetOptions.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "GetOptions"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_GetSessionHandleV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_GetSessionHandleV2.pbtxt
index ba89942d77f..3484fbcd5d7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_GetSessionHandleV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_GetSessionHandleV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "GetSessionHandleV2"
   endpoint {
     name: "GetSessionHandle"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_GetSessionTensor.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_GetSessionTensor.pbtxt
index 34b6e627cdc..496b31c6ef0 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_GetSessionTensor.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_GetSessionTensor.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "GetSessionTensor"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_GetStatsFromListOfSparseCoreCooTensors.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_GetStatsFromListOfSparseCoreCooTensors.pbtxt
new file mode 100644
index 00000000000..11a2b9eccba
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_GetStatsFromListOfSparseCoreCooTensors.pbtxt
@@ -0,0 +1,7 @@
+op {
+  graph_op_name: "GetStatsFromListOfSparseCoreCooTensors"
+  visibility: VISIBLE
+  endpoint {
+    name: "sparse.GetStatsFromListOfSparseCoreCooTensors"
+  }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_GetTpuTaskId.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_GetTpuTaskId.pbtxt
new file mode 100644
index 00000000000..1072689506c
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_GetTpuTaskId.pbtxt
@@ -0,0 +1,7 @@
+op {
+  graph_op_name: "GetTpuTaskId"
+  visibility: VISIBLE
+  endpoint {
+    name: "tpu.GetTpuTaskId"
+  }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_GlobalIterId.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_GlobalIterId.pbtxt
new file mode 100644
index 00000000000..8a795a8ef23
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_GlobalIterId.pbtxt
@@ -0,0 +1,7 @@
+op {
+  graph_op_name: "GlobalIterId"
+  visibility: VISIBLE
+  endpoint {
+    name: "tpu.GlobalIterId"
+  }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_GlobalShuffleDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_GlobalShuffleDataset.pbtxt
new file mode 100644
index 00000000000..ed286d3ae31
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_GlobalShuffleDataset.pbtxt
@@ -0,0 +1,6 @@
+op {
+  graph_op_name: "GlobalShuffleDataset"
+  endpoint {
+    name: "data.GlobalShuffleDataset"
+  }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Greater.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Greater.pbtxt
index 594f9276be1..a84b4c9bc6b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Greater.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Greater.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Greater"
   endpoint {
     name: "math.Greater"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_GreaterEqual.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_GreaterEqual.pbtxt
index 17ea8696b0d..57f8c014728 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_GreaterEqual.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_GreaterEqual.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "GreaterEqual"
   endpoint {
     name: "math.GreaterEqual"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_GuaranteeConst.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_GuaranteeConst.pbtxt
index 8cac25787da..56a115a0603 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_GuaranteeConst.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_GuaranteeConst.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "GuaranteeConst"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_HSVToRGB.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_HSVToRGB.pbtxt
index 95b042d5d68..5689d054353 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_HSVToRGB.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_HSVToRGB.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "HSVToRGB"
   endpoint {
     name: "image.HsvToRgb"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_HashTableV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_HashTableV2.pbtxt
index 38cc5818d3b..4cde617757a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_HashTableV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_HashTableV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "HashTableV2"
   endpoint {
     name: "HashTable"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_HistogramFixedWidth.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_HistogramFixedWidth.pbtxt
index f64d9ae1d23..f3d2065032f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_HistogramFixedWidth.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_HistogramFixedWidth.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "HistogramFixedWidth"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_HistogramSummary.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_HistogramSummary.pbtxt
index 97f28335bb9..6c2c3b8254a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_HistogramSummary.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_HistogramSummary.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "HistogramSummary"
   endpoint {
     name: "summary.HistogramSummary"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_HostConst.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_HostConst.pbtxt
index ba589e73e71..f2a7160eccd 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_HostConst.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_HostConst.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "HostConst"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IFFT.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IFFT.pbtxt
index 4a15ebec7f6..a84e2d6dd57 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_IFFT.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IFFT.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "IFFT"
   endpoint {
     name: "signal.Ifft"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IFFT2D.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IFFT2D.pbtxt
index 35d696ee739..3380f459463 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_IFFT2D.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IFFT2D.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "IFFT2D"
   endpoint {
     name: "signal.Ifft2d"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IFFT3D.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IFFT3D.pbtxt
index 76a3164e6aa..02db3a66379 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_IFFT3D.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IFFT3D.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "IFFT3D"
   endpoint {
     name: "signal.Ifft3d"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IFFTND.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IFFTND.pbtxt
index dcee8641384..214a8bfc0b8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_IFFTND.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IFFTND.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "IFFTND"
   endpoint {
     name: "signal.IfftNd"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IRFFT.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IRFFT.pbtxt
index 7a68b01524e..ebd31423283 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_IRFFT.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IRFFT.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "IRFFT"
   endpoint {
     name: "signal.Irfft"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IRFFT2D.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IRFFT2D.pbtxt
index 239ec445d02..e73397a832f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_IRFFT2D.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IRFFT2D.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "IRFFT2D"
   endpoint {
     name: "signal.Irfft2d"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IRFFT3D.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IRFFT3D.pbtxt
index 87969436b7f..e6a064cfa6c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_IRFFT3D.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IRFFT3D.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "IRFFT3D"
   endpoint {
     name: "signal.Irfft3d"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IRFFTND.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IRFFTND.pbtxt
index bdab5f77932..848e444c33e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_IRFFTND.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IRFFTND.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "IRFFTND"
   endpoint {
     name: "signal.IrfftNd"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Identity.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Identity.pbtxt
index b6df3c6cfec..f90a3e1b0f9 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Identity.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Identity.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Identity"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IdentityN.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IdentityN.pbtxt
index 827df10c65a..a39e00d4106 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_IdentityN.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IdentityN.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "IdentityN"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IdentityReaderV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IdentityReaderV2.pbtxt
index 8081ac26b3d..92bc4a10279 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_IdentityReaderV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IdentityReaderV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "IdentityReaderV2"
   endpoint {
     name: "io.IdentityReader"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_If.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_If.pbtxt
index a3bc33ac2ce..292c093587e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_If.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_If.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "If"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Igamma.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Igamma.pbtxt
index cbdd8b984c4..e0134f5acc4 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Igamma.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Igamma.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Igamma"
   endpoint {
     name: "math.Igamma"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IgammaGradA.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IgammaGradA.pbtxt
index 0659c80c39f..46eaba97345 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_IgammaGradA.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IgammaGradA.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "IgammaGradA"
   endpoint {
     name: "math.IgammaGradA"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Igammac.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Igammac.pbtxt
index 94f6085e1a9..3114d90fd61 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Igammac.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Igammac.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Igammac"
   endpoint {
     name: "math.Igammac"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Imag.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Imag.pbtxt
index 4227c7078fc..66427ed58bd 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Imag.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Imag.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Imag"
   endpoint {
     name: "math.Imag"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ImageProjectiveTransformV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ImageProjectiveTransformV2.pbtxt
index badcf0d63ca..ae6bb8507be 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ImageProjectiveTransformV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ImageProjectiveTransformV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ImageProjectiveTransformV2"
   endpoint {
     name: "image.ImageProjectiveTransformV2"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ImageProjectiveTransformV3.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ImageProjectiveTransformV3.pbtxt
index 765ffe8107a..2f477c6d695 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ImageProjectiveTransformV3.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ImageProjectiveTransformV3.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ImageProjectiveTransformV3"
   endpoint {
     name: "image.ImageProjectiveTransformV3"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ImageSummary.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ImageSummary.pbtxt
index 1871e6b6550..5c3bd5f5047 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ImageSummary.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ImageSummary.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ImageSummary"
   endpoint {
     name: "summary.ImageSummary"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ImmutableConst.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ImmutableConst.pbtxt
index fd0384dc452..6f7a34c5a69 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ImmutableConst.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ImmutableConst.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ImmutableConst"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ImportEvent.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ImportEvent.pbtxt
index c2d8d2eba35..630a8894724 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ImportEvent.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ImportEvent.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ImportEvent"
   endpoint {
     name: "summary.ImportEvent"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_InTopKV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_InTopKV2.pbtxt
index 400ee714624..0fc46096895 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_InTopKV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_InTopKV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "InTopKV2"
   endpoint {
     name: "nn.InTopK"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IndexFlatMapDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IndexFlatMapDataset.pbtxt
new file mode 100644
index 00000000000..682904a7504
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IndexFlatMapDataset.pbtxt
@@ -0,0 +1,7 @@
+op {
+  graph_op_name: "IndexFlatMapDataset"
+  visibility: VISIBLE
+  endpoint {
+    name: "data.IndexFlatMapDataset"
+  }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_InfeedDequeue.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_InfeedDequeue.pbtxt
index 3cc8a15b7c5..0d57c36ce27 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_InfeedDequeue.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_InfeedDequeue.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "InfeedDequeue"
   endpoint {
     name: "tpu.InfeedDequeue"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_InfeedDequeueTuple.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_InfeedDequeueTuple.pbtxt
index 6e1f824518f..3655572e592 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_InfeedDequeueTuple.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_InfeedDequeueTuple.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "InfeedDequeueTuple"
   endpoint {
     name: "tpu.InfeedDequeueTuple"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_InfeedEnqueue.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_InfeedEnqueue.pbtxt
index 2e5f2cf8171..889c70cbfb1 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_InfeedEnqueue.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_InfeedEnqueue.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "InfeedEnqueue"
   endpoint {
     name: "tpu.InfeedEnqueue"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_InfeedEnqueuePrelinearizedBuffer.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_InfeedEnqueuePrelinearizedBuffer.pbtxt
index 2d78a4c3f19..e37e5ed26cf 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_InfeedEnqueuePrelinearizedBuffer.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_InfeedEnqueuePrelinearizedBuffer.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "InfeedEnqueuePrelinearizedBuffer"
   endpoint {
     name: "tpu.InfeedEnqueuePrelinearizedBuffer"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_InfeedEnqueueTuple.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_InfeedEnqueueTuple.pbtxt
index dc50c834e39..99c1cc4f0a1 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_InfeedEnqueueTuple.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_InfeedEnqueueTuple.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "InfeedEnqueueTuple"
   endpoint {
     name: "tpu.InfeedEnqueueTuple"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_InitializeTableFromTextFileV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_InitializeTableFromTextFileV2.pbtxt
index 9a4f7022018..34712e89316 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_InitializeTableFromTextFileV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_InitializeTableFromTextFileV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "InitializeTableFromTextFileV2"
   endpoint {
     name: "InitializeTableFromTextFile"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_InitializeTableV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_InitializeTableV2.pbtxt
index d7a9a813d07..efb93c75341 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_InitializeTableV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_InitializeTableV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "InitializeTableV2"
   endpoint {
     name: "InitializeTable"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_InplaceAdd.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_InplaceAdd.pbtxt
index 3d157ab7f83..c5b62051abe 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_InplaceAdd.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_InplaceAdd.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "InplaceAdd"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_InplaceSub.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_InplaceSub.pbtxt
index b2ed5496430..2b6359ab957 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_InplaceSub.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_InplaceSub.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "InplaceSub"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_InplaceUpdate.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_InplaceUpdate.pbtxt
index 91041b43abd..8d3a0f9d699 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_InplaceUpdate.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_InplaceUpdate.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "InplaceUpdate"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Inv.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Inv.pbtxt
index 49f3e6c0429..543e2c9bbe8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Inv.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Inv.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Inv"
   endpoint {
     name: "linalg.Inv"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_InvGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_InvGrad.pbtxt
index d3bfa78e99c..560855ffcf2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_InvGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_InvGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "InvGrad"
   endpoint {
     name: "nn.InvGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Invert.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Invert.pbtxt
index 9898bfa003d..6119fb19629 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Invert.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Invert.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Invert"
   endpoint {
     name: "bitwise.Invert"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_InvertPermutation.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_InvertPermutation.pbtxt
index 9ee103f554e..3fa442de299 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_InvertPermutation.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_InvertPermutation.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "InvertPermutation"
   endpoint {
     name: "math.InvertPermutation"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IsBoostedTreesEnsembleInitialized.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IsBoostedTreesEnsembleInitialized.pbtxt
index 5c4bdd5ddc4..9efd7cd8357 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_IsBoostedTreesEnsembleInitialized.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IsBoostedTreesEnsembleInitialized.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "IsBoostedTreesEnsembleInitialized"
   endpoint {
     name: "estimator.IsBoostedTreesEnsembleInitialized"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IsBoostedTreesQuantileStreamResourceInitialized.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IsBoostedTreesQuantileStreamResourceInitialized.pbtxt
index 8676e052712..630406cc2f0 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_IsBoostedTreesQuantileStreamResourceInitialized.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IsBoostedTreesQuantileStreamResourceInitialized.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "IsBoostedTreesQuantileStreamResourceInitialized"
   endpoint {
     name: "estimator.IsBoostedTreesQuantileStreamResourceInitialized"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IsFinite.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IsFinite.pbtxt
index fce58903991..1c33eae3169 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_IsFinite.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IsFinite.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "IsFinite"
   endpoint {
     name: "math.IsFinite"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IsInf.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IsInf.pbtxt
index 823c1d72812..dbe157edb81 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_IsInf.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IsInf.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "IsInf"
   endpoint {
     name: "math.IsInf"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IsNan.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IsNan.pbtxt
index 58805bf99f6..a5575098082 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_IsNan.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IsNan.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "IsNan"
   endpoint {
     name: "math.IsNan"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IsTPUEmbeddingInitialized.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IsTPUEmbeddingInitialized.pbtxt
index e0e66156b85..8f99e9b3e71 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_IsTPUEmbeddingInitialized.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IsTPUEmbeddingInitialized.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "IsTPUEmbeddingInitialized"
   endpoint {
     name: "tpu.IsTPUEmbeddingInitialized"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IsVariableInitialized.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IsVariableInitialized.pbtxt
index 7bf51da2da9..b5f0f182125 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_IsVariableInitialized.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IsVariableInitialized.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "IsVariableInitialized"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IsotonicRegression.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IsotonicRegression.pbtxt
index ec88abda69e..e0d1edb67aa 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_IsotonicRegression.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IsotonicRegression.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "IsotonicRegression"
   endpoint {
     name: "nn.IsotonicRegression"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IteratorFromStringHandleV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IteratorFromStringHandleV2.pbtxt
index 86745a3a564..214318f4aea 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_IteratorFromStringHandleV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IteratorFromStringHandleV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "IteratorFromStringHandleV2"
   endpoint {
     name: "data.IteratorFromStringHandle"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IteratorGetDevice.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IteratorGetDevice.pbtxt
index 8f829ce2787..9da26e5af9f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_IteratorGetDevice.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IteratorGetDevice.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "IteratorGetDevice"
   endpoint {
     name: "data.IteratorGetDevice"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IteratorGetModelProto.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IteratorGetModelProto.pbtxt
new file mode 100644
index 00000000000..588803255e0
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IteratorGetModelProto.pbtxt
@@ -0,0 +1,6 @@
+op {
+  graph_op_name: "IteratorGetModelProto"
+  endpoint {
+    name: "data.IteratorGetModelProto"
+  }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IteratorGetNextAsOptional.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IteratorGetNextAsOptional.pbtxt
index ae02a0e0171..95ea8dc4224 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_IteratorGetNextAsOptional.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IteratorGetNextAsOptional.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "IteratorGetNextAsOptional"
   endpoint {
     name: "data.IteratorGetNextAsOptional"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IteratorGetNextSync.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IteratorGetNextSync.pbtxt
index 4aa7c07a774..5f74f24e12f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_IteratorGetNextSync.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IteratorGetNextSync.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "IteratorGetNextSync"
   endpoint {
     name: "data.IteratorGetNextSync"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_IteratorToStringHandle.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_IteratorToStringHandle.pbtxt
index 7413ec846e7..0a7723ac676 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_IteratorToStringHandle.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_IteratorToStringHandle.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "IteratorToStringHandle"
   endpoint {
     name: "data.IteratorToStringHandle"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_KMC2ChainInitialization.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_KMC2ChainInitialization.pbtxt
index 5bf90f86d8c..5c2ed95566d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_KMC2ChainInitialization.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_KMC2ChainInitialization.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "KMC2ChainInitialization"
   endpoint {
     name: "cluster.KMC2ChainInitialization"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_KmeansPlusPlusInitialization.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_KmeansPlusPlusInitialization.pbtxt
index f3f71d56162..b4cb77a981b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_KmeansPlusPlusInitialization.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_KmeansPlusPlusInitialization.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "KmeansPlusPlusInitialization"
   endpoint {
     name: "cluster.KmeansPlusPlusInitialization"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_KthOrderStatistic.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_KthOrderStatistic.pbtxt
index 9e737cdf29c..98c602c5ebe 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_KthOrderStatistic.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_KthOrderStatistic.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "KthOrderStatistic"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_L2Loss.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_L2Loss.pbtxt
index c348e0f0e0b..e00de2d3643 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_L2Loss.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_L2Loss.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "L2Loss"
   endpoint {
     name: "nn.L2Loss"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LMDBReader.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LMDBReader.pbtxt
index b6cd7bdbb7e..cff04abb5f5 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LMDBReader.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LMDBReader.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LMDBReader"
   endpoint {
     name: "io.LmdbReader"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LRN.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LRN.pbtxt
index d16fea31843..5990d283ee7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LRN.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LRN.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LRN"
   endpoint {
     name: "nn.LocalResponseNormalization"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LRNGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LRNGrad.pbtxt
index a50e738d785..f6c64ca6d04 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LRNGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LRNGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LRNGrad"
   endpoint {
     name: "nn.LocalResponseNormalizationGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LSTMBlockCell.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LSTMBlockCell.pbtxt
index a1e1f52a119..dd8baae2a1e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LSTMBlockCell.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LSTMBlockCell.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LSTMBlockCell"
   endpoint {
     name: "nn.LSTMBlockCell"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LSTMBlockCellGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LSTMBlockCellGrad.pbtxt
index 4d0c82555d2..518a29ef8b1 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LSTMBlockCellGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LSTMBlockCellGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LSTMBlockCellGrad"
   endpoint {
     name: "nn.LSTMBlockCellGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LeakyReluGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LeakyReluGrad.pbtxt
index 9899c64c13e..0b6ab5953da 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LeakyReluGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LeakyReluGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LeakyReluGrad"
   endpoint {
     name: "data.LeakyReluGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LearnedUnigramCandidateSampler.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LearnedUnigramCandidateSampler.pbtxt
index 5f193da1be1..bc4ab82cdc2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LearnedUnigramCandidateSampler.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LearnedUnigramCandidateSampler.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LearnedUnigramCandidateSampler"
   endpoint {
     name: "nn.LearnedUnigramCandidateSampler"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LeftShift.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LeftShift.pbtxt
index 44a8727e40f..e00c50f0d68 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LeftShift.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LeftShift.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LeftShift"
   endpoint {
     name: "bitwise.LeftShift"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Less.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Less.pbtxt
index 577d2556b81..0fa328a0f94 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Less.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Less.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Less"
   endpoint {
     name: "math.Less"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LessEqual.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LessEqual.pbtxt
index 6cad35c6226..7faf528185e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LessEqual.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LessEqual.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LessEqual"
   endpoint {
     name: "math.LessEqual"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Lgamma.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Lgamma.pbtxt
index eb7bc9660c0..b6e817d9d69 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Lgamma.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Lgamma.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Lgamma"
   endpoint {
     name: "math.Lgamma"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LinSpace.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LinSpace.pbtxt
index 599c310021a..09eb5212385 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LinSpace.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LinSpace.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LinSpace"
   endpoint {
     name: "LinSpace"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ListDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ListDataset.pbtxt
index 8639c922a6b..434f9b2c332 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ListDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ListDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ListDataset"
   endpoint {
     name: "data.ListDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ListDiff.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ListDiff.pbtxt
index aa94c958f17..4cac575906b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ListDiff.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ListDiff.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ListDiff"
   endpoint {
     name: "SetDiff1d"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ListSnapshotChunksDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ListSnapshotChunksDataset.pbtxt
new file mode 100644
index 00000000000..e48c7bf0a11
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ListSnapshotChunksDataset.pbtxt
@@ -0,0 +1,7 @@
+op {
+  graph_op_name: "ListSnapshotChunksDataset"
+  visibility: VISIBLE
+  endpoint {
+    name: "data.ListSnapshotChunksDataset"
+  }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadAllTPUEmbeddingParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadAllTPUEmbeddingParameters.pbtxt
index 0efa1b29cf2..1c5cf1f526e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadAllTPUEmbeddingParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadAllTPUEmbeddingParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadAllTPUEmbeddingParameters"
   endpoint {
     name: "tpu.LoadAllTPUEmbeddingParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadAndRemapMatrix.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadAndRemapMatrix.pbtxt
index 54ee68fde44..3c8984a6cb6 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadAndRemapMatrix.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadAndRemapMatrix.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadAndRemapMatrix"
   endpoint {
     name: "linalg.LoadAndRemapMatrix"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingADAMParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingADAMParameters.pbtxt
index b5c87ea4f40..a591533cf42 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingADAMParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingADAMParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadTPUEmbeddingADAMParameters"
   endpoint {
     name: "tpu.LoadTPUEmbeddingADAMParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingADAMParametersGradAccumDebug.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingADAMParametersGradAccumDebug.pbtxt
index 6702f33be77..cd5c2cf8a4d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingADAMParametersGradAccumDebug.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingADAMParametersGradAccumDebug.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadTPUEmbeddingADAMParametersGradAccumDebug"
   endpoint {
     name: "tpu.LoadTPUEmbeddingADAMParametersGradAccumDebug"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingAdadeltaParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingAdadeltaParameters.pbtxt
index 630f6adadcd..4b4466b3ebe 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingAdadeltaParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingAdadeltaParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadTPUEmbeddingAdadeltaParameters"
   endpoint {
     name: "tpu.LoadTPUEmbeddingAdadeltaParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.pbtxt
index 3b4a4a8de5a..3e6fbd0cda8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadTPUEmbeddingAdadeltaParametersGradAccumDebug"
   endpoint {
     name: "tpu.LoadTPUEmbeddingAdadeltaParametersGradAccumDebug"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingAdagradMomentumParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingAdagradMomentumParameters.pbtxt
index 4b81cfb1c65..4ab6b43fb23 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingAdagradMomentumParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingAdagradMomentumParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadTPUEmbeddingAdagradMomentumParameters"
   endpoint {
     name: "tpu.LoadTPUEmbeddingAdagradMomentumParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingAdagradParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingAdagradParameters.pbtxt
index dccec09e382..53dc92a921c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingAdagradParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingAdagradParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadTPUEmbeddingAdagradParameters"
   endpoint {
     name: "tpu.LoadTPUEmbeddingAdagradParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingAdagradParametersGradAccumDebug.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingAdagradParametersGradAccumDebug.pbtxt
index bd6f676de12..fa8a345407c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingAdagradParametersGradAccumDebug.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingAdagradParametersGradAccumDebug.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadTPUEmbeddingAdagradParametersGradAccumDebug"
   endpoint {
     name: "tpu.LoadTPUEmbeddingAdagradParametersGradAccumDebug"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingCenteredRMSPropParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingCenteredRMSPropParameters.pbtxt
index 5e4356fb9a4..f1781c96808 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingCenteredRMSPropParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingCenteredRMSPropParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadTPUEmbeddingCenteredRMSPropParameters"
   endpoint {
     name: "tpu.LoadTPUEmbeddingCenteredRMSPropParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingFTRLParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingFTRLParameters.pbtxt
index e262d4fa79e..6b3377f0d72 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingFTRLParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingFTRLParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadTPUEmbeddingFTRLParameters"
   endpoint {
     name: "tpu.LoadTPUEmbeddingFTRLParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingFTRLParametersGradAccumDebug.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingFTRLParametersGradAccumDebug.pbtxt
index 363d4f38bfa..f17d8fcc776 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingFTRLParametersGradAccumDebug.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingFTRLParametersGradAccumDebug.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadTPUEmbeddingFTRLParametersGradAccumDebug"
   endpoint {
     name: "tpu.LoadTPUEmbeddingFTRLParametersGradAccumDebug"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingFrequencyEstimatorParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingFrequencyEstimatorParameters.pbtxt
index 97111705c86..33baa18848c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingFrequencyEstimatorParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingFrequencyEstimatorParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadTPUEmbeddingFrequencyEstimatorParameters"
   endpoint {
     name: "tpu.LoadTPUEmbeddingFrequencyEstimatorParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.pbtxt
index 1b4493b7fd5..fba39f852dc 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug"
   endpoint {
     name: "tpu.LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingMDLAdagradLightParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingMDLAdagradLightParameters.pbtxt
index 1336f14f472..d03ed8796c1 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingMDLAdagradLightParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingMDLAdagradLightParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadTPUEmbeddingMDLAdagradLightParameters"
   endpoint {
     name: "tpu.LoadTPUEmbeddingMDLAdagradLightParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingMomentumParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingMomentumParameters.pbtxt
index 1e56e9c9000..2ea9c9cfc8c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingMomentumParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingMomentumParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadTPUEmbeddingMomentumParameters"
   endpoint {
     name: "tpu.LoadTPUEmbeddingMomentumParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingMomentumParametersGradAccumDebug.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingMomentumParametersGradAccumDebug.pbtxt
index 99bdda45764..fccbff595f9 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingMomentumParametersGradAccumDebug.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingMomentumParametersGradAccumDebug.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadTPUEmbeddingMomentumParametersGradAccumDebug"
   endpoint {
     name: "tpu.LoadTPUEmbeddingMomentumParametersGradAccumDebug"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingProximalAdagradParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingProximalAdagradParameters.pbtxt
index 6771025a7d8..1b6f3afd838 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingProximalAdagradParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingProximalAdagradParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadTPUEmbeddingProximalAdagradParameters"
   endpoint {
     name: "tpu.LoadTPUEmbeddingProximalAdagradParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug.pbtxt
index 870549ab640..65fe6e27afe 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug"
   endpoint {
     name: "tpu.LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingProximalYogiParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingProximalYogiParameters.pbtxt
index 2953882f543..f416308b609 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingProximalYogiParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingProximalYogiParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadTPUEmbeddingProximalYogiParameters"
   endpoint {
     name: "tpu.LoadTPUEmbeddingProximalYogiParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingProximalYogiParametersGradAccumDebug.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingProximalYogiParametersGradAccumDebug.pbtxt
index c55d7d84731..2656c561bbb 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingProximalYogiParametersGradAccumDebug.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingProximalYogiParametersGradAccumDebug.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadTPUEmbeddingProximalYogiParametersGradAccumDebug"
   endpoint {
     name: "tpu.LoadTPUEmbeddingProximalYogiParametersGradAccumDebug"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingRMSPropParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingRMSPropParameters.pbtxt
index 18a175c7668..bee7db0753b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingRMSPropParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingRMSPropParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadTPUEmbeddingRMSPropParameters"
   endpoint {
     name: "tpu.LoadTPUEmbeddingRMSPropParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingRMSPropParametersGradAccumDebug.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingRMSPropParametersGradAccumDebug.pbtxt
index 1dbe67ff290..3becf4df5d3 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingRMSPropParametersGradAccumDebug.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingRMSPropParametersGradAccumDebug.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadTPUEmbeddingRMSPropParametersGradAccumDebug"
   endpoint {
     name: "tpu.LoadTPUEmbeddingRMSPropParametersGradAccumDebug"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingStochasticGradientDescentParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingStochasticGradientDescentParameters.pbtxt
index 846fb1a9c4f..57102d66657 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingStochasticGradientDescentParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingStochasticGradientDescentParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadTPUEmbeddingStochasticGradientDescentParameters"
   endpoint {
     name: "tpu.LoadTPUEmbeddingStochasticGradientDescentParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.pbtxt
index 86e15662c61..e6dae92e1f2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug"
   endpoint {
     name: "tpu.LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Log.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Log.pbtxt
index 9d11c26c71a..79d5b27a477 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Log.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Log.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Log"
   endpoint {
     name: "math.Log"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Log1p.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Log1p.pbtxt
index 6cc1d6e6c82..f91d9ec6a09 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Log1p.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Log1p.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Log1p"
   endpoint {
     name: "math.Log1p"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LogMatrixDeterminant.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LogMatrixDeterminant.pbtxt
index 5e52d9ecedc..3828143fa9b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LogMatrixDeterminant.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LogMatrixDeterminant.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LogMatrixDeterminant"
   endpoint {
     name: "linalg.LogMatrixDeterminant"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LogSoftmax.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LogSoftmax.pbtxt
index 19518a71ea4..94186851ee9 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LogSoftmax.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LogSoftmax.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LogSoftmax"
   endpoint {
     name: "nn.LogSoftmax"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LogUniformCandidateSampler.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LogUniformCandidateSampler.pbtxt
index bdcf01c20fd..c1f7c67d6e5 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LogUniformCandidateSampler.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LogUniformCandidateSampler.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LogUniformCandidateSampler"
   endpoint {
     name: "random.LogUniformCandidateSampler"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LogicalAnd.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LogicalAnd.pbtxt
index 12921dd932c..ebd2c0f5d60 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LogicalAnd.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LogicalAnd.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LogicalAnd"
   endpoint {
     name: "math.LogicalAnd"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LogicalNot.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LogicalNot.pbtxt
index 9e0960958ed..3665727828c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LogicalNot.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LogicalNot.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LogicalNot"
   endpoint {
     name: "math.LogicalNot"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LogicalOr.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LogicalOr.pbtxt
index 6c834e46414..e4a567d034e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LogicalOr.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LogicalOr.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LogicalOr"
   endpoint {
     name: "math.LogicalOr"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LookupTableExportV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LookupTableExportV2.pbtxt
index d780f2a21d7..0e0c4a4fac3 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LookupTableExportV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LookupTableExportV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LookupTableExportV2"
   endpoint {
     name: "LookupTableExport"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LookupTableFindV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LookupTableFindV2.pbtxt
index 2247547b62b..936dd7afecf 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LookupTableFindV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LookupTableFindV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LookupTableFindV2"
   endpoint {
     name: "LookupTableFind"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LookupTableImportV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LookupTableImportV2.pbtxt
index a39cffa12d1..9e7925797da 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LookupTableImportV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LookupTableImportV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LookupTableImportV2"
   endpoint {
     name: "LookupTableImport"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LookupTableInsertV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LookupTableInsertV2.pbtxt
index 037b743b6be..d5db4b3b535 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LookupTableInsertV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LookupTableInsertV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LookupTableInsertV2"
   endpoint {
     name: "LookupTableInsert"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LookupTableRemoveV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LookupTableRemoveV2.pbtxt
index 61f6d8db36a..911df9ae719 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LookupTableRemoveV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LookupTableRemoveV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LookupTableRemoveV2"
   endpoint {
     name: "LookupTableRemove"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LookupTableSizeV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LookupTableSizeV2.pbtxt
index b5526230d76..fafc1f8c910 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LookupTableSizeV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LookupTableSizeV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LookupTableSizeV2"
   endpoint {
     name: "LookupTableSize"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoopCond.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoopCond.pbtxt
index 492f78f62ee..88907751f5e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LoopCond.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LoopCond.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LoopCond"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_LowerBound.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_LowerBound.pbtxt
index 31f1d3038ca..3e92dbec886 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_LowerBound.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_LowerBound.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "LowerBound"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Lu.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Lu.pbtxt
index 45e3be1da5e..269a5fbb7d5 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Lu.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Lu.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Lu"
   endpoint {
     name: "linalg.Lu"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MakeUnique.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MakeUnique.pbtxt
index a485fd5b938..3c61d469fd4 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MakeUnique.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MakeUnique.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MakeUnique"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MapClear.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MapClear.pbtxt
index 3ed9bf8a5d8..b5bba0a4941 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MapClear.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MapClear.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MapClear"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MapDefun.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MapDefun.pbtxt
index 43b1dc722c0..0e069d080fe 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MapDefun.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MapDefun.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MapDefun"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MapIncompleteSize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MapIncompleteSize.pbtxt
index 2472209d20a..cd28e3f8a28 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MapIncompleteSize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MapIncompleteSize.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MapIncompleteSize"
   out_arg {
     name: "size"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MapPeek.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MapPeek.pbtxt
index eb1bd158f0d..3541cc96d63 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MapPeek.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MapPeek.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MapPeek"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MapSize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MapSize.pbtxt
index fe1d5701b4e..30bad2d2ccb 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MapSize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MapSize.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MapSize"
   out_arg {
     name: "size"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MapStage.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MapStage.pbtxt
index 6d9f66cfc48..fab2cf93595 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MapStage.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MapStage.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MapStage"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MapUnstage.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MapUnstage.pbtxt
index bb118f0fcb9..82be22f5dc8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MapUnstage.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MapUnstage.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MapUnstage"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MapUnstageNoKey.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MapUnstageNoKey.pbtxt
index 1004e96482a..dac737d2486 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MapUnstageNoKey.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MapUnstageNoKey.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MapUnstageNoKey"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatMul.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatMul.pbtxt
index fe4b8405b9c..bc7b2833b00 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatMul.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatMul.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MatMul"
   endpoint {
     name: "linalg.MatMul"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatchingFiles.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatchingFiles.pbtxt
index bb7b0968957..fea6f70d5cf 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatchingFiles.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatchingFiles.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MatchingFiles"
   endpoint {
     name: "io.MatchingFiles"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixBandPart.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixBandPart.pbtxt
index eaf426c00ef..24f955501f7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixBandPart.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixBandPart.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MatrixBandPart"
   endpoint {
     name: "linalg.BandPart"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixDeterminant.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixDeterminant.pbtxt
index b56d2dad3a8..933a41dddc0 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixDeterminant.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixDeterminant.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MatrixDeterminant"
   endpoint {
     name: "linalg.Det"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixDiagPartV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixDiagPartV2.pbtxt
index 03f0b064272..c40b2c16745 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixDiagPartV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixDiagPartV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MatrixDiagPartV2"
   endpoint {
     name: "linalg.MatrixDiagPart"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixDiagPartV3.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixDiagPartV3.pbtxt
index fe362b5058d..05fadef2d8a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixDiagPartV3.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixDiagPartV3.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MatrixDiagPartV3"
   endpoint {
     name: "linalg.MatrixDiagPartV3"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixDiagV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixDiagV2.pbtxt
index 44d6cc40593..b25ce946738 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixDiagV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixDiagV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MatrixDiagV2"
   endpoint {
     name: "linalg.MatrixDiag"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixDiagV3.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixDiagV3.pbtxt
index cebc08f5e23..fdb19d77f1b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixDiagV3.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixDiagV3.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MatrixDiagV3"
   endpoint {
     name: "linalg.MatrixDiagV3"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixExponential.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixExponential.pbtxt
index fb232dab983..9db500e926a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixExponential.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixExponential.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MatrixExponential"
   endpoint {
     name: "linalg.MatrixExponential"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixInverse.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixInverse.pbtxt
index 68721fc78db..1792f6deef7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixInverse.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixInverse.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MatrixInverse"
   endpoint {
     name: "linalg.Inv"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixLogarithm.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixLogarithm.pbtxt
index 04137ffae79..9fcb9badeb5 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixLogarithm.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixLogarithm.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MatrixLogarithm"
   endpoint {
     name: "linalg.MatrixLogarithm"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixSetDiagV3.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixSetDiagV3.pbtxt
index a9d26f74c7f..28a0fb5b934 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixSetDiagV3.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixSetDiagV3.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MatrixSetDiagV3"
   endpoint {
     name: "linalg.MatrixSetDiag"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixSolve.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixSolve.pbtxt
index 02c21448bba..cf4bc9b04a6 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixSolve.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixSolve.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MatrixSolve"
   endpoint {
     name: "linalg.Solve"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixSolveLs.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixSolveLs.pbtxt
index 9cee578ec28..c7fc10c5447 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixSolveLs.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixSolveLs.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MatrixSolveLs"
   endpoint {
     name: "linalg.MatrixSolveLs"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixSquareRoot.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixSquareRoot.pbtxt
index 14c7624fe37..42d3518b6e5 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixSquareRoot.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixSquareRoot.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MatrixSquareRoot"
   endpoint {
     name: "linalg.Sqrtm"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixTriangularSolve.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixTriangularSolve.pbtxt
index 1f61e99efe4..0e20889c3ae 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixTriangularSolve.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MatrixTriangularSolve.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MatrixTriangularSolve"
   endpoint {
     name: "linalg.TriangularSolve"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Max.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Max.pbtxt
index 03868720edf..112ab3af60a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Max.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Max.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Max"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPool3D.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPool3D.pbtxt
index 17aeb6a8c93..77232e6020a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPool3D.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPool3D.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MaxPool3D"
   endpoint {
     name: "nn.MaxPool3d"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPool3DGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPool3DGrad.pbtxt
index ca7a7be835b..bbdc2058f46 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPool3DGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPool3DGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MaxPool3DGrad"
   endpoint {
     name: "nn.MaxPool3dGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPool3DGradGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPool3DGradGrad.pbtxt
index c70aa3fe30a..cd2d4d76817 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPool3DGradGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPool3DGradGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MaxPool3DGradGrad"
   endpoint {
     name: "nn.MaxPool3dGradGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPoolGradGradV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPoolGradGradV2.pbtxt
index 2ca8a7b0221..68a36bb8b63 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPoolGradGradV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPoolGradGradV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MaxPoolGradGradV2"
   endpoint {
     name: "nn.MaxPoolGradGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPoolGradGradWithArgmax.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPoolGradGradWithArgmax.pbtxt
index d43cf7447cc..84f92c16fd9 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPoolGradGradWithArgmax.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPoolGradGradWithArgmax.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MaxPoolGradGradWithArgmax"
   endpoint {
     name: "nn.MaxPoolGradGradWithArgmax"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPoolGradV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPoolGradV2.pbtxt
index 556dd0be502..78c26e0d20a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPoolGradV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPoolGradV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MaxPoolGradV2"
   endpoint {
     name: "nn.MaxPoolGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPoolGradWithArgmax.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPoolGradWithArgmax.pbtxt
index c10701f555e..82d58e00566 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPoolGradWithArgmax.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPoolGradWithArgmax.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MaxPoolGradWithArgmax"
   endpoint {
     name: "nn.MaxPoolGradWithArgmax"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPoolV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPoolV2.pbtxt
index 84634943553..a8ebcfea908 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPoolV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPoolV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MaxPoolV2"
   endpoint {
     name: "nn.MaxPool"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPoolWithArgmax.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPoolWithArgmax.pbtxt
index 43630534cb7..c0e9bb2aa29 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPoolWithArgmax.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MaxPoolWithArgmax.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MaxPoolWithArgmax"
   endpoint {
     name: "nn.MaxPoolWithArgmax"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Maximum.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Maximum.pbtxt
index 1df9c605305..0a510be5947 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Maximum.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Maximum.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Maximum"
   endpoint {
     name: "math.Maximum"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Mean.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Mean.pbtxt
index 7bdcdc3d74e..707b1eddb86 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Mean.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Mean.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Mean"
   endpoint {
     name: "math.Mean"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Merge.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Merge.pbtxt
index 954d5085adf..e04a5e7670d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Merge.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Merge.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Merge"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MergeDedupData.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MergeDedupData.pbtxt
index f3ec72b9e9d..c62ad85f44d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MergeDedupData.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MergeDedupData.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MergeDedupData"
   endpoint {
     name: "tpu.MergeDedupData"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MergeSummary.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MergeSummary.pbtxt
index f52c7c0996b..528399aaec1 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MergeSummary.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MergeSummary.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MergeSummary"
   endpoint {
     name: "summary.MergeSummary"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MergeV2Checkpoints.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MergeV2Checkpoints.pbtxt
index 8899c8c4ed7..671ae3f9917 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MergeV2Checkpoints.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MergeV2Checkpoints.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MergeV2Checkpoints"
   endpoint {
     name: "train.MergeV2Checkpoints"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Mfcc.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Mfcc.pbtxt
index 6cb04e73ff4..018361798cc 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Mfcc.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Mfcc.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Mfcc"
   endpoint {
     name: "audio.Mfcc"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Min.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Min.pbtxt
index 72894c1ffd4..3355adfbdde 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Min.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Min.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Min"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Minimum.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Minimum.pbtxt
index 69f76a98299..cb33aa21fb4 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Minimum.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Minimum.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Minimum"
   endpoint {
     name: "math.Minimum"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MirrorPad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MirrorPad.pbtxt
index e1cb766f8fd..5bc1ebdacbc 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MirrorPad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MirrorPad.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MirrorPad"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MirrorPadGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MirrorPadGrad.pbtxt
index ddd8ab3ba1d..0a9c168e261 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MirrorPadGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MirrorPadGrad.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MirrorPadGrad"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MlirPassthroughOp.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MlirPassthroughOp.pbtxt
index 975ae9a6dee..bf4453b8f2d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MlirPassthroughOp.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MlirPassthroughOp.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MlirPassthroughOp"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Mod.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Mod.pbtxt
index 76fbbe97a89..e4003385089 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Mod.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Mod.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Mod"
   endpoint {
     name: "math.Mod"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Mul.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Mul.pbtxt
index 605e110931e..8d1f243721d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Mul.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Mul.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Mul"
   endpoint {
     name: "math.Mul"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MulNoNan.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MulNoNan.pbtxt
index 23e2247ddcf..e5af10eb9b4 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MulNoNan.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MulNoNan.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MulNoNan"
   endpoint {
     name: "math.MulNoNan"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MultiDeviceIterator.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MultiDeviceIterator.pbtxt
index 81eabf9bdcf..b51de3ab1e5 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MultiDeviceIterator.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MultiDeviceIterator.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MultiDeviceIterator"
   endpoint {
     name: "data.MultiDeviceIterator"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MultiDeviceIteratorFromStringHandle.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MultiDeviceIteratorFromStringHandle.pbtxt
index 4006f72d520..59f8a287dad 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MultiDeviceIteratorFromStringHandle.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MultiDeviceIteratorFromStringHandle.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MultiDeviceIteratorFromStringHandle"
   endpoint {
     name: "data.MultiDeviceIteratorFromStringHandle"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MultiDeviceIteratorGetNextFromShard.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MultiDeviceIteratorGetNextFromShard.pbtxt
index a7e6fc15082..f36e12598c4 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MultiDeviceIteratorGetNextFromShard.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MultiDeviceIteratorGetNextFromShard.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MultiDeviceIteratorGetNextFromShard"
   endpoint {
     name: "data.MultiDeviceIteratorGetNextFromShard"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MultiDeviceIteratorInit.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MultiDeviceIteratorInit.pbtxt
index 1663bc5c226..3b477b5b21f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MultiDeviceIteratorInit.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MultiDeviceIteratorInit.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MultiDeviceIteratorInit"
   endpoint {
     name: "data.MultiDeviceIteratorInit"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MultiDeviceIteratorToStringHandle.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MultiDeviceIteratorToStringHandle.pbtxt
index ff061da390b..3d4958f4495 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MultiDeviceIteratorToStringHandle.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MultiDeviceIteratorToStringHandle.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MultiDeviceIteratorToStringHandle"
   endpoint {
     name: "data.MultiDeviceIteratorToStringHandle"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Multinomial.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Multinomial.pbtxt
index bd98bb10b4b..2b693f02801 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Multinomial.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Multinomial.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Multinomial"
   endpoint {
     name: "random.Multinomial"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MutableDenseHashTableV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MutableDenseHashTableV2.pbtxt
index d1f7f26848b..8f20cfd93f5 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MutableDenseHashTableV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MutableDenseHashTableV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MutableDenseHashTableV2"
   endpoint {
     name: "MutableDenseHashTable"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MutableHashTableOfTensorsV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MutableHashTableOfTensorsV2.pbtxt
index f6d7451267b..d9c26fde9dc 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MutableHashTableOfTensorsV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MutableHashTableOfTensorsV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MutableHashTableOfTensorsV2"
   endpoint {
     name: "MutableHashTableOfTensors"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MutableHashTableV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MutableHashTableV2.pbtxt
index 45d619d6747..0cfaa1a226b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MutableHashTableV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MutableHashTableV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MutableHashTableV2"
   endpoint {
     name: "MutableHashTable"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MutexLock.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MutexLock.pbtxt
index 75c7be5286a..99bf40ba1ed 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MutexLock.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MutexLock.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MutexLock"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_MutexV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_MutexV2.pbtxt
index f89cd106432..17198f4f38c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_MutexV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_MutexV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "MutexV2"
   endpoint {
     name: "Mutex"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_NcclAllReduce.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_NcclAllReduce.pbtxt
index eec537f2059..cd7390fa15e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_NcclAllReduce.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_NcclAllReduce.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "NcclAllReduce"
   endpoint: {
     name: "distribute.NcclAllReduce"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_NcclBroadcast.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_NcclBroadcast.pbtxt
index 42150e2853b..74abc5b82d0 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_NcclBroadcast.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_NcclBroadcast.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "NcclBroadcast"
   endpoint: {
     name: "distribute.NcclBroadcast"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_NcclReduce.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_NcclReduce.pbtxt
index 77ce0f6d79a..a0ee5487b3e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_NcclReduce.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_NcclReduce.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "NcclReduce"
   endpoint: {
     name: "distribute.NcclReduce"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Ndtri.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Ndtri.pbtxt
index 9e03d7c01f1..9394ba422af 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Ndtri.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Ndtri.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Ndtri"
   endpoint {
     name: "math.Ndtri"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_NearestNeighbors.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_NearestNeighbors.pbtxt
index da67ad1992c..d50362c31fd 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_NearestNeighbors.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_NearestNeighbors.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "NearestNeighbors"
   endpoint {
     name: "image.NearestNeighbors"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Neg.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Neg.pbtxt
index c7e9ede2a56..440db9e6414 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Neg.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Neg.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Neg"
   endpoint {
     name: "math.Neg"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_NegTrain.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_NegTrain.pbtxt
index eb62186362a..5f381b6e034 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_NegTrain.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_NegTrain.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "NegTrain"
   endpoint {
     name: "train.NegTrain"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_NextAfter.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_NextAfter.pbtxt
index b5ab917ba84..c1c88706cb2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_NextAfter.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_NextAfter.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "NextAfter"
   endpoint {
     name: "math.NextAfter"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_NextIteration.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_NextIteration.pbtxt
index bc63f6ada14..63b551aad19 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_NextIteration.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_NextIteration.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "NextIteration"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_NoOp.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_NoOp.pbtxt
index 337fb5da146..f3d89127156 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_NoOp.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_NoOp.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "NoOp"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_NonDeterministicInts.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_NonDeterministicInts.pbtxt
index 564fbc5928d..aa8cc027cd6 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_NonDeterministicInts.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_NonDeterministicInts.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "NonDeterministicInts"
   endpoint {
     name: "random.NonDeterministicInts"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_NonMaxSuppressionV5.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_NonMaxSuppressionV5.pbtxt
index 31860e21e45..7821a13912d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_NonMaxSuppressionV5.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_NonMaxSuppressionV5.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "NonMaxSuppressionV5"
   endpoint {
     name: "image.NonMaxSuppression"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_NonMaxSuppressionWithOverlaps.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_NonMaxSuppressionWithOverlaps.pbtxt
index 06fa52920d2..de5488bb255 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_NonMaxSuppressionWithOverlaps.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_NonMaxSuppressionWithOverlaps.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "NonMaxSuppressionWithOverlaps"
   endpoint {
     name: "image.NonMaxSuppressionWithOverlaps"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_NotEqual.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_NotEqual.pbtxt
index 0d99af40b5d..5b587960e14 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_NotEqual.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_NotEqual.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "NotEqual"
   endpoint {
     name: "math.NotEqual"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_NthElement.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_NthElement.pbtxt
index 57097e634aa..7930c041ad3 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_NthElement.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_NthElement.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "NthElement"
   endpoint {
     name: "nn.NthElement"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_OneHot.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_OneHot.pbtxt
index 66872d5eb8e..116d0272f16 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_OneHot.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_OneHot.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "OneHot"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_OnesLike.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_OnesLike.pbtxt
index 97abe0814a9..06bdeacbd0e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_OnesLike.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_OnesLike.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "OnesLike"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_OptionalFromValue.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_OptionalFromValue.pbtxt
index d251fd5d948..282d866f180 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_OptionalFromValue.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_OptionalFromValue.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "OptionalFromValue"
   endpoint {
     name: "data.OptionalFromValue"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_OptionalGetValue.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_OptionalGetValue.pbtxt
index 7fcdb5ac694..b0be584ec90 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_OptionalGetValue.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_OptionalGetValue.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "OptionalGetValue"
   endpoint {
     name: "data.OptionalGetValue"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_OptionalHasValue.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_OptionalHasValue.pbtxt
index 4ffa15b564c..f2778d04bb8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_OptionalHasValue.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_OptionalHasValue.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "OptionalHasValue"
   endpoint {
     name: "data.OptionalHasValue"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_OptionalNone.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_OptionalNone.pbtxt
index cec29a42ae2..77ec49c964c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_OptionalNone.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_OptionalNone.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "OptionalNone"
   endpoint {
     name: "data.OptionalNone"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapClear.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapClear.pbtxt
index e36b2aa3e4f..30c9cc626d0 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapClear.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapClear.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "OrderedMapClear"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapIncompleteSize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapIncompleteSize.pbtxt
index 27d68e2d99d..ef2e4e4f272 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapIncompleteSize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapIncompleteSize.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "OrderedMapIncompleteSize"
   out_arg {
     name: "size"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapPeek.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapPeek.pbtxt
index 06fc2182773..2ac8b71d2fb 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapPeek.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapPeek.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "OrderedMapPeek"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapSize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapSize.pbtxt
index 30e6215a0ee..47e4f6188ce 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapSize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapSize.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "OrderedMapSize"
   out_arg {
     name: "size"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapStage.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapStage.pbtxt
index 8b579d21a0e..22a96eaccb0 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapStage.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapStage.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "OrderedMapStage"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapUnstage.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapUnstage.pbtxt
index d3d6862fbe0..b617e0ad11e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapUnstage.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapUnstage.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "OrderedMapUnstage"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapUnstageNoKey.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapUnstageNoKey.pbtxt
index 3d275c85d94..e5beaff8915 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapUnstageNoKey.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_OrderedMapUnstageNoKey.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "OrderedMapUnstageNoKey"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_OutfeedDequeue.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_OutfeedDequeue.pbtxt
index 15f5d29a2b6..6a8b5d5f562 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_OutfeedDequeue.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_OutfeedDequeue.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "OutfeedDequeue"
   endpoint {
     name: "tpu.OutfeedDequeue"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_OutfeedDequeueTuple.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_OutfeedDequeueTuple.pbtxt
index 3e15876cfbf..9b8a9b7bebd 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_OutfeedDequeueTuple.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_OutfeedDequeueTuple.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "OutfeedDequeueTuple"
   endpoint {
     name: "tpu.OutfeedDequeueTuple"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_OutfeedDequeueTupleV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_OutfeedDequeueTupleV2.pbtxt
index 7fdebc8a3bd..7fef814d5b3 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_OutfeedDequeueTupleV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_OutfeedDequeueTupleV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "OutfeedDequeueTupleV2"
   endpoint {
     name: "tpu.OutfeedDequeueTupleV2"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_OutfeedDequeueV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_OutfeedDequeueV2.pbtxt
index cc61dd2a047..02c947f23f8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_OutfeedDequeueV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_OutfeedDequeueV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "OutfeedDequeueV2"
   endpoint {
     name: "tpu.OutfeedDequeueV2"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_OutfeedEnqueue.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_OutfeedEnqueue.pbtxt
index 02be5eee442..18694993470 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_OutfeedEnqueue.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_OutfeedEnqueue.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "OutfeedEnqueue"
   endpoint {
     name: "tpu.OutfeedEnqueue"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_OutfeedEnqueueTuple.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_OutfeedEnqueueTuple.pbtxt
index 9b56fecc72c..e4347fe0bcd 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_OutfeedEnqueueTuple.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_OutfeedEnqueueTuple.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "OutfeedEnqueueTuple"
   endpoint {
     name: "tpu.OutfeedEnqueueTuple"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Pack.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Pack.pbtxt
index d9e9897d77c..2be5e46f791 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Pack.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Pack.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Pack"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_PadV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_PadV2.pbtxt
index ffc12164560..2462c556cf3 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_PadV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_PadV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "PadV2"
   endpoint {
     name: "Pad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_PaddingFIFOQueueV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_PaddingFIFOQueueV2.pbtxt
index 605025be791..d0b4a712ff8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_PaddingFIFOQueueV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_PaddingFIFOQueueV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "PaddingFIFOQueueV2"
   endpoint {
     name: "io.PaddingFifoQueue"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ParallelConcat.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ParallelConcat.pbtxt
index 0b17c7d2569..cead44173d7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ParallelConcat.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ParallelConcat.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ParallelConcat"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ParallelDynamicStitch.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ParallelDynamicStitch.pbtxt
index 79a55b763f3..a8bebe9f4f5 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ParallelDynamicStitch.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ParallelDynamicStitch.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ParallelDynamicStitch"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ParallelFilterDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ParallelFilterDataset.pbtxt
index 4536ba19dbc..32e189b0963 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ParallelFilterDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ParallelFilterDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ParallelFilterDataset"
   endpoint {
     name: "data.ParallelFilterDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ParameterizedTruncatedNormal.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ParameterizedTruncatedNormal.pbtxt
index 26ca2fc86fa..e271245d703 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ParameterizedTruncatedNormal.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ParameterizedTruncatedNormal.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ParameterizedTruncatedNormal"
   endpoint {
     name: "random.ParameterizedTruncatedNormal"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ParseExampleV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ParseExampleV2.pbtxt
index 62f76eb8ccc..c78eb77249c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ParseExampleV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ParseExampleV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ParseExampleV2"
   endpoint {
     name: "io.ParseExample"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ParseSequenceExampleV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ParseSequenceExampleV2.pbtxt
index 6a873ad57c7..3ce6e01560f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ParseSequenceExampleV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ParseSequenceExampleV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ParseSequenceExampleV2"
   endpoint {
     name: "io.ParseSequenceExample"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ParseSingleExample.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ParseSingleExample.pbtxt
index 7559957b35e..d2dfdc20ea2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ParseSingleExample.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ParseSingleExample.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ParseSingleExample"
   endpoint {
     name: "io.ParseSingleExample"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ParseSingleSequenceExample.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ParseSingleSequenceExample.pbtxt
index 00eb325b2a4..2a83b9105e0 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ParseSingleSequenceExample.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ParseSingleSequenceExample.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ParseSingleSequenceExample"
   endpoint {
     name: "io.ParseSingleSequenceExample"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ParseTensor.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ParseTensor.pbtxt
index a78cdc7f5c4..e8e5db934af 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ParseTensor.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ParseTensor.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ParseTensor"
   endpoint {
     name: "io.ParseTensor"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_PartitionedCall.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_PartitionedCall.pbtxt
index 1ac10b60280..268c519a8a7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_PartitionedCall.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_PartitionedCall.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "PartitionedCall"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Placeholder.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Placeholder.pbtxt
index 5e6daa2ae42..2e83fe4d8f8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Placeholder.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Placeholder.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Placeholder"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_PlaceholderWithDefault.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_PlaceholderWithDefault.pbtxt
index 59067a9c688..d20aff9cb92 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_PlaceholderWithDefault.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_PlaceholderWithDefault.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "PlaceholderWithDefault"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Polygamma.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Polygamma.pbtxt
index 746b3375a0f..f81d95924f1 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Polygamma.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Polygamma.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Polygamma"
   endpoint {
     name: "math.Polygamma"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_PopulationCount.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_PopulationCount.pbtxt
index 6aacdf4d121..840404a23d0 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_PopulationCount.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_PopulationCount.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "PopulationCount"
   endpoint {
     name: "math.PopulationCount"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Pow.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Pow.pbtxt
index e7eaaed6952..8657e4afb98 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Pow.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Pow.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Pow"
   endpoint {
     name: "math.Pow"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Prelinearize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Prelinearize.pbtxt
index 3d8c0d77a16..71d98c0868f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Prelinearize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Prelinearize.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Prelinearize"
   endpoint {
     name: "tpu.Prelinearize"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_PrelinearizeTuple.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_PrelinearizeTuple.pbtxt
index e00a3b58b82..59751130dfc 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_PrelinearizeTuple.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_PrelinearizeTuple.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "PrelinearizeTuple"
   endpoint {
     name: "tpu.PrelinearizeTuple"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_PreventGradient.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_PreventGradient.pbtxt
index 4731f21af4c..bafa0a5a739 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_PreventGradient.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_PreventGradient.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "PreventGradient"
   endpoint {
     name: "train.PreventGradient"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_PrintV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_PrintV2.pbtxt
index d1e4d74b1e3..573751c55b8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_PrintV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_PrintV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "PrintV2"
   endpoint {
     name: "Print"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_PriorityQueueV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_PriorityQueueV2.pbtxt
index 1f6a6f2906b..8bd3b0a04dc 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_PriorityQueueV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_PriorityQueueV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "PriorityQueueV2"
   endpoint {
     name: "io.PriorityQueue"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Prod.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Prod.pbtxt
index 7a98972a871..d1c62ee4c7d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Prod.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Prod.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Prod"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Qr.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Qr.pbtxt
index c0d31e79778..13b372131af 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Qr.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Qr.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Qr"
   endpoint {
     name: "linalg.Qr"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizeAndDequantizeV3.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizeAndDequantizeV3.pbtxt
index ce20ef12386..49b0b0d4878 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizeAndDequantizeV3.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizeAndDequantizeV3.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizeAndDequantizeV3"
   endpoint {
     name: "quantization.QuantizeAndDequantizeV3"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizeAndDequantizeV4.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizeAndDequantizeV4.pbtxt
index 9920bcd06a4..4e9780f470c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizeAndDequantizeV4.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizeAndDequantizeV4.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizeAndDequantizeV4"
   endpoint {
     name: "quantization.QuantizeAndDequantizeV4"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizeAndDequantizeV4Grad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizeAndDequantizeV4Grad.pbtxt
index 49ee4ab14a7..3c86a135f58 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizeAndDequantizeV4Grad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizeAndDequantizeV4Grad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizeAndDequantizeV4Grad"
   endpoint {
     name: "quantization.QuantizeAndDequantizeV4Grad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizeDownAndShrinkRange.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizeDownAndShrinkRange.pbtxt
index 7119f53cb29..ac2dc64b29b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizeDownAndShrinkRange.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizeDownAndShrinkRange.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizeDownAndShrinkRange"
   endpoint {
     name: "quantization.QuantizeDownAndShrinkRange"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizeV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizeV2.pbtxt
index 25c9c3bdce4..8dd0155b0cc 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizeV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizeV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizeV2"
   endpoint {
     name: "quantization.Quantize"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedAdd.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedAdd.pbtxt
index 1a2bfa36ed7..409160600a2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedAdd.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedAdd.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedAdd"
   endpoint {
     name: "math.QuantizedAdd"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedAvgPool.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedAvgPool.pbtxt
index 7f16fb046dd..4f6112fd2d6 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedAvgPool.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedAvgPool.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedAvgPool"
   endpoint {
     name: "nn.QuantizedAvgPool"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedBatchNormWithGlobalNormalization.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedBatchNormWithGlobalNormalization.pbtxt
index 2101ce5d690..f83d5c2433a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedBatchNormWithGlobalNormalization.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedBatchNormWithGlobalNormalization.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedBatchNormWithGlobalNormalization"
   endpoint {
     name: "nn.QuantizedBatchNormWithGlobalNormalization"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedBiasAdd.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedBiasAdd.pbtxt
index c18fb1c5741..42af03225d9 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedBiasAdd.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedBiasAdd.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedBiasAdd"
   endpoint {
     name: "nn.QuantizedBiasAdd"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConcat.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConcat.pbtxt
index 98ad8f92ac6..6f494b440b1 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConcat.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConcat.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedConcat"
   endpoint {
     name: "quantization.QuantizedConcat"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2D.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2D.pbtxt
index 9909157603e..a6e20f4585d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2D.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2D.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedConv2D"
   endpoint {
     name: "nn.QuantizedConv2d"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DAndRelu.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DAndRelu.pbtxt
index 6f808e6ec3d..11babc82e64 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DAndRelu.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DAndRelu.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedConv2DAndRelu"
   endpoint {
     name: "nn.QuantizedConv2DAndRelu"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DAndReluAndRequantize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DAndReluAndRequantize.pbtxt
index 78f9b567a8c..69598eb29e7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DAndReluAndRequantize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DAndReluAndRequantize.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedConv2DAndReluAndRequantize"
   endpoint {
     name: "nn.QuantizedConv2DAndReluAndRequantize"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DAndRequantize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DAndRequantize.pbtxt
index d28e8d45855..074c8bb81dc 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DAndRequantize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DAndRequantize.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedConv2DAndRequantize"
   endpoint {
     name: "nn.QuantizedConv2DAndRequantize"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DPerChannel.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DPerChannel.pbtxt
index c9af2597c01..8e0ad23bd42 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DPerChannel.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DPerChannel.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedConv2DPerChannel"
   endpoint {
     name: "nn.QuantizedConv2DPerChannel"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBias.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBias.pbtxt
index 3eb73496eb3..bfb35fd99ee 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBias.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBias.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedConv2DWithBias"
   endpoint {
     name: "nn.QuantizedConv2DWithBias"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBiasAndRelu.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBiasAndRelu.pbtxt
index c889787cf35..094b5484db9 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBiasAndRelu.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBiasAndRelu.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedConv2DWithBiasAndRelu"
   endpoint {
     name: "nn.QuantizedConv2DWithBiasAndRelu"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBiasAndReluAndRequantize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBiasAndReluAndRequantize.pbtxt
index e24261bb2ba..45a9ae59f11 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBiasAndReluAndRequantize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBiasAndReluAndRequantize.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedConv2DWithBiasAndReluAndRequantize"
   endpoint {
     name: "nn.QuantizedConv2DWithBiasAndReluAndRequantize"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBiasAndRequantize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBiasAndRequantize.pbtxt
index f280bd25ff0..e2360686b4a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBiasAndRequantize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBiasAndRequantize.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedConv2DWithBiasAndRequantize"
   endpoint {
     name: "nn.QuantizedConv2DWithBiasAndRequantize"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.pbtxt
index ef2f2a9da36..16c15d1bcbb 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize"
   endpoint {
     name: "nn.QuantizedConv2DWithBiasSignedSumAndReluAndRequantize"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBiasSumAndRelu.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBiasSumAndRelu.pbtxt
index c1fa4efcdf6..210d5287924 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBiasSumAndRelu.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBiasSumAndRelu.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedConv2DWithBiasSumAndRelu"
   endpoint {
     name: "nn.QuantizedConv2DWithBiasSumAndRelu"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBiasSumAndReluAndRequantize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBiasSumAndReluAndRequantize.pbtxt
index 08c7dc22698..910800ac4f0 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBiasSumAndReluAndRequantize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedConv2DWithBiasSumAndReluAndRequantize.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedConv2DWithBiasSumAndReluAndRequantize"
   endpoint {
     name: "nn.QuantizedConv2DWithBiasSumAndReluAndRequantize"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedDepthwiseConv2D.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedDepthwiseConv2D.pbtxt
index 8b77dac4062..cfcc863566b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedDepthwiseConv2D.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedDepthwiseConv2D.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedDepthwiseConv2D"
   endpoint {
     name: "nn.QuantizedDepthwiseConv2D"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedDepthwiseConv2DWithBias.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedDepthwiseConv2DWithBias.pbtxt
index e69ceccf887..961de7a11f7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedDepthwiseConv2DWithBias.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedDepthwiseConv2DWithBias.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedDepthwiseConv2DWithBias"
   endpoint {
     name: "nn.QuantizedDepthwiseConv2DWithBias"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedDepthwiseConv2DWithBiasAndRelu.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedDepthwiseConv2DWithBiasAndRelu.pbtxt
index 72fac5d7da9..4470675b660 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedDepthwiseConv2DWithBiasAndRelu.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedDepthwiseConv2DWithBiasAndRelu.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedDepthwiseConv2DWithBiasAndRelu"
   endpoint {
     name: "nn.QuantizedDepthwiseConv2DWithBiasAndRelu"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.pbtxt
index 823e41213a5..e2673935a16 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize"
   endpoint {
     name: "nn.QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedInstanceNorm.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedInstanceNorm.pbtxt
index bbd2e7fc5ec..52620d0f998 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedInstanceNorm.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedInstanceNorm.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedInstanceNorm"
   endpoint {
     name: "nn.QuantizedInstanceNorm"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMatMul.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMatMul.pbtxt
index 7962cbade6f..40f0a5e788c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMatMul.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMatMul.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedMatMul"
   endpoint {
     name: "linalg.QuantizedMatMul"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMatMulWithBias.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMatMulWithBias.pbtxt
index 3642c953987..65cd7780258 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMatMulWithBias.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMatMulWithBias.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedMatMulWithBias"
   endpoint {
     name: "linalg.QuantizedMatMulWithBias"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMatMulWithBiasAndDequantize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMatMulWithBiasAndDequantize.pbtxt
index 45af20c078c..2c47dfba1b0 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMatMulWithBiasAndDequantize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMatMulWithBiasAndDequantize.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedMatMulWithBiasAndDequantize"
   endpoint {
     name: "quantization.QuantizedMatMulWithBiasAndDequantize"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMatMulWithBiasAndRelu.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMatMulWithBiasAndRelu.pbtxt
index eba6765ac5d..9f7d19c4203 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMatMulWithBiasAndRelu.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMatMulWithBiasAndRelu.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedMatMulWithBiasAndRelu"
   endpoint {
     name: "linalg.QuantizedMatMulWithBiasAndRelu"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMatMulWithBiasAndReluAndRequantize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMatMulWithBiasAndReluAndRequantize.pbtxt
index 76c88313eaa..548eeb7b9ef 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMatMulWithBiasAndReluAndRequantize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMatMulWithBiasAndReluAndRequantize.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedMatMulWithBiasAndReluAndRequantize"
   endpoint {
     name: "linalg.QuantizedMatMulWithBiasAndReluAndRequantize"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMatMulWithBiasAndRequantize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMatMulWithBiasAndRequantize.pbtxt
index 6540e8a93fe..24994b5662f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMatMulWithBiasAndRequantize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMatMulWithBiasAndRequantize.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedMatMulWithBiasAndRequantize"
   endpoint {
     name: "quantization.QuantizedMatMulWithBiasAndRequantize"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMaxPool.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMaxPool.pbtxt
index 57e900494e9..40f6f65c9b1 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMaxPool.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMaxPool.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedMaxPool"
   endpoint {
     name: "nn.QuantizedMaxPool"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMul.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMul.pbtxt
index be23ef706e2..6b14b69beb5 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMul.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedMul.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedMul"
   endpoint {
     name: "math.QuantizedMul"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedRelu.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedRelu.pbtxt
index 7b9a11640ba..8e1b314e688 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedRelu.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedRelu.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedRelu"
   endpoint {
     name: "nn.QuantizedRelu"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedRelu6.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedRelu6.pbtxt
index 6a60e2112ee..f5230201707 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedRelu6.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedRelu6.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedRelu6"
   endpoint {
     name: "nn.QuantizedRelu6"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedReluX.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedReluX.pbtxt
index cc47d322b27..a52915868d7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedReluX.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedReluX.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedReluX"
   endpoint {
     name: "nn.QuantizedReluX"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedReshape.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedReshape.pbtxt
index 4557853d94f..f2049b9f380 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedReshape.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedReshape.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedReshape"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedResizeBilinear.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedResizeBilinear.pbtxt
index 81dca490944..28191a12de9 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedResizeBilinear.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QuantizedResizeBilinear.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QuantizedResizeBilinear"
   endpoint {
     name: "image.QuantizedResizeBilinear"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueCloseV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueCloseV2.pbtxt
index ce779650e5b..08c2af13ab5 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueCloseV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueCloseV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QueueCloseV2"
   endpoint {
     name: "io.QueueClose"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueDequeueManyV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueDequeueManyV2.pbtxt
index 10fe198ff26..e0cdf5ce764 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueDequeueManyV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueDequeueManyV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QueueDequeueManyV2"
   endpoint {
     name: "io.QueueDequeueMany"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueDequeueUpToV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueDequeueUpToV2.pbtxt
index fadea0926b6..715b614ccda 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueDequeueUpToV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueDequeueUpToV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QueueDequeueUpToV2"
   endpoint {
     name: "io.QueueDequeueUpTo"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueDequeueV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueDequeueV2.pbtxt
index 7ba03afbfeb..670a81b09c6 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueDequeueV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueDequeueV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QueueDequeueV2"
   endpoint {
     name: "io.QueueDequeue"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueEnqueueManyV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueEnqueueManyV2.pbtxt
index be3fed47896..8f08727b990 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueEnqueueManyV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueEnqueueManyV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QueueEnqueueManyV2"
   endpoint {
     name: "io.QueueEnqueueMany"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueEnqueueV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueEnqueueV2.pbtxt
index e71a2211e1e..56700dbe62d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueEnqueueV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueEnqueueV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QueueEnqueueV2"
   endpoint {
     name: "io.QueueEnqueue"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueIsClosedV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueIsClosedV2.pbtxt
index 148d313a6d3..e3c27b82fe8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueIsClosedV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueIsClosedV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QueueIsClosedV2"
   endpoint {
     name: "io.QueueIsClosed"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueSizeV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueSizeV2.pbtxt
index bc17c8daf96..f352e15e0c7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueSizeV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_QueueSizeV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "QueueSizeV2"
   endpoint {
     name: "io.QueueSize"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RFFT.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RFFT.pbtxt
index 9576600e756..708de1951ae 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RFFT.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RFFT.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RFFT"
   endpoint {
     name: "signal.Rfft"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RFFT2D.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RFFT2D.pbtxt
index 41d638b26a8..8488c65b5d0 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RFFT2D.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RFFT2D.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RFFT2D"
   endpoint {
     name: "signal.Rfft2d"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RFFT3D.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RFFT3D.pbtxt
index 7a762d22e5c..09218cd6296 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RFFT3D.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RFFT3D.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RFFT3D"
   endpoint {
     name: "signal.Rfft3d"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RFFTND.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RFFTND.pbtxt
index 51476b79b65..4b46e0f0d31 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RFFTND.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RFFTND.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RFFTND"
   endpoint {
     name: "signal.RfftNd"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RGBToHSV.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RGBToHSV.pbtxt
index 1b35891ae2c..2172f52405b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RGBToHSV.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RGBToHSV.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RGBToHSV"
   endpoint {
     name: "image.RgbToHsv"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedBincount.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedBincount.pbtxt
index b52b8ff650e..632be33d30d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedBincount.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedBincount.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RaggedBincount"
   endpoint {
     name: "ragged.RaggedBincount"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedCountSparseOutput.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedCountSparseOutput.pbtxt
index 74597fd7b71..e74b9bd9d0a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedCountSparseOutput.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedCountSparseOutput.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RaggedCountSparseOutput"
   endpoint {
     name: "ragged.RaggedCountSparseOutput"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedCross.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedCross.pbtxt
index bebf9f47f78..7da3096c7ef 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedCross.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedCross.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RaggedCross"
   endpoint {
     name: "ragged.RaggedCross"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedFillEmptyRows.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedFillEmptyRows.pbtxt
index 83fc6eb549b..5f135f87e74 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedFillEmptyRows.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedFillEmptyRows.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RaggedFillEmptyRows"
   endpoint {
     name: "ragged.RaggedFillEmptyRows"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedFillEmptyRowsGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedFillEmptyRowsGrad.pbtxt
index fd56e4c69cf..5f8f1790f32 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedFillEmptyRowsGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedFillEmptyRowsGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RaggedFillEmptyRowsGrad"
   endpoint {
     name: "ragged.RaggedFillEmptyRowsGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedGather.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedGather.pbtxt
index ac066f5d68c..10da3a31954 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedGather.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedGather.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RaggedGather"
   endpoint {
     name: "ragged.RaggedGather"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedRange.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedRange.pbtxt
index 8853575bda6..6ec658b61fe 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedRange.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedRange.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RaggedRange"
   endpoint {
     name: "ragged.RaggedRange"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedTensorFromVariant.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedTensorFromVariant.pbtxt
index 9594d2d6ac6..2067148bde1 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedTensorFromVariant.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedTensorFromVariant.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RaggedTensorFromVariant"
   endpoint {
     name: "ragged.RaggedTensorFromVariant"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedTensorToSparse.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedTensorToSparse.pbtxt
index 1a658898238..c6d61a22606 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedTensorToSparse.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedTensorToSparse.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RaggedTensorToSparse"
   endpoint {
     name: "ragged.RaggedTensorToSparse"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedTensorToTensor.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedTensorToTensor.pbtxt
index 25d8f380e2f..2bae8ad3de2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedTensorToTensor.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedTensorToTensor.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RaggedTensorToTensor"
   endpoint {
     name: "ragged.RaggedTensorToTensor"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedTensorToVariant.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedTensorToVariant.pbtxt
index 5da45895a35..3e4b2029a1b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedTensorToVariant.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedTensorToVariant.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RaggedTensorToVariant"
   endpoint {
     name: "ragged.RaggedTensorToVariant"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedTensorToVariantGradient.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedTensorToVariantGradient.pbtxt
index 53abb4a0d2f..a09acd4debd 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedTensorToVariantGradient.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RaggedTensorToVariantGradient.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RaggedTensorToVariantGradient"
   endpoint {
     name: "ragged.RaggedTensorToVariantGradient"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomCrop.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomCrop.pbtxt
index a3b8a3cecda..be299f2ed38 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomCrop.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomCrop.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RandomCrop"
   endpoint {
     name: "image.RandomCrop"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomGamma.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomGamma.pbtxt
index 927f2c56937..c8fbfbf0134 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomGamma.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomGamma.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RandomGamma"
   endpoint {
     name: "random.RandomGamma"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomGammaGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomGammaGrad.pbtxt
index 9257495c9bd..6d0b3466690 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomGammaGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomGammaGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RandomGammaGrad"
   endpoint {
     name: "random.RandomGammaGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomIndexShuffle.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomIndexShuffle.pbtxt
index 93c95a499cd..0af6f3e5b1e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomIndexShuffle.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomIndexShuffle.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RandomIndexShuffle"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomPoissonV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomPoissonV2.pbtxt
index 5efe01bf401..09bdecdaa10 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomPoissonV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomPoissonV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RandomPoissonV2"
   endpoint {
     name: "random.RandomPoisson"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomShuffle.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomShuffle.pbtxt
index 6dcd12fd375..5d0d7a3b680 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomShuffle.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomShuffle.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RandomShuffle"
   endpoint {
     name: "random.RandomShuffle"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomShuffleQueueV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomShuffleQueueV2.pbtxt
index 779363303ca..4dd84fac74d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomShuffleQueueV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomShuffleQueueV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RandomShuffleQueueV2"
   endpoint {
     name: "io.RandomShuffleQueue"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomStandardNormal.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomStandardNormal.pbtxt
index 413fc87bdf0..5ac99b9005b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomStandardNormal.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomStandardNormal.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RandomStandardNormal"
   endpoint {
     name: "random.RandomStandardNormal"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomUniform.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomUniform.pbtxt
index 2a93df83df6..bdec5ac99d7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomUniform.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomUniform.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RandomUniform"
   endpoint {
     name: "random.RandomUniform"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomUniformInt.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomUniformInt.pbtxt
index a1383f406a3..4102517f3de 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomUniformInt.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RandomUniformInt.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RandomUniformInt"
   endpoint {
     name: "random.RandomUniformInt"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Range.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Range.pbtxt
index 24f3787a8e3..dbda35b7374 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Range.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Range.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Range"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Rank.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Rank.pbtxt
index baa84aab10d..dc306a7ae56 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Rank.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Rank.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Rank"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReadFile.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReadFile.pbtxt
index f74250d42f7..8d2c022f428 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReadFile.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReadFile.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ReadFile"
   endpoint {
     name: "io.ReadFile"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReadVariableOp.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReadVariableOp.pbtxt
index 018886d5b82..7f053e301a6 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReadVariableOp.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReadVariableOp.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ReadVariableOp"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReadVariableXlaSplitND.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReadVariableXlaSplitND.pbtxt
index 81374fce42a..5d6bfb45373 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReadVariableXlaSplitND.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReadVariableXlaSplitND.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ReadVariableXlaSplitND"
   endpoint {
     name: "xla.ReadVariableSplitND"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderNumRecordsProducedV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderNumRecordsProducedV2.pbtxt
index 54a30abe187..13578bcba83 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderNumRecordsProducedV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderNumRecordsProducedV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ReaderNumRecordsProducedV2"
   endpoint {
     name: "io.ReaderNumRecordsProduced"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderNumWorkUnitsCompletedV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderNumWorkUnitsCompletedV2.pbtxt
index 0904ba19e53..1a72c3be10a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderNumWorkUnitsCompletedV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderNumWorkUnitsCompletedV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ReaderNumWorkUnitsCompletedV2"
   endpoint {
     name: "io.ReaderNumWorkUnitsCompleted"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderReadUpToV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderReadUpToV2.pbtxt
index 777d09fa2c6..06a316fbb70 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderReadUpToV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderReadUpToV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ReaderReadUpToV2"
   endpoint {
     name: "io.ReaderReadUpTo"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderReadV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderReadV2.pbtxt
index a5d45bd1db9..64bd40cdde8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderReadV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderReadV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ReaderReadV2"
   endpoint {
     name: "io.ReaderRead"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderResetV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderResetV2.pbtxt
index 265a3442f53..05bd5c48bc3 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderResetV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderResetV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ReaderResetV2"
   endpoint {
     name: "io.ReaderReset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderRestoreStateV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderRestoreStateV2.pbtxt
index 4728ce7796b..c53c47ff372 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderRestoreStateV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderRestoreStateV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ReaderRestoreStateV2"
   endpoint {
     name: "io.ReaderRestoreState"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderSerializeStateV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderSerializeStateV2.pbtxt
index aa396095b17..ec18d3c71b6 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderSerializeStateV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReaderSerializeStateV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ReaderSerializeStateV2"
   endpoint {
     name: "io.ReaderSerializeState"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Real.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Real.pbtxt
index 3aaea928dec..3ddd3bc902a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Real.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Real.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Real"
   endpoint {
     name: "math.Real"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RealDiv.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RealDiv.pbtxt
index 415bd29da04..366c95f2566 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RealDiv.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RealDiv.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RealDiv"
   endpoint {
     name: "math.RealDiv"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Reciprocal.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Reciprocal.pbtxt
index 1c0d787c24e..bb6956bbe3c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Reciprocal.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Reciprocal.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Reciprocal"
   endpoint {
     name: "math.Reciprocal"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReciprocalGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReciprocalGrad.pbtxt
index 68879669b5f..57cc8c630e1 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReciprocalGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReciprocalGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ReciprocalGrad"
   endpoint {
     name: "math.ReciprocalGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RecordInput.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RecordInput.pbtxt
index c4807c68dee..bf8836b3d81 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RecordInput.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RecordInput.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RecordInput"
   endpoint {
     name: "random.RecordInput"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Recv.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Recv.pbtxt
index f0d5d9f0705..6ba56fa3392 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Recv.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Recv.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Recv"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RecvTPUEmbeddingActivations.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RecvTPUEmbeddingActivations.pbtxt
index 61e46884b36..05ce63f87aa 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RecvTPUEmbeddingActivations.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RecvTPUEmbeddingActivations.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RecvTPUEmbeddingActivations"
   endpoint {
     name: "tpu.RecvTPUEmbeddingActivations"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReduceJoin.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReduceJoin.pbtxt
index 79193222018..bb2b90169a1 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReduceJoin.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReduceJoin.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ReduceJoin"
   endpoint {
     name: "strings.ReduceJoin"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RefEnter.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RefEnter.pbtxt
index 9cd2281bc6a..886f9cc3436 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RefEnter.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RefEnter.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RefEnter"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RefExit.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RefExit.pbtxt
index 67e8d39c9af..1495c957912 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RefExit.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RefExit.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RefExit"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RefIdentity.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RefIdentity.pbtxt
index 53483bd1bb7..013b3bcce61 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RefIdentity.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RefIdentity.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RefIdentity"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RefMerge.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RefMerge.pbtxt
index 6ea3145841b..97599f361be 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RefMerge.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RefMerge.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RefMerge"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RefNextIteration.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RefNextIteration.pbtxt
index 5d008204b7a..0b94ec2d5d0 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RefNextIteration.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RefNextIteration.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RefNextIteration"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RefSelect.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RefSelect.pbtxt
index d7cda2d5b30..dc135b3cc98 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RefSelect.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RefSelect.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RefSelect"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RefSwitch.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RefSwitch.pbtxt
index 78261d8b7e6..abccabbf444 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RefSwitch.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RefSwitch.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RefSwitch"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RegexFullMatch.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RegexFullMatch.pbtxt
index 7f88e24eac6..ed0a0765b5c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RegexFullMatch.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RegexFullMatch.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RegexFullMatch"
   endpoint {
     name: "strings.RegexFullMatch"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RegexReplace.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RegexReplace.pbtxt
index 01c9e93cab7..a2987dba302 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RegexReplace.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RegexReplace.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RegexReplace"
   endpoint {
     name: "strings.RegexReplace"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Relayout.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Relayout.pbtxt
index 6eb3853eb8c..50f3036cc78 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Relayout.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Relayout.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Relayout"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RelayoutLike.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RelayoutLike.pbtxt
index 2abfdc15f19..b83aaf0cf61 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RelayoutLike.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RelayoutLike.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RelayoutLike"
   endpoint {
     name: "RelayoutLike"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Relu.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Relu.pbtxt
index 39d7fec4526..87e110a0739 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Relu.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Relu.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Relu"
   endpoint {
     name: "nn.Relu"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Relu6.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Relu6.pbtxt
index fcc012b5033..c1dc6c6d205 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Relu6.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Relu6.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Relu6"
   endpoint {
     name: "nn.Relu6"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Relu6Grad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Relu6Grad.pbtxt
index 33e959cc7b3..bb4621ffb5b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Relu6Grad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Relu6Grad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Relu6Grad"
   endpoint {
     name: "nn.Relu6Grad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReluGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReluGrad.pbtxt
index ec4a8b5f972..7830ad371d6 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReluGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReluGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ReluGrad"
   endpoint {
     name: "nn.ReluGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RemoteCall.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RemoteCall.pbtxt
index a0ddb017847..b2f13cc48b9 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RemoteCall.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RemoteCall.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RemoteCall"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RemoteFusedGraphExecute.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RemoteFusedGraphExecute.pbtxt
index b73e633ef2c..c30673aa76e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RemoteFusedGraphExecute.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RemoteFusedGraphExecute.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RemoteFusedGraphExecute"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RequantizationRange.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RequantizationRange.pbtxt
index d365e899255..81e17cf420d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RequantizationRange.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RequantizationRange.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RequantizationRange"
   endpoint {
     name: "quantization.RequantizationRange"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RequantizationRangePerChannel.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RequantizationRangePerChannel.pbtxt
index 7630f0d58be..2073052bfe9 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RequantizationRangePerChannel.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RequantizationRangePerChannel.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RequantizationRangePerChannel"
   endpoint {
     name: "math.RequantizationRangePerChannel"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Requantize.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Requantize.pbtxt
index d397cde4db9..c771cef0746 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Requantize.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Requantize.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Requantize"
   endpoint {
     name: "quantization.Requantize"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RequantizePerChannel.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RequantizePerChannel.pbtxt
index 3cbf3ed48f7..2539fbe9528 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RequantizePerChannel.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RequantizePerChannel.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RequantizePerChannel"
   endpoint {
     name: "math.RequantizePerChannel"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Reshape.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Reshape.pbtxt
index 4bf3a409d1a..bd628df6d68 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Reshape.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Reshape.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Reshape"
   endpoint {
     name: "Reshape"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeArea.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeArea.pbtxt
index 5358c18d4b4..2514478bc1e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeArea.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeArea.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResizeArea"
   endpoint {
     name: "image.ResizeArea"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeBicubic.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeBicubic.pbtxt
index 0d0942e2662..669b0889911 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeBicubic.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeBicubic.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResizeBicubic"
   endpoint {
     name: "image.ResizeBicubic"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeBicubicGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeBicubicGrad.pbtxt
index 12e61dc8238..63478567394 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeBicubicGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeBicubicGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResizeBicubicGrad"
   endpoint {
     name: "image.ResizeBicubicGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeBilinear.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeBilinear.pbtxt
index ad123744a92..42bc9578c0b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeBilinear.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeBilinear.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResizeBilinear"
   endpoint {
     name: "image.ResizeBilinear"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeBilinearGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeBilinearGrad.pbtxt
index 04f3e9f19ef..88bccdf83ca 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeBilinearGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeBilinearGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResizeBilinearGrad"
   endpoint {
     name: "image.ResizeBilinearGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeNearestNeighbor.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeNearestNeighbor.pbtxt
index 86ad39a5173..84f8e26218d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeNearestNeighbor.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeNearestNeighbor.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResizeNearestNeighbor"
   endpoint {
     name: "image.ResizeNearestNeighbor"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeNearestNeighborGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeNearestNeighborGrad.pbtxt
index 70eeb906fab..2b5ce61b1cf 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeNearestNeighborGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResizeNearestNeighborGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResizeNearestNeighborGrad"
   endpoint {
     name: "image.ResizeNearestNeighborGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceAccumulatorApplyGradient.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceAccumulatorApplyGradient.pbtxt
index 032baafbf7e..2463e311f36 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceAccumulatorApplyGradient.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceAccumulatorApplyGradient.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceAccumulatorApplyGradient"
   endpoint {
     name: "train.ResourceAccumulatorApplyGradient"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceAccumulatorNumAccumulated.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceAccumulatorNumAccumulated.pbtxt
index 9bbd06f6e07..414247dc55b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceAccumulatorNumAccumulated.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceAccumulatorNumAccumulated.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceAccumulatorNumAccumulated"
   endpoint {
     name: "train.ResourceAccumulatorNumAccumulated"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceAccumulatorSetGlobalStep.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceAccumulatorSetGlobalStep.pbtxt
index 171c7651371..02083395b15 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceAccumulatorSetGlobalStep.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceAccumulatorSetGlobalStep.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceAccumulatorSetGlobalStep"
   endpoint {
     name: "train.ResourceAccumulatorSetGlobalStep"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceAccumulatorTakeGradient.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceAccumulatorTakeGradient.pbtxt
index 518a496b88a..7d7fbd9c9ba 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceAccumulatorTakeGradient.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceAccumulatorTakeGradient.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceAccumulatorTakeGradient"
   endpoint {
     name: "train.ResourceAccumulatorTakeGradient"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAdaMax.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAdaMax.pbtxt
index ff57bd5849a..cbe0abd7fd2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAdaMax.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAdaMax.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceApplyAdaMax"
   endpoint {
     name: "train.ResourceApplyAdaMax"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAdadelta.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAdadelta.pbtxt
index d4369f0eade..11ea32f0474 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAdadelta.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAdadelta.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceApplyAdadelta"
   endpoint {
     name: "train.ResourceApplyAdadelta"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAdagradDA.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAdagradDA.pbtxt
index afddaaff573..7de1a78a3e7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAdagradDA.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAdagradDA.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceApplyAdagradDA"
   endpoint {
     name: "train.ResourceApplyAdagradDa"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAdagradV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAdagradV2.pbtxt
index 71def75670a..4b2cdf69a9b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAdagradV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAdagradV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceApplyAdagradV2"
   endpoint {
     name: "train.ResourceApplyAdagrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAdam.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAdam.pbtxt
index 390bd999c45..13b9b145b78 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAdam.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAdam.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceApplyAdam"
   endpoint {
     name: "train.ResourceApplyAdam"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAdamWithAmsgrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAdamWithAmsgrad.pbtxt
index 503e3b142b3..3afb7a28c5c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAdamWithAmsgrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAdamWithAmsgrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceApplyAdamWithAmsgrad"
   endpoint {
     name: "train.ResourceApplyAdamWithAmsgrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAddSign.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAddSign.pbtxt
index bf944477be3..8e57cf8d4c9 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAddSign.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyAddSign.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceApplyAddSign"
   endpoint {
     name: "train.ResourceApplyAddSign"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyCenteredRMSProp.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyCenteredRMSProp.pbtxt
index 85c97b430a8..5bc55386fb3 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyCenteredRMSProp.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyCenteredRMSProp.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceApplyCenteredRMSProp"
   endpoint {
     name: "train.ResourceApplyCenteredRmsProp"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyFtrlV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyFtrlV2.pbtxt
index 8209fd607e1..db4e93ed80e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyFtrlV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyFtrlV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceApplyFtrlV2"
   endpoint {
     name: "train.ResourceApplyFtrl"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyGradientDescent.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyGradientDescent.pbtxt
index a54fed14d17..48a55a96cc1 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyGradientDescent.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyGradientDescent.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceApplyGradientDescent"
   endpoint {
     name: "train.ResourceApplyGradientDescent"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyKerasMomentum.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyKerasMomentum.pbtxt
index 2d76993dc47..35b88fc8869 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyKerasMomentum.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyKerasMomentum.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceApplyKerasMomentum"
   endpoint {
     name: "train.ResourceApplyKerasMomentum"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyMomentum.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyMomentum.pbtxt
index debb0a8131e..ea88f416f0f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyMomentum.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyMomentum.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceApplyMomentum"
   endpoint {
     name: "train.ResourceApplyMomentum"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyPowerSign.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyPowerSign.pbtxt
index 96df22c81f0..c2a67f1fee3 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyPowerSign.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyPowerSign.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceApplyPowerSign"
   endpoint {
     name: "train.ResourceApplyPowerSign"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyProximalAdagrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyProximalAdagrad.pbtxt
index 809b8b3af3b..c022658a317 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyProximalAdagrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyProximalAdagrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceApplyProximalAdagrad"
   endpoint {
     name: "train.ResourceApplyProximalAdagrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyProximalGradientDescent.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyProximalGradientDescent.pbtxt
index c9ff5a499d5..a209ab6a065 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyProximalGradientDescent.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyProximalGradientDescent.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceApplyProximalGradientDescent"
   endpoint {
     name: "train.ResourceApplyProximalGradientDescent"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyRMSProp.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyRMSProp.pbtxt
index fa3adf759e0..7e5a287fbdf 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyRMSProp.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceApplyRMSProp.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceApplyRMSProp"
   endpoint {
     name: "train.ResourceApplyRmsProp"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceConditionalAccumulator.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceConditionalAccumulator.pbtxt
index 9289bdefc55..9b23eb1891c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceConditionalAccumulator.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceConditionalAccumulator.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceConditionalAccumulator"
   endpoint {
     name: "train.ResourceConditionalAccumulator"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceCountUpTo.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceCountUpTo.pbtxt
index 439c1f17557..4c1309f160d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceCountUpTo.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceCountUpTo.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceCountUpTo"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceGather.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceGather.pbtxt
index 79c6e8abbcb..a9b829ebd04 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceGather.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceGather.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceGather"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceGatherNd.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceGatherNd.pbtxt
index 339a7234c3f..ec282febc2b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceGatherNd.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceGatherNd.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceGatherNd"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterAdd.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterAdd.pbtxt
index e4184e33bf0..33b6d9c67d6 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterAdd.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterAdd.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceScatterAdd"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterDiv.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterDiv.pbtxt
index 3e21c24a588..b32181fde11 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterDiv.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterDiv.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceScatterDiv"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterMax.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterMax.pbtxt
index d25b14272d6..e758222d6ed 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterMax.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterMax.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceScatterMax"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterMin.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterMin.pbtxt
index 6243cc1ae3e..bce335396b4 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterMin.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterMin.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceScatterMin"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterMul.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterMul.pbtxt
index 393e5556c0b..4740ed6669c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterMul.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterMul.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceScatterMul"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterNdAdd.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterNdAdd.pbtxt
index 2fd38f7be87..29e9541aac8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterNdAdd.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterNdAdd.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceScatterNdAdd"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterNdMax.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterNdMax.pbtxt
index b042ac6c22b..2b2382e88b7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterNdMax.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterNdMax.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceScatterNdMax"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterNdMin.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterNdMin.pbtxt
index c3c617112b2..bad7c7741b5 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterNdMin.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterNdMin.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceScatterNdMin"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterNdSub.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterNdSub.pbtxt
index 7733debc1f9..5dad023a56a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterNdSub.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterNdSub.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceScatterNdSub"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterNdUpdate.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterNdUpdate.pbtxt
index 732de5f1cc2..72d079bef41 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterNdUpdate.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterNdUpdate.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceScatterNdUpdate"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterSub.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterSub.pbtxt
index 77081dda4d5..ca9e5fa6a25 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterSub.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterSub.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceScatterSub"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterUpdate.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterUpdate.pbtxt
index 9c2cc0ec210..bd850c7bcd2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterUpdate.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceScatterUpdate.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceScatterUpdate"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyAdadelta.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyAdadelta.pbtxt
index c6e3ae2219c..7614ee61566 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyAdadelta.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyAdadelta.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceSparseApplyAdadelta"
   endpoint {
     name: "train.ResourceSparseApplyAdadelta"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyAdagrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyAdagrad.pbtxt
index 5be4d6199b5..3acd27409f1 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyAdagrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyAdagrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceSparseApplyAdagrad"
   endpoint {
     name: "train.ResourceSparseApplyAdagrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyAdagradDA.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyAdagradDA.pbtxt
index 0547687d640..dff8e161d04 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyAdagradDA.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyAdagradDA.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceSparseApplyAdagradDA"
   endpoint {
     name: "train.ResourceSparseApplyAdagradDa"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyAdagradV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyAdagradV2.pbtxt
index 65d6ac18ecb..f86922242c7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyAdagradV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyAdagradV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceSparseApplyAdagradV2"
   endpoint {
     name: "train.ResourceSparseApplyAdagradV2"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyCenteredRMSProp.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyCenteredRMSProp.pbtxt
index 632b0ab4c20..0f402d6bb96 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyCenteredRMSProp.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyCenteredRMSProp.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceSparseApplyCenteredRMSProp"
   endpoint {
     name: "train.ResourceSparseApplyCenteredRmsProp"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyFtrlV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyFtrlV2.pbtxt
index cd126d78ab6..553da2bcb6f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyFtrlV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyFtrlV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceSparseApplyFtrlV2"
   endpoint {
     name: "train.ResourceSparseApplyFtrl"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyKerasMomentum.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyKerasMomentum.pbtxt
index 78ba4775304..8c39775ba83 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyKerasMomentum.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyKerasMomentum.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceSparseApplyKerasMomentum"
   endpoint {
     name: "train.ResourceSparseApplyKerasMomentum"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyMomentum.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyMomentum.pbtxt
index 7e00039e014..d165cf2f94e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyMomentum.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyMomentum.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceSparseApplyMomentum"
   endpoint {
     name: "train.ResourceSparseApplyMomentum"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyProximalAdagrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyProximalAdagrad.pbtxt
index 04fe8504e5d..a97d3c5d608 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyProximalAdagrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyProximalAdagrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceSparseApplyProximalAdagrad"
   endpoint {
     name: "train.ResourceSparseApplyProximalAdagrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyProximalGradientDescent.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyProximalGradientDescent.pbtxt
index 27df43c9c50..69db57fbc14 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyProximalGradientDescent.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyProximalGradientDescent.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceSparseApplyProximalGradientDescent"
   endpoint {
     name: "train.ResourceSparseApplyProximalGradientDescent"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyRMSProp.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyRMSProp.pbtxt
index ec8910a88a7..3cac8411190 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyRMSProp.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceSparseApplyRMSProp.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceSparseApplyRMSProp"
   endpoint {
     name: "train.ResourceSparseApplyRmsProp"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceStridedSliceAssign.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceStridedSliceAssign.pbtxt
index 83805389b98..bf142658402 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceStridedSliceAssign.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ResourceStridedSliceAssign.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ResourceStridedSliceAssign"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RestoreSlice.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RestoreSlice.pbtxt
index 0728f5908b2..d49abdc2abf 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RestoreSlice.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RestoreSlice.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RestoreSlice"
   endpoint {
     name: "train.RestoreSlice"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RestoreV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RestoreV2.pbtxt
index 909968873f2..f73221177e7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RestoreV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RestoreV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RestoreV2"
   endpoint {
     name: "train.Restore"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveAllTPUEmbeddingParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveAllTPUEmbeddingParameters.pbtxt
index d0c6b42fafd..0918a4bbd71 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveAllTPUEmbeddingParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveAllTPUEmbeddingParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RetrieveAllTPUEmbeddingParameters"
   endpoint {
     name: "tpu.RetrieveAllTPUEmbeddingParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingADAMParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingADAMParameters.pbtxt
index 4ba2dcfd019..6fa45ac4709 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingADAMParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingADAMParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RetrieveTPUEmbeddingADAMParameters"
   endpoint {
     name: "tpu.RetrieveTPUEmbeddingADAMParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingADAMParametersGradAccumDebug.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingADAMParametersGradAccumDebug.pbtxt
index c185287ab80..19024de237a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingADAMParametersGradAccumDebug.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingADAMParametersGradAccumDebug.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RetrieveTPUEmbeddingADAMParametersGradAccumDebug"
   endpoint {
     name: "tpu.RetrieveTPUEmbeddingADAMParametersGradAccumDebug"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingAdadeltaParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingAdadeltaParameters.pbtxt
index b520470d94a..608071b458b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingAdadeltaParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingAdadeltaParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RetrieveTPUEmbeddingAdadeltaParameters"
   endpoint {
     name: "tpu.RetrieveTPUEmbeddingAdadeltaParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.pbtxt
index 3e4226d1e29..ce7f843c0d3 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug"
   endpoint {
     name: "tpu.RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingAdagradMomentumParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingAdagradMomentumParameters.pbtxt
index 506117ac2b7..c086360d4fb 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingAdagradMomentumParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingAdagradMomentumParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RetrieveTPUEmbeddingAdagradMomentumParameters"
   endpoint {
     name: "tpu.RetrieveTPUEmbeddingAdagradMomentumParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingAdagradParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingAdagradParameters.pbtxt
index 8260e0b2cc2..2829ab63f30 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingAdagradParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingAdagradParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RetrieveTPUEmbeddingAdagradParameters"
   endpoint {
     name: "tpu.RetrieveTPUEmbeddingAdagradParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingAdagradParametersGradAccumDebug.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingAdagradParametersGradAccumDebug.pbtxt
index 5c0d7d42f0e..08a26da1fc2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingAdagradParametersGradAccumDebug.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingAdagradParametersGradAccumDebug.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RetrieveTPUEmbeddingAdagradParametersGradAccumDebug"
   endpoint {
     name: "tpu.RetrieveTPUEmbeddingAdagradParametersGradAccumDebug"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingCenteredRMSPropParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingCenteredRMSPropParameters.pbtxt
index 722fde799ae..b339631e163 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingCenteredRMSPropParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingCenteredRMSPropParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RetrieveTPUEmbeddingCenteredRMSPropParameters"
   endpoint {
     name: "tpu.RetrieveTPUEmbeddingCenteredRMSPropParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingFTRLParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingFTRLParameters.pbtxt
index 8d0c6ee79da..de9f9931616 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingFTRLParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingFTRLParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RetrieveTPUEmbeddingFTRLParameters"
   endpoint {
     name: "tpu.RetrieveTPUEmbeddingFTRLParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingFTRLParametersGradAccumDebug.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingFTRLParametersGradAccumDebug.pbtxt
index b5ce64d483d..57b3e0e2e28 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingFTRLParametersGradAccumDebug.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingFTRLParametersGradAccumDebug.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RetrieveTPUEmbeddingFTRLParametersGradAccumDebug"
   endpoint {
     name: "tpu.RetrieveTPUEmbeddingFTRLParametersGradAccumDebug"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingFrequencyEstimatorParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingFrequencyEstimatorParameters.pbtxt
index 02da67a33d4..a30b2e979d9 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingFrequencyEstimatorParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingFrequencyEstimatorParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RetrieveTPUEmbeddingFrequencyEstimatorParameters"
   endpoint {
     name: "tpu.RetrieveTPUEmbeddingFrequencyEstimatorParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.pbtxt
index 5f54c810a4f..eff5462872f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug"
   endpoint {
     name: "tpu.RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingMDLAdagradLightParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingMDLAdagradLightParameters.pbtxt
index 040018699c7..c4320af9050 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingMDLAdagradLightParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingMDLAdagradLightParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RetrieveTPUEmbeddingMDLAdagradLightParameters"
   endpoint {
     name: "tpu.RetrieveTPUEmbeddingMDLAdagradLightParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingMomentumParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingMomentumParameters.pbtxt
index 017d38ce98c..cae7612c2b2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingMomentumParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingMomentumParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RetrieveTPUEmbeddingMomentumParameters"
   endpoint {
     name: "tpu.RetrieveTPUEmbeddingMomentumParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingMomentumParametersGradAccumDebug.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingMomentumParametersGradAccumDebug.pbtxt
index fdcd930b1c9..c3d1eea0d1d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingMomentumParametersGradAccumDebug.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingMomentumParametersGradAccumDebug.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RetrieveTPUEmbeddingMomentumParametersGradAccumDebug"
   endpoint {
     name: "tpu.RetrieveTPUEmbeddingMomentumParametersGradAccumDebug"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingProximalAdagradParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingProximalAdagradParameters.pbtxt
index 7e88ab316d8..a6a7b7d8582 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingProximalAdagradParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingProximalAdagradParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RetrieveTPUEmbeddingProximalAdagradParameters"
   endpoint {
     name: "tpu.RetrieveTPUEmbeddingProximalAdagradParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug.pbtxt
index 16b7e25975d..8f0cba646fd 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug"
   endpoint {
     name: "tpu.RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingProximalYogiParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingProximalYogiParameters.pbtxt
index 618d0cda5e9..3e516888ec3 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingProximalYogiParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingProximalYogiParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RetrieveTPUEmbeddingProximalYogiParameters"
   endpoint {
     name: "tpu.RetrieveTPUEmbeddingProximalYogiParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug.pbtxt
index a04bed75d87..26a810e8794 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug"
   endpoint {
     name: "tpu.RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingRMSPropParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingRMSPropParameters.pbtxt
index cb56f096e8b..03b991ee6b4 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingRMSPropParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingRMSPropParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RetrieveTPUEmbeddingRMSPropParameters"
   endpoint {
     name: "tpu.RetrieveTPUEmbeddingRMSPropParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.pbtxt
index 5eb9017d4d2..2a873e27fc3 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug"
   endpoint {
     name: "tpu.RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingStochasticGradientDescentParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingStochasticGradientDescentParameters.pbtxt
index 755ffdd69e5..a0377103d49 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingStochasticGradientDescentParameters.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingStochasticGradientDescentParameters.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RetrieveTPUEmbeddingStochasticGradientDescentParameters"
   endpoint {
     name: "tpu.RetrieveTPUEmbeddingStochasticGradientDescentParameters"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.pbtxt
index 6d476f70bd5..71758e43589 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug"
   endpoint {
     name: "tpu.RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReverseSequence.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReverseSequence.pbtxt
index 87638c0dcc9..f0e6bd4a1cc 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReverseSequence.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReverseSequence.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ReverseSequence"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReverseV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReverseV2.pbtxt
index 71efbe1892e..c286316354f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ReverseV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ReverseV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ReverseV2"
   endpoint {
     name: "Reverse"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RewriteDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RewriteDataset.pbtxt
index fc093decb92..cd73223372b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RewriteDataset.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RewriteDataset.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RewriteDataset"
   endpoint {
     name: "data.RewriteDataset"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RightShift.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RightShift.pbtxt
index 68fab3e8cf8..8f6889fd4d5 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RightShift.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RightShift.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RightShift"
   endpoint {
     name: "bitwise.RightShift"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Rint.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Rint.pbtxt
index 48fbcc7c346..0bf2aa48f28 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Rint.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Rint.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Rint"
   endpoint {
     name: "math.Rint"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscAbs.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscAbs.pbtxt
index c2ab94f053b..16a02df71a8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscAbs.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscAbs.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscAbs"
   endpoint {
     name: "risc.RiscAbs"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscAdd.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscAdd.pbtxt
index 5694b59c62f..db1cafd86b1 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscAdd.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscAdd.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscAdd"
   endpoint {
     name: "risc.RiscAdd"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscBinaryArithmetic.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscBinaryArithmetic.pbtxt
index 910399fa401..a6b0d3849d9 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscBinaryArithmetic.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscBinaryArithmetic.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscBinaryArithmetic"
   endpoint {
     name: "risc.RiscBinaryArithmetic"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscBinaryComparison.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscBinaryComparison.pbtxt
index 014e43b1444..b278cdb19b5 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscBinaryComparison.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscBinaryComparison.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscBinaryComparison"
   endpoint {
     name: "risc.RiscBinaryComparison"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscBitcast.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscBitcast.pbtxt
index 3393f70a8b5..3576ea43316 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscBitcast.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscBitcast.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscBitcast"
   endpoint {
     name: "risc.RiscBitcast"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscBroadcast.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscBroadcast.pbtxt
index 755892ca968..70f651c5595 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscBroadcast.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscBroadcast.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscBroadcast"
   endpoint {
     name: "risc.RiscBroadcast"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscCast.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscCast.pbtxt
index d1bffc26bff..03d2dddb2a7 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscCast.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscCast.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscCast"
   endpoint {
     name: "risc.RiscCast"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscCeil.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscCeil.pbtxt
index 286b8298d51..7cc1796e649 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscCeil.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscCeil.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscCeil"
   endpoint {
     name: "risc.RiscCeil"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscCholesky.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscCholesky.pbtxt
index cdb5975e035..f58e0969b02 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscCholesky.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscCholesky.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscCholesky"
   endpoint {
     name: "risc.RiscCholesky"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscConcat.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscConcat.pbtxt
index 670cb46be04..e5aad1d665c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscConcat.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscConcat.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscConcat"
   endpoint {
     name: "risc.RiscConcat"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscCondition.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscCondition.pbtxt
index 2284aeed689..20b4043192e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscCondition.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscCondition.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscCondition"
   endpoint {
     name: "risc.RiscCondition"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscConv.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscConv.pbtxt
index 4e2342a8da9..3b85466aad8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscConv.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscConv.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscConv"
   endpoint {
     name: "risc.RiscConv"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscCos.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscCos.pbtxt
index d9905d7e1b0..bd0bd4faa20 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscCos.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscCos.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscCos"
   endpoint {
     name: "risc.RiscCos"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscDiv.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscDiv.pbtxt
index 651d569b479..62752229c2b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscDiv.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscDiv.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscDiv"
   endpoint {
     name: "risc.RiscDiv"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscDot.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscDot.pbtxt
index 4eac65da4f8..884d0093f49 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscDot.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscDot.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscDot"
   endpoint {
     name: "risc.RiscDot"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscExp.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscExp.pbtxt
index 35bb77b83c6..a0f735e9812 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscExp.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscExp.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscExp"
   endpoint {
     name: "risc.RiscExp"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscFft.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscFft.pbtxt
index a3dcbe69337..7939ade66d4 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscFft.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscFft.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscFft"
   endpoint {
     name: "risc.RiscFft"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscFloor.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscFloor.pbtxt
index 9f5d762d1a4..4bbf58f30be 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscFloor.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscFloor.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscFloor"
   endpoint {
     name: "risc.RiscFloor"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscGather.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscGather.pbtxt
index c4fe724889d..65e03eabd05 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscGather.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscGather.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscGather"
   endpoint {
     name: "risc.RiscGather"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscImag.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscImag.pbtxt
index 70d8136856b..c8473b54de1 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscImag.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscImag.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscImag"
   endpoint {
     name: "risc.RiscImag"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscIsFinite.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscIsFinite.pbtxt
index 5418f7a9906..9155259eba0 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscIsFinite.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscIsFinite.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscIsFinite"
   endpoint {
     name: "risc.RiscIsFinite"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscLog.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscLog.pbtxt
index b0bb8f3aaed..c8e1afe2a75 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscLog.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscLog.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscLog"
   endpoint {
     name: "risc.RiscLog"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscLogicalAnd.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscLogicalAnd.pbtxt
index 1ccb0264901..bc2d5b1f9eb 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscLogicalAnd.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscLogicalAnd.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscLogicalAnd"
   endpoint {
     name: "risc.RiscLogicalAnd"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscLogicalNot.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscLogicalNot.pbtxt
index 6f97af1c7b6..c4743d410b3 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscLogicalNot.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscLogicalNot.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscLogicalNot"
   endpoint {
     name: "risc.RiscLogicalNot"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscLogicalOr.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscLogicalOr.pbtxt
index 97e37710419..f23f059b514 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscLogicalOr.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscLogicalOr.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscLogicalOr"
   endpoint {
     name: "risc.RiscLogicalOr"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscMax.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscMax.pbtxt
index 240f8119a9e..06d25cbc86a 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscMax.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscMax.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscMax"
   endpoint {
     name: "risc.RiscMax"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscMin.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscMin.pbtxt
index a8ccba66ae1..309d515d6fd 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscMin.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscMin.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscMin"
   endpoint {
     name: "risc.RiscMin"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscMul.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscMul.pbtxt
index 21fc1e0e336..51927d3a135 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscMul.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscMul.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscMul"
   endpoint {
     name: "risc.RiscMul"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscNeg.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscNeg.pbtxt
index 894b769a72a..0e0dd0ea4b0 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscNeg.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscNeg.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscNeg"
   endpoint {
     name: "risc.RiscNeg"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscPad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscPad.pbtxt
index 729bba07740..0e3d478d02b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscPad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscPad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscPad"
   endpoint {
     name: "risc.RiscPad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscPool.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscPool.pbtxt
index 9ed6a55dd07..74cd28a15fd 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscPool.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscPool.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscPool"
   endpoint {
     name: "risc.RiscPool"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscPow.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscPow.pbtxt
index 3eac196376f..2565bd11555 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscPow.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscPow.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscPow"
   endpoint {
     name: "risc.RiscPow"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscRandomUniform.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscRandomUniform.pbtxt
index ef96f0a2796..942c4bec622 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscRandomUniform.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscRandomUniform.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscRandomUniform"
   endpoint {
     name: "risc.RiscRandomUniform"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscReal.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscReal.pbtxt
index 5b9691512fc..5d24d2ad837 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscReal.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscReal.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscReal"
   endpoint {
     name: "risc.RiscReal"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscReduce.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscReduce.pbtxt
index d5d614c828e..bc9b20496e5 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscReduce.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscReduce.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscReduce"
   endpoint {
     name: "risc.RiscReduce"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscRem.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscRem.pbtxt
index 0bb38f8de55..22de8c713ea 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscRem.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscRem.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscRem"
   endpoint {
     name: "risc.RiscRem"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscReshape.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscReshape.pbtxt
index b2ab27447a3..fd3bacbd2d6 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscReshape.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscReshape.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscReshape"
   endpoint {
     name: "risc.RiscReshape"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscReverse.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscReverse.pbtxt
index ccb027a8859..ee8e646e4b9 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscReverse.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscReverse.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscReverse"
   endpoint {
     name: "risc.RiscReverse"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscScatter.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscScatter.pbtxt
index 0eea45dcf04..dabe270375d 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscScatter.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscScatter.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscScatter"
   endpoint {
     name: "risc.RiscScatter"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscShape.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscShape.pbtxt
index dab7319a922..83666efcdf6 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscShape.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscShape.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscShape"
   endpoint {
     name: "risc.RiscShape"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscSign.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscSign.pbtxt
index a157b69acbb..2cc5dfc378c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscSign.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscSign.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscSign"
   endpoint {
     name: "risc.RiscSign"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscSlice.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscSlice.pbtxt
index fc630149b69..ecb7b991196 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscSlice.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscSlice.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscSlice"
   endpoint {
     name: "risc.RiscSlice"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscSort.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscSort.pbtxt
index 2048ca7aab1..3361401d336 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscSort.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscSort.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscSort"
   endpoint {
     name: "risc.RiscSort"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscSqueeze.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscSqueeze.pbtxt
index f09b55721f9..5b9b50d209e 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscSqueeze.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscSqueeze.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscSqueeze"
   endpoint {
     name: "risc.RiscSqueeze"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscSub.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscSub.pbtxt
index 924d3f38189..ea48b182d22 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscSub.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscSub.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscSub"
   endpoint {
     name: "risc.RiscSub"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscTranspose.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscTranspose.pbtxt
index 877ee6d6570..f2d3e739b50 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscTranspose.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscTranspose.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscTranspose"
   endpoint {
     name: "risc.RiscTranspose"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscTriangularSolve.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscTriangularSolve.pbtxt
index f74b9a88a86..70b4fbdeed4 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscTriangularSolve.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscTriangularSolve.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscTriangularSolve"
   endpoint {
     name: "risc.RiscTriangularSolve"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscUnary.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscUnary.pbtxt
index 429c93bff49..d1d03367c05 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscUnary.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscUnary.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscUnary"
   endpoint {
     name: "risc.RiscUnary"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscWhile.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscWhile.pbtxt
index e4810438b46..745b47cdfab 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscWhile.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RiscWhile.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: SKIP
   graph_op_name: "RiscWhile"
   endpoint {
     name: "risc.RiscWhile"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RngReadAndSkip.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RngReadAndSkip.pbtxt
index 92057c8e041..8603fa95988 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RngReadAndSkip.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RngReadAndSkip.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RngReadAndSkip"
   endpoint {
     name: "random.RngReadAndSkip"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RngSkip.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RngSkip.pbtxt
index d9d3c6a1e73..9074f38c5da 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RngSkip.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RngSkip.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RngSkip"
   endpoint {
     name: "random.RngSkip"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Roll.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Roll.pbtxt
index 50f7915a65a..fe4eed9ab13 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Roll.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Roll.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Roll"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Round.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Round.pbtxt
index dd612a33d63..960ffba508f 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Round.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Round.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Round"
   endpoint {
     name: "math.Round"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Rpc.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Rpc.pbtxt
index 0d1e2b90e6a..528afe26709 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Rpc.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Rpc.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Rpc"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_Rsqrt.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_Rsqrt.pbtxt
index 06b1b81ecd4..97165e2d758 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_Rsqrt.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_Rsqrt.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "Rsqrt"
   endpoint {
     name: "math.Rsqrt"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_RsqrtGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_RsqrtGrad.pbtxt
index 88073b6f254..8aa9f02b9bc 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_RsqrtGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_RsqrtGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "RsqrtGrad"
   endpoint {
     name: "math.RsqrtGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_SampleDistortedBoundingBoxV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_SampleDistortedBoundingBoxV2.pbtxt
index 65573141378..0aef133b9e6 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_SampleDistortedBoundingBoxV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_SampleDistortedBoundingBoxV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "SampleDistortedBoundingBoxV2"
   endpoint {
     name: "image.SampleDistortedBoundingBox"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_SaveSlices.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_SaveSlices.pbtxt
index b3360356800..33af2108dd0 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_SaveSlices.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_SaveSlices.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "SaveSlices"
   endpoint {
     name: "train.SaveSlices"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_SaveV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_SaveV2.pbtxt
index 644d1824aa1..0fc943f3540 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_SaveV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_SaveV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "SaveV2"
   endpoint {
     name: "train.Save"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScalarSummary.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScalarSummary.pbtxt
index c339ce0a7a5..7b6f6129353 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScalarSummary.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScalarSummary.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ScalarSummary"
   endpoint {
     name: "summary.ScalarSummary"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScaleAndTranslate.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScaleAndTranslate.pbtxt
index 850f8effbe1..25364907a30 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScaleAndTranslate.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScaleAndTranslate.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ScaleAndTranslate"
   endpoint {
     name: "image.ScaleAndTranslate"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScaleAndTranslateGrad.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScaleAndTranslateGrad.pbtxt
index 99bc3a92bb7..e3256e0f704 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScaleAndTranslateGrad.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScaleAndTranslateGrad.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ScaleAndTranslateGrad"
   endpoint {
     name: "image.ScaleAndTranslateGrad"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterAdd.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterAdd.pbtxt
index 41c63dc0a40..74492ab813b 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterAdd.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterAdd.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ScatterAdd"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterDiv.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterDiv.pbtxt
index 5754249eafc..97252d64db3 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterDiv.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterDiv.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ScatterDiv"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterMax.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterMax.pbtxt
index aa6375cbd76..5217cb1f668 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterMax.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterMax.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ScatterMax"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterMin.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterMin.pbtxt
index ea007120c36..c082832265c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterMin.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterMin.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ScatterMin"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterMul.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterMul.pbtxt
index f1d91258e4b..4d284a527c2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterMul.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterMul.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ScatterMul"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNd.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNd.pbtxt
index 8ef01b2fcac..5d5308a7444 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNd.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNd.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ScatterNd"
   endpoint {
     name: "ScatterNd"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNdAdd.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNdAdd.pbtxt
index bea152a9da5..61d9acdd48c 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNdAdd.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNdAdd.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ScatterNdAdd"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNdMax.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNdMax.pbtxt
index 392f99b3bec..617c639add6 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNdMax.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNdMax.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ScatterNdMax"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNdMin.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNdMin.pbtxt
index 9ced2ef3ba3..53d6754e0e3 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNdMin.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNdMin.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ScatterNdMin"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNdNonAliasingAdd.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNdNonAliasingAdd.pbtxt
index 4dd756bfc0c..98baca56e19 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNdNonAliasingAdd.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNdNonAliasingAdd.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ScatterNdNonAliasingAdd"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNdSub.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNdSub.pbtxt
index 384e79d64ef..867227b1507 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNdSub.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNdSub.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ScatterNdSub"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNdUpdate.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNdUpdate.pbtxt
index 92fce7f0ac3..2c4432c9ed0 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNdUpdate.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterNdUpdate.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ScatterNdUpdate"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterSub.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterSub.pbtxt
index 5baaa4f6045..25a2e9519fa 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterSub.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterSub.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ScatterSub"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterUpdate.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterUpdate.pbtxt
index 83ac128ed60..cfcff646652 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterUpdate.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_ScatterUpdate.pbtxt
@@ -1,3 +1,4 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "ScatterUpdate"
 }
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_SdcaFprint.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_SdcaFprint.pbtxt
index ce179918cd0..19725ee76d2 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_SdcaFprint.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_SdcaFprint.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "SdcaFprint"
   endpoint {
     name: "train.SdcaFprint"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_SdcaOptimizerV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_SdcaOptimizerV2.pbtxt
index b72ee64e501..b67c06a7069 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_SdcaOptimizerV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_SdcaOptimizerV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "SdcaOptimizerV2"
   endpoint {
     name: "train.SdcaOptimizer"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_SdcaShrinkL1.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_SdcaShrinkL1.pbtxt
index 83993bcf149..b65cd2a92c0 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_SdcaShrinkL1.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_SdcaShrinkL1.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "SdcaShrinkL1"
   endpoint {
     name: "train.SdcaShrinkL1"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_SegmentMaxV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_SegmentMaxV2.pbtxt
index 7b8d476dc67..58d1cce4a47 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_SegmentMaxV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_SegmentMaxV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "SegmentMaxV2"
   endpoint {
     name: "math.SegmentMax"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_SegmentMean.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_SegmentMean.pbtxt
index 982db87bf09..78a64153e26 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_SegmentMean.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_SegmentMean.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "SegmentMean"
   endpoint {
     name: "math.SegmentMean"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_SegmentMinV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_SegmentMinV2.pbtxt
index 84cbd30843d..8d3c1ea4bdd 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_SegmentMinV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_SegmentMinV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "SegmentMinV2"
   endpoint {
     name: "math.SegmentMin"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_SegmentProdV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_SegmentProdV2.pbtxt
index 53276c470cc..6ed9d2bf402 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_SegmentProdV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_SegmentProdV2.pbtxt
@@ -1,4 +1,5 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "SegmentProdV2"
   endpoint {
     name: "math.SegmentProd"
diff --git a/tensorflow-core/tensorflow-core-api/src/api/api_def_SegmentSumV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/api/api_def_SegmentSumV2.pbtxt
index aa200316606..4478e3a5fb8 100644
--- a/tensorflow-core/tensorflow-core-api/src/api/api_def_SegmentSumV2.pbtxt
+++ b/tensorflow-core/tensorflow-core-api/src/api/api_def_SegmentSumV2.pbtxt
@@ -1,6 +1,21 @@
 op {
+  visibility: VISIBLE
   graph_op_name: "SegmentSumV2"
   endpoint {
     name: "math.SegmentSum"
   }
+  description: <
* - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code BitwiseOr} output and operands @@ -121,7 +119,6 @@ public BitwiseOr bitwiseOr(Operand x, Operand y) { * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE *
* - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code BitwiseXor} output and operands @@ -172,7 +169,6 @@ public BitwiseXor bitwiseXor(Operand x, Operand y) * tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32)) * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Invert} output and operands * @return a new instance of Invert @@ -212,7 +208,6 @@ public Invert invert(Operand x) { * # <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2, 64, 101, 32], dtype=int8)> * * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code LeftShift} output and operands @@ -255,7 +250,6 @@ public LeftShift leftShift(Operand x, Operand y) { * # <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2, 64, 101, 32], dtype=int8)> * * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code RightShift} output and operands diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ClusterOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ClusterOps.java new file mode 100644 index 00000000000..e59e86f23ed --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ClusterOps.java @@ -0,0 +1,85 @@ +// Copyright 2020-2022 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op; + +import org.tensorflow.Operand; +import org.tensorflow.op.cluster.KMC2ChainInitialization; +import org.tensorflow.op.cluster.KmeansPlusPlusInitialization; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt64; + +/** + * An API for building {@code cluster} operations as {@link Op Op}s + * + * @see Ops + */ +public final class ClusterOps { + private final Scope scope; + + private final Ops ops; + + ClusterOps(Ops ops) { + this.scope = ops.scope(); + this.ops = ops; + } + + /** + * Returns the index of a data point that should be added to the seed set. + * Entries in distances are assumed to be squared distances of candidate points to + * the already sampled centers in the seed set. The op constructs one Markov chain + * of the k-MC^2 algorithm and returns the index of one candidate point to be added + * as an additional cluster center. + * + * @param distances Vector with squared distances to the closest previously sampled cluster center + * for each candidate point. + * @param seed Scalar. Seed for initializing the random number generator. + * @return a new instance of KMC2ChainInitialization + */ + public KMC2ChainInitialization kMC2ChainInitialization(Operand distances, + Operand seed) { + return KMC2ChainInitialization.create(scope, distances, seed); + } + + /** + * Selects num_to_sample rows of input using the KMeans++ criterion. + * Rows of points are assumed to be input points. One row is selected at random. + * Subsequent rows are sampled with probability proportional to the squared L2 + * distance from the nearest row selected thus far till num_to_sample rows have + * been sampled. + * + * @param points Matrix of shape (n, d). Rows are assumed to be input points. 
+ * @param numToSample Scalar. The number of rows to sample. This value must not be larger than n. + * @param seed Scalar. Seed for initializing the random number generator. + * @param numRetriesPerSample Scalar. For each row that is sampled, this parameter + * specifies the number of additional points to draw from the current + * distribution before selecting the best. If a negative value is specified, a + * heuristic is used to sample O(log(num_to_sample)) additional points. + * @return a new instance of KmeansPlusPlusInitialization + */ + public KmeansPlusPlusInitialization kmeansPlusPlusInitialization(Operand points, + Operand numToSample, Operand seed, Operand numRetriesPerSample) { + return KmeansPlusPlusInitialization.create(scope, points, numToSample, seed, numRetriesPerSample); + } + + /** + * Get the parent {@link Ops} object. + */ + public final Ops ops() { + return ops; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/CollectiveOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/CollectiveOps.java new file mode 100644 index 00000000000..de786dc95fe --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/CollectiveOps.java @@ -0,0 +1,214 @@ +// Copyright 2020-2022 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op; + +import org.tensorflow.Operand; +import org.tensorflow.op.collective.CollectiveAllToAll; +import org.tensorflow.op.collective.CollectiveAssignGroup; +import org.tensorflow.op.collective.CollectiveBcastRecv; +import org.tensorflow.op.collective.CollectiveBcastSend; +import org.tensorflow.op.collective.CollectiveGather; +import org.tensorflow.op.collective.CollectiveInitializeCommunicator; +import org.tensorflow.op.collective.CollectivePermute; +import org.tensorflow.op.collective.CollectiveReduce; +import org.tensorflow.op.collective.CollectiveReduceScatter; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.family.TNumber; +import org.tensorflow.types.family.TType; + +/** + * An API for building {@code collective} operations as {@link Op Op}s + * + * @see Ops + */ +public final class CollectiveOps { + private final Scope scope; + + private final Ops ops; + + CollectiveOps(Ops ops) { + this.scope = ops.scope(); + this.ops = ops; + } + + /** + * Mutually exchanges multiple tensors of identical type and shape. + * + * @param input The input value + * @param communicator The communicator value + * @param groupAssignment The groupAssignment value + * @param options carries optional attribute values + * @param data type for {@code CollectiveAllToAllV3} output and operands + * @return a new instance of CollectiveAllToAll + */ + public CollectiveAllToAll collectiveAllToAll(Operand input, + Operand communicator, Operand groupAssignment, + CollectiveAllToAll.Options... options) { + return CollectiveAllToAll.create(scope, input, communicator, groupAssignment, options); + } + + /** + * Assign group keys based on group assignment. 
+ * + * @param groupAssignment The groupAssignment value + * @param deviceIndex The deviceIndex value + * @param baseKey The baseKey value + * @return a new instance of CollectiveAssignGroup + */ + public CollectiveAssignGroup collectiveAssignGroup(Operand groupAssignment, + Operand deviceIndex, Operand baseKey) { + return CollectiveAssignGroup.create(scope, groupAssignment, deviceIndex, baseKey); + } + + /** + * Receives a tensor value broadcast from another device. + * + * @param groupSize The groupSize value + * @param groupKey The groupKey value + * @param instanceKey The instanceKey value + * @param shape The shape value + * @param T The value of the T attribute + * @param options carries optional attribute values + * @param data type for {@code CollectiveBcastRecvV2} output and operands + * @return a new instance of CollectiveBcastRecv + */ + public CollectiveBcastRecv collectiveBcastRecv(Operand groupSize, + Operand groupKey, Operand instanceKey, Operand shape, + Class T, CollectiveBcastRecv.Options... options) { + return CollectiveBcastRecv.create(scope, groupSize, groupKey, instanceKey, shape, T, options); + } + + /** + * Broadcasts a tensor value to one or more other devices. + * + * @param input The input value + * @param groupSize The groupSize value + * @param groupKey The groupKey value + * @param instanceKey The instanceKey value + * @param options carries optional attribute values + * @param data type for {@code CollectiveBcastSendV2} output and operands + * @return a new instance of CollectiveBcastSend + */ + public CollectiveBcastSend collectiveBcastSend(Operand input, + Operand groupSize, Operand groupKey, Operand instanceKey, + CollectiveBcastSend.Options... options) { + return CollectiveBcastSend.create(scope, input, groupSize, groupKey, instanceKey, options); + } + + /** + * Mutually accumulates multiple tensors of identical type and shape. + * {@code is_stateless} means each op does not need control dependencies to other + * collective ops. 
In this case, keys that are unique at runtime + * (e.g. {@code instance_key}) should be used to distinguish collective groups. + * + * @param input The input value + * @param groupSize The groupSize value + * @param groupKey The groupKey value + * @param instanceKey The instanceKey value + * @param orderingToken The orderingToken value + * @param options carries optional attribute values + * @param data type for {@code CollectiveGatherV2} output and operands + * @return a new instance of CollectiveGather + */ + public CollectiveGather collectiveGather(Operand input, + Operand groupSize, Operand groupKey, Operand instanceKey, + Iterable> orderingToken, CollectiveGather.Options... options) { + return CollectiveGather.create(scope, input, groupSize, groupKey, instanceKey, orderingToken, options); + } + + /** + * Initializes a group for collective operations. + * + * @param groupKey The groupKey value + * @param rank The rank value + * @param groupSize The groupSize value + * @param options carries optional attribute values + * @return a new instance of CollectiveInitializeCommunicator + */ + public CollectiveInitializeCommunicator collectiveInitializeCommunicator(Operand groupKey, + Operand rank, Operand groupSize, + CollectiveInitializeCommunicator.Options... options) { + return CollectiveInitializeCommunicator.create(scope, groupKey, rank, groupSize, options); + } + + /** + * An Op to permute tensors across replicated TPU instances. + * Each instance supplies its own input. + *

For example, suppose there are 4 TPU instances: {@code [A, B, C, D]}. Passing + * source_target_pairs={@code [[0,1],[1,2],[2,3],[3,0]]} gets the outputs: + * {@code [D, A, B, C]}. + * + * @param input The local input to be permuted. Currently only supports float and + * bfloat16. + * @param sourceTargetPairs A tensor with shape [num_pairs, 2]. + * @param data type for {@code CollectivePermute} output and operands + * @return a new instance of CollectivePermute + */ + public CollectivePermute collectivePermute(Operand input, + Operand sourceTargetPairs) { + return CollectivePermute.create(scope, input, sourceTargetPairs); + } + + /** + * Mutually reduces multiple tensors of identical type and shape. + * + * @param input The input value + * @param communicator The communicator value + * @param groupAssignment The groupAssignment value + * @param reduction The value of the reduction attribute + * @param options carries optional attribute values + * @param data type for {@code CollectiveReduceV3} output and operands + * @return a new instance of CollectiveReduce + */ + public CollectiveReduce collectiveReduce(Operand input, + Operand communicator, Operand groupAssignment, String reduction, + CollectiveReduce.Options... options) { + return CollectiveReduce.create(scope, input, communicator, groupAssignment, reduction, options); + } + + /** + * Mutually reduces multiple tensors of identical type and shape and scatters the result. + * {@code is_stateless} means each op does not need control dependencies to other + * collective ops. In this case, keys that are unique at runtime + * (e.g. {@code instance_key}) should be used to distinguish collective groups. 
+ * + * @param input The input value + * @param groupSize The groupSize value + * @param groupKey The groupKey value + * @param instanceKey The instanceKey value + * @param orderingToken The orderingToken value + * @param mergeOp The value of the mergeOp attribute + * @param finalOp The value of the finalOp attribute + * @param options carries optional attribute values + * @param data type for {@code CollectiveReduceScatterV2} output and operands + * @return a new instance of CollectiveReduceScatter + */ + public CollectiveReduceScatter collectiveReduceScatter(Operand input, + Operand groupSize, Operand groupKey, Operand instanceKey, + Iterable> orderingToken, String mergeOp, String finalOp, + CollectiveReduceScatter.Options... options) { + return CollectiveReduceScatter.create(scope, input, groupSize, groupKey, instanceKey, orderingToken, mergeOp, finalOp, options); + } + + /** + * Get the parent {@link Ops} object. + */ + public final Ops ops() { + return ops; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataExperimentalOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataExperimentalOps.java new file mode 100644 index 00000000000..4fa8e60e295 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataExperimentalOps.java @@ -0,0 +1,738 @@ +// Copyright 2020-2022 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op; + +import java.util.List; +import org.tensorflow.ConcreteFunction; +import org.tensorflow.Operand; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.data.experimental.AssertNextDataset; +import org.tensorflow.op.data.experimental.AutoShardDataset; +import org.tensorflow.op.data.experimental.BytesProducedStatsDataset; +import org.tensorflow.op.data.experimental.CSVDataset; +import org.tensorflow.op.data.experimental.ChooseFastestDataset; +import org.tensorflow.op.data.experimental.DatasetCardinality; +import org.tensorflow.op.data.experimental.DatasetToTFRecord; +import org.tensorflow.op.data.experimental.DenseToSparseBatchDataset; +import org.tensorflow.op.data.experimental.DirectedInterleaveDataset; +import org.tensorflow.op.data.experimental.GroupByReducerDataset; +import org.tensorflow.op.data.experimental.GroupByWindowDataset; +import org.tensorflow.op.data.experimental.IgnoreErrorsDataset; +import org.tensorflow.op.data.experimental.IteratorGetDevice; +import org.tensorflow.op.data.experimental.LatencyStatsDataset; +import org.tensorflow.op.data.experimental.LmdbDataset; +import org.tensorflow.op.data.experimental.MapAndBatchDataset; +import org.tensorflow.op.data.experimental.MapDataset; +import org.tensorflow.op.data.experimental.MatchingFilesDataset; +import org.tensorflow.op.data.experimental.MaxIntraOpParallelismDataset; +import org.tensorflow.op.data.experimental.NonSerializableDataset; +import org.tensorflow.op.data.experimental.ParallelInterleaveDataset; +import org.tensorflow.op.data.experimental.ParseExampleDataset; +import org.tensorflow.op.data.experimental.PrivateThreadPoolDataset; +import org.tensorflow.op.data.experimental.RandomDataset; +import 
org.tensorflow.op.data.experimental.RebatchDataset; +import org.tensorflow.op.data.experimental.ScanDataset; +import org.tensorflow.op.data.experimental.SetStatsAggregatorDataset; +import org.tensorflow.op.data.experimental.SleepDataset; +import org.tensorflow.op.data.experimental.SlidingWindowDataset; +import org.tensorflow.op.data.experimental.SqlDataset; +import org.tensorflow.op.data.experimental.StatsAggregatorHandle; +import org.tensorflow.op.data.experimental.StatsAggregatorSummary; +import org.tensorflow.op.data.experimental.TakeWhileDataset; +import org.tensorflow.op.data.experimental.ThreadPoolDataset; +import org.tensorflow.op.data.experimental.ThreadPoolHandle; +import org.tensorflow.op.data.experimental.UnbatchDataset; +import org.tensorflow.op.data.experimental.UniqueDataset; +import org.tensorflow.types.TBool; +import org.tensorflow.types.TInt64; +import org.tensorflow.types.TString; +import org.tensorflow.types.family.TType; + +/** + * An API for building {@code data.experimental} operations as {@link Op Op}s + * + * @see Ops + */ +public final class DataExperimentalOps { + private final Scope scope; + + private final Ops ops; + + DataExperimentalOps(Ops ops) { + this.scope = ops.scope(); + this.ops = ops; + } + + /** + * The ExperimentalAssertNextDataset operation + * + * @param inputDataset The inputDataset value + * @param transformations The transformations value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of AssertNextDataset + */ + public AssertNextDataset assertNextDataset(Operand inputDataset, + Operand transformations, List> outputTypes, + List outputShapes) { + return AssertNextDataset.create(scope, inputDataset, transformations, outputTypes, outputShapes); + } + + /** + * Creates a dataset that shards the input dataset. 
+ * Creates a dataset that shards the input dataset by num_workers, returning a + * sharded dataset for the index-th worker. This attempts to automatically shard + * a dataset by examining the Dataset graph and inserting a shard op before the + * inputs to a reader Dataset (e.g. CSVDataset, TFRecordDataset). + *

This dataset will throw a NotFound error if we cannot shard the dataset + * automatically. + * + * @param inputDataset A variant tensor representing the input dataset. + * @param numWorkers A scalar representing the number of workers to distribute this dataset across. + * @param index A scalar representing the index of the current worker out of num_workers. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of AutoShardDataset + */ + public AutoShardDataset autoShardDataset(Operand inputDataset, + Operand numWorkers, Operand index, List> outputTypes, + List outputShapes, AutoShardDataset.Options... options) { + return AutoShardDataset.create(scope, inputDataset, numWorkers, index, outputTypes, outputShapes, options); + } + + /** + * Records the bytes size of each element of {@code input_dataset} in a StatsAggregator. + * + * @param inputDataset The inputDataset value + * @param tag The tag value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of BytesProducedStatsDataset + */ + public BytesProducedStatsDataset bytesProducedStatsDataset(Operand inputDataset, + Operand tag, List> outputTypes, List outputShapes) { + return BytesProducedStatsDataset.create(scope, inputDataset, tag, outputTypes, outputShapes); + } + + /** + * The ExperimentalCSVDataset operation + * + * @param filenames The filenames value + * @param compressionType The compressionType value + * @param bufferSize The bufferSize value + * @param header The header value + * @param fieldDelim The fieldDelim value + * @param useQuoteDelim The useQuoteDelim value + * @param naValue The naValue value + * @param selectCols The selectCols value + * @param recordDefaults The recordDefaults value + * @param outputShapes The value of the outputShapes 
attribute + * @return a new instance of CSVDataset + */ + public CSVDataset cSVDataset(Operand filenames, Operand compressionType, + Operand bufferSize, Operand header, Operand fieldDelim, + Operand useQuoteDelim, Operand naValue, Operand selectCols, + Iterable> recordDefaults, List outputShapes) { + return CSVDataset.create(scope, filenames, compressionType, bufferSize, header, fieldDelim, useQuoteDelim, naValue, selectCols, recordDefaults, outputShapes); + } + + /** + * The ExperimentalChooseFastestDataset operation + * + * @param inputDatasets The inputDatasets value + * @param numExperiments The value of the numExperiments attribute + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of ChooseFastestDataset + */ + public ChooseFastestDataset chooseFastestDataset(Iterable> inputDatasets, + Long numExperiments, List> outputTypes, List outputShapes) { + return ChooseFastestDataset.create(scope, inputDatasets, numExperiments, outputTypes, outputShapes); + } + + /** + * Returns the cardinality of {@code input_dataset}. + * Returns the cardinality of {@code input_dataset}. + * + * @param inputDataset A variant tensor representing the dataset to return cardinality for. + * @return a new instance of DatasetCardinality + */ + public DatasetCardinality datasetCardinality(Operand inputDataset) { + return DatasetCardinality.create(scope, inputDataset); + } + + /** + * Writes the given dataset to the given file using the TFRecord format. + * + * @param inputDataset A variant tensor representing the dataset to write. + * @param filename A scalar string tensor representing the filename to use. + * @param compressionType A scalar string tensor containing either (i) the empty string (no + * compression), (ii) "ZLIB", or (iii) "GZIP". 
+ * @return a new instance of DatasetToTFRecord + */ + public DatasetToTFRecord datasetToTFRecord(Operand inputDataset, + Operand filename, Operand compressionType) { + return DatasetToTFRecord.create(scope, inputDataset, filename, compressionType); + } + + /** + * Creates a dataset that batches input elements into a SparseTensor. + * + * @param inputDataset A handle to an input dataset. Must have a single component. + * @param batchSize A scalar representing the number of elements to accumulate in a + * batch. + * @param rowShape A vector representing the dense shape of each row in the produced + * SparseTensor. The shape may be partially specified, using {@code -1} to indicate + * that a particular dimension should use the maximum size of all batch elements. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of DenseToSparseBatchDataset + */ + public DenseToSparseBatchDataset denseToSparseBatchDataset(Operand inputDataset, + Operand batchSize, Operand rowShape, List> outputTypes, + List outputShapes) { + return DenseToSparseBatchDataset.create(scope, inputDataset, batchSize, rowShape, outputTypes, outputShapes); + } + + /** + * A substitute for {@code InterleaveDataset} on a fixed list of {@code N} datasets. + * + * @param selectorInputDataset A dataset of scalar {@code DT_INT64} elements that determines which of the + * {@code N} data inputs should produce the next output element. + * @param dataInputDatasets {@code N} datasets with the same type that will be interleaved according to + * the values of {@code selector_input_dataset}. 
+ * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of DirectedInterleaveDataset + */ + public DirectedInterleaveDataset directedInterleaveDataset( + Operand selectorInputDataset, + Iterable> dataInputDatasets, + List> outputTypes, List outputShapes) { + return DirectedInterleaveDataset.create(scope, selectorInputDataset, dataInputDatasets, outputTypes, outputShapes); + } + + /** + * Creates a dataset that computes a group-by on {@code input_dataset}. + * Creates a dataset that computes a group-by on {@code input_dataset}. + * + * @param inputDataset A variant tensor representing the input dataset. + * @param keyFuncOtherArguments A list of tensors, typically values that were captured when + * building a closure for {@code key_func}. + * @param initFuncOtherArguments A list of tensors, typically values that were captured when + * building a closure for {@code init_func}. + * @param reduceFuncOtherArguments A list of tensors, typically values that were captured when + * building a closure for {@code reduce_func}. + * @param finalizeFuncOtherArguments A list of tensors, typically values that were captured when + * building a closure for {@code finalize_func}. + * @param keyFunc A function mapping an element of {@code input_dataset}, concatenated + * with {@code key_func_other_arguments} to a scalar value of type DT_INT64. + * @param initFunc A function mapping a key of type DT_INT64, concatenated with + * {@code init_func_other_arguments} to the initial reducer state. + * @param reduceFunc A function mapping the current reducer state and an element of {@code input_dataset}, + * concatenated with {@code reduce_func_other_arguments} to a new reducer state. + * @param finalizeFunc A function mapping the final reducer state to an output element. 
+ * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of GroupByReducerDataset + */ + public GroupByReducerDataset groupByReducerDataset(Operand inputDataset, + Iterable> keyFuncOtherArguments, Iterable> initFuncOtherArguments, + Iterable> reduceFuncOtherArguments, + Iterable> finalizeFuncOtherArguments, ConcreteFunction keyFunc, + ConcreteFunction initFunc, ConcreteFunction reduceFunc, ConcreteFunction finalizeFunc, + List> outputTypes, List outputShapes) { + return GroupByReducerDataset.create(scope, inputDataset, keyFuncOtherArguments, initFuncOtherArguments, reduceFuncOtherArguments, finalizeFuncOtherArguments, keyFunc, initFunc, reduceFunc, finalizeFunc, outputTypes, outputShapes); + } + + /** + * Creates a dataset that computes a windowed group-by on {@code input_dataset}. + * // TODO(mrry): Support non-int64 keys. + * + * @param inputDataset The inputDataset value + * @param keyFuncOtherArguments The keyFuncOtherArguments value + * @param reduceFuncOtherArguments The reduceFuncOtherArguments value + * @param windowSizeFuncOtherArguments The windowSizeFuncOtherArguments value + * @param keyFunc A function mapping an element of {@code input_dataset}, concatenated + * with {@code key_func_other_arguments} to a scalar value of type DT_INT64. 
+ * @param reduceFunc The value of the reduceFunc attribute + * @param windowSizeFunc The value of the windowSizeFunc attribute + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of GroupByWindowDataset + */ + public GroupByWindowDataset groupByWindowDataset(Operand inputDataset, + Iterable> keyFuncOtherArguments, Iterable> reduceFuncOtherArguments, + Iterable> windowSizeFuncOtherArguments, ConcreteFunction keyFunc, + ConcreteFunction reduceFunc, ConcreteFunction windowSizeFunc, + List> outputTypes, List outputShapes) { + return GroupByWindowDataset.create(scope, inputDataset, keyFuncOtherArguments, reduceFuncOtherArguments, windowSizeFuncOtherArguments, keyFunc, reduceFunc, windowSizeFunc, outputTypes, outputShapes); + } + + /** + * Creates a dataset that contains the elements of {@code input_dataset} ignoring errors. + * + * @param inputDataset The inputDataset value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of IgnoreErrorsDataset + */ + public IgnoreErrorsDataset ignoreErrorsDataset(Operand inputDataset, + List> outputTypes, List outputShapes, + IgnoreErrorsDataset.Options... options) { + return IgnoreErrorsDataset.create(scope, inputDataset, outputTypes, outputShapes, options); + } + + /** + * Returns the name of the device on which {@code resource} has been placed. + * + * @param resource The resource value + * @return a new instance of IteratorGetDevice + */ + public IteratorGetDevice iteratorGetDevice(Operand resource) { + return IteratorGetDevice.create(scope, resource); + } + + /** + * Records the latency of producing {@code input_dataset} elements in a StatsAggregator. 
+ * + * @param inputDataset The inputDataset value + * @param tag The tag value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of LatencyStatsDataset + */ + public LatencyStatsDataset latencyStatsDataset(Operand inputDataset, + Operand tag, List> outputTypes, List outputShapes) { + return LatencyStatsDataset.create(scope, inputDataset, tag, outputTypes, outputShapes); + } + + /** + * The ExperimentalLMDBDataset operation + * + * @param filenames The filenames value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of LmdbDataset + */ + public LmdbDataset lmdbDataset(Operand filenames, + List> outputTypes, List outputShapes) { + return LmdbDataset.create(scope, filenames, outputTypes, outputShapes); + } + + /** + * Creates a dataset that fuses mapping with batching. + * Creates a dataset that applies {@code f} to the outputs of {@code input_dataset} and then + * batches {@code batch_size} of them. + *

Unlike a "MapDataset", which applies {@code f} sequentially, this dataset invokes up + * to {@code batch_size * num_parallel_batches} copies of {@code f} in parallel. + * + * @param inputDataset A variant tensor representing the input dataset. + * @param otherArguments A list of tensors, typically values that were captured when building a closure + * for {@code f}. + * @param batchSize A scalar representing the number of elements to accumulate in a + * batch. It determines the number of concurrent invocations of {@code f} that process + * elements from {@code input_dataset} in parallel. + * @param numParallelCalls A scalar representing the maximum number of parallel invocations of the {@code map_fn} + * function. Applying the {@code map_fn} on consecutive input elements in parallel has + * the potential to improve input pipeline throughput. + * @param dropRemainder A scalar representing whether the last batch should be dropped in case its size + * is smaller than desired. + * @param f A function to apply to the outputs of {@code input_dataset}. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of MapAndBatchDataset + */ + public MapAndBatchDataset mapAndBatchDataset(Operand inputDataset, + Iterable> otherArguments, Operand batchSize, + Operand numParallelCalls, Operand dropRemainder, ConcreteFunction f, + List> outputTypes, List outputShapes, + MapAndBatchDataset.Options... options) { + return MapAndBatchDataset.create(scope, inputDataset, otherArguments, batchSize, numParallelCalls, dropRemainder, f, outputTypes, outputShapes, options); + } + + /** + * Creates a dataset that applies {@code f} to the outputs of {@code input_dataset}. 
+ * + * @param inputDataset The inputDataset value + * @param otherArguments The otherArguments value + * @param f The value of the f attribute + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of MapDataset + */ + public MapDataset mapDataset(Operand inputDataset, + Iterable> otherArguments, ConcreteFunction f, + List> outputTypes, List outputShapes, + MapDataset.Options... options) { + return MapDataset.create(scope, inputDataset, otherArguments, f, outputTypes, outputShapes, options); + } + + /** + * The ExperimentalMatchingFilesDataset operation + * + * @param patterns The patterns value + * @return a new instance of MatchingFilesDataset + */ + public MatchingFilesDataset matchingFilesDataset(Operand patterns) { + return MatchingFilesDataset.create(scope, patterns); + } + + /** + * Creates a dataset that overrides the maximum intra-op parallelism. + * + * @param inputDataset The inputDataset value + * @param maxIntraOpParallelism Identifies the maximum intra-op parallelism to use. 
+ * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of MaxIntraOpParallelismDataset + */ + public MaxIntraOpParallelismDataset maxIntraOpParallelismDataset( + Operand inputDataset, Operand maxIntraOpParallelism, + List> outputTypes, List outputShapes) { + return MaxIntraOpParallelismDataset.create(scope, inputDataset, maxIntraOpParallelism, outputTypes, outputShapes); + } + + /** + * The ExperimentalNonSerializableDataset operation + * + * @param inputDataset The inputDataset value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of NonSerializableDataset + */ + public NonSerializableDataset nonSerializableDataset(Operand inputDataset, + List> outputTypes, List outputShapes) { + return NonSerializableDataset.create(scope, inputDataset, outputTypes, outputShapes); + } + + /** + * Creates a dataset that applies {@code f} to the outputs of {@code input_dataset}. + * The resulting dataset is similar to the {@code InterleaveDataset}, with the exception + * that if retrieving the next value from a dataset would cause the requester to + * block, it will skip that input dataset. This dataset is especially useful + * when loading data from a variable-latency datastores (e.g. HDFS, GCS), as it + * allows the training step to proceed so long as some data is available. + *

!! WARNING !! This dataset is not deterministic! + * + * @param inputDataset The inputDataset value + * @param otherArguments The otherArguments value + * @param cycleLength The cycleLength value + * @param blockLength The blockLength value + * @param sloppy The sloppy value + * @param bufferOutputElements The bufferOutputElements value + * @param prefetchInputElements The prefetchInputElements value + * @param f A function mapping elements of {@code input_dataset}, concatenated with + * {@code other_arguments}, to a Dataset variant that contains elements matching + * {@code output_types} and {@code output_shapes}. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of ParallelInterleaveDataset + */ + public ParallelInterleaveDataset parallelInterleaveDataset(Operand inputDataset, + Iterable> otherArguments, Operand cycleLength, Operand blockLength, + Operand sloppy, Operand bufferOutputElements, + Operand prefetchInputElements, ConcreteFunction f, + List> outputTypes, List outputShapes) { + return ParallelInterleaveDataset.create(scope, inputDataset, otherArguments, cycleLength, blockLength, sloppy, bufferOutputElements, prefetchInputElements, f, outputTypes, outputShapes); + } + + /** + * Transforms {@code input_dataset} containing {@code Example} protos as vectors of DT_STRING into a dataset of {@code Tensor} or {@code SparseTensor} objects representing the parsed features. + * + * @param inputDataset The inputDataset value + * @param numParallelCalls The numParallelCalls value + * @param denseDefaults A dict mapping string keys to {@code Tensor}s. + * The keys of the dict must match the dense_keys of the feature. + * @param sparseKeys A list of string keys in the examples features. + * The results for these keys will be returned as {@code SparseTensor} objects. + * @param denseKeys A list of Ndense string Tensors (scalars). 
+ * The keys expected in the Examples features associated with dense values. + * @param sparseTypes A list of {@code DTypes} of the same length as {@code sparse_keys}. + * Only {@code tf.float32} ({@code FloatList}), {@code tf.int64} ({@code Int64List}), + * and {@code tf.string} ({@code BytesList}) are supported. + * @param denseShapes List of tuples with the same length as {@code dense_keys}. + * The shape of the data for each dense feature referenced by {@code dense_keys}. + * Required for any input tensors identified by {@code dense_keys}. Must be + * either fully defined, or may contain an unknown first dimension. + * An unknown first dimension means the feature is treated as having + * a variable number of blocks, and the output shape along this dimension + * is considered unknown at graph build time. Padding is applied for + * minibatch elements smaller than the maximum number of blocks for the + * given feature along this dimension. + * @param outputTypes The type list for the return values. + * @param outputShapes The list of shapes being produced. + * @param options carries optional attribute values + * @return a new instance of ParseExampleDataset + */ + public ParseExampleDataset parseExampleDataset(Operand inputDataset, + Operand numParallelCalls, Iterable> denseDefaults, List sparseKeys, + List denseKeys, List> sparseTypes, List denseShapes, + List> outputTypes, List outputShapes, + ParseExampleDataset.Options... options) { + return ParseExampleDataset.create(scope, inputDataset, numParallelCalls, denseDefaults, sparseKeys, denseKeys, sparseTypes, denseShapes, outputTypes, outputShapes, options); + } + + /** + * Creates a dataset that uses a custom thread pool to compute {@code input_dataset}. + * + * @param inputDataset The inputDataset value + * @param numThreads Identifies the number of threads to use for the private threadpool. 
+ * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of PrivateThreadPoolDataset + */ + public PrivateThreadPoolDataset privateThreadPoolDataset(Operand inputDataset, + Operand numThreads, List> outputTypes, + List outputShapes) { + return PrivateThreadPoolDataset.create(scope, inputDataset, numThreads, outputTypes, outputShapes); + } + + /** + * Creates a Dataset that returns pseudorandom numbers. + * + * @param seed A scalar seed for the random number generator. If either seed or + * seed2 is set to be non-zero, the random number generator is seeded + * by the given seed. Otherwise, a random seed is used. + * @param seed2 A second scalar seed to avoid seed collision. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of RandomDataset + */ + public RandomDataset randomDataset(Operand seed, Operand seed2, + List> outputTypes, List outputShapes) { + return RandomDataset.create(scope, seed, seed2, outputTypes, outputShapes); + } + + /** + * Creates a dataset that changes the batch size. + * Creates a dataset that changes the batch size of the dataset to current batch + * size // num_replicas. + * + * @param inputDataset A variant tensor representing the input dataset. + * @param numReplicas A scalar representing the number of replicas to distribute this batch across. As + * a result of this transformation the current batch size would end up being + * divided by this parameter. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of RebatchDataset + */ + public RebatchDataset rebatchDataset(Operand inputDataset, + Operand numReplicas, List> outputTypes, + List outputShapes, RebatchDataset.Options... 
options) { + return RebatchDataset.create(scope, inputDataset, numReplicas, outputTypes, outputShapes, options); + } + + /** + * Creates a dataset successively reduces {@code f} over the elements of {@code input_dataset}. + * + * @param inputDataset The inputDataset value + * @param initialState The initialState value + * @param otherArguments The otherArguments value + * @param f The value of the f attribute + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of ScanDataset + */ + public ScanDataset scanDataset(Operand inputDataset, + Iterable> initialState, Iterable> otherArguments, ConcreteFunction f, + List> outputTypes, List outputShapes, + ScanDataset.Options... options) { + return ScanDataset.create(scope, inputDataset, initialState, otherArguments, f, outputTypes, outputShapes, options); + } + + /** + * The ExperimentalSetStatsAggregatorDataset operation + * + * @param inputDataset The inputDataset value + * @param statsAggregator The statsAggregator value + * @param tag The tag value + * @param counterPrefix The counterPrefix value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of SetStatsAggregatorDataset + */ + public SetStatsAggregatorDataset setStatsAggregatorDataset(Operand inputDataset, + Operand statsAggregator, Operand tag, + Operand counterPrefix, List> outputTypes, + List outputShapes) { + return SetStatsAggregatorDataset.create(scope, inputDataset, statsAggregator, tag, counterPrefix, outputTypes, outputShapes); + } + + /** + * The ExperimentalSleepDataset operation + * + * @param inputDataset The inputDataset value + * @param sleepMicroseconds The sleepMicroseconds value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes 
attribute + * @return a new instance of SleepDataset + */ + public SleepDataset sleepDataset(Operand inputDataset, + Operand sleepMicroseconds, List> outputTypes, + List outputShapes) { + return SleepDataset.create(scope, inputDataset, sleepMicroseconds, outputTypes, outputShapes); + } + + /** + * Creates a dataset that passes a sliding window over {@code input_dataset}. + * + * @param inputDataset The inputDataset value + * @param windowSize A scalar representing the number of elements in the + * sliding window. + * @param windowShift A scalar representing the steps moving the sliding window + * forward in one iteration. It must be positive. + * @param windowStride A scalar representing the stride of the input elements of the sliding window. + * It must be positive. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of SlidingWindowDataset + */ + public SlidingWindowDataset slidingWindowDataset(Operand inputDataset, + Operand windowSize, Operand windowShift, Operand windowStride, + List> outputTypes, List outputShapes) { + return SlidingWindowDataset.create(scope, inputDataset, windowSize, windowShift, windowStride, outputTypes, outputShapes); + } + + /** + * Creates a dataset that executes a SQL query and emits rows of the result set. + * + * @param driverName The database type. Currently, the only supported type is 'sqlite'. + * @param dataSourceName A connection string to connect to the database. + * @param query A SQL query to execute. 
+ * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of SqlDataset + */ + public SqlDataset sqlDataset(Operand driverName, Operand dataSourceName, + Operand query, List> outputTypes, List outputShapes) { + return SqlDataset.create(scope, driverName, dataSourceName, query, outputTypes, outputShapes); + } + + /** + * Creates a statistics manager resource. + * + * @param options carries optional attribute values + * @return a new instance of StatsAggregatorHandle + */ + public StatsAggregatorHandle statsAggregatorHandle(StatsAggregatorHandle.Options... options) { + return StatsAggregatorHandle.create(scope, options); + } + + /** + * Produces a summary of any statistics recorded by the given statistics manager. + * + * @param iterator The iterator value + * @return a new instance of StatsAggregatorSummary + */ + public StatsAggregatorSummary statsAggregatorSummary(Operand iterator) { + return StatsAggregatorSummary.create(scope, iterator); + } + + /** + * Creates a dataset that stops iteration when predicate` is false. + * The {@code predicate} function must return a scalar boolean and accept the + * following arguments: + *

    + *
  • One tensor for each component of an element of {@code input_dataset}.
  • + *
  • One tensor for each value in {@code other_arguments}.
  • + *
+ * + * @param inputDataset The inputDataset value + * @param otherArguments A list of tensors, typically values that were captured when + * building a closure for {@code predicate}. + * @param predicate A function returning a scalar boolean. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of TakeWhileDataset + */ + public TakeWhileDataset takeWhileDataset(Operand inputDataset, + Iterable> otherArguments, ConcreteFunction predicate, + List> outputTypes, List outputShapes) { + return TakeWhileDataset.create(scope, inputDataset, otherArguments, predicate, outputTypes, outputShapes); + } + + /** + * Creates a dataset that uses a custom thread pool to compute {@code input_dataset}. + * + * @param inputDataset The inputDataset value + * @param threadPool A resource produced by the ThreadPoolHandle op. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of ThreadPoolDataset + */ + public ThreadPoolDataset threadPoolDataset(Operand inputDataset, + Operand threadPool, List> outputTypes, + List outputShapes) { + return ThreadPoolDataset.create(scope, inputDataset, threadPool, outputTypes, outputShapes); + } + + /** + * Creates a dataset that uses a custom thread pool to compute {@code input_dataset}. + * + * @param numThreads The number of threads in the thread pool. + * @param displayName A human-readable name for the threads that may be visible in some + * visualizations. + * threadpool. + * @param options carries optional attribute values + * @return a new instance of ThreadPoolHandle + */ + public ThreadPoolHandle threadPoolHandle(Long numThreads, String displayName, + ThreadPoolHandle.Options... options) { + return ThreadPoolHandle.create(scope, numThreads, displayName, options); + } + + /** + * A dataset that splits the elements of its input into multiple elements. 
+ * + * @param inputDataset The inputDataset value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of UnbatchDataset + */ + public UnbatchDataset unbatchDataset(Operand inputDataset, + List> outputTypes, List outputShapes) { + return UnbatchDataset.create(scope, inputDataset, outputTypes, outputShapes); + } + + /** + * Creates a dataset that contains the unique elements of {@code input_dataset}. + * + * @param inputDataset The inputDataset value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of UniqueDataset + */ + public UniqueDataset uniqueDataset(Operand inputDataset, + List> outputTypes, List outputShapes) { + return UniqueDataset.create(scope, inputDataset, outputTypes, outputShapes); + } + + /** + * Get the parent {@link Ops} object. + */ + public final Ops ops() { + return ops; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataOps.java index 3f682c82355..5a3a14b799e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataOps.java @@ -22,9 +22,11 @@ import org.tensorflow.Operand; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.data.AnonymousIterator; +import org.tensorflow.op.data.AnonymousMemoryCache; import org.tensorflow.op.data.AnonymousMultiDeviceIterator; import org.tensorflow.op.data.AssertCardinalityDataset; import org.tensorflow.op.data.AssertNextDataset; +import org.tensorflow.op.data.AssertPrevDataset; import org.tensorflow.op.data.AutoShardDataset; import org.tensorflow.op.data.BatchDataset; import org.tensorflow.op.data.BytesProducedStatsDataset; @@ -32,17 +34,22 @@ 
import org.tensorflow.op.data.CacheDataset; import org.tensorflow.op.data.ChooseFastestBranchDataset; import org.tensorflow.op.data.ChooseFastestDataset; +import org.tensorflow.op.data.CompressElement; import org.tensorflow.op.data.ConcatenateDataset; import org.tensorflow.op.data.DataServiceDataset; import org.tensorflow.op.data.DatasetCardinality; +import org.tensorflow.op.data.DatasetFingerprint; import org.tensorflow.op.data.DatasetFromGraph; import org.tensorflow.op.data.DatasetToGraph; import org.tensorflow.op.data.DatasetToSingleElement; import org.tensorflow.op.data.DatasetToTfRecord; import org.tensorflow.op.data.DeleteIterator; +import org.tensorflow.op.data.DeleteMemoryCache; +import org.tensorflow.op.data.DeleteMultiDeviceIterator; import org.tensorflow.op.data.DenseToSparseBatchDataset; import org.tensorflow.op.data.DeserializeIterator; import org.tensorflow.op.data.DirectedInterleaveDataset; +import org.tensorflow.op.data.DummyIterationCounter; import org.tensorflow.op.data.FilterByLastComponentDataset; import org.tensorflow.op.data.FilterDataset; import org.tensorflow.op.data.FinalizeDataset; @@ -52,16 +59,22 @@ import org.tensorflow.op.data.GroupByReducerDataset; import org.tensorflow.op.data.GroupByWindowDataset; import org.tensorflow.op.data.IgnoreErrorsDataset; +import org.tensorflow.op.data.IndexFlatMapDataset; import org.tensorflow.op.data.InitializeTableFromDataset; import org.tensorflow.op.data.InterleaveDataset; import org.tensorflow.op.data.Iterator; +import org.tensorflow.op.data.IteratorFromStringHandle; +import org.tensorflow.op.data.IteratorGetDevice; import org.tensorflow.op.data.IteratorGetNext; import org.tensorflow.op.data.IteratorGetNextAsOptional; import org.tensorflow.op.data.IteratorGetNextSync; import org.tensorflow.op.data.IteratorToStringHandle; import org.tensorflow.op.data.LMDBDataset; import org.tensorflow.op.data.LatencyStatsDataset; +import org.tensorflow.op.data.LeakyReluGrad; import 
org.tensorflow.op.data.LegacyParallelInterleaveDataset; +import org.tensorflow.op.data.ListDataset; +import org.tensorflow.op.data.ListSnapshotChunksDataset; import org.tensorflow.op.data.LoadDataset; import org.tensorflow.op.data.MakeIterator; import org.tensorflow.op.data.MapAndBatchDataset; @@ -69,6 +82,11 @@ import org.tensorflow.op.data.MatchingFilesDataset; import org.tensorflow.op.data.MaxIntraOpParallelismDataset; import org.tensorflow.op.data.ModelDataset; +import org.tensorflow.op.data.MultiDeviceIterator; +import org.tensorflow.op.data.MultiDeviceIteratorFromStringHandle; +import org.tensorflow.op.data.MultiDeviceIteratorGetNextFromShard; +import org.tensorflow.op.data.MultiDeviceIteratorInit; +import org.tensorflow.op.data.MultiDeviceIteratorToStringHandle; import org.tensorflow.op.data.NonSerializableDataset; import org.tensorflow.op.data.OneShotIterator; import org.tensorflow.op.data.OptimizeDataset; @@ -79,6 +97,7 @@ import org.tensorflow.op.data.OptionsDataset; import org.tensorflow.op.data.PaddedBatchDataset; import org.tensorflow.op.data.ParallelBatchDataset; +import org.tensorflow.op.data.ParallelFilterDataset; import org.tensorflow.op.data.ParallelInterleaveDataset; import org.tensorflow.op.data.ParallelMapDataset; import org.tensorflow.op.data.ParseExampleDataset; @@ -90,6 +109,7 @@ import org.tensorflow.op.data.ReduceDataset; import org.tensorflow.op.data.RegisterDataset; import org.tensorflow.op.data.RepeatDataset; +import org.tensorflow.op.data.RewriteDataset; import org.tensorflow.op.data.SamplingDataset; import org.tensorflow.op.data.SaveDataset; import org.tensorflow.op.data.ScanDataset; @@ -101,9 +121,14 @@ import org.tensorflow.op.data.SkipDataset; import org.tensorflow.op.data.SleepDataset; import org.tensorflow.op.data.SlidingWindowDataset; +import org.tensorflow.op.data.SnapshotChunkDataset; import org.tensorflow.op.data.SnapshotDataset; +import org.tensorflow.op.data.SnapshotDatasetReader; +import 
org.tensorflow.op.data.SnapshotNestedDatasetReader; import org.tensorflow.op.data.SparseTensorSliceDataset; import org.tensorflow.op.data.SqlDataset; +import org.tensorflow.op.data.StatsAggregatorHandle; +import org.tensorflow.op.data.StatsAggregatorSetSummaryWriter; import org.tensorflow.op.data.TakeDataset; import org.tensorflow.op.data.TakeWhileDataset; import org.tensorflow.op.data.TensorDataset; @@ -111,14 +136,18 @@ import org.tensorflow.op.data.TextLineDataset; import org.tensorflow.op.data.TfRecordDataset; import org.tensorflow.op.data.ThreadPoolDataset; +import org.tensorflow.op.data.ThreadPoolHandle; import org.tensorflow.op.data.UnbatchDataset; +import org.tensorflow.op.data.UncompressElement; import org.tensorflow.op.data.UniqueDataset; import org.tensorflow.op.data.UnwrapDatasetVariant; import org.tensorflow.op.data.WindowDataset; +import org.tensorflow.op.data.WindowOp; import org.tensorflow.op.data.WrapDatasetVariant; import org.tensorflow.op.data.ZipDataset; import org.tensorflow.types.TBool; import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; import org.tensorflow.types.family.TNumber; @@ -127,9 +156,11 @@ /** * An API for building {@code data} operations as {@link Op Op}s * - * @see {@link Ops} + * @see Ops */ public final class DataOps { + public final DataExperimentalOps experimental; + private final Scope scope; private final Ops ops; @@ -137,6 +168,7 @@ public final class DataOps { DataOps(Ops ops) { this.scope = ops.scope(); this.ops = ops; + experimental = new DataExperimentalOps(ops); } /** @@ -151,6 +183,15 @@ public AnonymousIterator anonymousIterator(List> outputTy return AnonymousIterator.create(scope, outputTypes, outputShapes); } + /** + * The AnonymousMemoryCache operation + * + * @return a new instance of AnonymousMemoryCache + */ + public AnonymousMemoryCache anonymousMemoryCache() { + return AnonymousMemoryCache.create(scope); + } + /** 
* A container for a multi device iterator resource. * @@ -203,6 +244,30 @@ public AssertNextDataset assertNextDataset(Operand inputDataset return AssertNextDataset.create(scope, inputDataset, transformations, outputTypes, outputShapes); } + /** + * A transformation that asserts which transformations happened previously. + * This transformation checks the names and, optionally, the attribute name-value + * pairs in the {@code transformations} argument against those of the transformations + * that preceded this transformation. If there is a mismatch, the transformation + * raises an exception. + *

The check occurs when iterating over the contents of the dataset, which + * means that the check happens after any static optimizations are applied + * to the dataset graph. + * + * @param inputDataset A variant tensor representing the input dataset. + * {@code data.AssertPrevDataset} passes through the outputs of its input dataset. + * @param transformations A {@code tf.string} vector {@code tf.Tensor} identifying the transformations, with optional + * attribute name-value pairs, that are expected to have happened previously. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of AssertPrevDataset + */ + public AssertPrevDataset assertPrevDataset(Operand inputDataset, + Operand transformations, List> outputTypes, + List outputShapes) { + return AssertPrevDataset.create(scope, inputDataset, transformations, outputTypes, outputShapes); + } + /** * Creates a dataset that shards the input dataset. * Creates a dataset that shards the input dataset by num_workers, returning a @@ -334,6 +399,16 @@ public ChooseFastestDataset chooseFastestDataset(Iterable> components) { + return CompressElement.create(scope, components); + } + /** * Creates a dataset that concatenates {@code input_dataset} with {@code another_dataset}. * @@ -390,6 +465,17 @@ public DatasetCardinality datasetCardinality(Operand inputDatas return DatasetCardinality.create(scope, inputDataset, options); } + /** + * Returns the fingerprint of {@code input_dataset}. + * Returns the fingerprint of {@code input_dataset}. + * + * @param inputDataset A variant tensor representing the dataset to return fingerprint for. + * @return a new instance of DatasetFingerprint + */ + public DatasetFingerprint datasetFingerprint(Operand inputDataset) { + return DatasetFingerprint.create(scope, inputDataset); + } + /** * Creates a dataset from the given {@code graph_def}. * Creates a dataset from the provided {@code graph_def}. 
@@ -455,6 +541,32 @@ public DeleteIterator deleteIterator(Operand handle, return DeleteIterator.create(scope, handle, deleter); } + /** + * The DeleteMemoryCache operation + * + * @param handle The handle value + * @param deleter The deleter value + * @return a new instance of DeleteMemoryCache + */ + public DeleteMemoryCache deleteMemoryCache(Operand handle, + Operand deleter) { + return DeleteMemoryCache.create(scope, handle, deleter); + } + + /** + * A container for an iterator resource. + * + * @param multiDeviceIterator A handle to the multi device iterator to delete. + * @param iterators A list of iterator handles (unused). This is added so that automatic control dependencies get added during function tracing that ensure this op runs after all the dependent iterators are deleted. + * @param deleter A variant deleter. + * @return a new instance of DeleteMultiDeviceIterator + */ + public DeleteMultiDeviceIterator deleteMultiDeviceIterator( + Operand multiDeviceIterator, Iterable> iterators, + Operand deleter) { + return DeleteMultiDeviceIterator.create(scope, multiDeviceIterator, iterators, deleter); + } + /** * Creates a dataset that batches input elements into a SparseTensor. * @@ -507,6 +619,15 @@ public DirectedInterleaveDataset directedInterleaveDataset( return DirectedInterleaveDataset.create(scope, selectorInputDataset, dataInputDatasets, outputTypes, outputShapes, options); } + /** + * The DummyIterationCounter operation + * + * @return a new instance of DummyIterationCounter + */ + public DummyIterationCounter dummyIterationCounter() { + return DummyIterationCounter.create(scope); + } + /** * Creates a dataset containing elements of first component of {@code input_dataset} having true in the last component. 
* @@ -699,6 +820,28 @@ public IgnoreErrorsDataset ignoreErrorsDataset(Operand inputDat return IgnoreErrorsDataset.create(scope, inputDataset, outputTypes, outputShapes, options); } + /** + * The IndexFlatMapDataset operation + * + * @param inputDataset The inputDataset value + * @param mapFuncOtherArgs The mapFuncOtherArgs value + * @param indexMapFuncOtherArgs The indexMapFuncOtherArgs value + * @param outputCardinality The outputCardinality value + * @param mapFunc The value of the mapFunc attribute + * @param indexMapFunc The value of the indexMapFunc attribute + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of IndexFlatMapDataset + */ + public IndexFlatMapDataset indexFlatMapDataset(Operand inputDataset, + Iterable> mapFuncOtherArgs, Iterable> indexMapFuncOtherArgs, + Operand outputCardinality, ConcreteFunction mapFunc, ConcreteFunction indexMapFunc, + List> outputTypes, List outputShapes, + IndexFlatMapDataset.Options... options) { + return IndexFlatMapDataset.create(scope, inputDataset, mapFuncOtherArgs, indexMapFuncOtherArgs, outputCardinality, mapFunc, indexMapFunc, outputTypes, outputShapes, options); + } + /** * The InitializeTableFromDataset operation * @@ -752,6 +895,29 @@ public Iterator iterator(String sharedName, String container, return Iterator.create(scope, sharedName, container, outputTypes, outputShapes); } + /** + * The IteratorFromStringHandleV2 operation + * + * @param stringHandle The stringHandle value + * @param outputTypes The value of the outputTypes attribute + * @param options carries optional attribute values + * @return a new instance of IteratorFromStringHandle + */ + public IteratorFromStringHandle iteratorFromStringHandle(Operand stringHandle, + List> outputTypes, IteratorFromStringHandle.Options... 
options) { + return IteratorFromStringHandle.create(scope, stringHandle, outputTypes, options); + } + + /** + * Returns the name of the device on which {@code resource} has been placed. + * + * @param resource The resource value + * @return a new instance of IteratorGetDevice + */ + public IteratorGetDevice iteratorGetDevice(Operand resource) { + return IteratorGetDevice.create(scope, resource); + } + /** * Gets the next output from the given iterator . * @@ -841,6 +1007,21 @@ public LatencyStatsDataset latencyStatsDataset(Operand inputDat return LatencyStatsDataset.create(scope, inputDataset, tag, outputTypes, outputShapes); } + /** + * Computes rectified linear gradients for a LeakyRelu operation. + * + * @param gradients The backpropagated gradients to the corresponding LeakyRelu operation. + * @param features The features passed as input to the corresponding LeakyRelu operation, + * OR the outputs of that operation (both work equivalently). + * @param options carries optional attribute values + * @param data type for {@code LeakyReluGrad} output and operands + * @return a new instance of LeakyReluGrad + */ + public LeakyReluGrad leakyReluGrad(Operand gradients, + Operand features, LeakyReluGrad.Options... options) { + return LeakyReluGrad.create(scope, gradients, features, options); + } + /** * Creates a dataset that applies {@code f} to the outputs of {@code input_dataset}. * The resulting dataset is similar to the {@code InterleaveDataset}, with the exception @@ -873,6 +1054,34 @@ public LegacyParallelInterleaveDataset legacyParallelInterleaveDataset( return LegacyParallelInterleaveDataset.create(scope, inputDataset, otherArguments, cycleLength, blockLength, bufferOutputElements, prefetchInputElements, f, outputTypes, outputShapes, options); } + /** + * Creates a dataset that emits each of {@code tensors} once. 
+ * + * @param tensors The tensors value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of ListDataset + */ + public ListDataset listDataset(Iterable> tensors, + List> outputTypes, List outputShapes, + ListDataset.Options... options) { + return ListDataset.create(scope, tensors, outputTypes, outputShapes, options); + } + + /** + * The ListSnapshotChunksDataset operation + * + * @param snapshotPath The snapshotPath value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of ListSnapshotChunksDataset + */ + public ListSnapshotChunksDataset listSnapshotChunksDataset(Operand snapshotPath, + List> outputTypes, List outputShapes) { + return ListSnapshotChunksDataset.create(scope, snapshotPath, outputTypes, outputShapes); + } + /** * The LoadDataset operation * @@ -995,6 +1204,78 @@ public ModelDataset modelDataset(Operand inputDataset, return ModelDataset.create(scope, inputDataset, outputTypes, outputShapes, options); } + /** + * Creates a MultiDeviceIterator resource. + * + * @param devices A list of devices the iterator works across. + * @param sharedName If non-empty, this resource will be shared under the given name + * across multiple sessions. + * @param container If non-empty, this resource is placed in the given container. + * Otherwise, a default container is used. + * @param outputTypes The type list for the return values. + * @param outputShapes The list of shapes being produced. 
+ * @return a new instance of MultiDeviceIterator + */ + public MultiDeviceIterator multiDeviceIterator(List devices, String sharedName, + String container, List> outputTypes, List outputShapes) { + return MultiDeviceIterator.create(scope, devices, sharedName, container, outputTypes, outputShapes); + } + + /** + * Generates a MultiDeviceIterator resource from its provided string handle. + * + * @param stringHandle String representing the resource. + * @param outputTypes The type list for the return values. + * @param options carries optional attribute values + * @return a new instance of MultiDeviceIteratorFromStringHandle + */ + public MultiDeviceIteratorFromStringHandle multiDeviceIteratorFromStringHandle( + Operand stringHandle, List> outputTypes, + MultiDeviceIteratorFromStringHandle.Options... options) { + return MultiDeviceIteratorFromStringHandle.create(scope, stringHandle, outputTypes, options); + } + + /** + * Gets next element for the provided shard number. + * + * @param multiDeviceIterator A MultiDeviceIterator resource. + * @param shardNum Integer representing which shard to fetch data for. + * @param incarnationId Which incarnation of the MultiDeviceIterator is running. + * @param outputTypes The type list for the return values. + * @param outputShapes The list of shapes being produced. + * @return a new instance of MultiDeviceIteratorGetNextFromShard + */ + public MultiDeviceIteratorGetNextFromShard multiDeviceIteratorGetNextFromShard( + Operand multiDeviceIterator, Operand shardNum, + Operand incarnationId, List> outputTypes, + List outputShapes) { + return MultiDeviceIteratorGetNextFromShard.create(scope, multiDeviceIterator, shardNum, incarnationId, outputTypes, outputShapes); + } + + /** + * Initializes the multi device iterator with the given dataset. + * + * @param dataset Dataset to be iterated upon. + * @param multiDeviceIterator A MultiDeviceIteratorResource. 
+ * @param maxBufferSize The maximum size of the host side per device buffer to keep. + * @return a new instance of MultiDeviceIteratorInit + */ + public MultiDeviceIteratorInit multiDeviceIteratorInit(Operand dataset, + Operand multiDeviceIterator, Operand maxBufferSize) { + return MultiDeviceIteratorInit.create(scope, dataset, multiDeviceIterator, maxBufferSize); + } + + /** + * Produces a string handle for the given MultiDeviceIterator. + * + * @param multiDeviceIterator A MultiDeviceIterator resource. + * @return a new instance of MultiDeviceIteratorToStringHandle + */ + public MultiDeviceIteratorToStringHandle multiDeviceIteratorToStringHandle( + Operand multiDeviceIterator) { + return MultiDeviceIteratorToStringHandle.create(scope, multiDeviceIterator); + } + /** * The NonSerializableDataset operation * @@ -1162,6 +1443,35 @@ public ParallelBatchDataset parallelBatchDataset(Operand inputD return ParallelBatchDataset.create(scope, inputDataset, batchSize, numParallelCalls, dropRemainder, outputTypes, outputShapes, options); } + /** + * Creates a dataset containing elements of {@code input_dataset} matching {@code predicate}. + * The {@code predicate} function must return a scalar boolean and accept the + * following arguments: + *

    + *
  • One tensor for each component of an element of {@code input_dataset}.
  • + *
  • One tensor for each value in {@code other_arguments}.
  • + *
+ *

Unlike a "FilterDataset", which applies {@code predicate} sequentially, this dataset + * invokes up to {@code num_parallel_calls} copies of {@code predicate} in parallel. + * + * @param inputDataset The inputDataset value + * @param otherArguments A list of tensors, typically values that were captured when + * building a closure for {@code predicate}. + * @param numParallelCalls The number of concurrent invocations of {@code predicate} that process + * elements from {@code input_dataset} in parallel. + * @param predicate A function returning a scalar boolean. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of ParallelFilterDataset + */ + public ParallelFilterDataset parallelFilterDataset(Operand inputDataset, + Iterable> otherArguments, Operand numParallelCalls, + ConcreteFunction predicate, List> outputTypes, + List outputShapes, ParallelFilterDataset.Options... options) { + return ParallelFilterDataset.create(scope, inputDataset, otherArguments, numParallelCalls, predicate, outputTypes, outputShapes, options); + } + /** * Creates a dataset that applies {@code f} to the outputs of {@code input_dataset}. 
* The resulting dataset is similar to the {@code InterleaveDataset}, except that the @@ -1419,6 +1729,21 @@ public RepeatDataset repeatDataset(Operand inputDataset, Operan return RepeatDataset.create(scope, inputDataset, count, outputTypes, outputShapes, options); } + /** + * The RewriteDataset operation + * + * @param inputDataset The inputDataset value + * @param rewriteName The rewriteName value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of RewriteDataset + */ + public RewriteDataset rewriteDataset(Operand inputDataset, + Operand rewriteName, List> outputTypes, + List outputShapes) { + return RewriteDataset.create(scope, inputDataset, rewriteName, outputTypes, outputShapes); + } + /** * Creates a dataset that takes a Bernoulli sample of the contents of another dataset. * There is no transformation in the {@code tf.data} Python API for creating this dataset. @@ -1623,6 +1948,21 @@ public SlidingWindowDataset slidingWindowDataset(Operand inputD return SlidingWindowDataset.create(scope, inputDataset, windowSize, windowShift, windowStride, outputTypes, outputShapes, options); } + /** + * The SnapshotChunkDataset operation + * + * @param chunkFile The chunkFile value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of SnapshotChunkDataset + */ + public SnapshotChunkDataset snapshotChunkDataset(Operand chunkFile, + List> outputTypes, List outputShapes, + SnapshotChunkDataset.Options... options) { + return SnapshotChunkDataset.create(scope, chunkFile, outputTypes, outputShapes, options); + } + /** * Creates a dataset that will write to / read from a snapshot. 
* This dataset attempts to determine whether a valid snapshot exists at the @@ -1649,6 +1989,37 @@ public SnapshotDataset snapshotDataset(Operand inputDataset, return SnapshotDataset.create(scope, inputDataset, path, readerFuncOtherArgs, shardFuncOtherArgs, outputTypes, outputShapes, readerFunc, shardFunc, options); } + /** + * The SnapshotDatasetReader operation + * + * @param shardDir The shardDir value + * @param startIndex The startIndex value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param version The value of the version attribute + * @param options carries optional attribute values + * @return a new instance of SnapshotDatasetReader + */ + public SnapshotDatasetReader snapshotDatasetReader(Operand shardDir, + Operand startIndex, List> outputTypes, + List outputShapes, Long version, SnapshotDatasetReader.Options... options) { + return SnapshotDatasetReader.create(scope, shardDir, startIndex, outputTypes, outputShapes, version, options); + } + + /** + * The SnapshotNestedDatasetReader operation + * + * @param inputs The inputs value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of SnapshotNestedDatasetReader + */ + public SnapshotNestedDatasetReader snapshotNestedDatasetReader( + Iterable> inputs, List> outputTypes, + List outputShapes) { + return SnapshotNestedDatasetReader.create(scope, inputs, outputTypes, outputShapes); + } + /** * Creates a dataset that splits a SparseTensor into elements row-wise. 
* @@ -1677,6 +2048,28 @@ public SqlDataset sqlDataset(Operand driverName, Operand dataS return SqlDataset.create(scope, driverName, dataSourceName, query, outputTypes, outputShapes); } + /** + * The StatsAggregatorHandleV2 operation + * + * @param options carries optional attribute values + * @return a new instance of StatsAggregatorHandle + */ + public StatsAggregatorHandle statsAggregatorHandle(StatsAggregatorHandle.Options... options) { + return StatsAggregatorHandle.create(scope, options); + } + + /** + * Set a summary_writer_interface to record statistics using given stats_aggregator. + * + * @param statsAggregator The statsAggregator value + * @param summary The summary value + * @return a new instance of StatsAggregatorSetSummaryWriter + */ + public StatsAggregatorSetSummaryWriter statsAggregatorSetSummaryWriter( + Operand statsAggregator, Operand summary) { + return StatsAggregatorSetSummaryWriter.create(scope, statsAggregator, summary); + } + /** * Creates a dataset that contains {@code count} elements from the {@code input_dataset}. * @@ -1798,6 +2191,21 @@ public ThreadPoolDataset threadPoolDataset(Operand inputDataset return ThreadPoolDataset.create(scope, inputDataset, threadPool, outputTypes, outputShapes); } + /** + * Creates a dataset that uses a custom thread pool to compute {@code input_dataset}. + * + * @param numThreads The number of threads in the thread pool. + * @param displayName A human-readable name for the threads that may be visible in some + * visualizations. + * threadpool. + * @param options carries optional attribute values + * @return a new instance of ThreadPoolHandle + */ + public ThreadPoolHandle threadPoolHandle(Long numThreads, String displayName, + ThreadPoolHandle.Options... options) { + return ThreadPoolHandle.create(scope, numThreads, displayName, options); + } + /** * A dataset that splits the elements of its input into multiple elements. 
* @@ -1813,6 +2221,19 @@ public UnbatchDataset unbatchDataset(Operand inputDataset, return UnbatchDataset.create(scope, inputDataset, outputTypes, outputShapes, options); } + /** + * Uncompresses a compressed dataset element. + * + * @param compressed The compressed value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of UncompressElement + */ + public UncompressElement uncompressElement(Operand compressed, + List> outputTypes, List outputShapes) { + return UncompressElement.create(scope, compressed, outputTypes, outputShapes); + } + /** * Creates a dataset that contains the unique elements of {@code input_dataset}. * @@ -1899,6 +2320,19 @@ public WindowDataset windowDataset(Operand inputDataset, return WindowDataset.create(scope, inputDataset, sizeOutput, shift, stride, dropRemainder, outputTypes, outputShapes, options); } + /** + * The WindowOp operation + * + * @param inputs The inputs value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of WindowOp + */ + public WindowOp windowOp(Iterable> inputs, List> outputTypes, + List outputShapes) { + return WindowOp.create(scope, inputs, outputTypes, outputShapes); + } + /** * The WrapDatasetVariant operation * diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DebuggingOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DebuggingOps.java new file mode 100644 index 00000000000..4ea1efd10db --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DebuggingOps.java @@ -0,0 +1,61 @@ +// Copyright 2020-2022 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op; + +import org.tensorflow.Operand; +import org.tensorflow.op.debugging.CheckNumerics; +import org.tensorflow.types.family.TNumber; + +/** + * An API for building {@code debugging} operations as {@link Op Op}s + * + * @see Ops + */ +public final class DebuggingOps { + private final Scope scope; + + private final Ops ops; + + DebuggingOps(Ops ops) { + this.scope = ops.scope(); + this.ops = ops; + } + + /** + * Checks a tensor for NaN, -Inf and +Inf values. + * When run, reports an {@code InvalidArgument} error if {@code tensor} has any values + * that are not a number (NaN) or infinity (Inf). Otherwise, returns the input + * tensor. Unlike CheckNumerics (V1), CheckNumericsV2 distinguishes -Inf and +Inf + * in the errors it throws. + * + * @param tensor The tensor value + * @param message Prefix of the error message. + * @param data type for {@code CheckNumericsV2} output and operands + * @return a new instance of CheckNumerics + */ + public CheckNumerics checkNumerics(Operand tensor, String message) { + return CheckNumerics.create(scope, tensor, message); + } + + /** + * Get the parent {@link Ops} object. 
+ */ + public final Ops ops() { + return ops; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DistributeOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DistributeOps.java new file mode 100644 index 00000000000..4f30df6352d --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DistributeOps.java @@ -0,0 +1,110 @@ +// Copyright 2020-2022 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op; + +import org.tensorflow.Operand; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.distribute.NcclAllReduce; +import org.tensorflow.op.distribute.NcclBroadcast; +import org.tensorflow.op.distribute.NcclReduce; +import org.tensorflow.types.family.TNumber; + +/** + * An API for building {@code distribute} operations as {@link Op Op}s + * + * @see Ops + */ +public final class DistributeOps { + private final Scope scope; + + private final Ops ops; + + DistributeOps(Ops ops) { + this.scope = ops.scope(); + this.ops = ops; + } + + /** + * Outputs a tensor containing the reduction across all input tensors. + * Outputs a tensor containing the reduction across all input tensors passed to ops + * within the same `shared_name. + *

The graph should be constructed so if one op runs with shared_name value {@code c}, + * then {@code num_devices} ops will run with shared_name value {@code c}. Failure to do so + * will cause the graph execution to fail to complete. + *

input: the input to the reduction + * data: the value of the reduction across all {@code num_devices} devices. + * reduction: the reduction operation to perform. + * num_devices: The number of devices participating in this reduction. + * shared_name: Identifier that shared between ops of the same reduction. + * + * @param input The input value + * @param reduction The value of the reduction attribute + * @param numDevices The value of the numDevices attribute + * @param sharedName The value of the sharedName attribute + * @param data type for {@code NcclAllReduce} output and operands + * @return a new instance of NcclAllReduce + */ + public NcclAllReduce ncclAllReduce(Operand input, String reduction, + Long numDevices, String sharedName) { + return NcclAllReduce.create(scope, input, reduction, numDevices, sharedName); + } + + /** + * Sends {@code input} to all devices that are connected to the output. + * Sends {@code input} to all devices that are connected to the output. + *

The graph should be constructed so that all ops connected to the output have a + * valid device assignment, and the op itself is assigned one of these devices. + *

input: The input to the broadcast. + * output: The same as input. + * shape: The shape of the input tensor. + * + * @param input The input value + * @param shape The value of the shape attribute + * @param data type for {@code NcclBroadcast} output and operands + * @return a new instance of NcclBroadcast + */ + public NcclBroadcast ncclBroadcast(Operand input, Shape shape) { + return NcclBroadcast.create(scope, input, shape); + } + + /** + * Reduces {@code input} from {@code num_devices} using {@code reduction} to a single device. + * Reduces {@code input} from {@code num_devices} using {@code reduction} to a single device. + *

The graph should be constructed so that all inputs have a valid device + * assignment, and the op itself is assigned one of these devices. + *

input: The input to the reduction. + * data: the value of the reduction across all {@code num_devices} devices. + * reduction: the reduction operation to perform. + * + * @param input The input value + * @param reduction The value of the reduction attribute + * @param data type for {@code NcclReduce} output and operands + * @return a new instance of NcclReduce + */ + public NcclReduce ncclReduce(Iterable> input, + String reduction) { + return NcclReduce.create(scope, input, reduction); + } + + /** + * Get the parent {@link Ops} object. + */ + public final Ops ops() { + return ops; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DtypesOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DtypesOps.java index 319ee68d295..42f59c161d7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DtypesOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DtypesOps.java @@ -21,13 +21,14 @@ import org.tensorflow.op.dtypes.AsString; import org.tensorflow.op.dtypes.Cast; import org.tensorflow.op.dtypes.Complex; +import org.tensorflow.op.dtypes.ToBool; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; /** * An API for building {@code dtypes} operations as {@link Op Op}s * - * @see {@link Ops} + * @see Ops */ public final class DtypesOps { private final Scope scope; @@ -68,7 +69,6 @@ public AsString asString(Operand input, AsString.Options... opt /** * Cast x of type SrcT to y of DstT. 
* - * @param data type for {@code y} output * @param x The x value * @param DstT The value of the DstT attribute * @param options carries optional attribute values @@ -94,7 +94,6 @@ public Cast cast(Operand x, Class DstT, * tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] * * - * @param data type for {@code out} output * @param real The real value * @param imag The imag value * @param Tout The value of the Tout attribute @@ -107,6 +106,31 @@ public Complex complex(Operand real, return Complex.create(scope, real, imag, Tout); } + /** + * Converts a tensor to a scalar predicate. + * Converts a tensor to a scalar predicate with the following rules: + *

    + *
  • + *

    For 0D tensors, truthiness is determined by comparing against a "zero" + * value. For numerical types it is the obvious zero. For strings it is the + * empty string. + *

  • + *
  • + *

    For >0D tensors, truthiness is determined by looking at the number of + * elements. If has zero elements, then the result is false. Otherwise the + * result is true. + *

  • + *
+ *

This matches the behavior of If and While for determining if a tensor counts + * as true/false for a branch condition. + * + * @param input The input value + * @return a new instance of ToBool + */ + public ToBool toBool(Operand input) { + return ToBool.create(scope, input); + } + /** * Get the parent {@link Ops} object. */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ImageOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ImageOps.java index 3f93386ebb3..f3fa3e6bbc0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ImageOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ImageOps.java @@ -36,20 +36,29 @@ import org.tensorflow.op.image.EncodeJpeg; import org.tensorflow.op.image.EncodeJpegVariableQuality; import org.tensorflow.op.image.EncodePng; +import org.tensorflow.op.image.ExtractGlimpse; import org.tensorflow.op.image.ExtractImagePatches; import org.tensorflow.op.image.ExtractJpegShape; +import org.tensorflow.op.image.GenerateBoundingBoxProposals; import org.tensorflow.op.image.HsvToRgb; +import org.tensorflow.op.image.ImageProjectiveTransformV2; +import org.tensorflow.op.image.ImageProjectiveTransformV3; +import org.tensorflow.op.image.NearestNeighbors; import org.tensorflow.op.image.NonMaxSuppression; import org.tensorflow.op.image.NonMaxSuppressionWithOverlaps; import org.tensorflow.op.image.QuantizedResizeBilinear; import org.tensorflow.op.image.RandomCrop; import org.tensorflow.op.image.ResizeArea; import org.tensorflow.op.image.ResizeBicubic; +import org.tensorflow.op.image.ResizeBicubicGrad; import org.tensorflow.op.image.ResizeBilinear; +import org.tensorflow.op.image.ResizeBilinearGrad; import org.tensorflow.op.image.ResizeNearestNeighbor; +import org.tensorflow.op.image.ResizeNearestNeighborGrad; import org.tensorflow.op.image.RgbToHsv; import org.tensorflow.op.image.SampleDistortedBoundingBox; import 
org.tensorflow.op.image.ScaleAndTranslate; +import org.tensorflow.op.image.ScaleAndTranslateGrad; import org.tensorflow.op.image.StatelessSampleDistortedBoundingBox; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -62,7 +71,7 @@ /** * An API for building {@code image} operations as {@link Op Op}s * - * @see {@link Ops} + * @see Ops */ public final class ImageOps { private final Scope scope; @@ -84,7 +93,6 @@ public final class ImageOps { * channel and then adjusts each component of each pixel to * {@code (x - mean) * contrast_factor + mean}. * - * @param data type for {@code output} output * @param images Images to adjust. At least 3-D. * @param contrastFactor A float multiplier for adjusting contrast. * @param data type for {@code AdjustContrastv2} output and operands @@ -103,7 +111,6 @@ public AdjustContrast adjustContrast(Operand images, * colors are first mapped into HSV. A delta is then applied all the hue values, * and then remapped back to RGB colorspace. * - * @param data type for {@code output} output * @param images Images to adjust. At least 3-D. * @param delta A float delta to add to the hue. * @param data type for {@code AdjustHue} output and operands @@ -121,7 +128,6 @@ public AdjustHue adjustHue(Operand images, Operand data type for {@code output} output * @param images Images to adjust. At least 3-D. * @param scale A float scale to add to the saturation. * @param data type for {@code AdjustSaturation} output and operands @@ -241,7 +247,6 @@ public CropAndResizeGradBoxes cropAndResizeGradBoxes(Operand grads, /** * Computes the gradient of the crop_and_resize op wrt the input image tensor. * - * @param data type for {@code output} output * @param grads A 4-D tensor of shape {@code [num_boxes, crop_height, crop_width, depth]}. * @param boxes A 2-D tensor of shape {@code [num_boxes, 4]}. 
The {@code i}-th row of the tensor * specifies the coordinates of a box in the {@code box_ind[i]} image and is specified @@ -348,7 +353,6 @@ public DecodeGif decodeGif(Operand contents) { * first frame that does not occupy the entire canvas, it uses the previous * frame to fill the unoccupied areas. * - * @param data type for {@code image} output * @param contents 0-D. The encoded image bytes. * @param options carries optional attribute values * @return a new instance of DecodeImage, with default output types @@ -375,7 +379,6 @@ public DecodeImage decodeImage(Operand contents, DecodeImage.Op * first frame that does not occupy the entire canvas, it uses the previous * frame to fill the unoccupied areas. * - * @param data type for {@code image} output * @param contents 0-D. The encoded image bytes. * @param dtype The desired DType of the returned Tensor. * @param options carries optional attribute values @@ -429,7 +432,6 @@ public DecodeJpeg decodeJpeg(Operand contents, DecodeJpeg.Options... op *

This op also supports decoding JPEGs and non-animated GIFs since the interface * is the same, though it is cleaner to use {@code tf.io.decode_image}. * - * @param data type for {@code image} output * @param contents 0-D. The PNG-encoded image. * @param options carries optional attribute values * @return a new instance of DecodePng, with default output types @@ -454,7 +456,6 @@ public DecodePng decodePng(Operand contents, DecodePng.Options[ *

This op also supports decoding JPEGs and non-animated GIFs since the interface * is the same, though it is cleaner to use {@code tf.io.decode_image}. * - * @param data type for {@code image} output * @param contents 0-D. The PNG-encoded image. * @param dtype The value of the dtype attribute * @param options carries optional attribute values @@ -478,7 +479,6 @@ public DecodePng decodePng(Operand contents, Cla * the bounding box will be {@code (40, 10)} to {@code (100, 50)} (in (x,y) coordinates). *

Parts of the bounding box may fall outside the image. * - * @param data type for {@code output} output * @param images 4-D with shape {@code [batch, height, width, depth]}. A batch of images. * @param boxes 3-D with shape {@code [batch, num_bounding_boxes, 4]} containing bounding * boxes. @@ -554,10 +554,45 @@ public EncodePng encodePng(Operand image, EncodePng.Options.. return EncodePng.create(scope, image, options); } + /** + * Extracts a glimpse from the input tensor. + * Returns a set of windows called glimpses extracted at location + * {@code offsets} from the input tensor. If the windows only partially + * overlaps the inputs, the non overlapping areas will be filled with + * random noise. + *

The result is a 4-D tensor of shape {@code [batch_size, glimpse_height, glimpse_width, channels]}. The channels and batch dimensions are the + * same as that of the input tensor. The height and width of the output + * windows are specified in the {@code size} parameter. + *

The argument {@code normalized} and {@code centered} controls how the windows are built: + *

    + *
  • If the coordinates are normalized but not centered, 0.0 and 1.0 + * correspond to the minimum and maximum of each height and width + * dimension.
  • + *
  • If the coordinates are both normalized and centered, they range from + * -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper + * left corner, the lower right corner is located at (1.0, 1.0) and the + * center is at (0, 0).
  • + *
  • If the coordinates are not normalized they are interpreted as + * numbers of pixels.
  • + *
+ * + * @param input A 4-D float tensor of shape {@code [batch_size, height, width, channels]}. + * @param sizeOutput A 1-D tensor of 2 elements containing the size of the glimpses + * to extract. The glimpse height must be specified first, following + * by the glimpse width. + * @param offsets A 2-D integer tensor of shape {@code [batch_size, 2]} containing + * the y, x locations of the center of each window. + * @param options carries optional attribute values + * @return a new instance of ExtractGlimpse + */ + public ExtractGlimpse extractGlimpse(Operand input, Operand sizeOutput, + Operand offsets, ExtractGlimpse.Options... options) { + return ExtractGlimpse.create(scope, input, sizeOutput, offsets, options); + } + /** * Extract {@code patches} from {@code images} and put them in the "depth" output dimension. * - * @param data type for {@code patches} output * @param images 4-D Tensor with shape {@code [batch, in_rows, in_cols, depth]}. * @param ksizes The size of the sliding window for each dimension of {@code images}. * @param strides How far the centers of two consecutive patches are in @@ -581,7 +616,6 @@ public ExtractImagePatches extractImagePatches(Operand i * Extract the shape information of a JPEG-encoded image. * This op only parses the image header, so it is much faster than DecodeJpeg. * - * @param data type for {@code image_shape} output * @param contents 0-D. The JPEG-encoded image. * @return a new instance of ExtractJpegShape, with default output types */ @@ -593,7 +627,6 @@ public ExtractJpegShape extractJpegShape(Operand contents) { * Extract the shape information of a JPEG-encoded image. * This op only parses the image header, so it is much faster than DecodeJpeg. * - * @param data type for {@code image_shape} output * @param contents 0-D. The JPEG-encoded image. * @param outputType (Optional) The output type of the operation (int32 or int64). * Defaults to int32. 
@@ -605,6 +638,40 @@ public ExtractJpegShape extractJpegShape(Operand return ExtractJpegShape.create(scope, contents, outputType); } + /** + * This op produces Region of Interests from given bounding boxes(bbox_deltas) encoded wrt anchors according to eq.2 in arXiv:1506.01497 + *
+   *    The op selects top `pre_nms_topn` scoring boxes, decodes them with respect to anchors,
+   *    applies non-maximal suppression on overlapping boxes with higher than
+   *    `nms_threshold` intersection-over-union (iou) value, discarding boxes where shorter
+   *    side is less than `min_size`.
+   *    Inputs:
+   *    `scores`: A 4D tensor of shape [Batch, Height, Width, Num Anchors] containing the scores per anchor at given position
+   *    `bbox_deltas`: is a tensor of shape [Batch, Height, Width, 4 x Num Anchors] boxes encoded to each anchor
+   *    `anchors`: A 1D tensor of shape [4 x Num Anchors], representing the anchors.
+   *    Outputs:
+   *    `rois`: output RoIs, a 3D tensor of shape [Batch, post_nms_topn, 4], padded by 0 if less than post_nms_topn candidates found.
+   *    `roi_probabilities`: probability scores of each roi in 'rois', a 2D tensor of shape [Batch,post_nms_topn], padded with 0 if needed, sorted by scores.
+   *  
+ * + * @param scores A 4-D float tensor of shape {@code [num_images, height, width, num_achors]} containing scores of the boxes for given anchors, can be unsorted. + * @param bboxDeltas A 4-D float tensor of shape {@code [num_images, height, width, 4 x num_anchors]}. encoding boxes with respec to each anchor. + * Coordinates are given in the form [dy, dx, dh, dw]. + * @param imageInfo A 2-D float tensor of shape {@code [num_images, 5]} containing image information Height, Width, Scale. + * @param anchors A 2-D float tensor of shape {@code [num_anchors, 4]} describing the anchor boxes. Boxes are formatted in the form [y1, x1, y2, x2]. + * @param nmsThreshold A scalar float tensor for non-maximal-suppression threshold. + * @param preNmsTopn A scalar int tensor for the number of top scoring boxes to be used as input. + * @param minSize A scalar float tensor. Any box that has a smaller size than min_size will be discarded. + * @param options carries optional attribute values + * @return a new instance of GenerateBoundingBoxProposals + */ + public GenerateBoundingBoxProposals generateBoundingBoxProposals(Operand scores, + Operand bboxDeltas, Operand imageInfo, Operand anchors, + Operand nmsThreshold, Operand preNmsTopn, Operand minSize, + GenerateBoundingBoxProposals.Options... options) { + return GenerateBoundingBoxProposals.create(scope, scores, bboxDeltas, imageInfo, anchors, nmsThreshold, preNmsTopn, minSize, options); + } + /** * Convert one or more images from HSV to RGB. * Outputs a tensor of the same shape as the {@code images} tensor, containing the RGB @@ -612,7 +679,6 @@ public ExtractJpegShape extractJpegShape(Operand * are in {@code [0,1]}. *

See {@code rgb_to_hsv} for a description of the HSV encoding. * - * @param data type for {@code output} output * @param images 1-D or higher rank. HSV data to convert. Last dimension must be size 3. * @param data type for {@code HSVToRGB} output and operands * @return a new instance of HsvToRgb @@ -621,6 +687,73 @@ public HsvToRgb hsvToRgb(Operand images) { return HsvToRgb.create(scope, images); } + /** + * Applies the given transform to each of the images. + * If one row of {@code transforms} is {@code [a0, a1, a2, b0, b1, b2, c0, c1]}, then it maps + * the output point {@code (x, y)} to a transformed input point + * {@code (x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)}, where + * {@code k = c0 x + c1 y + 1}. If the transformed point lays outside of the input + * image, the output pixel is set to 0. + * + * @param images 4-D with shape {@code [batch, height, width, channels]}. + * @param transforms 2-D Tensor, {@code [batch, 8]} or {@code [1, 8]} matrix, where each row corresponds to a 3 x 3 + * projective transformation matrix, with the last entry assumed to be 1. If there + * is one row, the same transformation will be applied to all images. + * @param outputShape 1-D Tensor [new_height, new_width]. + * @param interpolation Interpolation method, "NEAREST" or "BILINEAR". + * @param options carries optional attribute values + * @param data type for {@code ImageProjectiveTransformV2} output and operands + * @return a new instance of ImageProjectiveTransformV2 + */ + public ImageProjectiveTransformV2 imageProjectiveTransformV2( + Operand images, Operand transforms, Operand outputShape, + String interpolation, ImageProjectiveTransformV2.Options... options) { + return ImageProjectiveTransformV2.create(scope, images, transforms, outputShape, interpolation, options); + } + + /** + * Applies the given transform to each of the images. 
+ * If one row of {@code transforms} is {@code [a0, a1, a2, b0, b1, b2, c0, c1]}, then it maps + * the output point {@code (x, y)} to a transformed input point + * {@code (x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)}, where + * {@code k = c0 x + c1 y + 1}. If the transformed point lays outside of the input + * image, the output pixel is set to fill_value. + * + * @param images 4-D with shape {@code [batch, height, width, channels]}. + * @param transforms 2-D Tensor, {@code [batch, 8]} or {@code [1, 8]} matrix, where each row corresponds to a 3 x 3 + * projective transformation matrix, with the last entry assumed to be 1. If there + * is one row, the same transformation will be applied to all images. + * @param outputShape 1-D Tensor [new_height, new_width]. + * @param fillValue float, the value to be filled when fill_mode is constant". + * @param interpolation Interpolation method, "NEAREST" or "BILINEAR". + * @param options carries optional attribute values + * @param data type for {@code ImageProjectiveTransformV3} output and operands + * @return a new instance of ImageProjectiveTransformV3 + */ + public ImageProjectiveTransformV3 imageProjectiveTransformV3( + Operand images, Operand transforms, Operand outputShape, + Operand fillValue, String interpolation, + ImageProjectiveTransformV3.Options... options) { + return ImageProjectiveTransformV3.create(scope, images, transforms, outputShape, fillValue, interpolation, options); + } + + /** + * Selects the k nearest centers for each point. + * Rows of points are assumed to be input points. Rows of centers are assumed to be + * the list of candidate centers. For each point, the k centers that have least L2 + * distance to it are computed. + * + * @param points Matrix of shape (n, d). Rows are assumed to be input points. + * @param centers Matrix of shape (m, d). Rows are assumed to be centers. + * @param k Number of nearest centers to return for each point. 
If k is larger than m, then + * only m centers are returned. + * @return a new instance of NearestNeighbors + */ + public NearestNeighbors nearestNeighbors(Operand points, Operand centers, + Operand k) { + return NearestNeighbors.create(scope, points, centers, k); + } + /** * Greedily selects a subset of bounding boxes in descending order of score, * pruning away boxes that have high intersection-over-union (IOU) overlap @@ -646,7 +779,6 @@ public HsvToRgb hsvToRgb(Operand images) { * To enable this Soft-NMS mode, set the {@code soft_nms_sigma} parameter to be * larger than 0. * - * @param data type for {@code selected_scores} output * @param boxes A 2-D float tensor of shape {@code [num_boxes, 4]}. * @param scores A 1-D float tensor of shape {@code [num_boxes]} representing a single * score corresponding to each box (each row of boxes). @@ -706,7 +838,6 @@ public NonMaxSuppressionWithOverlaps nonMaxSuppressionWithOverlaps(Operand data type for {@code resized_images} output * @param images 4-D with shape {@code [batch, height, width, channels]}. * @param sizeOutput = A 1-D int32 Tensor of 2 elements: {@code new_height, new_width}. The * new size for the images. @@ -730,7 +861,6 @@ public QuantizedResizeBilinear quantizedResizeBilinear(Op * rectangle from that location. The random location is picked so the cropped * area will fit inside the original image. * - * @param data type for {@code output} output * @param image 3-D of shape {@code [height, width, channels]}. * @param sizeOutput 1-D of length 2 containing: {@code crop_height}, {@code crop_width}.. * @param options carries optional attribute values @@ -780,6 +910,21 @@ public ResizeBicubic resizeBicubic(Operand images, Operand data type for {@code ResizeBicubicGrad} output and operands + * @return a new instance of ResizeBicubicGrad + */ + public ResizeBicubicGrad resizeBicubicGrad(Operand grads, + Operand originalImage, ResizeBicubicGrad.Options... 
options) { + return ResizeBicubicGrad.create(scope, grads, originalImage, options); + } + /** * Resize {@code images} to {@code size} using bilinear interpolation. * Input images can be of different types but output images are always float. @@ -795,10 +940,24 @@ public ResizeBilinear resizeBilinear(Operand images, return ResizeBilinear.create(scope, images, sizeOutput, options); } + /** + * Computes the gradient of bilinear interpolation. + * + * @param grads 4-D with shape {@code [batch, height, width, channels]}. + * @param originalImage 4-D with shape {@code [batch, orig_height, orig_width, channels]}, + * The image tensor that was resized. + * @param options carries optional attribute values + * @param data type for {@code ResizeBilinearGrad} output and operands + * @return a new instance of ResizeBilinearGrad + */ + public ResizeBilinearGrad resizeBilinearGrad(Operand grads, + Operand originalImage, ResizeBilinearGrad.Options... options) { + return ResizeBilinearGrad.create(scope, grads, originalImage, options); + } + /** * Resize {@code images} to {@code size} using nearest neighbor interpolation. * - * @param data type for {@code resized_images} output * @param images 4-D with shape {@code [batch, height, width, channels]}. * @param sizeOutput = A 1-D int32 Tensor of 2 elements: {@code new_height, new_width}. The * new size for the images. @@ -811,6 +970,21 @@ public ResizeNearestNeighbor resizeNearestNeighbor(Operan return ResizeNearestNeighbor.create(scope, images, sizeOutput, options); } + /** + * Computes the gradient of nearest neighbor interpolation. + * + * @param grads 4-D with shape {@code [batch, height, width, channels]}. + * @param sizeOutput = A 1-D int32 Tensor of 2 elements: {@code orig_height, orig_width}. The + * original input size. 
+ * @param options carries optional attribute values + * @param data type for {@code ResizeNearestNeighborGrad} output and operands + * @return a new instance of ResizeNearestNeighborGrad + */ + public ResizeNearestNeighborGrad resizeNearestNeighborGrad( + Operand grads, Operand sizeOutput, ResizeNearestNeighborGrad.Options... options) { + return ResizeNearestNeighborGrad.create(scope, grads, sizeOutput, options); + } + /** * Converts one or more images from RGB to HSV. * Outputs a tensor of the same shape as the {@code images} tensor, containing the HSV @@ -835,7 +1009,6 @@ public ResizeNearestNeighbor resizeNearestNeighbor(Operan * * * - * @param data type for {@code output} output * @param images 1-D or higher rank. RGB data to convert. Last dimension must be size 3. * @param data type for {@code RGBToHSV} output and operands * @return a new instance of RgbToHsv @@ -880,7 +1053,6 @@ public RgbToHsv rgbToHsv(Operand images) { * bounding box covering the whole image. If {@code use_image_if_no_bounding_boxes} is * false and no bounding boxes are supplied, an error is raised. * - * @param data type for {@code begin} output * @param imageSize 1-D, containing {@code [height, width, channels]}. * @param boundingBoxes 3-D with shape {@code [batch, N, 4]} describing the N bounding boxes * associated with the image. 
@@ -914,6 +1086,23 @@ public ScaleAndTranslate scaleAndTranslate(Operand images, return ScaleAndTranslate.create(scope, images, sizeOutput, scale, translation, options); } + /** + * The ScaleAndTranslateGrad operation + * + * @param grads The grads value + * @param originalImage The originalImage value + * @param scale The scale value + * @param translation The translation value + * @param options carries optional attribute values + * @param data type for {@code ScaleAndTranslateGrad} output and operands + * @return a new instance of ScaleAndTranslateGrad + */ + public ScaleAndTranslateGrad scaleAndTranslateGrad(Operand grads, + Operand originalImage, Operand scale, Operand translation, + ScaleAndTranslateGrad.Options... options) { + return ScaleAndTranslateGrad.create(scope, grads, originalImage, scale, translation, options); + } + /** * Generate a randomly distorted bounding box for an image deterministically. * Bounding box annotations are often supplied in addition to ground-truth labels @@ -975,7 +1164,6 @@ public ScaleAndTranslate scaleAndTranslate(Operand images, * bounding box covering the whole image. If {@code use_image_if_no_bounding_boxes} is * false and no bounding boxes are supplied, an error is raised. * - * @param data type for {@code begin} output * @param imageSize 1-D, containing {@code [height, width, channels]}. * @param boundingBoxes 3-D with shape {@code [batch, N, 4]} describing the N bounding boxes * associated with the image. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/IoOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/IoOps.java index 303b4d5d618..5c33c56e962 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/IoOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/IoOps.java @@ -27,7 +27,9 @@ import org.tensorflow.op.io.DecodePaddedRaw; import org.tensorflow.op.io.DecodeRaw; import org.tensorflow.op.io.DeserializeManySparse; +import org.tensorflow.op.io.DisableCopyOnRead; import org.tensorflow.op.io.EncodeBase64; +import org.tensorflow.op.io.FakeQueue; import org.tensorflow.op.io.FifoQueue; import org.tensorflow.op.io.FixedLengthRecordReader; import org.tensorflow.op.io.IdentityReader; @@ -76,7 +78,7 @@ /** * An API for building {@code io} operations as {@link Op Op}s * - * @see {@link Ops} + * @see Ops */ public final class IoOps { private final Scope scope; @@ -158,7 +160,6 @@ public DecodeJsonExample decodeJsonExample(Operand jsonExamples) { /** * Reinterpret the bytes of a string as a vector of numbers. * - * @param data type for {@code output} output * @param inputBytes Tensor of string to be decoded. * @param fixedLength Length in bytes for each element of the decoded output. Must be a multiple * of the size of the output type. @@ -175,7 +176,6 @@ public DecodePaddedRaw decodePaddedRaw(Operand i /** * Reinterpret the bytes of a string as a vector of numbers. * - * @param data type for {@code output} output * @param bytes All the elements must have the same length. * @param outType The value of the outType attribute * @param options carries optional attribute values @@ -229,7 +229,6 @@ public DecodeRaw decodeRaw(Operand bytes, Class * shape = [2 50] * * - * @param data type for {@code sparse_values} output * @param serializedSparse 2-D, The {@code N} serialized {@code SparseTensor} objects. * Must have 3 columns. 
* @param dtype The {@code dtype} of the serialized {@code SparseTensor} objects. @@ -241,6 +240,17 @@ public DeserializeManySparse deserializeManySparse( return DeserializeManySparse.create(scope, serializedSparse, dtype); } + /** + * Turns off the copy-on-read mode. + * Turns off the copy-on-read mode of a resource variable. If the variable is not in copy-on-read mode, this op has no effect. + * + * @param resource The resource handle of the resource variable. + * @return a new instance of DisableCopyOnRead + */ + public DisableCopyOnRead disableCopyOnRead(Operand resource) { + return DisableCopyOnRead.create(scope, resource); + } + /** * Encode strings into web-safe base64 format. * Refer to this article for more information on @@ -257,6 +267,16 @@ public EncodeBase64 encodeBase64(Operand input, EncodeBase64.Options... return EncodeBase64.create(scope, input, options); } + /** + * Deprecated. Do not use. + * + * @param resource The resource value + * @return a new instance of FakeQueue + */ + public FakeQueue fakeQueue(Operand resource) { + return FakeQueue.create(scope, resource); + } + /** * A queue that produces elements in first-in first-out order. * @@ -558,7 +578,6 @@ public ParseSingleSequenceExample parseSingleSequenceExample(Operand se /** * Transforms a serialized tensorflow.TensorProto proto into a Tensor. * - * @param data type for {@code output} output * @param serialized A scalar string containing a serialized TensorProto proto. * @param outType The type of the serialized tensor. The provided type must match the * type of the serialized tensor and no implicit conversion will take place. @@ -860,7 +879,6 @@ public ReaderSerializeState readerSerializeState(Operand reader * rank {@code R-1}. *

The minibatch size {@code N} is extracted from {@code sparse_shape[0]}. * - * @param data type for {@code serialized_sparse} output * @param sparseIndices 2-D. The {@code indices} of the minibatch {@code SparseTensor}. * @param sparseValues 1-D. The {@code values} of the minibatch {@code SparseTensor}. * @param sparseShape 1-D. The {@code shape} of the minibatch {@code SparseTensor}. @@ -880,7 +898,6 @@ public SerializeManySparse serializeManySparse(Operand sparseIn * rank {@code R-1}. *

The minibatch size {@code N} is extracted from {@code sparse_shape[0]}. * - * @param data type for {@code serialized_sparse} output * @param sparseIndices 2-D. The {@code indices} of the minibatch {@code SparseTensor}. * @param sparseValues 1-D. The {@code values} of the minibatch {@code SparseTensor}. * @param sparseShape 1-D. The {@code shape} of the minibatch {@code SparseTensor}. @@ -897,7 +914,6 @@ public SerializeManySparse serializeManySparse(Operand data type for {@code serialized_sparse} output * @param sparseIndices 2-D. The {@code indices} of the {@code SparseTensor}. * @param sparseValues 1-D. The {@code values} of the {@code SparseTensor}. * @param sparseShape 1-D. The {@code shape} of the {@code SparseTensor}. @@ -911,7 +927,6 @@ public SerializeSparse serializeSparse(Operand sparseIndices, /** * Serialize a {@code SparseTensor} into a {@code [3]} {@code Tensor} object. * - * @param data type for {@code serialized_sparse} output * @param sparseIndices 2-D. The {@code indices} of the {@code SparseTensor}. * @param sparseValues 1-D. The {@code values} of the {@code SparseTensor}. * @param sparseShape 1-D. The {@code shape} of the {@code SparseTensor}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/LinalgOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/LinalgOps.java index b1882373d4b..7cb8027ca3a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/LinalgOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/LinalgOps.java @@ -19,6 +19,7 @@ import org.tensorflow.Operand; import org.tensorflow.op.linalg.BandPart; +import org.tensorflow.op.linalg.BandedTriangularSolve; import org.tensorflow.op.linalg.BatchCholesky; import org.tensorflow.op.linalg.BatchCholeskyGrad; import org.tensorflow.op.linalg.BatchMatrixBandPart; @@ -49,10 +50,15 @@ import org.tensorflow.op.linalg.MatrixDiagPart; import org.tensorflow.op.linalg.MatrixDiagPartV3; import org.tensorflow.op.linalg.MatrixDiagV3; +import org.tensorflow.op.linalg.MatrixExponential; +import org.tensorflow.op.linalg.MatrixLogarithm; import org.tensorflow.op.linalg.MatrixSetDiag; import org.tensorflow.op.linalg.MatrixSolveLs; import org.tensorflow.op.linalg.Qr; import org.tensorflow.op.linalg.QuantizedMatMul; +import org.tensorflow.op.linalg.QuantizedMatMulWithBias; +import org.tensorflow.op.linalg.QuantizedMatMulWithBiasAndRelu; +import org.tensorflow.op.linalg.QuantizedMatMulWithBiasAndReluAndRequantize; import org.tensorflow.op.linalg.SelfAdjointEig; import org.tensorflow.op.linalg.Solve; import org.tensorflow.op.linalg.Sqrtm; @@ -61,6 +67,8 @@ import org.tensorflow.op.linalg.TensorDiagPart; import org.tensorflow.op.linalg.Transpose; import org.tensorflow.op.linalg.TriangularSolve; +import org.tensorflow.op.linalg.TridiagonalMatMul; +import org.tensorflow.op.linalg.TridiagonalSolve; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; import org.tensorflow.types.TInt32; @@ -72,9 +80,11 @@ /** * An API for building {@code linalg} operations as {@link Op Op}s * - * @see {@link Ops} + * @see Ops */ public 
final class LinalgOps { + public final LinalgSparseOps sparse; + private final Scope scope; private final Ops ops; @@ -82,6 +92,7 @@ public final class LinalgOps { LinalgOps(Ops ops) { this.scope = ops.scope(); this.ops = ops; + sparse = new LinalgSparseOps(ops); } /** @@ -116,7 +127,6 @@ public final class LinalgOps { * tf.linalg.band_part(input, 0, 0) ==> Diagonal. * * - * @param data type for {@code band} output * @param input Rank {@code k} tensor. * @param numLower 0-D tensor. Number of subdiagonals to keep. If negative, keep entire * lower triangle. @@ -131,10 +141,23 @@ public BandPart bandPart(Operand inpu return BandPart.create(scope, input, numLower, numUpper); } + /** + * The BandedTriangularSolve operation + * + * @param matrix The matrix value + * @param rhs The rhs value + * @param options carries optional attribute values + * @param data type for {@code BandedTriangularSolve} output and operands + * @return a new instance of BandedTriangularSolve + */ + public BandedTriangularSolve bandedTriangularSolve(Operand matrix, + Operand rhs, BandedTriangularSolve.Options... 
options) { + return BandedTriangularSolve.create(scope, matrix, rhs, options); + } + /** * The BatchCholesky operation * - * @param data type for {@code output} output * @param input The input value * @param data type for {@code BatchCholesky} output and operands * @return a new instance of BatchCholesky @@ -146,7 +169,6 @@ public BatchCholesky batchCholesky(Operand input) { /** * The BatchCholeskyGrad operation * - * @param data type for {@code output} output * @param l The l value * @param grad The grad value * @param data type for {@code BatchCholeskyGrad} output and operands @@ -159,7 +181,6 @@ public BatchCholeskyGrad batchCholeskyGrad(Operand l, /** * The BatchMatrixBandPart operation * - * @param data type for {@code band} output * @param input The input value * @param numLower The numLower value * @param numUpper The numUpper value @@ -174,7 +195,6 @@ public BatchMatrixBandPart batchMatrixBandPart(Operand i /** * The BatchMatrixDeterminant operation * - * @param data type for {@code output} output * @param input The input value * @param data type for {@code BatchMatrixDeterminant} output and operands * @return a new instance of BatchMatrixDeterminant @@ -186,7 +206,6 @@ public BatchMatrixDeterminant batchMatrixDeterminant(Operan /** * The BatchMatrixDiag operation * - * @param data type for {@code output} output * @param diagonal The diagonal value * @param data type for {@code BatchMatrixDiag} output and operands * @return a new instance of BatchMatrixDiag @@ -198,7 +217,6 @@ public BatchMatrixDiag batchMatrixDiag(Operand diagonal) /** * The BatchMatrixDiagPart operation * - * @param data type for {@code diagonal} output * @param input The input value * @param data type for {@code BatchMatrixDiagPart} output and operands * @return a new instance of BatchMatrixDiagPart @@ -209,8 +227,11 @@ public BatchMatrixDiagPart batchMatrixDiagPart(Operand i /** * The BatchMatrixInverse operation + * DEPRECATED: This operation is deprecated and will be removed in a 
future version. + * Use tf.linalg.inv instead. + *

Computes the inverse of one or more square invertible matrices or their + * adjoints (conjugate transposes). * - * @param data type for {@code output} output * @param input The input value * @param options carries optional attribute values * @param data type for {@code BatchMatrixInverse} output and operands @@ -224,7 +245,6 @@ public BatchMatrixInverse batchMatrixInverse(Operand i /** * The BatchMatrixSetDiag operation * - * @param data type for {@code output} output * @param input The input value * @param diagonal The diagonal value * @param data type for {@code BatchMatrixSetDiag} output and operands @@ -238,7 +258,6 @@ public BatchMatrixSetDiag batchMatrixSetDiag(Operand inp /** * The BatchMatrixSolve operation * - * @param data type for {@code output} output * @param matrix The matrix value * @param rhs The rhs value * @param options carries optional attribute values @@ -253,7 +272,6 @@ public BatchMatrixSolve batchMatrixSolve(Operand matri /** * The BatchMatrixSolveLs operation * - * @param data type for {@code output} output * @param matrix The matrix value * @param rhs The rhs value * @param l2Regularizer The l2Regularizer value @@ -269,7 +287,6 @@ public BatchMatrixSolveLs batchMatrixSolveLs(Operand m /** * The BatchMatrixTriangularSolve operation * - * @param data type for {@code output} output * @param matrix The matrix value * @param rhs The rhs value * @param options carries optional attribute values @@ -284,7 +301,6 @@ public BatchMatrixTriangularSolve batchMatrixTriangularSo /** * The BatchSelfAdjointEigV2 operation * - * @param data type for {@code e} output * @param input The input value * @param options carries optional attribute values * @param data type for {@code BatchSelfAdjointEigV2} output and operands @@ -298,7 +314,6 @@ public BatchSelfAdjointEig batchSelfAdjointEig(Operand /** * The BatchSvd operation * - * @param data type for {@code s} output * @param input The input value * @param options carries optional attribute values * @param data 
type for {@code BatchSvd} output and operands @@ -321,7 +336,6 @@ public BatchSvd batchSvd(Operand input, BatchSvd.Options * not for large batch dimensions when the submatrices are small. In this * case it might be faster to use the CPU. * - * @param data type for {@code output} output * @param input Shape is {@code [..., M, M]}. * @param data type for {@code Cholesky} output and operands * @return a new instance of Cholesky @@ -335,7 +349,6 @@ public Cholesky cholesky(Operand input) { * For an explanation see "Differentiation of the Cholesky algorithm" by * Iain Murray http://arxiv.org/abs/1602.07527. * - * @param data type for {@code output} output * @param l Output of batch Cholesky algorithm l = cholesky(A). Shape is {@code [..., M, M]}. * Algorithm depends only on lower triangular part of the innermost matrices of * this tensor. @@ -355,7 +368,6 @@ public CholeskyGrad choleskyGrad(Operand l, Operand * {@code y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]} * {@code y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])} * - * @param data type for {@code y} output * @param x The x value * @param perm The perm value * @param data type for {@code ConjugateTranspose} output and operands @@ -372,7 +384,6 @@ public ConjugateTranspose conjugateTranspose(Operand x, * or any shape where the innermost dimension is 3. In the latter case, each pair * of corresponding 3-element vectors is cross-multiplied independently. * - * @param data type for {@code product} output * @param a A tensor containing 3-element vectors. * @param b Another tensor, of same type and shape as {@code a}. * @param data type for {@code Cross} output and operands @@ -388,7 +399,6 @@ public Cross cross(Operand a, Operand b) { * form square matrices. The output is a tensor containing the determinants * for all input submatrices {@code [..., :, :]}. * - * @param data type for {@code output} output * @param input Shape is {@code [..., M, M]}. 
* @param data type for {@code MatrixDeterminant} output and operands * @return a new instance of Det @@ -410,7 +420,6 @@ public Det det(Operand input) { * e = eig(a, compute_v=False) * * - * @param data type for {@code e} output * @param input {@code Tensor} input of shape {@code [N, N]}. * @param Tout The value of the Tout attribute * @param options carries optional attribute values @@ -488,7 +497,6 @@ public Eig eig(Operand input, Class Tou *
{@literal @}end_compatibility * * - * @param data type for {@code output} output * @param inputs List of 1 or 2 Tensors. * @param equation String describing the Einstein Summation operation; in the format of np.einsum. * @param data type for {@code Einsum} output and operands @@ -505,7 +513,6 @@ public Einsum einsum(Iterable> inputs, String eq * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * - * @param data type for {@code output} output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. @@ -528,7 +535,6 @@ public EuclideanNorm euclideanNorm(Operand input, * may detect the condition and raise an exception or it may simply return a * garbage result. * - * @param data type for {@code output} output * @param input Shape is {@code [..., M, M]}. * @param options carries optional attribute values * @param data type for {@code MatrixInverse} output and operands @@ -606,7 +612,6 @@ public LoadAndRemapMatrix loadAndRemapMatrix(Operand ckptPath, * is the {@code LU} decomposition of the input and {@code P} is the corresponding * permutation matrix. * - * @param data type for {@code sign} output * @param input Shape is {@code [N, M, M]}. * @param data type for {@code LogMatrixDeterminant} output and operands * @return a new instance of LogMatrixDeterminant @@ -631,8 +636,6 @@ public LogMatrixDeterminant logMatrixDeterminant(Operand * and {@code M-1}, inclusive. If P_mat denotes the permutation matrix corresponding to * P, then the L, U and P satisfies P_mat * input = L * U. * - * @param data type for {@code lu} output - * @param data type for {@code p} output * @param input A tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions form matrices of * size {@code [M, M]}. * @param data type for {@code Lu} output and operands @@ -658,8 +661,6 @@ public Lu lu(Operand input) { * and {@code M-1}, inclusive. 
If P_mat denotes the permutation matrix corresponding to * P, then the L, U and P satisfies P_mat * input = L * U. * - * @param data type for {@code lu} output - * @param data type for {@code p} output * @param input A tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions form matrices of * size {@code [M, M]}. * @param outputIdxType The value of the outputIdxType attribute @@ -681,7 +682,6 @@ public Lu lu(Operand input, *

Note: The default kernel implementation for MatMul on GPUs uses * cublas. * - * @param data type for {@code product} output * @param a The a value * @param b The b value * @param options carries optional attribute values @@ -775,7 +775,6 @@ public MatMul matMul(Operand a, Operand b, MatMul.Opt * [9, 2]] * * - * @param data type for {@code output} output * @param diagonal Rank {@code r}, where {@code r >= 1} * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main * diagonal, and negative value means subdiagonals. {@code k} can be a single integer @@ -860,7 +859,6 @@ public MatrixDiag matrixDiag(Operand diagonal, Operand * - * @param data type for {@code diagonal} output * @param input Rank {@code r} tensor where {@code r >= 2}. * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main * diagonal, and negative value means subdiagonals. {@code k} can be a single integer @@ -969,7 +967,6 @@ public MatrixDiagPart matrixDiagPart(Operand input, Oper * * * - * @param data type for {@code diagonal} output * @param input Rank {@code r} tensor where {@code r >= 2}. * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main * diagonal, and negative value means subdiagonals. {@code k} can be a single integer @@ -1097,7 +1094,6 @@ public MatrixDiagPartV3 matrixDiagPartV3(Operand input, * * * - * @param data type for {@code output} output * @param diagonal Rank {@code r}, where {@code r >= 1} * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main * diagonal, and negative value means subdiagonals. {@code k} can be a single integer @@ -1121,6 +1117,39 @@ public MatrixDiagV3 matrixDiagV3(Operand diagonal, Opera return MatrixDiagV3.create(scope, diagonal, k, numRows, numCols, paddingValue, options); } + /** + * Deprecated, use python implementation tf.linalg.matrix_exponential. 
+ * + * @param input The input value + * @param data type for {@code MatrixExponential} output and operands + * @return a new instance of MatrixExponential + */ + public MatrixExponential matrixExponential(Operand input) { + return MatrixExponential.create(scope, input); + } + + /** + * Computes the matrix logarithm of one or more square matrices: + * \(log(exp(A)) = A\) + *

This op is only defined for complex matrices. If A is positive-definite and + * real, then casting to a complex matrix, taking the logarithm and casting back + * to a real matrix will give the correct result. + *

This function computes the matrix logarithm using the Schur-Parlett algorithm. + * Details of the algorithm can be found in Section 11.6.2 of: + * Nicholas J. Higham, Functions of Matrices: Theory and Computation, SIAM 2008. + * ISBN 978-0-898716-46-7. + *

The input is a tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions + * form square matrices. The output is a tensor of the same shape as the input + * containing the exponential for all input submatrices {@code [..., :, :]}. + * + * @param input Shape is {@code [..., M, M]}. + * @param data type for {@code MatrixLogarithm} output and operands + * @return a new instance of MatrixLogarithm + */ + public MatrixLogarithm matrixLogarithm(Operand input) { + return MatrixLogarithm.create(scope, input); + } + /** * Returns a batched matrix tensor with new batched diagonal values. * Given {@code input} and {@code diagonal}, this operation returns a tensor with the @@ -1220,7 +1249,6 @@ public MatrixDiagV3 matrixDiagV3(Operand diagonal, Opera * * * - * @param data type for {@code output} output * @param input Rank {@code r+1}, where {@code r >= 1}. * @param diagonal Rank {@code r} when {@code k} is an integer or {@code k[0] == k[1]}. Otherwise, it has rank {@code r+1}. * {@code k >= 1}. @@ -1270,7 +1298,6 @@ public MatrixSetDiag matrixSetDiag(Operand input, Operan * typically 6-7 times slower than the fast path. If {@code fast} is {@code False} then * {@code l2_regularizer} is ignored. * - * @param data type for {@code output} output * @param matrix Shape is {@code [..., M, N]}. * @param rhs Shape is {@code [..., M, K]}. * @param l2Regularizer Scalar tensor. @@ -1301,7 +1328,6 @@ public MatrixSolveLs matrixSolveLs(Operand matrix, Opera * q_full, r_full = qr(a, full_matrices=True) * * - * @param data type for {@code q} output * @param input A tensor of shape {@code [..., M, N]} whose inner-most 2 dimensions * form matrices of size {@code [M, N]}. Let {@code P} be the minimum of {@code M} and {@code N}. * @param options carries optional attribute values @@ -1319,7 +1345,6 @@ public Qr qr(Operand input, Qr.Options... options) { * outer dimension of {@code b} (after being transposed if {@code transposed_b} is * non-zero). 
* - * @param data type for {@code out} output * @param a Must be a two-dimensional tensor. * @param b Must be a two-dimensional tensor. * @param minA The float value that the lowest quantized {@code a} value represents. @@ -1341,6 +1366,100 @@ public QuantizedMatMul quantizedMatMul return QuantizedMatMul.create(scope, a, b, minA, maxA, minB, maxB, Toutput, Tactivation, options); } + /** + * Performs a quantized matrix multiplication of {@code a} by the matrix {@code b} with bias + * add. + * The inputs must be two-dimensional matrices and 1D bias vector. And the inner + * dimension of {@code a} (after being transposed if {@code transpose_a} is non-zero) must + * match the outer dimension of {@code b} (after being transposed if {@code transposed_b} is + * non-zero). Then do broadcast add operation with bias values on the matrix + * multiplication result. The bias size must match inner dimension of {@code b}. + * + * @param a A matrix to be multiplied. Must be a two-dimensional tensor of type {@code quint8}. + * @param b A matrix to be multiplied and must be a two-dimensional tensor of type {@code qint8}. + * @param bias A 1D bias tensor with size matching inner dimension of {@code b} (after being + * transposed if {@code transposed_b} is non-zero). + * @param minA The float value that the lowest quantized {@code a} value represents. + * @param maxA The float value that the highest quantized {@code a} value represents. + * @param minB The float value that the lowest quantized {@code b} value represents. + * @param maxB The float value that the highest quantized {@code b} value represents. 
+ * @param Toutput The value of the Toutput attribute + * @param options carries optional attribute values + * @param data type for {@code QuantizedMatMulWithBias} output and operands + * @return a new instance of QuantizedMatMulWithBias + */ + public QuantizedMatMulWithBias quantizedMatMulWithBias( + Operand a, Operand b, Operand bias, + Operand minA, Operand maxA, Operand minB, + Operand maxB, Class Toutput, QuantizedMatMulWithBias.Options... options) { + return QuantizedMatMulWithBias.create(scope, a, b, bias, minA, maxA, minB, maxB, Toutput, options); + } + + /** + * Perform a quantized matrix multiplication of {@code a} by the matrix {@code b} with bias + * add and relu fusion. + * The inputs must be two-dimensional matrices and 1D bias vector. And the inner + * dimension of {@code a} (after being transposed if {@code transpose_a} is non-zero) must + * match the outer dimension of {@code b} (after being transposed if {@code transposed_b} is + * non-zero). Then do broadcast add operation with bias values on the matrix + * multiplication result. The bias size must match inner dimension of {@code b}. Then do + * relu activation to get non-negative result. + * + * @param a A matrix to be multiplied. Must be a two-dimensional tensor of type {@code quint8}. + * @param b A matrix to be multiplied and must be a two-dimensional tensor of type {@code qint8}. + * @param bias A 1D bias tensor with size matching with inner dimension of {@code b} (after being + * transposed if {@code transposed_b} is non-zero). + * @param minA The float value that the lowest quantized {@code a} value represents. + * @param maxA The float value that the highest quantized {@code a} value represents. + * @param minB The float value that the lowest quantized {@code b} value represents. + * @param maxB The float value that the highest quantized {@code b} value represents. 
+ * @param Toutput The value of the Toutput attribute + * @param options carries optional attribute values + * @param data type for {@code QuantizedMatMulWithBiasAndRelu} output and operands + * @return a new instance of QuantizedMatMulWithBiasAndRelu + */ + public QuantizedMatMulWithBiasAndRelu quantizedMatMulWithBiasAndRelu( + Operand a, Operand b, Operand bias, + Operand minA, Operand maxA, Operand minB, + Operand maxB, Class Toutput, QuantizedMatMulWithBiasAndRelu.Options... options) { + return QuantizedMatMulWithBiasAndRelu.create(scope, a, b, bias, minA, maxA, minB, maxB, Toutput, options); + } + + /** + * Perform a quantized matrix multiplication of {@code a} by the matrix {@code b} with bias + * add and relu and requantize fusion. + * The inputs must be two-dimensional matrices and 1D bias vector. And the inner + * dimension of {@code a} (after being transposed if {@code transpose_a} is non-zero) must + * match the outer dimension of {@code b} (after being transposed if {@code transposed_b} is + * non-zero). Then do broadcast add operation with bias values on the matrix + * multiplication result. The bias size must match inner dimension of {@code b}. Then do + * relu activation to get non-negative result. Then do requantize operation to get + * final uint8 result. + * + * @param a A matrix to be multiplied. Must be a two-dimensional tensor of type {@code quint8}. + * @param b A matrix to be multiplied and must be a two-dimensional tensor of type {@code qint8}. + * @param bias A 1D bias tensor with size matching with inner dimension of {@code b} (after being + * transposed if {@code transposed_b} is non-zero). + * @param minA The float value that the lowest quantized {@code a} value represents. + * @param maxA The float value that the highest quantized {@code a} value represents. + * @param minB The float value that the lowest quantized {@code b} value represents. + * @param maxB The float value that the highest quantized {@code b} value represents. 
+ * @param minFreezedOutput The float value that the highest quantized output value after requantize. + * @param maxFreezedOutput The maxFreezedOutput value + * @param Toutput The value of the Toutput attribute + * @param options carries optional attribute values + * @param data type for {@code QuantizedMatMulWithBiasAndReluAndRequantize} output and operands + * @return a new instance of QuantizedMatMulWithBiasAndReluAndRequantize + */ + public QuantizedMatMulWithBiasAndReluAndRequantize quantizedMatMulWithBiasAndReluAndRequantize( + Operand a, Operand b, Operand bias, + Operand minA, Operand maxA, Operand minB, + Operand maxB, Operand minFreezedOutput, + Operand maxFreezedOutput, Class Toutput, + QuantizedMatMulWithBiasAndReluAndRequantize.Options... options) { + return QuantizedMatMulWithBiasAndReluAndRequantize.create(scope, a, b, bias, minA, maxA, minB, maxB, minFreezedOutput, maxFreezedOutput, Toutput, options); + } + /** * Computes the eigen decomposition of one or more square self-adjoint matrices. * Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in @@ -1354,7 +1473,6 @@ public QuantizedMatMul quantizedMatMul * e = self_adjoint_eig(a, compute_v=False) * * - * @param data type for {@code e} output * @param input {@code Tensor} input of shape {@code [N, N]}. * @param options carries optional attribute values * @param data type for {@code SelfAdjointEigV2} output and operands @@ -1374,7 +1492,6 @@ public SelfAdjointEig selfAdjointEig(Operand input, * If {@code adjoint} is {@code True} then each output matrix satisfies * {@code adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]}. * - * @param data type for {@code output} output * @param matrix Shape is {@code [..., M, M]}. * @param rhs Shape is {@code [..., M, K]}. * @param options carries optional attribute values @@ -1401,7 +1518,6 @@ public Solve solve(Operand matrix, Operand rhs, * form square matrices. 
The output is a tensor of the same shape as the input * containing the matrix square root for all input submatrices {@code [..., :, :]}. * - * @param data type for {@code output} output * @param input Shape is {@code [..., M, M]}. * @param data type for {@code MatrixSquareRoot} output and operands * @return a new instance of Sqrtm @@ -1423,7 +1539,6 @@ public Sqrtm sqrtm(Operand input) { * s, _, _ = svd(a, compute_uv=False) * * - * @param data type for {@code s} output * @param input A tensor of shape {@code [..., M, N]} whose inner-most 2 dimensions * form matrices of size {@code [M, N]}. Let {@code P} be the minimum of {@code M} and {@code N}. * @param options carries optional attribute values @@ -1450,7 +1565,6 @@ public Svd svd(Operand input, Svd.Options... options) { * [0, 0, 0, 4]] * * - * @param data type for {@code output} output * @param diagonal Rank k tensor where k is at most 1. * @param data type for {@code Diag} output and operands * @return a new instance of TensorDiag @@ -1476,7 +1590,6 @@ public TensorDiag tensorDiag(Operand diagonal) { * tf.diag_part(input) ==> [1, 2, 3, 4] * * - * @param data type for {@code diagonal} output * @param input Rank k tensor where k is even and not zero. * @param data type for {@code DiagPart} output and operands * @return a new instance of TensorDiagPart @@ -1490,7 +1603,6 @@ public TensorDiagPart tensorDiagPart(Operand input) { * The output {@code y} has the same rank as {@code x}. The shapes of {@code x} and {@code y} satisfy: * {@code y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]} * - * @param data type for {@code y} output * @param x The x value * @param perm The perm value * @param data type for {@code Transpose} output and operands @@ -1545,7 +1657,6 @@ public Transpose transpose(Operand x, Operand * - * @param data type for {@code output} output * @param matrix Shape is {@code [..., M, M]}. * @param rhs Shape is {@code [..., M, K]}. 
* @param options carries optional attribute values @@ -1557,6 +1668,51 @@ public TriangularSolve triangularSolve(Operand matrix, O return TriangularSolve.create(scope, matrix, rhs, options); } + /** + * Calculate product with tridiagonal matrix. + * Calculates product of two matrices, where left matrix is a tridiagonal matrix. + * + * @param superdiag Tensor of shape {@code [..., 1, M]}, representing superdiagonals of + * tri-diagonal matrices to the left of multiplication. Last element is ignored. + * @param maindiag Tensor of shape {@code [..., 1, M]}, representing main diagonals of tri-diagonal + * matrices to the left of multiplication. + * @param subdiag Tensor of shape {@code [..., 1, M]}, representing subdiagonals of tri-diagonal + * matrices to the left of multiplication. First element is ignored. + * @param rhs Tensor of shape {@code [..., M, N]}, representing MxN matrices to the right of + * multiplication. + * @param data type for {@code TridiagonalMatMul} output and operands + * @return a new instance of TridiagonalMatMul + */ + public TridiagonalMatMul tridiagonalMatMul(Operand superdiag, + Operand maindiag, Operand subdiag, Operand rhs) { + return TridiagonalMatMul.create(scope, superdiag, maindiag, subdiag, rhs); + } + + /** + * Solves tridiagonal systems of equations. + * Solves tridiagonal systems of equations. + * Supports batch dimensions and multiple right-hand sides per each left-hand + * side. + * On CPU, solution is computed via Gaussian elimination with or without partial + * pivoting, depending on {@code partial_pivoting} attribute. On GPU, Nvidia's cuSPARSE + * library is used: https://docs.nvidia.com/cuda/cusparse/index.html#gtsv + * Partial pivoting is not yet supported by XLA backends. + * + * @param diagonals Tensor of shape {@code [..., 3, M]} whose innermost 2 dimensions represent the + * tridiagonal matrices with three rows being the superdiagonal, diagonals, and + * subdiagonals, in order. 
The last element of the superdiagonal and the first + * element of the subdiagonal is ignored. + * @param rhs Tensor of shape {@code [..., M, K]}, representing K right-hand sides per each + * left-hand side. + * @param options carries optional attribute values + * @param data type for {@code TridiagonalSolve} output and operands + * @return a new instance of TridiagonalSolve + */ + public TridiagonalSolve tridiagonalSolve(Operand diagonals, + Operand rhs, TridiagonalSolve.Options... options) { + return TridiagonalSolve.create(scope, diagonals, rhs, options); + } + /** * Get the parent {@link Ops} object. */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/LinalgSparseOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/LinalgSparseOps.java new file mode 100644 index 00000000000..7210249ba1f --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/LinalgSparseOps.java @@ -0,0 +1,480 @@ +// Copyright 2020-2022 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! 
+// +package org.tensorflow.op; + +import org.tensorflow.Operand; +import org.tensorflow.op.linalg.sparse.CSRSparseMatrixComponents; +import org.tensorflow.op.linalg.sparse.CSRSparseMatrixToDense; +import org.tensorflow.op.linalg.sparse.CSRSparseMatrixToSparseTensor; +import org.tensorflow.op.linalg.sparse.DenseToCSRSparseMatrix; +import org.tensorflow.op.linalg.sparse.SparseMatrixAdd; +import org.tensorflow.op.linalg.sparse.SparseMatrixMatMul; +import org.tensorflow.op.linalg.sparse.SparseMatrixMul; +import org.tensorflow.op.linalg.sparse.SparseMatrixNNZ; +import org.tensorflow.op.linalg.sparse.SparseMatrixOrderingAMD; +import org.tensorflow.op.linalg.sparse.SparseMatrixSoftmax; +import org.tensorflow.op.linalg.sparse.SparseMatrixSoftmaxGrad; +import org.tensorflow.op.linalg.sparse.SparseMatrixSparseCholesky; +import org.tensorflow.op.linalg.sparse.SparseMatrixSparseMatMul; +import org.tensorflow.op.linalg.sparse.SparseMatrixTranspose; +import org.tensorflow.op.linalg.sparse.SparseMatrixZeros; +import org.tensorflow.op.linalg.sparse.SparseTensorToCSRSparseMatrix; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.TInt64; +import org.tensorflow.types.family.TNumber; +import org.tensorflow.types.family.TType; + +/** + * An API for building {@code linalg.sparse} operations as {@link Op Op}s + * + * @see Ops + */ +public final class LinalgSparseOps { + private final Scope scope; + + private final Ops ops; + + LinalgSparseOps(Ops ops) { + this.scope = ops.scope(); + this.ops = ops; + } + + /** + * Reads out the CSR components at batch {@code index}. + * This op is meant only for debugging / testing, and its interface is not expected + * to be stable. + * + * @param csrSparseMatrix A batched CSRSparseMatrix. + * @param index The index in {@code csr_sparse_matrix}'s batch. 
+ * @param type The value of the type attribute + * @param data type for {@code CSRSparseMatrixComponents} output and operands + * @return a new instance of CSRSparseMatrixComponents + */ + public CSRSparseMatrixComponents cSRSparseMatrixComponents( + Operand csrSparseMatrix, Operand index, Class type) { + return CSRSparseMatrixComponents.create(scope, csrSparseMatrix, index, type); + } + + /** + * Convert a (possibly batched) CSRSparseMatrix to dense. + * + * @param sparseInput A batched CSRSparseMatrix. + * @param type The value of the type attribute + * @param data type for {@code CSRSparseMatrixToDense} output and operands + * @return a new instance of CSRSparseMatrixToDense + */ + public CSRSparseMatrixToDense cSRSparseMatrixToDense( + Operand sparseInput, Class type) { + return CSRSparseMatrixToDense.create(scope, sparseInput, type); + } + + /** + * Converts a (possibly batched) CSRSparesMatrix to a SparseTensor. + * + * @param sparseMatrix A (possibly batched) CSRSparseMatrix. + * @param type The value of the type attribute + * @param data type for {@code CSRSparseMatrixToSparseTensor} output and operands + * @return a new instance of CSRSparseMatrixToSparseTensor + */ + public CSRSparseMatrixToSparseTensor cSRSparseMatrixToSparseTensor( + Operand sparseMatrix, Class type) { + return CSRSparseMatrixToSparseTensor.create(scope, sparseMatrix, type); + } + + /** + * Converts a dense tensor to a (possibly batched) CSRSparseMatrix. + * + * @param denseInput A Dense tensor. + * @param indices Indices of nonzero elements. + * @return a new instance of DenseToCSRSparseMatrix + */ + public DenseToCSRSparseMatrix denseToCSRSparseMatrix(Operand denseInput, + Operand indices) { + return DenseToCSRSparseMatrix.create(scope, denseInput, indices); + } + + /** + * Sparse addition of two CSR matrices, C = alpha * A + beta * B. 
+ * The gradients of SparseMatrixAdd outputs with respect to alpha and beta are not + * currently defined (TensorFlow will return zeros for these entries). + * + * @param a A CSRSparseMatrix. + * @param b A CSRSparseMatrix. + * @param alpha A constant scalar. + * @param beta A constant scalar. + * @param data type for {@code SparseMatrixAdd} output and operands + * @return a new instance of SparseMatrixAdd + */ + public SparseMatrixAdd sparseMatrixAdd(Operand a, + Operand b, Operand alpha, Operand beta) { + return SparseMatrixAdd.create(scope, a, b, alpha, beta); + } + + /** + * Matrix-multiplies a sparse matrix with a dense matrix. + * Returns a dense matrix. + * For inputs A and B, where A is CSR and B is dense; this op returns a dense C; + *

If transpose_output is false, returns: + *

+   *    C = A . B
+   *  
+ *

If transpose_output is {@code true}, returns: + *

+   *    C = transpose(A . B) = transpose(B) . transpose(A)
+   *  
+ *

where the transposition is performed along the two innermost (matrix) + * dimensions. + *

If conjugate_output is {@code true}, returns: + *

+   *    C = conjugate(A . B) = conjugate(A) . conjugate(B)
+   *  
+ *

If both conjugate_output and transpose_output are {@code true}, returns: + *

+   *    C = conjugate(transpose(A . B)) = conjugate(transpose(B)) .
+   *                                      conjugate(transpose(A))
+   *  
+ * + * @param a A CSRSparseMatrix. + * @param b A dense tensor. + * @param options carries optional attribute values + * @param data type for {@code SparseMatrixMatMul} output and operands + * @return a new instance of SparseMatrixMatMul + */ + public SparseMatrixMatMul sparseMatrixMatMul(Operand a, + Operand b, SparseMatrixMatMul.Options... options) { + return SparseMatrixMatMul.create(scope, a, b, options); + } + + /** + * Element-wise multiplication of a sparse matrix with a dense tensor. + * Returns a sparse matrix. + *

The dense tensor {@code b} may be either a scalar; otherwise {@code a} must be a rank-3 + * {@code SparseMatrix}; in this case {@code b} must be shaped {@code [batch_size, 1, 1]} and the + * multiply operation broadcasts. + *

NOTE even if {@code b} is zero, the sparsity structure of the output does not + * change. + * + * @param a A CSRSparseMatrix. + * @param b A dense tensor. + * @return a new instance of SparseMatrixMul + */ + public SparseMatrixMul sparseMatrixMul(Operand a, Operand b) { + return SparseMatrixMul.create(scope, a, b); + } + + /** + * Returns the number of nonzeroes of {@code sparse_matrix}. + * + * @param sparseMatrix A CSRSparseMatrix. + * @return a new instance of SparseMatrixNNZ + */ + public SparseMatrixNNZ sparseMatrixNNZ(Operand sparseMatrix) { + return SparseMatrixNNZ.create(scope, sparseMatrix); + } + + /** + * Computes the Approximate Minimum Degree (AMD) ordering of {@code input}. + * Computes the Approximate Minimum Degree (AMD) ordering for a sparse matrix. + *

The returned permutation may be used to permute the rows and columns of the + * given sparse matrix. This typically results in permuted sparse matrix's sparse + * Cholesky (or other decompositions) in having fewer zero fill-in compared to + * decomposition of the original matrix. + *

The input sparse matrix may have rank 2 or rank 3. The output Tensor, + * representing would then have rank 1 or 2 respectively, with the same batch + * shape as the input. + *

Each component of the input sparse matrix must represent a square symmetric + * matrix; only the lower triangular part of the matrix is read. The values of the + * sparse matrix does not affect the returned permutation, only the sparsity + * pattern of the sparse matrix is used. Hence, a single AMD ordering may be + * reused for the Cholesky decompositions of sparse matrices with the same sparsity + * pattern but with possibly different values. + *

Each batch component of the output permutation represents a permutation of {@code N} + * elements, where the input sparse matrix components each have {@code N} rows. That is, + * the component contains each of the integers {@code {0, .. N-1}} exactly once. The + * {@code i}th element represents the row index that the {@code i}th row maps to. + *

Usage example: + *

+   *      from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
+   *
+   *      a_indices = np.array([[0, 0], [1, 1], [2, 1], [2, 2], [3, 3]])
+   *      a_values = np.array([1.0, 2.0, 1.0, 3.0, 4.0], np.float32)
+   *      a_dense_shape = [4, 4]
+   *
+   *      with tf.Session() as sess:
+   *        # Define (COO format) SparseTensor over Numpy array.
+   *        a_st = tf.sparse.SparseTensor(a_indices, a_values, a_dense_shape)
+   *
+   *        # Convert SparseTensors to CSR SparseMatrix.
+   *        a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
+   *            a_st.indices, a_st.values, a_st.dense_shape)
+   *
+   *        # Obtain the AMD Ordering for the CSR SparseMatrix.
+   *        ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(sparse_matrix)
+   *
+   *        ordering_amd_value = sess.run(ordering_amd)
+   *  
+ *

{@code ordering_amd_value} stores the AMD ordering: {@code [1 2 3 0]}. + *

input: A {@code CSRSparseMatrix}. + * + * @param input A {@code CSRSparseMatrix}. + * @return a new instance of SparseMatrixOrderingAMD + */ + public SparseMatrixOrderingAMD sparseMatrixOrderingAMD(Operand input) { + return SparseMatrixOrderingAMD.create(scope, input); + } + + /** + * Calculates the softmax of a CSRSparseMatrix. + * Calculate the softmax of the innermost dimensions of a SparseMatrix. + *

Missing values are treated as {@code -inf} (i.e., logits of zero probability); and + * the output has the same sparsity structure as the input (though missing values + * in the output may now be treated as having probability zero). + * + * @param logits A CSRSparseMatrix. + * @param type The value of the type attribute + * @param data type for {@code SparseMatrixSoftmax} output and operands + * @return a new instance of SparseMatrixSoftmax + */ + public SparseMatrixSoftmax sparseMatrixSoftmax( + Operand logits, Class type) { + return SparseMatrixSoftmax.create(scope, logits, type); + } + + /** + * Calculates the gradient of the SparseMatrixSoftmax op. + * + * @param softmax A CSRSparseMatrix. + * @param gradSoftmax The gradient of {@code softmax}. + * @param type The value of the type attribute + * @param data type for {@code SparseMatrixSoftmaxGrad} output and operands + * @return a new instance of SparseMatrixSoftmaxGrad + */ + public SparseMatrixSoftmaxGrad sparseMatrixSoftmaxGrad( + Operand softmax, Operand gradSoftmax, Class type) { + return SparseMatrixSoftmaxGrad.create(scope, softmax, gradSoftmax, type); + } + + /** + * Computes the sparse Cholesky decomposition of {@code input}. + * Computes the Sparse Cholesky decomposition of a sparse matrix, with the given + * fill-in reducing permutation. + *

The input sparse matrix and the fill-in reducing permutation {@code permutation} must + * have compatible shapes. If the sparse matrix has rank 3; with the batch + * dimension {@code B}, then the {@code permutation} must be of rank 2; with the same batch + * dimension {@code B}. There is no support for broadcasting. + *

Furthermore, each component vector of {@code permutation} must be of length {@code N}, + * containing each of the integers {0, 1, ..., N - 1} exactly once, where {@code N} is + * the number of rows of each component of the sparse matrix. + *

Each component of the input sparse matrix must represent a symmetric positive + * definite (SPD) matrix; although only the lower triangular part of the matrix is + * read. If any individual component is not SPD, then an InvalidArgument error is + * thrown. + *

The returned sparse matrix has the same dense shape as the input sparse matrix. + * For each component {@code A} of the input sparse matrix, the corresponding output + * sparse matrix represents {@code L}, the lower triangular Cholesky factor satisfying + * the following identity: + *

+   *    A = L * Lt
+   *  
+ *

where Lt denotes the transpose of L (or its conjugate transpose, if {@code type} is + * {@code complex64} or {@code complex128}). + *

The {@code type} parameter denotes the type of the matrix elements. The supported + * types are: {@code float32}, {@code float64}, {@code complex64} and {@code complex128}. + *

Usage example: + *

+   *      from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
+   *
+   *      a_indices = np.array([[0, 0], [1, 1], [2, 1], [2, 2], [3, 3]])
+   *      a_values = np.array([1.0, 2.0, 1.0, 3.0, 4.0], np.float32)
+   *      a_dense_shape = [4, 4]
+   *
+   *      with tf.Session() as sess:
+   *        # Define (COO format) SparseTensor over Numpy array.
+   *        a_st = tf.sparse.SparseTensor(a_indices, a_values, a_dense_shape)
+   *
+   *        # Convert SparseTensors to CSR SparseMatrix.
+   *        a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
+   *            a_st.indices, a_st.values, a_st.dense_shape)
+   *
+   *        # Obtain the Sparse Cholesky factor using AMD Ordering for reducing zero
+   *        # fill-in (number of structural non-zeros in the sparse Cholesky factor).
+   *        ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(sparse_matrix)
+   *        cholesky_sparse_matrices = (
+   *            sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky(
+   *                sparse_matrix, ordering_amd, type=tf.float32))
+   *
+   *        # Convert the CSRSparseMatrix Cholesky factor to a dense Tensor
+   *        dense_cholesky = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
+   *            cholesky_sparse_matrices, tf.float32)
+   *
+   *        # Evaluate the dense Tensor value.
+   *        dense_cholesky_value = sess.run(dense_cholesky)
+   *  
+ *

{@code dense_cholesky_value} stores the dense Cholesky factor: + *

+   *      [[  1.  0.    0.    0.]
+   *       [  0.  1.41  0.    0.]
+   *       [  0.  0.70  1.58  0.]
+   *       [  0.  0.    0.    2.]]
+   *  
+ *

input: A {@code CSRSparseMatrix}. + * permutation: A {@code Tensor}. + * type: The type of {@code input}. + * + * @param input A {@code CSRSparseMatrix}. + * @param permutation A fill-in reducing permutation matrix. + * @param type The value of the type attribute + * @param data type for {@code SparseMatrixSparseCholesky} output and operands + * @return a new instance of SparseMatrixSparseCholesky + */ + public SparseMatrixSparseCholesky sparseMatrixSparseCholesky( + Operand input, Operand permutation, Class type) { + return SparseMatrixSparseCholesky.create(scope, input, permutation, type); + } + + /** + * Sparse-matrix-multiplies two CSR matrices {@code a} and {@code b}. + * Performs a matrix multiplication of a sparse matrix {@code a} with a sparse matrix + * {@code b}; returns a sparse matrix {@code a * b}, unless either {@code a} or {@code b} is transposed or + * adjointed. + *

Each matrix may be transposed or adjointed (conjugated and transposed) + * according to the Boolean parameters {@code transpose_a}, {@code adjoint_a}, {@code transpose_b} + * and {@code adjoint_b}. At most one of {@code transpose_a} or {@code adjoint_a} may be True. + * Similarly, at most one of {@code transpose_b} or {@code adjoint_b} may be True. + *

The inputs must have compatible shapes. That is, the inner dimension of {@code a} + * must be equal to the outer dimension of {@code b}. This requirement is adjusted + * according to whether either {@code a} or {@code b} is transposed or adjointed. + *

The {@code type} parameter denotes the type of the matrix elements. Both {@code a} and {@code b} + * must have the same type. The supported types are: {@code float32}, {@code float64}, + * {@code complex64} and {@code complex128}. + *

Both {@code a} and {@code b} must have the same rank. Broadcasting is not supported. If they + * have rank 3, each batch of 2D CSRSparseMatrices within {@code a} and {@code b} must have the + * same dense shape. + *

The sparse matrix product may have numeric (non-structural) zeros. + * TODO(anudhyan): Consider adding a boolean attribute to control whether to prune + * zeros. + *

Usage example: + *

+   *      from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
+   *
+   *      a_indices = np.array([[0, 0], [2, 3], [2, 4], [3, 0]])
+   *      a_values = np.array([1.0, 5.0, -1.0, -2.0], np.float32)
+   *      a_dense_shape = [4, 5]
+   *
+   *      b_indices = np.array([[0, 0], [3, 0], [3, 1]])
+   *      b_values = np.array([2.0, 7.0, 8.0], np.float32)
+   *      b_dense_shape = [5, 3]
+   *
+   *      with tf.Session() as sess:
+   *        # Define (COO format) Sparse Tensors over Numpy arrays
+   *        a_st = tf.sparse.SparseTensor(a_indices, a_values, a_dense_shape)
+   *        b_st = tf.sparse.SparseTensor(b_indices, b_values, b_dense_shape)
+   *
+   *        # Convert SparseTensors to CSR SparseMatrix
+   *        a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
+   *            a_st.indices, a_st.values, a_st.dense_shape)
+   *        b_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
+   *            b_st.indices, b_st.values, b_st.dense_shape)
+   *
+   *        # Compute the CSR SparseMatrix matrix multiplication
+   *        c_sm = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul(
+   *            a=a_sm, b=b_sm, type=tf.float32)
+   *
+   *        # Convert the CSR SparseMatrix product to a dense Tensor
+   *        c_sm_dense = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
+   *            c_sm, tf.float32)
+   *        # Evaluate the dense Tensor value
+   *        c_sm_dense_value = sess.run(c_sm_dense)
+   *  
+ *

{@code c_sm_dense_value} stores the dense matrix product: + *

+   *      [[  2.   0.   0.]
+   *       [  0.   0.   0.]
+   *       [ 35.  40.   0.]
+   *       [ -4.   0.   0.]]
+   *  
+ *

a: A {@code CSRSparseMatrix}. + * b: A {@code CSRSparseMatrix} with the same type and rank as {@code a}. + * type: The type of both {@code a} and {@code b}. + * transpose_a: If True, {@code a} transposed before multiplication. + * transpose_b: If True, {@code b} transposed before multiplication. + * adjoint_a: If True, {@code a} adjointed before multiplication. + * adjoint_b: If True, {@code b} adjointed before multiplication. + * + * @param a A CSRSparseMatrix. + * @param b A CSRSparseMatrix. + * @param type The value of the type attribute + * @param options carries optional attribute values + * @param data type for {@code SparseMatrixSparseMatMul} output and operands + * @return a new instance of SparseMatrixSparseMatMul + */ + public SparseMatrixSparseMatMul sparseMatrixSparseMatMul( + Operand a, Operand b, Class type, + SparseMatrixSparseMatMul.Options... options) { + return SparseMatrixSparseMatMul.create(scope, a, b, type, options); + } + + /** + * Transposes the inner (matrix) dimensions of a CSRSparseMatrix. + * Transposes the inner (matrix) dimensions of a SparseMatrix and optionally + * conjugates its values. + * + * @param input A CSRSparseMatrix. + * @param type The value of the type attribute + * @param options carries optional attribute values + * @param data type for {@code SparseMatrixTranspose} output and operands + * @return a new instance of SparseMatrixTranspose + */ + public SparseMatrixTranspose sparseMatrixTranspose( + Operand input, Class type, SparseMatrixTranspose.Options... options) { + return SparseMatrixTranspose.create(scope, input, type, options); + } + + /** + * Creates an all-zeros CSRSparseMatrix with shape {@code dense_shape}. + * + * @param denseShape The desired matrix shape. 
+ * @param type The value of the type attribute + * @param data type for {@code SparseMatrixZeros} output and operands + * @return a new instance of SparseMatrixZeros + */ + public SparseMatrixZeros sparseMatrixZeros(Operand denseShape, + Class type) { + return SparseMatrixZeros.create(scope, denseShape, type); + } + + /** + * Converts a SparseTensor to a (possibly batched) CSRSparseMatrix. + * + * @param indices SparseTensor indices. + * @param values SparseTensor values. + * @param denseShape SparseTensor dense shape. + * @return a new instance of SparseTensorToCSRSparseMatrix + */ + public SparseTensorToCSRSparseMatrix sparseTensorToCSRSparseMatrix(Operand indices, + Operand values, Operand denseShape) { + return SparseTensorToCSRSparseMatrix.create(scope, indices, values, denseShape); + } + + /** + * Get the parent {@link Ops} object. + */ + public final Ops ops() { + return ops; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/MathOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/MathOps.java index ae95a5c9cd6..d3dcfc686ad 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/MathOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/MathOps.java @@ -34,6 +34,10 @@ import org.tensorflow.op.math.Atan; import org.tensorflow.op.math.Atan2; import org.tensorflow.op.math.Atanh; +import org.tensorflow.op.math.BesselI0; +import org.tensorflow.op.math.BesselI0e; +import org.tensorflow.op.math.BesselI1; +import org.tensorflow.op.math.BesselI1e; import org.tensorflow.op.math.Betainc; import org.tensorflow.op.math.Bincount; import org.tensorflow.op.math.Ceil; @@ -43,6 +47,7 @@ import org.tensorflow.op.math.Cosh; import org.tensorflow.op.math.Cumprod; import org.tensorflow.op.math.Cumsum; +import org.tensorflow.op.math.CumulativeLogsumexp; import org.tensorflow.op.math.DenseBincount; import org.tensorflow.op.math.Digamma; 
import org.tensorflow.op.math.Div; @@ -59,6 +64,7 @@ import org.tensorflow.op.math.Greater; import org.tensorflow.op.math.GreaterEqual; import org.tensorflow.op.math.Igamma; +import org.tensorflow.op.math.IgammaGradA; import org.tensorflow.op.math.Igammac; import org.tensorflow.op.math.Imag; import org.tensorflow.op.math.InvertPermutation; @@ -91,27 +97,37 @@ import org.tensorflow.op.math.Real; import org.tensorflow.op.math.RealDiv; import org.tensorflow.op.math.Reciprocal; +import org.tensorflow.op.math.ReciprocalGrad; +import org.tensorflow.op.math.RequantizationRangePerChannel; +import org.tensorflow.op.math.RequantizePerChannel; import org.tensorflow.op.math.Rint; import org.tensorflow.op.math.Round; import org.tensorflow.op.math.Rsqrt; +import org.tensorflow.op.math.RsqrtGrad; import org.tensorflow.op.math.SegmentMax; import org.tensorflow.op.math.SegmentMean; import org.tensorflow.op.math.SegmentMin; import org.tensorflow.op.math.SegmentProd; import org.tensorflow.op.math.SegmentSum; import org.tensorflow.op.math.Sigmoid; +import org.tensorflow.op.math.SigmoidGrad; import org.tensorflow.op.math.Sign; import org.tensorflow.op.math.Sin; import org.tensorflow.op.math.Sinh; +import org.tensorflow.op.math.SobolSample; import org.tensorflow.op.math.Softplus; +import org.tensorflow.op.math.SoftplusGrad; import org.tensorflow.op.math.Sqrt; +import org.tensorflow.op.math.SqrtGrad; import org.tensorflow.op.math.Square; import org.tensorflow.op.math.SquaredDifference; import org.tensorflow.op.math.Sub; import org.tensorflow.op.math.Tan; import org.tensorflow.op.math.Tanh; +import org.tensorflow.op.math.TanhGrad; import org.tensorflow.op.math.TruncateDiv; import org.tensorflow.op.math.TruncateMod; +import org.tensorflow.op.math.UniformQuantizedAdd; import org.tensorflow.op.math.UnsortedSegmentMax; import org.tensorflow.op.math.UnsortedSegmentMin; import org.tensorflow.op.math.UnsortedSegmentProd; @@ -131,9 +147,11 @@ /** * An API for building {@code math} operations as 
{@link Op Op}s * - * @see {@link Ops} + * @see Ops */ public final class MathOps { + public final MathSpecialOps special; + private final Scope scope; private final Ops ops; @@ -141,6 +159,7 @@ public final class MathOps { MathOps(Ops ops) { this.scope = ops.scope(); this.ops = ops; + special = new MathSpecialOps(ops); } /** @@ -149,7 +168,6 @@ public final class MathOps { * value of each element in {@code x}. For example, if x is an input element and y is * an output element, this operation computes \(y = |x|\). * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Abs} output and operands * @return a new instance of Abs @@ -167,7 +185,6 @@ public Abs abs(Operand x) { *

Unlike the original {@code accumulate_n}, {@code accumulate_n_v2} is differentiable. *

Returns a {@code Tensor} of same shape and type as the elements of {@code inputs}. * - * @param data type for {@code sum} output * @param inputs A list of {@code Tensor} objects, each with same shape and type. * @param shape Shape of elements of {@code inputs}. * @param data type for {@code AccumulateNV2} output and operands @@ -182,7 +199,6 @@ public AccumulateN accumulateN(Iterable> inputs, * Provided an input tensor, the {@code tf.math.acos} operation returns the inverse cosine of each element of the tensor. If {@code y = tf.math.cos(x)} then, {@code x = tf.math.acos(y)}. *

Input range is {@code [-1, 1]} and the output has a range of {@code [0, pi]}. * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Acos} output and operands * @return a new instance of Acos @@ -200,7 +216,6 @@ public Acos acos(Operand x) { * tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf] * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Acosh} output and operands * @return a new instance of Acosh @@ -216,7 +231,6 @@ public Acosh acosh(Operand x) { *

Given two input tensors, the {@code tf.add} operation computes the sum for every element in the tensor. *

Both input and output have a range {@code (-inf, inf)}. * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code Add} output and operands @@ -234,7 +248,6 @@ public Add add(Operand x, Operand y) { * tf.math.add_n(x) ==> 26 * * - * @param data type for {@code sum} output * @param inputs The inputs value * @param data type for {@code AddN} output and operands * @return a new instance of AddN @@ -259,7 +272,6 @@ public AddN addN(Iterable> inputs) { * Equivalent to np.angle. *
{@literal @}end_compatibility * - * @param data type for {@code output} output * @param input The input value * @return a new instance of Angle, with default output types */ @@ -283,7 +295,6 @@ public Angle angle(Operand input) { * Equivalent to np.angle. *
{@literal @}end_compatibility * - * @param data type for {@code output} output * @param input The input value * @param Tout The value of the Tout attribute * @param data type for {@code Angle} output and operands @@ -320,7 +331,6 @@ public ApproximateEqual approximateEqual(Operand x, Operand * # here a[4] = 166.32 which is the largest element of a across axis 0 * * - * @param data type for {@code output} output * @param input The input value * @param dimension int16, int32 or int64, must be in the range {@code [-rank(input), rank(input))}. * Describes which dimension of the input Tensor to reduce across. For vectors, @@ -345,7 +355,6 @@ public ArgMax argMax(Operand input, * # here a[4] = 166.32 which is the largest element of a across axis 0 * * - * @param data type for {@code output} output * @param input The input value * @param dimension int16, int32 or int64, must be in the range {@code [-rank(input), rank(input))}. * Describes which dimension of the input Tensor to reduce across. For vectors, @@ -372,7 +381,6 @@ public ArgMax argMax(Operand input, * # here a[0] = 1 which is the smallest element of a across axis 0 * * - * @param data type for {@code output} output * @param input The input value * @param dimension int32 or int64, must be in the range {@code [-rank(input), rank(input))}. * Describes which dimension of the input Tensor to reduce across. For vectors, @@ -397,7 +405,6 @@ public ArgMin argMin(Operand input, * # here a[0] = 1 which is the smallest element of a across axis 0 * * - * @param data type for {@code output} output * @param input The input value * @param dimension int32 or int64, must be in the range {@code [-rank(input), rank(input))}. * Describes which dimension of the input Tensor to reduce across. 
For vectors, @@ -426,7 +433,6 @@ public ArgMin argMin(Operand input, * tf.math.asin(y) # [1.047, 0.785] = x * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Asin} output and operands * @return a new instance of Asin @@ -445,7 +451,6 @@ public Asin asin(Operand x) { * tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf] * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Asinh} output and operands * @return a new instance of Asinh @@ -469,7 +474,6 @@ public Asinh asinh(Operand x) { * tf.math.atan(y) # [1.047, 0.785] = x * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Atan} output and operands * @return a new instance of Atan @@ -497,7 +501,6 @@ public Atan atan(Operand x) { * * * - * @param data type for {@code z} output * @param y The y value * @param x The x value * @param data type for {@code Atan2} output and operands @@ -519,7 +522,6 @@ public Atan2 atan2(Operand y, Operand x) { * tf.math.atanh(x) ==> [nan -inf -0.54930615 inf 0. 
0.54930615 nan nan] * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Atanh} output and operands * @return a new instance of Atanh @@ -528,6 +530,50 @@ public Atanh atanh(Operand x) { return Atanh.create(scope, x); } + /** + * The BesselI0 operation + * + * @param x The x value + * @param data type for {@code BesselI0} output and operands + * @return a new instance of BesselI0 + */ + public BesselI0 besselI0(Operand x) { + return BesselI0.create(scope, x); + } + + /** + * The BesselI0e operation + * + * @param x The x value + * @param data type for {@code BesselI0e} output and operands + * @return a new instance of BesselI0e + */ + public BesselI0e besselI0e(Operand x) { + return BesselI0e.create(scope, x); + } + + /** + * The BesselI1 operation + * + * @param x The x value + * @param data type for {@code BesselI1} output and operands + * @return a new instance of BesselI1 + */ + public BesselI1 besselI1(Operand x) { + return BesselI1.create(scope, x); + } + + /** + * The BesselI1e operation + * + * @param x The x value + * @param data type for {@code BesselI1e} output and operands + * @return a new instance of BesselI1e + */ + public BesselI1e besselI1e(Operand x) { + return BesselI1e.create(scope, x); + } + /** * Compute the regularized incomplete beta integral \(I_x(a, b)\). * The regularized incomplete beta integral is defined as: @@ -537,7 +583,6 @@ public Atanh atanh(Operand x) { *

is the incomplete beta function and \(B(a, b)\) is the complete * beta function. * - * @param data type for {@code z} output * @param a The a value * @param b The b value * @param x The x value @@ -557,7 +602,6 @@ public Betainc betainc(Operand a, Operand b, Operan * {@code i}. *

Values in {@code arr} outside of the range [0, size) are ignored. * - * @param data type for {@code bins} output * @param arr int32 {@code Tensor}. * @param sizeOutput non-negative int32 scalar {@code Tensor}. * @param weights is an int32, int64, float32, or float64 {@code Tensor} with the same @@ -574,7 +618,6 @@ public Bincount bincount(Operand arr, Operand data type for {@code y} output * @param x The x value * @param data type for {@code Ceil} output and operands * @return a new instance of Ceil @@ -600,7 +643,6 @@ public Ceil ceil(Operand x) { * * * - * @param data type for {@code y} output * @param x The x value * @return a new instance of ComplexAbs, with default output types */ @@ -625,7 +667,6 @@ public ComplexAbs complexAbs(Operand x) { * * * - * @param data type for {@code y} output * @param x The x value * @param Tout The value of the Tout attribute * @param data type for {@code ComplexAbs} output and operands @@ -648,7 +689,6 @@ public ComplexAbs complexAbs(Operand x, * tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] * * - * @param data type for {@code output} output * @param input The input value * @param data type for {@code Conj} output and operands * @return a new instance of Conj @@ -668,7 +708,6 @@ public Conj conj(Operand input) { * tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 nan] * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Cos} output and operands * @return a new instance of Cos @@ -687,7 +726,6 @@ public Cos cos(Operand x) { * tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 3.7621956e+00 1.1013233e+04 inf] * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Cosh} output and operands * @return a new instance of Cosh @@ -719,7 +757,6 @@ public Cosh cosh(Operand x) { * tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1] * * - * @param data type for 
{@code out} output * @param x A {@code Tensor}. Must be one of the following types: {@code float32}, {@code float64}, * {@code int64}, {@code int32}, {@code uint8}, {@code uint16}, {@code int16}, {@code int8}, {@code complex64}, * {@code complex128}, {@code qint8}, {@code quint8}, {@code qint32}, {@code half}. @@ -757,7 +794,6 @@ public Cumprod cumprod(Operand x, Operand * - * @param data type for {@code out} output * @param x A {@code Tensor}. Must be one of the following types: {@code float32}, {@code float64}, * {@code int64}, {@code int32}, {@code uint8}, {@code uint16}, {@code int16}, {@code int8}, {@code complex64}, * {@code complex128}, {@code qint8}, {@code quint8}, {@code qint32}, {@code half}. @@ -772,6 +808,37 @@ public Cumsum cumsum(Operand x, Operand + * tf.math.cumulative_logsumexp([a, b, c]) # => [a, log(exp(a) + exp(b)), log(exp(a) + exp(b) + exp(c))] + * + *

By setting the {@code exclusive} kwarg to {@code True}, an exclusive cumulative log-sum-exp is + * performed instead: + *

+   *  tf.cumulative_logsumexp([a, b, c], exclusive=True)  # => [-inf, a, log(exp(a) * exp(b))]
+   *  
+ *

Note that the neutral element of the log-sum-exp operation is {@code -inf}, + * however, for performance reasons, the minimal value representable by the + * floating point type is used instead. + *

By setting the {@code reverse} kwarg to {@code True}, the cumulative log-sum-exp is performed in the + * opposite direction. + * + * @param x A {@code Tensor}. Must be one of the following types: {@code float16}, {@code float32}, {@code float64}. + * @param axis A {@code Tensor} of type {@code int32} (default: 0). Must be in the range + * {@code [-rank(x), rank(x))}. + * @param options carries optional attribute values + * @param data type for {@code CumulativeLogsumexp} output and operands + * @return a new instance of CumulativeLogsumexp + */ + public CumulativeLogsumexp cumulativeLogsumexp(Operand x, + Operand axis, CumulativeLogsumexp.Options... options) { + return CumulativeLogsumexp.create(scope, x, axis, options); + } + /** * Counts the number of occurrences of each value in an integer array. * Outputs a vector with length {@code size} and the same dtype as {@code weights}. If @@ -781,7 +848,6 @@ public Cumsum cumsum(Operand x, OperandValues in {@code arr} outside of the range [0, size) are ignored. * - * @param data type for {@code output} output * @param input 1D or 2D int {@code Tensor}. * @param sizeOutput non-negative int scalar {@code Tensor}. * @param weights is an int32, int64, float32, or float64 {@code Tensor} with the same @@ -801,7 +867,6 @@ public DenseBincount denseBincount(Ope * Computes Psi, the derivative of Lgamma (the log of the absolute value of * {@code Gamma(x)}), element-wise. * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Digamma} output and operands * @return a new instance of Digamma @@ -815,7 +880,6 @@ public Digamma digamma(Operand x) { * NOTE: {@code math.Div} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code Div} output and operands @@ -830,7 +894,6 @@ public Div div(Operand x, Operand y) { * NOTE: {@code math.DivNoNan} supports broadcasting. 
More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code DivNoNan} output and operands @@ -867,7 +930,6 @@ public Equal equal(Operand x, Operand y, Equal.Options.. /** * Computes the Gauss error function of {@code x} element-wise. In statistics, for non-negative values of $x$, the error function has the following interpretation: for a random variable $Y$ that is normally distributed with mean 0 and variance $1/\sqrt{2}$, $erf(x)$ is the probability that $Y$ falls in the range $[−x, x]$. * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Erf} output and operands * @return a new instance of Erf @@ -879,7 +941,6 @@ public Erf erf(Operand x) { /** * Computes the complementary error function of {@code x} element-wise. * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Erfc} output and operands * @return a new instance of Erfc @@ -891,7 +952,6 @@ public Erfc erfc(Operand x) { /** * The Erfinv operation * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Erfinv} output and operands * @return a new instance of erfinv @@ -924,7 +984,6 @@ public erfinv erfinv(Operand x) { * tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Exp} output and operands * @return a new instance of Exp @@ -948,7 +1007,6 @@ public Exp exp(Operand x) { * tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j) * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Expm1} output and operands * @return a new instance of Expm1 @@ -969,7 +1027,6 @@ public Fact fact() { /** * Returns element-wise largest integer not greater than x. 
* - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Floor} output and operands * @return a new instance of Floor @@ -983,7 +1040,6 @@ public Floor floor(Operand x) { * NOTE: {@code math.FloorDiv} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code FloorDiv} output and operands @@ -1001,7 +1057,6 @@ public FloorDiv floorDiv(Operand x, Operand y) { *

NOTE: {@code math.FloorMod} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code FloorMod} output and operands @@ -1069,7 +1124,6 @@ public GreaterEqual greaterEqual(Operand x, Operand y) *

Note, above {@code Q(a, x)} ({@code Igammac}) is the upper regularized complete * Gamma function. * - * @param data type for {@code z} output * @param a The a value * @param x The x value * @param data type for {@code Igamma} output and operands @@ -1079,6 +1133,18 @@ public Igamma igamma(Operand a, Operand x) { return Igamma.create(scope, a, x); } + /** + * Computes the gradient of {@code igamma(a, x)} wrt {@code a}. + * + * @param a The a value + * @param x The x value + * @param data type for {@code IgammaGradA} output and operands + * @return a new instance of IgammaGradA + */ + public IgammaGradA igammaGradA(Operand a, Operand x) { + return IgammaGradA.create(scope, a, x); + } + /** * Compute the upper regularized incomplete Gamma function {@code Q(a, x)}. * The upper regularized incomplete Gamma function is defined as: @@ -1089,7 +1155,6 @@ public Igamma igamma(Operand a, Operand x) { *

Note, above {@code P(a, x)} ({@code Igamma}) is the lower regularized complete * Gamma function. * - * @param data type for {@code z} output * @param a The a value * @param x The x value * @param data type for {@code Igammac} output and operands @@ -1111,7 +1176,6 @@ public Igammac igammac(Operand a, Operand x) { * tf.imag(input) ==> [4.75, 5.75] * * - * @param data type for {@code output} output * @param input The input value * @return a new instance of Imag, with default output types */ @@ -1131,7 +1195,6 @@ public Imag imag(Operand input) { * tf.imag(input) ==> [4.75, 5.75] * * - * @param data type for {@code output} output * @param input The input value * @param Tout The value of the Tout attribute * @param data type for {@code Imag} output and operands @@ -1155,7 +1218,6 @@ public Imag imag(Operand input, Class * invert_permutation(x) ==> [2, 4, 3, 0, 1] * * - * @param data type for {@code y} output * @param x 1-D. * @param data type for {@code InvertPermutation} output and operands * @return a new instance of InvertPermutation @@ -1276,7 +1338,6 @@ public LessEqual lessEqual(Operand x, Operand y) { * tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685] * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Lgamma} output and operands * @return a new instance of Lgamma @@ -1294,7 +1355,6 @@ public Lgamma lgamma(Operand x) { * tf.math.log(x) ==> [-inf, -0.6931472, 0. 
, 1.609438] * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Log} output and operands * @return a new instance of Log @@ -1312,7 +1372,6 @@ public Log log(Operand x) { * tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595] * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Log1p} output and operands * @return a new instance of Log1p @@ -1362,7 +1421,6 @@ public LogicalOr logicalOr(Operand x, Operand y) { * NOTE: {@code math.Maximum} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code Maximum} output and operands @@ -1379,7 +1437,6 @@ public Maximum maximum(Operand x, Operand y) { * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * - * @param data type for {@code output} output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. @@ -1397,7 +1454,6 @@ public Mean mean(Operand input, OperandNOTE: {@code math.Minimum} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code Minimum} output and operands @@ -1414,7 +1470,6 @@ public Minimum minimum(Operand x, Operand y) { *

NOTE: {@code math.Mod} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code Mod} output and operands @@ -1429,7 +1484,6 @@ public Mod mod(Operand x, Operand y) { * NOTE: {@code math.Mul} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code Mul} output and operands @@ -1444,7 +1498,6 @@ public Mul mul(Operand x, Operand y) { * NOTE: {@code math.MulNoNan} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code MulNoNan} output and operands @@ -1457,7 +1510,6 @@ public MulNoNan mulNoNan(Operand x, Operand y) { /** * The Ndtri operation * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Ndtri} output and operands * @return a new instance of Ndtri @@ -1470,7 +1522,6 @@ public Ndtri ndtri(Operand x) { * Computes numerical negative value element-wise. * I.e., \(y = -x\). * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Neg} output and operands * @return a new instance of Neg @@ -1487,7 +1538,6 @@ public Neg neg(Operand x) { * Equivalent to C++ std::nextafter function. *
{@literal @}end_compatibility * - * @param data type for {@code output} output * @param x1 The x1 value * @param x2 The x2 value * @param data type for {@code NextAfter} output and operands @@ -1520,7 +1570,6 @@ public NotEqual notEqual(Operand x, Operand y, *

where \(\psi(x)\) is the digamma function. * The polygamma function is defined only for non-negative integer orders \a\. * - * @param data type for {@code z} output * @param a The a value * @param x The x value * @param data type for {@code Polygamma} output and operands @@ -1555,7 +1604,6 @@ public PopulationCount populationCount(Operand x) { * tf.pow(x, y) ==> [[256, 65536], [9, 27]] * * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code Pow} output and operands @@ -1568,7 +1616,6 @@ public Pow pow(Operand x, Operand y) { /** * Returns x + y element-wise, working on quantized buffers. * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param minX The float value that the lowest quantized {@code x} value represents. @@ -1588,7 +1635,6 @@ public QuantizedAdd quantizedAdd(Operand data type for {@code z} output * @param x The x value * @param y The y value * @param minX The float value that the lowest quantized {@code x} value represents. @@ -1617,7 +1663,6 @@ public QuantizedMul quantizedMul(Operand * - * @param data type for {@code output} output * @param input The input value * @return a new instance of Real, with default output types */ @@ -1637,7 +1682,6 @@ public Real real(Operand input) { * tf.real(input) ==> [-2.25, 3.25] * * - * @param data type for {@code output} output * @param input The input value * @param Tout The value of the Tout attribute * @param data type for {@code Real} output and operands @@ -1653,7 +1697,6 @@ public Real real(Operand input, Class *

NOTE: {@code Div} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code RealDiv} output and operands @@ -1667,7 +1710,6 @@ public RealDiv realDiv(Operand x, Operand y) { * Computes the reciprocal of x element-wise. * I.e., \(y = 1 / x\). * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Reciprocal} output and operands * @return a new instance of Reciprocal @@ -1676,6 +1718,55 @@ public Reciprocal reciprocal(Operand x) { return Reciprocal.create(scope, x); } + /** + * Computes the gradient for the inverse of {@code x} wrt its input. + * Specifically, {@code grad = -dy * y*y}, where {@code y = 1/x}, and {@code dy} + * is the corresponding input gradient. + * + * @param y The y value + * @param dy The dy value + * @param data type for {@code ReciprocalGrad} output and operands + * @return a new instance of ReciprocalGrad + */ + public ReciprocalGrad reciprocalGrad(Operand y, Operand dy) { + return ReciprocalGrad.create(scope, y, dy); + } + + /** + * Computes requantization range per channel. + * + * @param input The original input tensor. + * @param inputMin The minimum value of the input tensor + * @param inputMax The maximum value of the input tensor. + * @param clipValueMax The maximum value of the output that needs to be clipped. + * Example: set this to 6 for Relu6. + * @return a new instance of RequantizationRangePerChannel + */ + public RequantizationRangePerChannel requantizationRangePerChannel( + Operand input, Operand inputMin, Operand inputMax, + Float clipValueMax) { + return RequantizationRangePerChannel.create(scope, input, inputMin, inputMax, clipValueMax); + } + + /** + * Requantizes input with min and max values known per channel. + * + * @param input The original input tensor. 
+ * @param inputMin The minimum value of the input tensor + * @param inputMax The maximum value of the input tensor. + * @param requestedOutputMin The minimum value of the output tensor requested. + * @param requestedOutputMax The maximum value of the output tensor requested. + * @param outType The quantized type of output tensor that needs to be converted. + * @param data type for {@code RequantizePerChannel} output and operands + * @return a new instance of RequantizePerChannel + */ + public RequantizePerChannel requantizePerChannel( + Operand input, Operand inputMin, Operand inputMax, + Operand requestedOutputMin, Operand requestedOutputMax, + Class outType) { + return RequantizePerChannel.create(scope, input, inputMin, inputMax, requestedOutputMin, requestedOutputMax, outType); + } + /** * Returns element-wise integer closest to x. * If the result is midway between two representable values, @@ -1687,7 +1778,6 @@ public Reciprocal reciprocal(Operand x) { * rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.] * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Rint} output and operands * @return a new instance of Rint @@ -1701,7 +1791,6 @@ public Rint rint(Operand x) { * Rounds half to even. Also known as bankers rounding. If you want to round * according to the current system rounding mode use std::cint. * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Round} output and operands * @return a new instance of Round @@ -1714,7 +1803,6 @@ public Round round(Operand x) { * Computes reciprocal of square root of x element-wise. * I.e., \(y = 1 / \sqrt{x}\). 
* - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Rsqrt} output and operands * @return a new instance of Rsqrt @@ -1723,6 +1811,20 @@ public Rsqrt rsqrt(Operand x) { return Rsqrt.create(scope, x); } + /** + * Computes the gradient for the rsqrt of {@code x} wrt its input. + * Specifically, {@code grad = dy * -0.5 * y^3}, where {@code y = rsqrt(x)}, and {@code dy} + * is the corresponding input gradient. + * + * @param y The y value + * @param dy The dy value + * @param data type for {@code RsqrtGrad} output and operands + * @return a new instance of RsqrtGrad + */ + public RsqrtGrad rsqrtGrad(Operand y, Operand dy) { + return RsqrtGrad.create(scope, y, dy); + } + /** * Computes the maximum along segments of a tensor. * Read @@ -1764,7 +1866,6 @@ public Rsqrt rsqrt(Operand x) { * * * - * @param data type for {@code output} output * @param data The data value * @param segmentIds A 1-D tensor whose size is equal to the size of {@code data}'s * first dimension. Values should be sorted and can be repeated. @@ -1811,7 +1912,6 @@ public SegmentMax segmentMax(Operand data, * * * - * @param data type for {@code output} output * @param data The data value * @param segmentIds A 1-D tensor whose size is equal to the size of {@code data}'s * first dimension. Values should be sorted and can be repeated. @@ -1866,7 +1966,6 @@ public SegmentMean segmentMean(Operand data, * * * - * @param data type for {@code output} output * @param data The data value * @param segmentIds A 1-D tensor whose size is equal to the size of {@code data}'s * first dimension. Values should be sorted and can be repeated. @@ -1915,7 +2014,6 @@ public SegmentMin segmentMin(Operand data, * * * - * @param data type for {@code output} output * @param data The data value * @param segmentIds A 1-D tensor whose size is equal to the size of {@code data}'s * first dimension. Values should be sorted and can be repeated. 
@@ -1941,9 +2039,7 @@ public SegmentProd segmentProd(Operand data, * that {@code segment_ids[j] == i}. *

If the sum is empty for a given segment ID {@code i}, {@code output[i] = 0}. *

Note that this op is currently only supported with jit_compile=True. - * * - * @param data type for {@code output} output * @param data The data value * @param segmentIds A 1-D tensor whose size is equal to the size of {@code data}'s * first dimension. Values should be sorted and can be repeated. @@ -1963,7 +2059,6 @@ public SegmentSum segmentSum(Operand data, * Computes sigmoid of {@code x} element-wise. * Specifically, {@code y = 1 / (1 + exp(-x))}. * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Sigmoid} output and operands * @return a new instance of Sigmoid @@ -1972,6 +2067,20 @@ public Sigmoid sigmoid(Operand x) { return Sigmoid.create(scope, x); } + /** + * Computes the gradient of the sigmoid of {@code x} wrt its input. + * Specifically, {@code grad = dy * y * (1 - y)}, where {@code y = sigmoid(x)}, and + * {@code dy} is the corresponding input gradient. + * + * @param y The y value + * @param dy The dy value + * @param data type for {@code SigmoidGrad} output and operands + * @return a new instance of SigmoidGrad + */ + public SigmoidGrad sigmoidGrad(Operand y, Operand dy) { + return SigmoidGrad.create(scope, y, dy); + } + /** * Returns an element-wise indication of the sign of a number. * {@code y = sign(x) = -1} if {@code x < 0}; 0 if {@code x == 0}; 1 if {@code x > 0}. 
@@ -1986,7 +2095,6 @@ public Sigmoid sigmoid(Operand x) { * * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Sign} output and operands * @return a new instance of Sign @@ -2005,7 +2113,6 @@ public Sign sign(Operand x) { * tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 0.9320391 -0.87329733 -0.54402107 nan] * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Sin} output and operands * @return a new instance of Sin @@ -2024,7 +2131,6 @@ public Sin sin(Operand x) { * tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 3.6268604e+00 1.1013232e+04 inf] * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Sinh} output and operands * @return a new instance of Sinh @@ -2033,10 +2139,45 @@ public Sinh sinh(Operand x) { return Sinh.create(scope, x); } + /** + * Generates points from the Sobol sequence. + * Creates a Sobol sequence with {@code num_results} samples. Each sample has dimension + * {@code dim}. Skips the first {@code skip} samples. + * + * @param dim Positive scalar {@code Tensor} representing each sample's dimension. + * @param numResults Positive scalar {@code Tensor} of dtype int32. The number of Sobol points to return + * in the output. + * @param skip Positive scalar {@code Tensor} of dtype int32. The number of initial points of the + * Sobol sequence to skip. + * @return a new instance of SobolSample, with default output types + */ + public SobolSample sobolSample(Operand dim, Operand numResults, + Operand skip) { + return SobolSample.create(scope, dim, numResults, skip); + } + + /** + * Generates points from the Sobol sequence. + * Creates a Sobol sequence with {@code num_results} samples. Each sample has dimension + * {@code dim}. Skips the first {@code skip} samples. + * + * @param dim Positive scalar {@code Tensor} representing each sample's dimension. 
+ * @param numResults Positive scalar {@code Tensor} of dtype int32. The number of Sobol points to return + * in the output. + * @param skip Positive scalar {@code Tensor} of dtype int32. The number of initial points of the + * Sobol sequence to skip. + * @param dtype The type of the sample. One of: {@code float32} or {@code float64}. + * @param data type for {@code SobolSample} output and operands + * @return a new instance of SobolSample + */ + public SobolSample sobolSample(Operand dim, + Operand numResults, Operand skip, Class dtype) { + return SobolSample.create(scope, dim, numResults, skip, dtype); + } + /** * The Softplus operation * - * @param data type for {@code activations} output * @param features The features value * @param data type for {@code Softplus} output and operands * @return a new instance of Softplus @@ -2045,11 +2186,23 @@ public Softplus softplus(Operand features) { return Softplus.create(scope, features); } + /** + * Computes softplus gradients for a softplus operation. + * + * @param gradients The backpropagated gradients to the corresponding softplus operation. + * @param features The features passed as input to the corresponding softplus operation. + * @param data type for {@code SoftplusGrad} output and operands + * @return a new instance of SoftplusGrad + */ + public SoftplusGrad softplusGrad(Operand gradients, + Operand features) { + return SoftplusGrad.create(scope, gradients, features); + } + /** * Computes square root of x element-wise. * I.e., \(y = \sqrt{x} = x^{1/2}\). * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Sqrt} output and operands * @return a new instance of Sqrt @@ -2058,11 +2211,24 @@ public Sqrt sqrt(Operand x) { return Sqrt.create(scope, x); } + /** + * Computes the gradient for the sqrt of {@code x} wrt its input. + * Specifically, {@code grad = dy * 0.5 / y}, where {@code y = sqrt(x)}, and {@code dy} + * is the corresponding input gradient. 
+ * + * @param y The y value + * @param dy The dy value + * @param data type for {@code SqrtGrad} output and operands + * @return a new instance of SqrtGrad + */ + public SqrtGrad sqrtGrad(Operand y, Operand dy) { + return SqrtGrad.create(scope, y, dy); + } + /** * Computes square of x element-wise. * I.e., \(y = x * x = x^2\). * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Square} output and operands * @return a new instance of Square @@ -2076,7 +2242,6 @@ public Square square(Operand x) { * NOTE: {@code math.SquaredDifference} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code SquaredDifference} output and operands @@ -2091,7 +2256,6 @@ public SquaredDifference squaredDifference(Operand x, Op * NOTE: {@code math.Sub} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code Sub} output and operands @@ -2112,7 +2276,6 @@ public Sub sub(Operand x, Operand y) { * tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 nan] * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Tan} output and operands * @return a new instance of Tan @@ -2138,7 +2301,6 @@ public Tan tan(Operand x) { * * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Tanh} output and operands * @return a new instance of Tanh @@ -2147,6 +2309,20 @@ public Tanh tanh(Operand x) { return Tanh.create(scope, x); } + /** + * Computes the gradient for the tanh of {@code x} wrt its input. + * Specifically, {@code grad = dy * (1 - y*y)}, where {@code y = tanh(x)}, and {@code dy} + * is the corresponding input gradient. 
+ * + * @param y The y value + * @param dy The dy value + * @param data type for {@code TanhGrad} output and operands + * @return a new instance of TanhGrad + */ + public TanhGrad tanhGrad(Operand y, Operand dy) { + return TanhGrad.create(scope, y, dy); + } + /** * Returns x / y element-wise, rounded towards zero. * Truncation designates that negative numbers will round fractional quantities @@ -2156,7 +2332,6 @@ public Tanh tanh(Operand x) { *

NOTE: {@code math.TruncateDiv} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code TruncateDiv} output and operands @@ -2172,7 +2347,6 @@ public TruncateDiv truncateDiv(Operand x, Operand y) *

NOTE: {@code math.TruncateMod} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code TruncateMod} output and operands @@ -2182,6 +2356,60 @@ public TruncateMod truncateMod(Operand x, Operand y return TruncateMod.create(scope, x, y); } + /** + * Perform quantized add of quantized Tensor {@code lhs} and quantized Tensor {@code rhs} to make quantized {@code output}. + * Given quantized {@code lhs} and quantized {@code rhs}, performs quantized add on {@code lhs} and {@code rhs} to make quantized {@code output}. + *

{@code math.UniformQuantizedAdd} follows Numpy broadcasting rules. + * The two input array shapes are compared element-wise. + * Starting with the trailing dimensions, the two dimensions either have to be equal or one of them needs to be 1. + *

{@code lhs} and {@code rhs} must be quantized Tensor, where data value is quantized using the formula: + *

+   *  quantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val)
+   *  
+ *

{@code output} is also quantized, using the same formula. + *

If {@code lhs} and {@code output} is both per-axis quantized, the quantization axis must match. + * Also, if {@code rhs} and {@code output} is both per-axis quantized, the quantization axis must match. + * Match means the axis must match when adding, regarding the broadcasting. + * i.e. For both operands {@code lhs} and {@code rhs}, + * if {@code operand.quantization_axis} >= 0 and {@code output.quantization_axis} >= 0, + * {@code operand.dims} - {@code operand.quantization_axis} must be equal to {@code output.dims} - {@code output.quantization_axis}. + * + * @param lhs Must be a quantized tensor. + * @param rhs Must be a quantized tensor. + * @param lhsScales The float value(s) used as scale factors when quantizing the original data that {@code lhs} represents. + * @param lhsZeroPoints The int32 value(s) used as zero points when quantizing original data that {@code lhs} represents. + * Must have same shape with {@code lhs_scales}. + * @param rhsScales The float value(s) used as scale factors when quantizing the original data that {@code rhs} represents. + * @param rhsZeroPoints The int32 value(s) used as zero points when quantizing original data that {@code rhs} represents. + * Must have same shape with {@code rhs_scales}. + * @param outputScales The float value(s) to use as scale factors when quantizing original data that {@code output} represents. + * @param outputZeroPoints The int32 value(s) used as zero points when quantizing original data that output represents. + * Must have same shape with {@code output_scales}. + * @param lhsQuantizationMinVal The min value of the quantized data stored in {@code lhs}. + * For example, if {@code Tin} is {@code qint8}, this must be set to -127 if narrow range quantized or -128 if not. + * @param lhsQuantizationMaxVal The max value of the quantized data stored in {@code lhs}. + * For example, if {@code Tin} is {@code qint8}, this must be set to 127. 
+ * @param rhsQuantizationMinVal The min value of the quantized data stored in {@code rhs}. + * For example, if {@code Tin} is {@code qint8}, this must be set to -127 if narrow range quantized or -128 if not. + * @param rhsQuantizationMaxVal The max value of the quantized data stored in {@code rhs}. + * For example, if {@code Tin} is {@code qint8}, this must be set to 127. + * @param outputQuantizationMinVal The min value of the quantized data stored in {@code output}. + * For example, if {@code Tout} is {@code qint8}, this must be set to -127 if narrow range quantized or -128 if not. + * @param outputQuantizationMaxVal The max value of the quantized data stored in {@code output}. + * For example, if {@code Tout} is {@code qint8}, this must be set to 127. + * @param options carries optional attribute values + * @param data type for {@code UniformQuantizedAdd} output and operands + * @return a new instance of UniformQuantizedAdd + */ + public UniformQuantizedAdd uniformQuantizedAdd(Operand lhs, + Operand rhs, Operand lhsScales, Operand lhsZeroPoints, + Operand rhsScales, Operand rhsZeroPoints, Operand outputScales, + Operand outputZeroPoints, Long lhsQuantizationMinVal, Long lhsQuantizationMaxVal, + Long rhsQuantizationMinVal, Long rhsQuantizationMaxVal, Long outputQuantizationMinVal, + Long outputQuantizationMaxVal, UniformQuantizedAdd.Options... options) { + return UniformQuantizedAdd.create(scope, lhs, rhs, lhsScales, lhsZeroPoints, rhsScales, rhsZeroPoints, outputScales, outputZeroPoints, lhsQuantizationMinVal, lhsQuantizationMaxVal, rhsQuantizationMinVal, rhsQuantizationMaxVal, outputQuantizationMinVal, outputQuantizationMaxVal, options); + } + /** * Computes the maximum along segments of a tensor. * Read @@ -2217,7 +2445,6 @@ public TruncateMod truncateMod(Operand x, Operand y * * * - * @param data type for {@code output} output * @param data The data value * @param segmentIds A tensor whose shape is a prefix of {@code data.shape}. 
* The values must be less than {@code num_segments}. @@ -2264,7 +2491,6 @@ public UnsortedSegmentMax unsortedSegmentMax(Operand d * out-of-bound indices or outputting a tensor with a 0 stored in the first * dimension of its shape if {@code num_segments} is 0. * - * @param data type for {@code output} output * @param data The data value * @param segmentIds A tensor whose shape is a prefix of {@code data.shape}. * The values must be less than {@code num_segments}. @@ -2310,7 +2536,6 @@ public UnsortedSegmentMin unsortedSegmentMin(Operand d * out-of-bound indices or outputting a tensor with a 0 stored in the first * dimension of its shape if {@code num_segments} is 0. * - * @param data type for {@code output} output * @param data The data value * @param segmentIds A tensor whose shape is a prefix of {@code data.shape}. * The values must be less than {@code num_segments}. @@ -2359,7 +2584,6 @@ public UnsortedSegmentProd unsortedSegmentProd(Operand d * * * - * @param data type for {@code output} output * @param data The data value * @param segmentIds A tensor whose shape is a prefix of {@code data.shape}. * The values must be less than {@code num_segments}. @@ -2377,7 +2601,6 @@ public UnsortedSegmentSum unsortedSegmentSum(Operand dat /** * Returns 0 if x == 0, and x / y otherwise, elementwise. * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code Xdivy} output and operands @@ -2390,7 +2613,6 @@ public Xdivy xdivy(Operand x, Operand y) { /** * Returns 0 if x == 0, and x * log1p(y) otherwise, elementwise. * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code Xlog1py} output and operands @@ -2403,7 +2625,6 @@ public Xlog1py xlog1py(Operand x, Operand y) { /** * Returns 0 if x == 0, and x * log(y) otherwise, elementwise. 
* - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code Xlogy} output and operands @@ -2418,7 +2639,6 @@ public Xlogy xlogy(Operand x, Operand y) { * The Hurwitz zeta function is defined as: *

\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\) * - * @param data type for {@code z} output * @param x The x value * @param q The q value * @param data type for {@code Zeta} output and operands diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/MathSpecialOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/MathSpecialOps.java new file mode 100644 index 00000000000..e486615af1b --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/MathSpecialOps.java @@ -0,0 +1,200 @@ +// Copyright 2020-2022 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! 
+// +package org.tensorflow.op; + +import org.tensorflow.Operand; +import org.tensorflow.op.math.special.BesselJ0; +import org.tensorflow.op.math.special.BesselJ1; +import org.tensorflow.op.math.special.BesselK0; +import org.tensorflow.op.math.special.BesselK0e; +import org.tensorflow.op.math.special.BesselK1; +import org.tensorflow.op.math.special.BesselK1e; +import org.tensorflow.op.math.special.BesselY0; +import org.tensorflow.op.math.special.BesselY1; +import org.tensorflow.op.math.special.Dawsn; +import org.tensorflow.op.math.special.Expint; +import org.tensorflow.op.math.special.FresnelCos; +import org.tensorflow.op.math.special.FresnelSin; +import org.tensorflow.op.math.special.Spence; +import org.tensorflow.types.family.TNumber; + +/** + * An API for building {@code math.special} operations as {@link Op Op}s + * + * @see Ops + */ +public final class MathSpecialOps { + private final Scope scope; + + private final Ops ops; + + MathSpecialOps(Ops ops) { + this.scope = ops.scope(); + this.ops = ops; + } + + /** + * The BesselJ0 operation + * + * @param x The x value + * @param data type for {@code BesselJ0} output and operands + * @return a new instance of BesselJ0 + */ + public BesselJ0 besselJ0(Operand x) { + return BesselJ0.create(scope, x); + } + + /** + * The BesselJ1 operation + * + * @param x The x value + * @param data type for {@code BesselJ1} output and operands + * @return a new instance of BesselJ1 + */ + public BesselJ1 besselJ1(Operand x) { + return BesselJ1.create(scope, x); + } + + /** + * The BesselK0 operation + * + * @param x The x value + * @param data type for {@code BesselK0} output and operands + * @return a new instance of BesselK0 + */ + public BesselK0 besselK0(Operand x) { + return BesselK0.create(scope, x); + } + + /** + * The BesselK0e operation + * + * @param x The x value + * @param data type for {@code BesselK0e} output and operands + * @return a new instance of BesselK0e + */ + public BesselK0e besselK0e(Operand x) { + return 
BesselK0e.create(scope, x); + } + + /** + * The BesselK1 operation + * + * @param x The x value + * @param data type for {@code BesselK1} output and operands + * @return a new instance of BesselK1 + */ + public BesselK1 besselK1(Operand x) { + return BesselK1.create(scope, x); + } + + /** + * The BesselK1e operation + * + * @param x The x value + * @param data type for {@code BesselK1e} output and operands + * @return a new instance of BesselK1e + */ + public BesselK1e besselK1e(Operand x) { + return BesselK1e.create(scope, x); + } + + /** + * The BesselY0 operation + * + * @param x The x value + * @param data type for {@code BesselY0} output and operands + * @return a new instance of BesselY0 + */ + public BesselY0 besselY0(Operand x) { + return BesselY0.create(scope, x); + } + + /** + * The BesselY1 operation + * + * @param x The x value + * @param data type for {@code BesselY1} output and operands + * @return a new instance of BesselY1 + */ + public BesselY1 besselY1(Operand x) { + return BesselY1.create(scope, x); + } + + /** + * The Dawsn operation + * + * @param x The x value + * @param data type for {@code Dawsn} output and operands + * @return a new instance of Dawsn + */ + public Dawsn dawsn(Operand x) { + return Dawsn.create(scope, x); + } + + /** + * The Expint operation + * + * @param x The x value + * @param data type for {@code Expint} output and operands + * @return a new instance of Expint + */ + public Expint expint(Operand x) { + return Expint.create(scope, x); + } + + /** + * The FresnelCos operation + * + * @param x The x value + * @param data type for {@code FresnelCos} output and operands + * @return a new instance of FresnelCos + */ + public FresnelCos fresnelCos(Operand x) { + return FresnelCos.create(scope, x); + } + + /** + * The FresnelSin operation + * + * @param x The x value + * @param data type for {@code FresnelSin} output and operands + * @return a new instance of FresnelSin + */ + public FresnelSin fresnelSin(Operand x) { + return 
FresnelSin.create(scope, x); + } + + /** + * The Spence operation + * + * @param x The x value + * @param data type for {@code Spence} output and operands + * @return a new instance of Spence + */ + public Spence spence(Operand x) { + return Spence.create(scope, x); + } + + /** + * Get the parent {@link Ops} object. + */ + public final Ops ops() { + return ops; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java index 7ac1a318348..535af3cda71 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java @@ -22,10 +22,14 @@ import org.tensorflow.op.nn.AvgPool; import org.tensorflow.op.nn.AvgPool3d; import org.tensorflow.op.nn.AvgPool3dGrad; +import org.tensorflow.op.nn.AvgPoolGrad; import org.tensorflow.op.nn.BatchNormWithGlobalNormalization; import org.tensorflow.op.nn.BatchNormWithGlobalNormalizationGrad; import org.tensorflow.op.nn.BiasAdd; import org.tensorflow.op.nn.BiasAddGrad; +import org.tensorflow.op.nn.BlockLSTM; +import org.tensorflow.op.nn.BlockLSTMGrad; +import org.tensorflow.op.nn.CTCLossV2; import org.tensorflow.op.nn.ComputeAccidentalHits; import org.tensorflow.op.nn.Conv; import org.tensorflow.op.nn.Conv2d; @@ -37,6 +41,8 @@ import org.tensorflow.op.nn.CtcBeamSearchDecoder; import org.tensorflow.op.nn.CtcGreedyDecoder; import org.tensorflow.op.nn.CtcLoss; +import org.tensorflow.op.nn.CudnnRNN; +import org.tensorflow.op.nn.CudnnRNNBackprop; import org.tensorflow.op.nn.CudnnRNNCanonicalToParams; import org.tensorflow.op.nn.CudnnRNNParamsToCanonical; import org.tensorflow.op.nn.CudnnRnnParamsSize; @@ -50,18 +56,28 @@ import org.tensorflow.op.nn.Dilation2dBackpropFilter; import org.tensorflow.op.nn.Dilation2dBackpropInput; import org.tensorflow.op.nn.Elu; +import 
org.tensorflow.op.nn.EluGrad; import org.tensorflow.op.nn.FixedUnigramCandidateSampler; import org.tensorflow.op.nn.FractionalAvgPool; +import org.tensorflow.op.nn.FractionalAvgPoolGrad; import org.tensorflow.op.nn.FractionalMaxPool; +import org.tensorflow.op.nn.FractionalMaxPoolGrad; import org.tensorflow.op.nn.FusedBatchNorm; import org.tensorflow.op.nn.FusedBatchNormGrad; import org.tensorflow.op.nn.FusedPadConv2d; import org.tensorflow.op.nn.FusedResizeAndPadConv2d; +import org.tensorflow.op.nn.GRUBlockCell; +import org.tensorflow.op.nn.GRUBlockCellGrad; import org.tensorflow.op.nn.InTopK; +import org.tensorflow.op.nn.InvGrad; +import org.tensorflow.op.nn.IsotonicRegression; import org.tensorflow.op.nn.L2Loss; +import org.tensorflow.op.nn.LSTMBlockCell; +import org.tensorflow.op.nn.LSTMBlockCellGrad; import org.tensorflow.op.nn.LeakyRelu; import org.tensorflow.op.nn.LearnedUnigramCandidateSampler; import org.tensorflow.op.nn.LocalResponseNormalization; +import org.tensorflow.op.nn.LocalResponseNormalizationGrad; import org.tensorflow.op.nn.LogSoftmax; import org.tensorflow.op.nn.MaxPool; import org.tensorflow.op.nn.MaxPool3d; @@ -70,12 +86,28 @@ import org.tensorflow.op.nn.MaxPoolGrad; import org.tensorflow.op.nn.MaxPoolGradGrad; import org.tensorflow.op.nn.MaxPoolGradGradWithArgmax; +import org.tensorflow.op.nn.MaxPoolGradWithArgmax; import org.tensorflow.op.nn.MaxPoolWithArgmax; import org.tensorflow.op.nn.NthElement; import org.tensorflow.op.nn.QuantizedAvgPool; import org.tensorflow.op.nn.QuantizedBatchNormWithGlobalNormalization; import org.tensorflow.op.nn.QuantizedBiasAdd; +import org.tensorflow.op.nn.QuantizedConv2DAndRelu; +import org.tensorflow.op.nn.QuantizedConv2DAndReluAndRequantize; +import org.tensorflow.op.nn.QuantizedConv2DAndRequantize; +import org.tensorflow.op.nn.QuantizedConv2DPerChannel; +import org.tensorflow.op.nn.QuantizedConv2DWithBias; +import org.tensorflow.op.nn.QuantizedConv2DWithBiasAndRelu; +import 
org.tensorflow.op.nn.QuantizedConv2DWithBiasAndReluAndRequantize; +import org.tensorflow.op.nn.QuantizedConv2DWithBiasAndRequantize; +import org.tensorflow.op.nn.QuantizedConv2DWithBiasSignedSumAndReluAndRequantize; +import org.tensorflow.op.nn.QuantizedConv2DWithBiasSumAndRelu; +import org.tensorflow.op.nn.QuantizedConv2DWithBiasSumAndReluAndRequantize; import org.tensorflow.op.nn.QuantizedConv2d; +import org.tensorflow.op.nn.QuantizedDepthwiseConv2D; +import org.tensorflow.op.nn.QuantizedDepthwiseConv2DWithBias; +import org.tensorflow.op.nn.QuantizedDepthwiseConv2DWithBiasAndRelu; +import org.tensorflow.op.nn.QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize; import org.tensorflow.op.nn.QuantizedInstanceNorm; import org.tensorflow.op.nn.QuantizedMaxPool; import org.tensorflow.op.nn.QuantizedRelu; @@ -83,14 +115,20 @@ import org.tensorflow.op.nn.QuantizedReluX; import org.tensorflow.op.nn.Relu; import org.tensorflow.op.nn.Relu6; +import org.tensorflow.op.nn.Relu6Grad; +import org.tensorflow.op.nn.ReluGrad; import org.tensorflow.op.nn.Selu; +import org.tensorflow.op.nn.SeluGrad; import org.tensorflow.op.nn.Softmax; import org.tensorflow.op.nn.SoftmaxCrossEntropyWithLogits; import org.tensorflow.op.nn.Softsign; +import org.tensorflow.op.nn.SoftsignGrad; import org.tensorflow.op.nn.SpaceToBatch; import org.tensorflow.op.nn.SpaceToDepth; import org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits; import org.tensorflow.op.nn.TopK; +import org.tensorflow.op.nn.UniformQuantizedConvolution; +import org.tensorflow.op.nn.UniformQuantizedConvolutionHybrid; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; @@ -100,7 +138,7 @@ /** * An API for building {@code nn} operations as {@link Op Op}s * - * @see {@link Ops} + * @see Ops */ public final class NnOps { private final Scope scope; @@ -117,7 +155,6 @@ public final class NnOps { * Each entry in {@code output} is the mean of the corresponding size {@code ksize} 
* window in {@code value}. * - * @param data type for {@code output} output * @param value 4-D with shape {@code [batch, height, width, channels]}. * @param ksize The size of the sliding window for each dimension of {@code value}. * @param strides The stride of the sliding window for each dimension of {@code value}. @@ -136,7 +173,6 @@ public AvgPool avgPool(Operand value, List ksize * Each entry in {@code output} is the mean of the corresponding size {@code ksize} window in * {@code value}. * - * @param data type for {@code output} output * @param input Shape {@code [batch, depth, rows, cols, channels]} tensor to pool over. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of * the input tensor. Must have {@code ksize[0] = ksize[4] = 1}. @@ -155,7 +191,6 @@ public AvgPool3d avgPool3d(Operand input, List k /** * Computes gradients of average pooling function. * - * @param data type for {@code output} output * @param origInputShape The original input dimensions. * @param grad Output backprop of shape {@code [batch, depth, rows, cols, channels]}. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of @@ -173,11 +208,29 @@ public AvgPool3dGrad avgPool3dGrad(Operand origIn return AvgPool3dGrad.create(scope, origInputShape, grad, ksize, strides, padding, options); } + /** + * Computes gradients of the average pooling function. + * + * @param origInputShape 1-D. Shape of the original input to {@code avg_pool}. + * @param grad 4-D with shape {@code [batch, height, width, channels]}. Gradients w.r.t. + * the output of {@code avg_pool}. + * @param ksize The size of the sliding window for each dimension of the input. + * @param strides The stride of the sliding window for each dimension of the input. + * @param padding The type of padding algorithm to use. 
+ * @param options carries optional attribute values + * @param data type for {@code AvgPoolGrad} output and operands + * @return a new instance of AvgPoolGrad + */ + public AvgPoolGrad avgPoolGrad(Operand origInputShape, + Operand grad, List ksize, List strides, String padding, + AvgPoolGrad.Options... options) { + return AvgPoolGrad.create(scope, origInputShape, grad, ksize, strides, padding, options); + } + /** * Batch normalization. * This op is deprecated. Prefer {@code tf.nn.batch_normalization}. * - * @param data type for {@code result} output * @param t A 4D input Tensor. * @param m A 1D mean Tensor with size matching the last dimension of t. * This is the first output from tf.nn.moments, @@ -206,7 +259,6 @@ public BatchNormWithGlobalNormalization batchNormWithGlobal * Gradients for batch normalization. * This op is deprecated. See {@code tf.nn.batch_normalization}. * - * @param data type for {@code dx} output * @param t A 4D input Tensor. * @param m A 1D mean Tensor with size matching the last dimension of t. * This is the first output from tf.nn.moments, @@ -235,7 +287,6 @@ public BatchNormWithGlobalNormalizationGrad batchNormWithGl * This is a special case of {@code tf.add} where {@code bias} is restricted to be 1-D. * Broadcasting is supported, so {@code value} may have any number of dimensions. * - * @param data type for {@code output} output * @param value Any number of dimensions. * @param bias 1-D with size the last dimension of {@code value}. * @param options carries optional attribute values @@ -253,7 +304,6 @@ public BiasAdd biasAdd(Operand value, Operand bias, * For NHWC data format, the feature dimension is the last. For NCHW data format, * the feature dimension is the third-to-last. * - * @param data type for {@code output} output * @param outBackprop Any number of dimensions. 
* @param options carries optional attribute values * @param data type for {@code BiasAddGrad} output and operands @@ -264,6 +314,104 @@ public BiasAddGrad biasAddGrad(Operand outBackprop, return BiasAddGrad.create(scope, outBackprop, options); } + /** + * Computes the LSTM cell forward propagation for all the time steps. + * This is equivalent to applying LSTMBlockCell in a loop, like so: + *

+   *  for x1 in unpack(x):
+   *    i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(
+   *      x1, cs_prev, h_prev, w, wci, wcf, wco, b)
+   *    cs_prev = cs1
+   *    h_prev = h1
+   *    i.append(i1)
+   *    cs.append(cs1)
+   *    f.append(f1)
+   *    o.append(o1)
+   *    ci.append(ci1)
+   *    co.append(co1)
+   *    h.append(h1)
+   *  return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(ch), pack(h)
+   *
+   *  Note that unlike LSTMBlockCell (and BlockLSTM) which uses ICFO gate layout,
+   *  this op uses IFCO. So in order for the following snippet to be equivalent
+   *  all gate-related outputs should be reordered.
+   *  
+ * + * @param seqLenMax Maximum time length actually used by this input. Outputs are padded + * with zeros beyond this length. + * @param x The sequence input to the LSTM, shape (timelen, batch_size, num_inputs). + * @param csPrev Value of the initial cell state. + * @param hPrev Initial output of cell (to be used for peephole). + * @param w The weight matrix. + * @param wci The weight matrix for input gate peephole connection. + * @param wcf The weight matrix for forget gate peephole connection. + * @param wco The weight matrix for output gate peephole connection. + * @param b The bias vector. + * @param options carries optional attribute values + * @param data type for {@code BlockLSTMV2} output and operands + * @return a new instance of BlockLSTM + */ + public BlockLSTM blockLSTM(Operand seqLenMax, Operand x, + Operand csPrev, Operand hPrev, Operand w, Operand wci, Operand wcf, + Operand wco, Operand b, BlockLSTM.Options... options) { + return BlockLSTM.create(scope, seqLenMax, x, csPrev, hPrev, w, wci, wcf, wco, b, options); + } + + /** + * Computes the LSTM cell backward propagation for the entire time sequence. + * This implementation is to be used in conjunction of BlockLSTMV2. + * + * @param seqLenMax Maximum time length actually used by this input. Outputs are padded + * with zeros beyond this length. + * @param x The sequence input to the LSTM, shape (timelen, batch_size, num_inputs). + * @param csPrev Value of the initial cell state. + * @param hPrev Initial output of cell (to be used for peephole). + * @param w The weight matrix. + * @param wci The weight matrix for input gate peephole connection. + * @param wcf The weight matrix for forget gate peephole connection. + * @param wco The weight matrix for output gate peephole connection. + * @param b The bias vector. + * @param i The input gate over the whole time sequence. + * @param cs The cell state before the tanh over the whole time sequence. + * @param f The forget gate over the whole time sequence. 
+ * @param o The output gate over the whole time sequence. + * @param ci The cell input over the whole time sequence. + * @param co The cell after the tanh over the whole time sequence. + * @param h The output h vector over the whole time sequence. + * @param csGrad The current gradient of cs. + * @param hGrad The gradient of h vector. + * @param usePeephole Whether to use peephole weights. + * @param data type for {@code BlockLSTMGradV2} output and operands + * @return a new instance of BlockLSTMGrad + */ + public BlockLSTMGrad blockLSTMGrad(Operand seqLenMax, Operand x, + Operand csPrev, Operand hPrev, Operand w, Operand wci, Operand wcf, + Operand wco, Operand b, Operand i, Operand cs, Operand f, Operand o, + Operand ci, Operand co, Operand h, Operand csGrad, Operand hGrad, + Boolean usePeephole) { + return BlockLSTMGrad.create(scope, seqLenMax, x, csPrev, hPrev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, csGrad, hGrad, usePeephole); + } + + /** + * Calculates the CTC Loss (log probability) for each batch entry. Also calculates + * the gradient. This class performs the softmax operation for you, so inputs + * should be e.g. linear projections of outputs by an LSTM. + * + * @param inputs 3-D, shape: {@code (max_time x batch_size x num_classes)}, the logits. Default blank + * label is 0 rather num_classes - 1. + * @param labelsIndices The indices of a {@code SparseTensor}. + * {@code labels_indices(i, :) == [b, t]} means {@code labels_values(i)} stores the id for + * {@code (batch b, time t)}. + * @param labelsValues The values (labels) associated with the given batch and time. + * @param sequenceLength A vector containing sequence lengths (batch). + * @param options carries optional attribute values + * @return a new instance of CTCLossV2 + */ + public CTCLossV2 cTCLossV2(Operand inputs, Operand labelsIndices, + Operand labelsValues, Operand sequenceLength, CTCLossV2.Options... 
options) { + return CTCLossV2.create(scope, inputs, labelsIndices, labelsValues, sequenceLength, options); + } + /** * Computes the ids of the positions in sampled_candidates that match true_labels. * When doing log-odds NCE, the result of this op should be passed through a @@ -287,7 +435,6 @@ public ComputeAccidentalHits computeAccidentalHits(Operand trueClasses, * General function for computing a N-D convolution. It is required that * {@code 1 <= N <= 3}. * - * @param data type for {@code output} output * @param input Tensor of type T and shape {@code batch_shape + spatial_shape + [in_channels]} in the * case that {@code channels_last_format = true} or shape * {@code batch_shape + [in_channels] + spatial_shape} if {@code channels_last_format = false}. @@ -332,7 +479,6 @@ public Conv conv(Operand input, Operand filter, Lis *

Must have {@code strides[0] = strides[3] = 1}. For the most common case of the same * horizontal and vertices strides, {@code strides = [1, stride, stride, 1]}. * - * @param data type for {@code output} output * @param input A 4-D tensor. The dimension order is interpreted according to the value * of {@code data_format}, see below for details. * @param filter A 4-D tensor of shape @@ -353,7 +499,6 @@ public Conv2d conv2d(Operand input, Operand filter, /** * Computes the gradients of convolution with respect to the filter. * - * @param data type for {@code output} output * @param input 4-D with shape {@code [batch, in_height, in_width, in_channels]}. * @param filterSizes An integer vector representing the tensor shape of {@code filter}, * where {@code filter} is a 4-D @@ -377,7 +522,6 @@ public Conv2dBackpropFilter conv2dBackpropFilter(Operand< /** * Computes the gradients of convolution with respect to the input. * - * @param data type for {@code output} output * @param inputSizes An integer vector representing the shape of {@code input}, * where {@code input} is a 4-D {@code [batch, height, width, channels]} tensor. * @param filter 4-D with shape @@ -405,7 +549,6 @@ public Conv2dBackpropInput conv2dBackpropInput(OperandOur Conv3D implements a form of cross-correlation. * - * @param data type for {@code output} output * @param input Shape {@code [batch, in_depth, in_height, in_width, in_channels]}. * @param filter Shape {@code [filter_depth, filter_height, filter_width, in_channels, out_channels]}. {@code in_channels} must match between {@code input} and {@code filter}. * @param strides 1-D tensor of length 5. The stride of the sliding window for each @@ -423,7 +566,6 @@ public Conv3d conv3d(Operand input, Operand filter, /** * Computes the gradients of 3-D convolution with respect to the filter. * - * @param data type for {@code output} output * @param input Shape {@code [batch, depth, rows, cols, in_channels]}. 
* @param filterSizes An integer vector representing the tensor shape of {@code filter}, * where {@code filter} is a 5-D @@ -446,7 +588,6 @@ public Conv3dBackpropFilter conv3dBackpropFilter(Operand< /** * Computes the gradients of 3-D convolution with respect to the input. * - * @param data type for {@code output} output * @param inputSizes An integer vector representing the tensor shape of {@code input}, * where {@code input} is a 5-D * {@code [batch, depth, rows, cols, in_channels]} tensor. @@ -474,7 +615,6 @@ public Conv3dBackpropInput conv3dBackpropInput( * "A B" is returned if merge_repeated = True but "A B B B B" is * returned if merge_repeated = False. * - * @param data type for {@code log_probability} output * @param inputs 3-D, shape: {@code (max_time x batch_size x num_classes)}, the logits. * @param sequenceLength A vector containing sequence lengths, size {@code (batch)}. * @param beamWidth A scalar >= 0 (beam search beam width). @@ -500,7 +640,6 @@ public CtcBeamSearchDecoder ctcBeamSearchDecoder(Operand< * time and batch corresponds to the blank, index {@code (num_classes - 1)}, no new * element is emitted. * - * @param data type for {@code log_probability} output * @param inputs 3-D, shape: {@code (max_time x batch_size x num_classes)}, the logits. * @param sequenceLength A vector containing sequence lengths, size {@code (batch_size)}. * @param options carries optional attribute values @@ -517,7 +656,6 @@ public CtcGreedyDecoder ctcGreedyDecoder(Operand input * the gradient. This class performs the softmax operation for you, so inputs * should be e.g. linear projections of outputs by an LSTM. * - * @param data type for {@code loss} output * @param inputs 3-D, shape: {@code (max_time x batch_size x num_classes)}, the logits. * @param labelsIndices The indices of a {@code SparseTensor}. 
* {@code labels_indices(i, :) == [b, t]} means {@code labels_values(i)} stores the id for @@ -533,6 +671,134 @@ public CtcLoss ctcLoss(Operand inputs, Operand return CtcLoss.create(scope, inputs, labelsIndices, labelsValues, sequenceLength, options); } + /** + * A RNN backed by cuDNN. + * Computes the RNN from the input and initial states, with respect to the params + * buffer. Accepts one extra input "sequence_lengths" than CudnnRNN. + *

rnn_mode: Indicates the type of the RNN model. + * input_mode: Indicates whether there is a linear projection between the input and + * the actual computation before the first layer. 'skip_input' is only allowed + * when input_size == num_units; 'auto_select' implies 'skip_input' when + * input_size == num_units; otherwise, it implies 'linear_input'. + * direction: Indicates whether a bidirectional model will be used. Should be + * "unidirectional" or "bidirectional". + * dropout: Dropout probability. When set to 0., dropout is disabled. + * seed: The 1st part of a seed to initialize dropout. + * seed2: The 2nd part of a seed to initialize dropout. + * input: If time_major is true, this is a 3-D tensor with the shape of + * [seq_length, batch_size, input_size]. If time_major is false, the shape is + * [batch_size, seq_length, input_size]. + * input_h: If time_major is true, this is a 3-D tensor with the shape of + * [num_layer * dir, batch_size, num_units]. If time_major is false, the shape + * is [batch_size, num_layer * dir, num_units]. + * input_c: For LSTM, a 3-D tensor with the shape of + * [num_layer * dir, batch, num_units]. For other models, it is ignored. + * params: A 1-D tensor that contains the weights and biases in an opaque layout. + * The size must be created through CudnnRNNParamsSize, and initialized + * separately. Note that they might not be compatible across different + * generations. So it is a good idea to save and restore + * sequence_lengths: a vector of lengths of each input sequence. + * output: If time_major is true, this is a 3-D tensor with the shape of + * [seq_length, batch_size, dir * num_units]. If time_major is false, the + * shape is [batch_size, seq_length, dir * num_units]. + * output_h: The same shape has input_h. + * output_c: The same shape as input_c for LSTM. An empty tensor for other models. + * is_training: Indicates whether this operation is used for inference or + * training. 
+ * time_major: Indicates whether the input/output format is time major or batch + * major. + * reserve_space: An opaque tensor that can be used in backprop calculation. It + * is only produced if is_training is true. + * + * @param input The input value + * @param inputH The inputH value + * @param inputC The inputC value + * @param params The params value + * @param sequenceLengths The sequenceLengths value + * @param options carries optional attribute values + * @param data type for {@code CudnnRNNV3} output and operands + * @return a new instance of CudnnRNN + */ + public CudnnRNN cudnnRNN(Operand input, Operand inputH, + Operand inputC, Operand params, Operand sequenceLengths, + CudnnRNN.Options... options) { + return CudnnRNN.create(scope, input, inputH, inputC, params, sequenceLengths, options); + } + + /** + * Backprop step of CudnnRNNV3. + * Compute the backprop of both data and weights in a RNN. Takes an extra + * "sequence_lengths" input than CudnnRNNBackprop. + *

rnn_mode: Indicates the type of the RNN model. + * input_mode: Indicates whether there is a linear projection between the input and + * the actual computation before the first layer. 'skip_input' is only allowed + * when input_size == num_units; 'auto_select' implies 'skip_input' when + * input_size == num_units; otherwise, it implies 'linear_input'. + * direction: Indicates whether a bidirectional model will be used. Should be + * "unidirectional" or "bidirectional". + * dropout: Dropout probability. When set to 0., dropout is disabled. + * seed: The 1st part of a seed to initialize dropout. + * seed2: The 2nd part of a seed to initialize dropout. + * input: If time_major is true, this is a 3-D tensor with the shape of + * [seq_length, batch_size, input_size]. If time_major is false, the shape is + * [batch_size, seq_length, input_size]. + * input_h: If time_major is true, this is a 3-D tensor with the shape of + * [num_layer * dir, batch_size, num_units]. If time_major is false, the shape + * is [batch_size, num_layer * dir, num_units]. + * input_c: For LSTM, a 3-D tensor with the shape of + * [num_layer * dir, batch, num_units]. For other models, it is ignored. + * params: A 1-D tensor that contains the weights and biases in an opaque layout. + * The size must be created through CudnnRNNParamsSize, and initialized + * separately. Note that they might not be compatible across different + * generations. So it is a good idea to save and restore + * sequence_lengths: a vector of lengths of each input sequence. + * output: If time_major is true, this is a 3-D tensor with the shape of + * [seq_length, batch_size, dir * num_units]. If time_major is false, the + * shape is [batch_size, seq_length, dir * num_units]. + * output_h: The same shape has input_h. + * output_c: The same shape as input_c for LSTM. An empty tensor for other models. + * output_backprop: A 3-D tensor with the same shape as output in the forward pass. 
+ * output_h_backprop: A 3-D tensor with the same shape as output_h in the forward + * pass. + * output_c_backprop: A 3-D tensor with the same shape as output_c in the forward + * pass. + * time_major: Indicates whether the input/output format is time major or batch + * major. + * reserve_space: The same reserve_space produced in the forward operation. + * input_backprop: The backprop to input in the forward pass. Has the same shape + * as input. + * input_h_backprop: The backprop to input_h in the forward pass. Has the same + * shape as input_h. + * input_c_backprop: The backprop to input_c in the forward pass. Has the same + * shape as input_c. + * params_backprop: The backprop to the params buffer in the forward pass. Has the + * same shape as params. + * + * @param input The input value + * @param inputH The inputH value + * @param inputC The inputC value + * @param params The params value + * @param sequenceLengths The sequenceLengths value + * @param output The output value + * @param outputH The outputH value + * @param outputC The outputC value + * @param outputBackprop The outputBackprop value + * @param outputHBackprop The outputHBackprop value + * @param outputCBackprop The outputCBackprop value + * @param reserveSpace The reserveSpace value + * @param hostReserved The hostReserved value + * @param options carries optional attribute values + * @param data type for {@code CudnnRNNBackpropV3} output and operands + * @return a new instance of CudnnRNNBackprop + */ + public CudnnRNNBackprop cudnnRNNBackprop(Operand input, + Operand inputH, Operand inputC, Operand params, Operand sequenceLengths, + Operand output, Operand outputH, Operand outputC, Operand outputBackprop, + Operand outputHBackprop, Operand outputCBackprop, Operand reserveSpace, + Operand hostReserved, CudnnRNNBackprop.Options... 
options) { + return CudnnRNNBackprop.create(scope, input, inputH, inputC, params, sequenceLengths, output, outputH, outputC, outputBackprop, outputHBackprop, outputCBackprop, reserveSpace, hostReserved, options); + } + /** * Converts CudnnRNN params from canonical form to usable form. It supports the projection in LSTM. * Writes a set of weights into the opaque params buffer so they can be used in @@ -564,7 +830,6 @@ public CtcLoss ctcLoss(Operand inputs, Operand * num_proj: The output dimensionality for the projection matrices. If None or 0, * no projection is performed. * - * @param data type for {@code params} output * @param numLayers The numLayers value * @param numUnits The numUnits value * @param inputSize The inputSize value @@ -612,7 +877,6 @@ public CudnnRNNCanonicalToParams cudnnRNNCanonicalToParam * num_proj: The output dimensionality for the projection matrices. If None or 0, * no projection is performed. * - * @param data type for {@code weights} output * @param numLayers The numLayers value * @param numUnits The numUnits value * @param inputSize The inputSize value @@ -653,7 +917,6 @@ public CudnnRNNParamsToCanonical cudnnRNNParamsToCanonica * CudnnRNNParamsBiases to save and restore them in a way that is compatible * across different runs. * - * @param data type for {@code params_size} output * @param numLayers The numLayers value * @param numUnits The numUnits value * @param inputSize The inputSize value @@ -674,7 +937,6 @@ public CudnnRnnParamsSize cudnnRnnPara * Returns the dimension index in the destination data format given the one in * the source data format. * - * @param data type for {@code y} output * @param x A Tensor with each element as a dimension index in source data format. * Must be in the range [-4, 4). * @param options carries optional attribute values @@ -718,7 +980,6 @@ public DataFormatDimMap dataFormatDimMap(Operand x, * [1, 2] * * - * @param data type for {@code y} output * @param x Tensor of rank 1 or 2 in source data format. 
* @param options carries optional attribute values * @param data type for {@code DataFormatVecPermute} output and operands @@ -806,7 +1067,6 @@ public DataFormatVecPermute dataFormatVecPermute(Operand< * * * - * @param data type for {@code output} output * @param input The input value * @param blockSize The size of the spatial block, same as in Space2Depth. * @param options carries optional attribute values @@ -837,7 +1097,6 @@ public DepthToSpace depthToSpace(Operand input, Long blo *

Must have {@code strides[0] = strides[3] = 1}. For the most common case of the same * horizontal and vertices strides, {@code strides = [1, stride, stride, 1]}. * - * @param data type for {@code output} output * @param input The input value * @param filter The filter value * @param strides 1-D of length 4. The stride of the sliding window for each dimension @@ -856,7 +1115,6 @@ public DepthwiseConv2dNative depthwiseConv2dNative(Operan /** * Computes the gradients of depthwise convolution with respect to the filter. * - * @param data type for {@code output} output * @param input 4-D with shape based on {@code data_format}. For example, if * {@code data_format} is 'NHWC' then {@code input} is a 4-D {@code [batch, in_height, in_width, in_channels]} tensor. * @param filterSizes An integer vector representing the tensor shape of {@code filter}, @@ -882,7 +1140,6 @@ public DepthwiseConv2dNativeBackpropFilter depthwiseConv2 /** * Computes the gradients of depthwise convolution with respect to the input. * - * @param data type for {@code output} output * @param inputSizes An integer vector representing the shape of {@code input}, based * on {@code data_format}. For example, if {@code data_format} is 'NHWC' then * {@code input} is a 4-D {@code [batch, height, width, channels]} tensor. @@ -929,7 +1186,6 @@ public DepthwiseConv2dNativeBackpropInput depthwiseConv2d *

Note on duality: The dilation of {@code input} by the {@code filter} is equal to the * negation of the erosion of {@code -input} by the reflected {@code filter}. * - * @param data type for {@code output} output * @param input 4-D with shape {@code [batch, in_height, in_width, depth]}. * @param filter 3-D with shape {@code [filter_height, filter_width, depth]}. * @param strides The stride of the sliding window for each dimension of the input @@ -948,7 +1204,6 @@ public Dilation2d dilation2d(Operand input, Operand /** * Computes the gradient of morphological 2-D dilation with respect to the filter. * - * @param data type for {@code filter_backprop} output * @param input 4-D with shape {@code [batch, in_height, in_width, depth]}. * @param filter 3-D with shape {@code [filter_height, filter_width, depth]}. * @param outBackprop 4-D with shape {@code [batch, out_height, out_width, depth]}. @@ -969,7 +1224,6 @@ public Dilation2dBackpropFilter dilation2dBackpropFilter( /** * Computes the gradient of morphological 2-D dilation with respect to the input. * - * @param data type for {@code in_backprop} output * @param input 4-D with shape {@code [batch, in_height, in_width, depth]}. * @param filter 3-D with shape {@code [filter_height, filter_width, depth]}. * @param outBackprop 4-D with shape {@code [batch, out_height, out_width, depth]}. @@ -1010,7 +1264,6 @@ public Dilation2dBackpropInput dilation2dBackpropInput(Op *

See Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) * * - * @param data type for {@code activations} output * @param features The features value * @param data type for {@code Elu} output and operands * @return a new instance of Elu @@ -1019,6 +1272,18 @@ public Elu elu(Operand features) { return Elu.create(scope, features); } + /** + * Computes gradients for the exponential linear (Elu) operation. + * + * @param gradients The backpropagated gradients to the corresponding Elu operation. + * @param outputs The outputs of the corresponding Elu operation. + * @param data type for {@code EluGrad} output and operands + * @return a new instance of EluGrad + */ + public EluGrad eluGrad(Operand gradients, Operand outputs) { + return EluGrad.create(scope, gradients, outputs); + } + /** * Generates labels for candidate sampling with a learned unigram distribution. * A unigram sampler could use a fixed unigram distribution read from a @@ -1057,7 +1322,6 @@ public FixedUnigramCandidateSampler fixedUnigramCandidateSampler(Operand * generated, a mean operation is performed instead of a max operation in each * pooling region. * - * @param data type for {@code output} output * @param value 4-D with shape {@code [batch, height, width, channels]}. * @param poolingRatio Pooling ratio for each dimension of {@code value}, currently only * supports row and col dimension and should be >= 1.0. For example, a valid @@ -1074,6 +1338,32 @@ public FractionalAvgPool fractionalAvgPool(Operand val return FractionalAvgPool.create(scope, value, poolingRatio, options); } + /** + * Computes gradient of the FractionalAvgPool function. + * Unlike FractionalMaxPoolGrad, we don't need to find arg_max for + * FractionalAvgPoolGrad, we just need to evenly back-propagate each element of + * out_backprop to those indices that form the same pooling cell. Therefore, we + * just need to know the shape of original input tensor, instead of the whole + * tensor. 
+ * + * @param origInputTensorShape Original input tensor shape for {@code fractional_avg_pool} + * @param outBackprop 4-D with shape {@code [batch, height, width, channels]}. Gradients + * w.r.t. the output of {@code fractional_avg_pool}. + * @param rowPoolingSequence row pooling sequence, form pooling region with + * col_pooling_sequence. + * @param colPoolingSequence column pooling sequence, form pooling region with + * row_pooling sequence. + * @param options carries optional attribute values + * @param data type for {@code FractionalAvgPoolGrad} output and operands + * @return a new instance of FractionalAvgPoolGrad + */ + public FractionalAvgPoolGrad fractionalAvgPoolGrad( + Operand origInputTensorShape, Operand outBackprop, + Operand rowPoolingSequence, Operand colPoolingSequence, + FractionalAvgPoolGrad.Options... options) { + return FractionalAvgPoolGrad.create(scope, origInputTensorShape, outBackprop, rowPoolingSequence, colPoolingSequence, options); + } + /** * Performs fractional max pooling on the input. * Fractional max pooling is slightly different than regular max pooling. In @@ -1103,7 +1393,6 @@ public FractionalAvgPool fractionalAvgPool(Operand val *

For more details on fractional max pooling, see this paper: * Benjamin Graham, Fractional Max-Pooling * - * @param data type for {@code output} output * @param value 4-D with shape {@code [batch, height, width, channels]}. * @param poolingRatio Pooling ratio for each dimension of {@code value}, currently only * supports row and col dimension and should be >= 1.0. For example, a valid @@ -1120,13 +1409,32 @@ public FractionalMaxPool fractionalMaxPool(Operand val return FractionalMaxPool.create(scope, value, poolingRatio, options); } + /** + * Computes gradient of the FractionalMaxPool function. + * + * @param origInput Original input for {@code fractional_max_pool} + * @param origOutput Original output for {@code fractional_max_pool} + * @param outBackprop 4-D with shape {@code [batch, height, width, channels]}. Gradients + * w.r.t. the output of {@code fractional_max_pool}. + * @param rowPoolingSequence row pooling sequence, form pooling region with + * col_pooling_sequence. + * @param colPoolingSequence column pooling sequence, form pooling region with + * row_pooling sequence. + * @param options carries optional attribute values + * @param data type for {@code FractionalMaxPoolGrad} output and operands + * @return a new instance of FractionalMaxPoolGrad + */ + public FractionalMaxPoolGrad fractionalMaxPoolGrad(Operand origInput, + Operand origOutput, Operand outBackprop, Operand rowPoolingSequence, + Operand colPoolingSequence, FractionalMaxPoolGrad.Options... options) { + return FractionalMaxPoolGrad.create(scope, origInput, origOutput, outBackprop, rowPoolingSequence, colPoolingSequence, options); + } + /** * Batch normalization. * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". * The size of 1D Tensors matches the dimension C of the 4D Tensors. * - * @param data type for {@code y} output - * @param data type for {@code batch_mean} output * @param x A 4D Tensor for input data. 
* @param scale A 1D Tensor for scaling factor, to scale the normalized x. * @param offset A 1D Tensor for offset, to shift to the normalized x. @@ -1150,8 +1458,6 @@ public FusedBatchNorm fusedBatchNor * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". * The size of 1D Tensors matches the dimension C of the 4D Tensors. * - * @param data type for {@code x_backprop} output - * @param data type for {@code scale_backprop} output * @param yBackprop A 4D Tensor for the gradient with respect to y. * @param x A 4D Tensor for input data. * @param scale A 1D Tensor for scaling factor, to scale the normalized x. @@ -1192,7 +1498,6 @@ public FusedBatchNormGrad fusedBatc * will block if multiple versions are being run in parallel. This is because this * operator is primarily an optimization to minimize memory usage. * - * @param data type for {@code output} output * @param input 4-D with shape {@code [batch, in_height, in_width, in_channels]}. * @param paddings A two-column matrix specifying the padding sizes. The number of * rows must be the same as the rank of {@code input}. @@ -1224,7 +1529,6 @@ public FusedPadConv2d fusedPadConv2d(Operand input, * will block if multiple versions are being run in parallel. This is because this * operator is primarily an optimization to minimize memory usage. * - * @param data type for {@code output} output * @param input 4-D with shape {@code [batch, in_height, in_width, in_channels]}. * @param sizeOutput A 1-D int32 Tensor of 2 elements: {@code new_height, new_width}. The * new size for the images. @@ -1246,6 +1550,156 @@ public FusedResizeAndPadConv2d fusedResizeAndPadConv2d(Op return FusedResizeAndPadConv2d.create(scope, input, sizeOutput, paddings, filter, mode, strides, padding, options); } + /** + * Computes the GRU cell forward propagation for 1 time step. + * Args + * x: Input to the GRU cell. + * h_prev: State input from the previous GRU cell. + * w_ru: Weight matrix for the reset and update gate. 
+ * w_c: Weight matrix for the cell connection gate. + * b_ru: Bias vector for the reset and update gate. + * b_c: Bias vector for the cell connection gate. + *

Returns + * r: Output of the reset gate. + * u: Output of the update gate. + * c: Output of the cell connection gate. + * h: Current state of the GRU cell. + *

Note on notation of the variables: + *

Concatenation of a and b is represented by a_b + * Element-wise dot product of a and b is represented by ab + * Element-wise dot product is represented by \circ + * Matrix multiplication is represented by * + *

Biases are initialized with : + * {@code b_ru} - constant_initializer(1.0) + * {@code b_c} - constant_initializer(0.0) + *

This kernel op implements the following mathematical equations: + *

+   *  x_h_prev = [x, h_prev]
+   *
+   *  [r_bar u_bar] = x_h_prev * w_ru + b_ru
+   *
+   *  r = sigmoid(r_bar)
+   *  u = sigmoid(u_bar)
+   *
+   *  h_prevr = h_prev \circ r
+   *
+   *  x_h_prevr = [x h_prevr]
+   *
+   *  c_bar = x_h_prevr * w_c + b_c
+   *  c = tanh(c_bar)
+   *
+   *  h = (1-u) \circ c + u \circ h_prev
+   *  
+ * + * @param x The x value + * @param hPrev The hPrev value + * @param wRu The wRu value + * @param wC The wC value + * @param bRu The bRu value + * @param bC The bC value + * @param data type for {@code GRUBlockCell} output and operands + * @return a new instance of GRUBlockCell + */ + public GRUBlockCell gRUBlockCell(Operand x, Operand hPrev, + Operand wRu, Operand wC, Operand bRu, Operand bC) { + return GRUBlockCell.create(scope, x, hPrev, wRu, wC, bRu, bC); + } + + /** + * Computes the GRU cell back-propagation for 1 time step. + * Args + * x: Input to the GRU cell. + * h_prev: State input from the previous GRU cell. + * w_ru: Weight matrix for the reset and update gate. + * w_c: Weight matrix for the cell connection gate. + * b_ru: Bias vector for the reset and update gate. + * b_c: Bias vector for the cell connection gate. + * r: Output of the reset gate. + * u: Output of the update gate. + * c: Output of the cell connection gate. + * d_h: Gradients of the h_new wrt to objective function. + *

Returns + * d_x: Gradients of the x wrt to objective function. + * d_h_prev: Gradients of the h wrt to objective function. + * d_c_bar Gradients of the c_bar wrt to objective function. + * d_r_bar_u_bar Gradients of the r_bar & u_bar wrt to objective function. + *

This kernel op implements the following mathematical equations: + *

Note on notation of the variables: + *

Concatenation of a and b is represented by a_b + * Element-wise dot product of a and b is represented by ab + * Element-wise dot product is represented by \circ + * Matrix multiplication is represented by * + *

Additional notes for clarity: + *

{@code w_ru} can be segmented into 4 different matrices. + *

+   *  w_ru = [w_r_x w_u_x
+   *          w_r_h_prev w_u_h_prev]
+   *  
+ *

Similarly, {@code w_c} can be segmented into 2 different matrices. + *

+   *  w_c = [w_c_x w_c_h_prevr]
+   *  
+ *

Same goes for biases. + *

+   *  b_ru = [b_ru_x b_ru_h]
+   *  b_c = [b_c_x b_c_h]
+   *  
+ *

Another note on notation: + *

+   *  d_x = d_x_component_1 + d_x_component_2
+   *
+   *  where d_x_component_1 = d_r_bar * w_r_x^T + d_u_bar * w_r_x^T
+   *  and d_x_component_2 = d_c_bar * w_c_x^T
+   *
+   *  d_h_prev = d_h_prev_component_1 + d_h_prevr \circ r + d_h \circ u
+   *  where d_h_prev_componenet_1 = d_r_bar * w_r_h_prev^T + d_u_bar * w_r_h_prev^T
+   *  
+ *

Mathematics behind the Gradients below: + *

+   *  d_c_bar = d_h \circ (1-u) \circ (1-c \circ c)
+   *  d_u_bar = d_h \circ (h-c) \circ u \circ (1-u)
+   *
+   *  d_r_bar_u_bar = [d_r_bar d_u_bar]
+   *
+   *  [d_x_component_1 d_h_prev_component_1] = d_r_bar_u_bar * w_ru^T
+   *
+   *  [d_x_component_2 d_h_prevr] = d_c_bar * w_c^T
+   *
+   *  d_x = d_x_component_1 + d_x_component_2
+   *
+   *  d_h_prev = d_h_prev_component_1 + d_h_prevr \circ r + u
+   *  
+ *

Below calculation is performed in the python wrapper for the Gradients + * (not in the gradient kernel.) + *

+   *  d_w_ru = x_h_prevr^T * d_c_bar
+   *
+   *  d_w_c = x_h_prev^T * d_r_bar_u_bar
+   *
+   *  d_b_ru = sum of d_r_bar_u_bar along axis = 0
+   *
+   *  d_b_c = sum of d_c_bar along axis = 0
+   *  
+ * + * @param x The x value + * @param hPrev The hPrev value + * @param wRu The wRu value + * @param wC The wC value + * @param bRu The bRu value + * @param bC The bC value + * @param r The r value + * @param u The u value + * @param c The c value + * @param dH The dH value + * @param data type for {@code GRUBlockCellGrad} output and operands + * @return a new instance of GRUBlockCellGrad + */ + public GRUBlockCellGrad gRUBlockCellGrad(Operand x, Operand hPrev, + Operand wRu, Operand wC, Operand bRu, Operand bC, Operand r, Operand u, + Operand c, Operand dH) { + return GRUBlockCellGrad.create(scope, x, hPrev, wRu, wC, bRu, bC, r, u, c, dH); + } + /** * Says whether the targets are in the top {@code K} predictions. * This outputs a {@code batch_size} bool array, an entry {@code out[i]} is {@code true} if the @@ -1271,6 +1725,43 @@ public InTopK inTopK(Operand predictions, Operand< return InTopK.create(scope, predictions, targets, k); } + /** + * Computes the gradient for the inverse of {@code x} wrt its input. + * Specifically, {@code grad = -dy * y*y}, where {@code y = 1/x}, and {@code dy} + * is the corresponding input gradient. + * + * @param y The y value + * @param dy The dy value + * @param data type for {@code InvGrad} output and operands + * @return a new instance of InvGrad + */ + public InvGrad invGrad(Operand y, Operand dy) { + return InvGrad.create(scope, y, dy); + } + + /** + * Solves a batch of isotonic regression problems. + * + * @param input A (batch_size, dim)-tensor holding a batch of inputs. + * @return a new instance of IsotonicRegression, with default output types + */ + public IsotonicRegression isotonicRegression(Operand input) { + return IsotonicRegression.create(scope, input); + } + + /** + * Solves a batch of isotonic regression problems. + * + * @param input A (batch_size, dim)-tensor holding a batch of inputs. + * @param outputDtype Dtype of output. 
+ * @param data type for {@code IsotonicRegression} output and operands + * @return a new instance of IsotonicRegression + */ + public IsotonicRegression isotonicRegression( + Operand input, Class outputDtype) { + return IsotonicRegression.create(scope, input, outputDtype); + } + /** * L2 Loss. * Computes half the L2 norm of a tensor without the {@code sqrt}: @@ -1278,7 +1769,6 @@ public InTopK inTopK(Operand predictions, Operand< * output = sum(t ** 2) / 2 * * - * @param data type for {@code output} output * @param t Typically 2-D, but may have any dimensions. * @param data type for {@code L2Loss} output and operands * @return a new instance of L2Loss @@ -1287,10 +1777,83 @@ public L2Loss l2Loss(Operand t) { return L2Loss.create(scope, t); } + /** + * Computes the LSTM cell forward propagation for 1 time step. + * This implementation uses 1 weight matrix and 1 bias vector, and there's an + * optional peephole connection. + *

This kernel op implements the following mathematical equations: + *

+   *  xh = [x, h_prev]
+   *  [i, f, ci, o] = xh * w + b
+   *  f = f + forget_bias
+   *
+   *  if not use_peephole:
+   *    wci = wcf = wco = 0
+   *
+   *  i = sigmoid(cs_prev * wci + i)
+   *  f = sigmoid(cs_prev * wcf + f)
+   *  ci = tanh(ci)
+   *
+   *  cs = ci .* i + cs_prev .* f
+   *  cs = clip(cs, cell_clip)
+   *
+   *  o = sigmoid(cs * wco + o)
+   *  co = tanh(cs)
+   *  h = co .* o
+   *  
+ * + * @param x The input to the LSTM cell, shape (batch_size, num_inputs). + * @param csPrev Value of the cell state at previous time step. + * @param hPrev Output of the previous cell at previous time step. + * @param w The weight matrix. + * @param wci The weight matrix for input gate peephole connection. + * @param wcf The weight matrix for forget gate peephole connection. + * @param wco The weight matrix for output gate peephole connection. + * @param b The bias vector. + * @param options carries optional attribute values + * @param data type for {@code LSTMBlockCell} output and operands + * @return a new instance of LSTMBlockCell + */ + public LSTMBlockCell lSTMBlockCell(Operand x, Operand csPrev, + Operand hPrev, Operand w, Operand wci, Operand wcf, Operand wco, Operand b, + LSTMBlockCell.Options... options) { + return LSTMBlockCell.create(scope, x, csPrev, hPrev, w, wci, wcf, wco, b, options); + } + + /** + * Computes the LSTM cell backward propagation for 1 timestep. + * This implementation is to be used in conjunction of LSTMBlockCell. + * + * @param x The input to the LSTM cell, shape (batch_size, num_inputs). + * @param csPrev The previous cell state. + * @param hPrev The previous h state. + * @param w The weight matrix. + * @param wci The weight matrix for input gate peephole connection. + * @param wcf The weight matrix for forget gate peephole connection. + * @param wco The weight matrix for output gate peephole connection. + * @param b The bias vector. + * @param i The input gate. + * @param cs The cell state before the tanh. + * @param f The forget gate. + * @param o The output gate. + * @param ci The cell input. + * @param co The cell after the tanh. + * @param csGrad The current gradient of cs. + * @param hGrad The gradient of h vector. + * @param usePeephole Whether the cell uses peephole connections. 
+ * @param data type for {@code LSTMBlockCellGrad} output and operands + * @return a new instance of LSTMBlockCellGrad + */ + public LSTMBlockCellGrad lSTMBlockCellGrad(Operand x, Operand csPrev, + Operand hPrev, Operand w, Operand wci, Operand wcf, Operand wco, Operand b, + Operand i, Operand cs, Operand f, Operand o, Operand ci, Operand co, + Operand csGrad, Operand hGrad, Boolean usePeephole) { + return LSTMBlockCellGrad.create(scope, x, csPrev, hPrev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, csGrad, hGrad, usePeephole); + } + /** * Computes rectified linear: {@code max(features, features * alpha)}. * - * @param data type for {@code activations} output * @param features The features value * @param options carries optional attribute values * @param data type for {@code LeakyRelu} output and operands @@ -1342,7 +1905,6 @@ public LearnedUnigramCandidateSampler learnedUnigramCandidateSampler(OperandFor details, see Krizhevsky et al., ImageNet classification with deep * convolutional neural networks (NIPS 2012) . * - * @param data type for {@code output} output * @param input 4-D. * @param options carries optional attribute values * @param data type for {@code LRN} output and operands @@ -1353,6 +1915,22 @@ public LocalResponseNormalization localResponseNormalizat return LocalResponseNormalization.create(scope, input, options); } + /** + * Gradients for Local Response Normalization. + * + * @param inputGrads 4-D with shape {@code [batch, height, width, channels]}. + * @param inputImage 4-D with shape {@code [batch, height, width, channels]}. + * @param outputImage 4-D with shape {@code [batch, height, width, channels]}. 
+ * @param options carries optional attribute values + * @param data type for {@code LRNGrad} output and operands + * @return a new instance of LocalResponseNormalizationGrad + */ + public LocalResponseNormalizationGrad localResponseNormalizationGrad( + Operand inputGrads, Operand inputImage, Operand outputImage, + LocalResponseNormalizationGrad.Options... options) { + return LocalResponseNormalizationGrad.create(scope, inputGrads, inputImage, outputImage, options); + } + /** * Computes log softmax activations. * For each batch {@code i} and class {@code j} we have @@ -1360,7 +1938,6 @@ public LocalResponseNormalization localResponseNormalizat * logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) * * - * @param data type for {@code logsoftmax} output * @param logits 2-D with shape {@code [batch_size, num_classes]}. * @param data type for {@code LogSoftmax} output and operands * @return a new instance of LogSoftmax @@ -1372,7 +1949,6 @@ public LogSoftmax logSoftmax(Operand logits) { /** * Performs max pooling on the input. * - * @param data type for {@code output} output * @param input 4-D input to pool over. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the @@ -1390,7 +1966,6 @@ public MaxPool maxPool(Operand input, Operand /** * Performs 3D max pooling on the input. * - * @param data type for {@code output} output * @param input Shape {@code [batch, depth, rows, cols, channels]} tensor to pool over. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of * the input tensor. Must have {@code ksize[0] = ksize[4] = 1}. @@ -1409,7 +1984,6 @@ public MaxPool3d maxPool3d(Operand input, List k /** * Computes gradients of 3D max pooling function. * - * @param data type for {@code output} output * @param origInput The original input tensor. * @param origOutput The original output tensor. 
* @param grad Output backprop of shape {@code [batch, depth, rows, cols, channels]}. @@ -1432,7 +2006,6 @@ public MaxPool3dGrad maxPool3dGrad(Ope /** * Computes second-order gradients of the maxpooling function. * - * @param data type for {@code output} output * @param origInput The original input tensor. * @param origOutput The original output tensor. * @param grad Output backprop of shape {@code [batch, depth, rows, cols, channels]}. @@ -1454,7 +2027,6 @@ public MaxPool3dGradGrad maxPool3dGradGrad(Operand ori /** * Computes gradients of the maxpooling function. * - * @param data type for {@code output} output * @param origInput The original input tensor. * @param origOutput The original output tensor. * @param grad 4-D. Gradients w.r.t. the output of {@code max_pool}. @@ -1475,7 +2047,6 @@ public MaxPoolGrad maxPoolGrad(Operand origInput, Oper /** * Computes second-order gradients of the maxpooling function. * - * @param data type for {@code output} output * @param origInput The original input tensor. * @param origOutput The original output tensor. * @param grad 4-D. Gradients of gradients w.r.t. the input of {@code max_pool}. @@ -1496,7 +2067,6 @@ public MaxPoolGradGrad maxPoolGradGrad(Operand origInp /** * Computes second-order gradients of the maxpooling function. * - * @param data type for {@code output} output * @param input The original input. * @param grad 4-D with shape {@code [batch, height, width, channels]}. Gradients w.r.t. the * input of {@code max_pool}. @@ -1515,6 +2085,27 @@ public MaxPoolGradGradWithArgmax maxPoolGradGradWithArgma return MaxPoolGradGradWithArgmax.create(scope, input, grad, argmax, ksize, strides, padding, options); } + /** + * Computes gradients of the maxpooling function. + * + * @param input The original input. + * @param grad 4-D with shape {@code [batch, height, width, channels]}. Gradients w.r.t. the + * output of {@code max_pool}. 
+ * @param argmax The indices of the maximum values chosen for each output of {@code max_pool}. + * @param ksize The size of the window for each dimension of the input tensor. + * @param strides The stride of the sliding window for each dimension of the + * input tensor. + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for {@code MaxPoolGradWithArgmax} output and operands + * @return a new instance of MaxPoolGradWithArgmax + */ + public MaxPoolGradWithArgmax maxPoolGradWithArgmax(Operand input, + Operand grad, Operand argmax, List ksize, List strides, + String padding, MaxPoolGradWithArgmax.Options... options) { + return MaxPoolGradWithArgmax.create(scope, input, grad, argmax, ksize, strides, padding, options); + } + /** * Performs max pooling on the input and outputs both max values and indices. * The indices in {@code argmax} are flattened, so that a maximum value at position @@ -1526,8 +2117,6 @@ public MaxPoolGradGradWithArgmax maxPoolGradGradWithArgma * (either negative or too large). This is a bug, but fixing it is difficult to do * in a safe backwards compatible way, especially due to flattening. * - * @param data type for {@code output} output - * @param data type for {@code argmax} output * @param input 4-D with shape {@code [batch, height, width, channels]}. Input to pool over. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the @@ -1553,8 +2142,6 @@ public MaxPoolWithArgmax maxPoolWithArgmax(Operan * (either negative or too large). This is a bug, but fixing it is difficult to do * in a safe backwards compatible way, especially due to flattening. * - * @param data type for {@code output} output - * @param data type for {@code argmax} output * @param input 4-D with shape {@code [batch, height, width, channels]}. Input to pool over. 
* @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the @@ -1582,7 +2169,6 @@ public MaxPoolWithArgmax maxPoolWit * values.shape = input.shape[:-1] * * - * @param data type for {@code values} output * @param input 1-D or higher with last dimension at least {@code n+1}. * @param n 0-D. Position of sorted vector to select along the last dimension (along * each row for matrices). Valid range of n is {@code [0, input.shape[:-1])} @@ -1598,7 +2184,6 @@ public NthElement nthElement(Operand input, Operand data type for {@code output} output * @param input 4-D with shape {@code [batch, height, width, channels]}. * @param minInput The float value that the lowest quantized input value represents. * @param maxInput The float value that the highest quantized input value represents. @@ -1621,7 +2206,6 @@ public QuantizedAvgPool quantizedAvgPool(Operand input * This op is deprecated and will be removed in the future. Prefer * {@code tf.nn.batch_normalization}. * - * @param data type for {@code result} output * @param t A 4D input Tensor. * @param tMin The value represented by the lowest quantized input. * @param tMax The value represented by the highest quantized input. @@ -1665,7 +2249,6 @@ public QuantizedBatchNormWithGlobalNormal * Adds Tensor 'bias' to Tensor 'input' for Quantized types. * Broadcasts the values of bias on dimensions 0..N-2 of 'input'. * - * @param data type for {@code output} output * @param input The input value * @param bias A 1D bias Tensor with size matching the last dimension of 'input'. * @param minInput The float value that the lowest quantized input value represents. 
@@ -1682,6 +2265,306 @@ public QuantizedBiasAdd quantizedBiasAdd(Operand data type for {@code QuantizedConv2DAndRelu} output and operands + * @return a new instance of QuantizedConv2DAndRelu + */ + public QuantizedConv2DAndRelu quantizedConv2DAndRelu( + Operand input, Operand filter, + Operand minInput, Operand maxInput, Operand minFilter, + Operand maxFilter, Class outType, List strides, String padding, + QuantizedConv2DAndRelu.Options... options) { + return QuantizedConv2DAndRelu.create(scope, input, filter, minInput, maxInput, minFilter, maxFilter, outType, strides, padding, options); + } + + /** + * The QuantizedConv2DAndReluAndRequantize operation + * + * @param input The input value + * @param filter The filter value + * @param minInput The minInput value + * @param maxInput The maxInput value + * @param minFilter The minFilter value + * @param maxFilter The maxFilter value + * @param minFreezedOutput The minFreezedOutput value + * @param maxFreezedOutput The maxFreezedOutput value + * @param outType The value of the outType attribute + * @param strides The value of the strides attribute + * @param padding The value of the padding attribute + * @param options carries optional attribute values + * @param data type for {@code QuantizedConv2DAndReluAndRequantize} output and operands + * @return a new instance of QuantizedConv2DAndReluAndRequantize + */ + public QuantizedConv2DAndReluAndRequantize quantizedConv2DAndReluAndRequantize( + Operand input, Operand filter, + Operand minInput, Operand maxInput, Operand minFilter, + Operand maxFilter, Operand minFreezedOutput, + Operand maxFreezedOutput, Class outType, List strides, String padding, + QuantizedConv2DAndReluAndRequantize.Options... 
options) { + return QuantizedConv2DAndReluAndRequantize.create(scope, input, filter, minInput, maxInput, minFilter, maxFilter, minFreezedOutput, maxFreezedOutput, outType, strides, padding, options); + } + + /** + * The QuantizedConv2DAndRequantize operation + * + * @param input The input value + * @param filter The filter value + * @param minInput The minInput value + * @param maxInput The maxInput value + * @param minFilter The minFilter value + * @param maxFilter The maxFilter value + * @param minFreezedOutput The minFreezedOutput value + * @param maxFreezedOutput The maxFreezedOutput value + * @param outType The value of the outType attribute + * @param strides The value of the strides attribute + * @param padding The value of the padding attribute + * @param options carries optional attribute values + * @param data type for {@code QuantizedConv2DAndRequantize} output and operands + * @return a new instance of QuantizedConv2DAndRequantize + */ + public QuantizedConv2DAndRequantize quantizedConv2DAndRequantize( + Operand input, Operand filter, + Operand minInput, Operand maxInput, Operand minFilter, + Operand maxFilter, Operand minFreezedOutput, + Operand maxFreezedOutput, Class outType, List strides, String padding, + QuantizedConv2DAndRequantize.Options... options) { + return QuantizedConv2DAndRequantize.create(scope, input, filter, minInput, maxInput, minFilter, maxFilter, minFreezedOutput, maxFreezedOutput, outType, strides, padding, options); + } + + /** + * Computes QuantizedConv2D per channel. + * + * @param input The original input tensor. + * @param filter The original filter tensor. + * @param minInput The minimum value of the input tensor + * @param maxInput The maximum value of the input tensor. + * @param minFilter The minimum value of the filter tensor. + * @param maxFilter The maximum value of the filter tensor. + * @param outType The quantized type of output tensor that needs to be converted. + * @param strides list of stride values. 
+ * @param padding The value of the padding attribute + * @param options carries optional attribute values + * @param data type for {@code QuantizedConv2DPerChannel} output and operands + * @return a new instance of QuantizedConv2DPerChannel + */ + public QuantizedConv2DPerChannel quantizedConv2DPerChannel( + Operand input, Operand filter, + Operand minInput, Operand maxInput, Operand minFilter, + Operand maxFilter, Class outType, List strides, String padding, + QuantizedConv2DPerChannel.Options... options) { + return QuantizedConv2DPerChannel.create(scope, input, filter, minInput, maxInput, minFilter, maxFilter, outType, strides, padding, options); + } + + /** + * The QuantizedConv2DWithBias operation + * + * @param input The input value + * @param filter The filter value + * @param bias The bias value + * @param minInput The minInput value + * @param maxInput The maxInput value + * @param minFilter The minFilter value + * @param maxFilter The maxFilter value + * @param outType The value of the outType attribute + * @param strides The value of the strides attribute + * @param padding The value of the padding attribute + * @param options carries optional attribute values + * @param data type for {@code QuantizedConv2DWithBias} output and operands + * @return a new instance of QuantizedConv2DWithBias + */ + public QuantizedConv2DWithBias quantizedConv2DWithBias( + Operand input, Operand filter, Operand bias, + Operand minInput, Operand maxInput, Operand minFilter, + Operand maxFilter, Class outType, List strides, String padding, + QuantizedConv2DWithBias.Options... 
options) { + return QuantizedConv2DWithBias.create(scope, input, filter, bias, minInput, maxInput, minFilter, maxFilter, outType, strides, padding, options); + } + + /** + * The QuantizedConv2DWithBiasAndRelu operation + * + * @param input The input value + * @param filter The filter value + * @param bias The bias value + * @param minInput The minInput value + * @param maxInput The maxInput value + * @param minFilter The minFilter value + * @param maxFilter The maxFilter value + * @param outType The value of the outType attribute + * @param strides The value of the strides attribute + * @param padding The value of the padding attribute + * @param options carries optional attribute values + * @param data type for {@code QuantizedConv2DWithBiasAndRelu} output and operands + * @return a new instance of QuantizedConv2DWithBiasAndRelu + */ + public QuantizedConv2DWithBiasAndRelu quantizedConv2DWithBiasAndRelu( + Operand input, Operand filter, Operand bias, + Operand minInput, Operand maxInput, Operand minFilter, + Operand maxFilter, Class outType, List strides, String padding, + QuantizedConv2DWithBiasAndRelu.Options... 
options) { + return QuantizedConv2DWithBiasAndRelu.create(scope, input, filter, bias, minInput, maxInput, minFilter, maxFilter, outType, strides, padding, options); + } + + /** + * The QuantizedConv2DWithBiasAndReluAndRequantize operation + * + * @param input The input value + * @param filter The filter value + * @param bias The bias value + * @param minInput The minInput value + * @param maxInput The maxInput value + * @param minFilter The minFilter value + * @param maxFilter The maxFilter value + * @param minFreezedOutput The minFreezedOutput value + * @param maxFreezedOutput The maxFreezedOutput value + * @param outType The value of the outType attribute + * @param strides The value of the strides attribute + * @param padding The value of the padding attribute + * @param options carries optional attribute values + * @param data type for {@code QuantizedConv2DWithBiasAndReluAndRequantize} output and operands + * @return a new instance of QuantizedConv2DWithBiasAndReluAndRequantize + */ + public QuantizedConv2DWithBiasAndReluAndRequantize quantizedConv2DWithBiasAndReluAndRequantize( + Operand input, Operand filter, + Operand bias, Operand minInput, Operand maxInput, + Operand minFilter, Operand maxFilter, Operand minFreezedOutput, + Operand maxFreezedOutput, Class outType, List strides, String padding, + QuantizedConv2DWithBiasAndReluAndRequantize.Options... 
options) { + return QuantizedConv2DWithBiasAndReluAndRequantize.create(scope, input, filter, bias, minInput, maxInput, minFilter, maxFilter, minFreezedOutput, maxFreezedOutput, outType, strides, padding, options); + } + + /** + * The QuantizedConv2DWithBiasAndRequantize operation + * + * @param input The input value + * @param filter The filter value + * @param bias The bias value + * @param minInput The minInput value + * @param maxInput The maxInput value + * @param minFilter The minFilter value + * @param maxFilter The maxFilter value + * @param minFreezedOutput The minFreezedOutput value + * @param maxFreezedOutput The maxFreezedOutput value + * @param outType The value of the outType attribute + * @param strides The value of the strides attribute + * @param padding The value of the padding attribute + * @param options carries optional attribute values + * @param data type for {@code QuantizedConv2DWithBiasAndRequantize} output and operands + * @return a new instance of QuantizedConv2DWithBiasAndRequantize + */ + public QuantizedConv2DWithBiasAndRequantize quantizedConv2DWithBiasAndRequantize( + Operand input, Operand filter, + Operand bias, Operand minInput, Operand maxInput, + Operand minFilter, Operand maxFilter, Operand minFreezedOutput, + Operand maxFreezedOutput, Class outType, List strides, String padding, + QuantizedConv2DWithBiasAndRequantize.Options... 
options) { + return QuantizedConv2DWithBiasAndRequantize.create(scope, input, filter, bias, minInput, maxInput, minFilter, maxFilter, minFreezedOutput, maxFreezedOutput, outType, strides, padding, options); + } + + /** + * The QuantizedConv2DWithBiasSignedSumAndReluAndRequantize operation + * + * @param input The input value + * @param filter The filter value + * @param bias The bias value + * @param minInput The minInput value + * @param maxInput The maxInput value + * @param minFilter The minFilter value + * @param maxFilter The maxFilter value + * @param minFreezedOutput The minFreezedOutput value + * @param maxFreezedOutput The maxFreezedOutput value + * @param summand The summand value + * @param minSummand The minSummand value + * @param maxSummand The maxSummand value + * @param outType The value of the outType attribute + * @param strides The value of the strides attribute + * @param padding The value of the padding attribute + * @param options carries optional attribute values + * @param data type for {@code QuantizedConv2DWithBiasSignedSumAndReluAndRequantize} output and operands + * @return a new instance of QuantizedConv2DWithBiasSignedSumAndReluAndRequantize + */ + public QuantizedConv2DWithBiasSignedSumAndReluAndRequantize quantizedConv2DWithBiasSignedSumAndReluAndRequantize( + Operand input, Operand filter, + Operand bias, Operand minInput, Operand maxInput, + Operand minFilter, Operand maxFilter, Operand minFreezedOutput, + Operand maxFreezedOutput, Operand summand, + Operand minSummand, Operand maxSummand, Class outType, + List strides, String padding, + QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.Options... 
options) { + return QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.create(scope, input, filter, bias, minInput, maxInput, minFilter, maxFilter, minFreezedOutput, maxFreezedOutput, summand, minSummand, maxSummand, outType, strides, padding, options); + } + + /** + * The QuantizedConv2DWithBiasSumAndRelu operation + * + * @param input The input value + * @param filter The filter value + * @param bias The bias value + * @param minInput The minInput value + * @param maxInput The maxInput value + * @param minFilter The minFilter value + * @param maxFilter The maxFilter value + * @param summand The summand value + * @param outType The value of the outType attribute + * @param strides The value of the strides attribute + * @param padding The value of the padding attribute + * @param options carries optional attribute values + * @param data type for {@code QuantizedConv2DWithBiasSumAndRelu} output and operands + * @return a new instance of QuantizedConv2DWithBiasSumAndRelu + */ + public QuantizedConv2DWithBiasSumAndRelu quantizedConv2DWithBiasSumAndRelu( + Operand input, Operand filter, Operand bias, + Operand minInput, Operand maxInput, Operand minFilter, + Operand maxFilter, Operand summand, Class outType, List strides, + String padding, QuantizedConv2DWithBiasSumAndRelu.Options... 
options) { + return QuantizedConv2DWithBiasSumAndRelu.create(scope, input, filter, bias, minInput, maxInput, minFilter, maxFilter, summand, outType, strides, padding, options); + } + + /** + * The QuantizedConv2DWithBiasSumAndReluAndRequantize operation + * + * @param input The input value + * @param filter The filter value + * @param bias The bias value + * @param minInput The minInput value + * @param maxInput The maxInput value + * @param minFilter The minFilter value + * @param maxFilter The maxFilter value + * @param minFreezedOutput The minFreezedOutput value + * @param maxFreezedOutput The maxFreezedOutput value + * @param summand The summand value + * @param minSummand The minSummand value + * @param maxSummand The maxSummand value + * @param outType The value of the outType attribute + * @param strides The value of the strides attribute + * @param padding The value of the padding attribute + * @param options carries optional attribute values + * @param data type for {@code QuantizedConv2DWithBiasSumAndReluAndRequantize} output and operands + * @return a new instance of QuantizedConv2DWithBiasSumAndReluAndRequantize + */ + public QuantizedConv2DWithBiasSumAndReluAndRequantize quantizedConv2DWithBiasSumAndReluAndRequantize( + Operand input, Operand filter, + Operand bias, Operand minInput, Operand maxInput, + Operand minFilter, Operand maxFilter, Operand minFreezedOutput, + Operand maxFreezedOutput, Operand summand, + Operand minSummand, Operand maxSummand, Class outType, + List strides, String padding, + QuantizedConv2DWithBiasSumAndReluAndRequantize.Options... options) { + return QuantizedConv2DWithBiasSumAndReluAndRequantize.create(scope, input, filter, bias, minInput, maxInput, minFilter, maxFilter, minFreezedOutput, maxFreezedOutput, summand, minSummand, maxSummand, outType, strides, padding, options); + } + /** * Computes a 2D convolution given quantized 4D input and filter tensors. 
* The inputs are quantized tensors where the lowest value represents the real @@ -1689,7 +2572,6 @@ public QuantizedBiasAdd quantizedBiasAdd(Operand data type for {@code output} output * @param input The input value * @param filter filter's input_depth dimension must match input's depth dimensions. * @param minInput The float value that the lowest quantized input value represents. @@ -1711,10 +2593,111 @@ public QuantizedConv2d quantizedConv2d(Operand data type for {@code QuantizedDepthwiseConv2D} output and operands + * @return a new instance of QuantizedDepthwiseConv2D + */ + public QuantizedDepthwiseConv2D quantizedDepthwiseConv2D( + Operand input, Operand filter, + Operand minInput, Operand maxInput, Operand minFilter, + Operand maxFilter, Class outType, List strides, String padding, + QuantizedDepthwiseConv2D.Options... options) { + return QuantizedDepthwiseConv2D.create(scope, input, filter, minInput, maxInput, minFilter, maxFilter, outType, strides, padding, options); + } + + /** + * Computes quantized depthwise Conv2D with Bias. + * + * @param input The original input tensor. + * @param filter The original filter tensor. + * @param bias The original bias tensor. + * @param minInput The float value that the minimum quantized input value represents. + * @param maxInput The float value that the maximum quantized input value represents. + * @param minFilter The float value that the minimum quantized filter value represents. + * @param maxFilter The float value that the maximum quantized filter value represents. + * @param outType The type of the output. + * @param strides List of stride values. 
+ * @param padding The value of the padding attribute + * @param options carries optional attribute values + * @param data type for {@code QuantizedDepthwiseConv2DWithBias} output and operands + * @return a new instance of QuantizedDepthwiseConv2DWithBias + */ + public QuantizedDepthwiseConv2DWithBias quantizedDepthwiseConv2DWithBias( + Operand input, Operand filter, Operand bias, + Operand minInput, Operand maxInput, Operand minFilter, + Operand maxFilter, Class outType, List strides, String padding, + QuantizedDepthwiseConv2DWithBias.Options... options) { + return QuantizedDepthwiseConv2DWithBias.create(scope, input, filter, bias, minInput, maxInput, minFilter, maxFilter, outType, strides, padding, options); + } + + /** + * Computes quantized depthwise Conv2D with Bias and Relu. + * + * @param input The original input tensor. + * @param filter The original filter tensor. + * @param bias The original bias tensor. + * @param minInput The float value that the minimum quantized input value represents. + * @param maxInput The float value that the maximum quantized input value represents. + * @param minFilter The float value that the minimum quantized filter value represents. + * @param maxFilter The float value that the maximum quantized filter value represents. + * @param outType The type of the output. + * @param strides List of stride values. + * @param padding The value of the padding attribute + * @param options carries optional attribute values + * @param data type for {@code QuantizedDepthwiseConv2DWithBiasAndRelu} output and operands + * @return a new instance of QuantizedDepthwiseConv2DWithBiasAndRelu + */ + public QuantizedDepthwiseConv2DWithBiasAndRelu quantizedDepthwiseConv2DWithBiasAndRelu( + Operand input, Operand filter, Operand bias, + Operand minInput, Operand maxInput, Operand minFilter, + Operand maxFilter, Class outType, List strides, String padding, + QuantizedDepthwiseConv2DWithBiasAndRelu.Options... 
options) { + return QuantizedDepthwiseConv2DWithBiasAndRelu.create(scope, input, filter, bias, minInput, maxInput, minFilter, maxFilter, outType, strides, padding, options); + } + + /** + * Computes quantized depthwise Conv2D with Bias, Relu and Requantize. + * + * @param input The original input tensor. + * @param filter The original filter tensor. + * @param bias The original bias tensor. + * @param minInput The float value that the minimum quantized input value represents. + * @param maxInput The float value that the maximum quantized input value represents. + * @param minFilter The float value that the minimum quantized filter value represents. + * @param maxFilter The float value that the maximum quantized filter value represents. + * @param minFreezedOutput The minimum float value of the output tensor. + * @param maxFreezedOutput The maximum float value of the output tensor. + * @param outType The type of the output. + * @param strides List of stride values. + * @param padding The value of the padding attribute + * @param options carries optional attribute values + * @param data type for {@code QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize} output and operands + * @return a new instance of QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize + */ + public QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize quantizedDepthwiseConv2DWithBiasAndReluAndRequantize( + Operand input, Operand filter, + Operand bias, Operand minInput, Operand maxInput, + Operand minFilter, Operand maxFilter, Operand minFreezedOutput, + Operand maxFreezedOutput, Class outType, List strides, String padding, + QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.Options... options) { + return QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.create(scope, input, filter, bias, minInput, maxInput, minFilter, maxFilter, minFreezedOutput, maxFreezedOutput, outType, strides, padding, options); + } + /** * Quantized Instance normalization. 
* - * @param data type for {@code y} output * @param x A 4D input Tensor. * @param xMin The value represented by the lowest quantized input. * @param xMax The value represented by the highest quantized input. @@ -1730,7 +2713,6 @@ public QuantizedInstanceNorm quantizedInstanceNorm(Operan /** * Produces the max pool of the input tensor for quantized types. * - * @param data type for {@code output} output * @param input The 4D (batch x rows x cols x depth) Tensor to MaxReduce over. * @param minInput The float value that the lowest quantized input value represents. * @param maxInput The float value that the highest quantized input value represents. @@ -1751,7 +2733,6 @@ public QuantizedMaxPool quantizedMaxPool(Operand input /** * Computes Quantized Rectified Linear: {@code max(features, 0)} * - * @param data type for {@code activations} output * @param features The features value * @param minFeatures The float value that the lowest quantized value represents. * @param maxFeatures The float value that the highest quantized value represents. @@ -1767,7 +2748,6 @@ public QuantizedRelu quantizedRelu(Operand data type for {@code activations} output * @param features The features value * @param minFeatures The float value that the lowest quantized value represents. * @param maxFeatures The float value that the highest quantized value represents. @@ -1783,7 +2763,6 @@ public QuantizedRelu6 quantizedRelu6(Operand data type for {@code activations} output * @param features The features value * @param maxValue The maxValue value * @param minFeatures The float value that the lowest quantized value represents. @@ -1811,7 +2790,6 @@ public QuantizedReluX quantizedReluX(Operand * * - * @param data type for {@code activations} output * @param features The features value * @param data type for {@code Relu} output and operands * @return a new instance of Relu @@ -1823,7 +2801,6 @@ public Relu relu(Operand features) { /** * Computes rectified linear 6: {@code min(max(features, 0), 6)}. 
* - * @param data type for {@code activations} output * @param features The features value * @param data type for {@code Relu6} output and operands * @return a new instance of Relu6 @@ -1832,6 +2809,32 @@ public Relu6 relu6(Operand features) { return Relu6.create(scope, features); } + /** + * Computes rectified linear 6 gradients for a Relu6 operation. + * + * @param gradients The backpropagated gradients to the corresponding Relu6 operation. + * @param features The features passed as input to the corresponding Relu6 operation, or + * its output; using either one produces the same result. + * @param data type for {@code Relu6Grad} output and operands + * @return a new instance of Relu6Grad + */ + public Relu6Grad relu6Grad(Operand gradients, Operand features) { + return Relu6Grad.create(scope, gradients, features); + } + + /** + * Computes rectified linear gradients for a Relu operation. + * + * @param gradients The backpropagated gradients to the corresponding Relu operation. + * @param features The features passed as input to the corresponding Relu operation, OR + * the outputs of that operation (both work equivalently). + * @param data type for {@code ReluGrad} output and operands + * @return a new instance of ReluGrad + */ + public ReluGrad reluGrad(Operand gradients, Operand features) { + return ReluGrad.create(scope, gradients, features); + } + /** * Computes scaled exponential linear: {@code scale * alpha * (exp(features) - 1)} * if < 0, {@code scale * features} otherwise. @@ -1840,7 +2843,6 @@ public Relu6 relu6(Operand features) { * For correct dropout, use {@code tf.contrib.nn.alpha_dropout}. *

See Self-Normalizing Neural Networks * - * @param data type for {@code activations} output * @param features The features value * @param data type for {@code Selu} output and operands * @return a new instance of Selu @@ -1849,6 +2851,18 @@ public Selu selu(Operand features) { return Selu.create(scope, features); } + /** + * Computes gradients for the scaled exponential linear (Selu) operation. + * + * @param gradients The backpropagated gradients to the corresponding Selu operation. + * @param outputs The outputs of the corresponding Selu operation. + * @param data type for {@code SeluGrad} output and operands + * @return a new instance of SeluGrad + */ + public SeluGrad seluGrad(Operand gradients, Operand outputs) { + return SeluGrad.create(scope, gradients, outputs); + } + /** * Computes softmax activations. * For each batch {@code i} and class {@code j} we have @@ -1856,7 +2870,6 @@ public Selu selu(Operand features) { * $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$ * * - * @param data type for {@code softmax} output * @param logits 2-D with shape {@code [batch_size, num_classes]}. * @param data type for {@code Softmax} output and operands * @return a new instance of Softmax @@ -1869,7 +2882,6 @@ public Softmax softmax(Operand logits) { * Computes softmax cross entropy cost and gradients to backpropagate. * Inputs are the logits, not probabilities. * - * @param data type for {@code loss} output * @param features batch_size x num_classes matrix * @param labels batch_size x num_classes matrix * The caller must ensure that each batch of labels represents a valid @@ -1885,7 +2897,6 @@ public SoftmaxCrossEntropyWithLogits softmaxCrossEntropyW /** * Computes softsign: {@code features / (abs(features) + 1)}. 
* - * @param data type for {@code activations} output * @param features The features value * @param data type for {@code Softsign} output and operands * @return a new instance of Softsign @@ -1894,6 +2905,19 @@ public Softsign softsign(Operand features) { return Softsign.create(scope, features); } + /** + * Computes softsign gradients for a softsign operation. + * + * @param gradients The backpropagated gradients to the corresponding softsign operation. + * @param features The features passed as input to the corresponding softsign operation. + * @param data type for {@code SoftsignGrad} output and operands + * @return a new instance of SoftsignGrad + */ + public SoftsignGrad softsignGrad(Operand gradients, + Operand features) { + return SoftsignGrad.create(scope, gradients, features); + } + /** * SpaceToBatch for 4-D tensors of type T. * This is a legacy version of the more general SpaceToBatchND. @@ -1961,7 +2985,6 @@ public Softsign softsign(Operand features) { *

Among others, this operation is useful for reducing atrous convolution into * regular convolution. * - * @param data type for {@code output} output * @param input 4-D with shape {@code [batch, height, width, depth]}. * @param paddings 2-D tensor of non-negative integers with shape {@code [2, 2]}. It specifies * the padding of the input with zeros across the spatial dimensions as follows: @@ -2053,7 +3076,6 @@ public SpaceToBatch spaceToBatch(Operand input, * [13, 14, 15, 16]]]] * * - * @param data type for {@code output} output * @param input The input value * @param blockSize The size of the spatial block. * @param options carries optional attribute values @@ -2073,7 +3095,6 @@ public SpaceToDepth spaceToDepth(Operand input, Long blo * given row. *

Inputs are the logits, not probabilities. * - * @param data type for {@code loss} output * @param features batch_size x num_classes matrix * @param labels batch_size vector with values in [0, num_classes). * This is the label for the given minibatch entry. @@ -2097,8 +3118,6 @@ public SparseSoftmaxCrossEntropyWithLogits sparseSoftmaxC * *

If two elements are equal, the lower-index element appears first. * - * @param data type for {@code values} output - * @param data type for {@code indices} output * @param input 1-D or higher with last dimension at least {@code k}. * @param k 0-D. Number of top elements to look for along the last dimension (along each * row for matrices). @@ -2107,7 +3126,7 @@ public SparseSoftmaxCrossEntropyWithLogits sparseSoftmaxC * @return a new instance of TopK, with default output types */ public TopK topK(Operand input, Operand k, - TopK.Options[] options) { + TopK.Options... options) { return TopK.create(scope, input, k, options); } @@ -2123,8 +3142,6 @@ public TopK topK(Operand input, Operand *

If two elements are equal, the lower-index element appears first. * - * @param data type for {@code values} output - * @param data type for {@code indices} output * @param input 1-D or higher with last dimension at least {@code k}. * @param k 0-D. Number of top elements to look for along the last dimension (along each * row for matrices). @@ -2139,6 +3156,119 @@ public TopK topK(Operand input, return TopK.create(scope, input, k, indexType, options); } + /** + * Perform quantized convolution of quantized Tensor {@code lhs} and quantized Tensor {@code rhs}. to make quantized {@code output}. + * Given quantized {@code lhs} and quantized {@code rhs}, performs quantized dot on {@code lhs} and {@code rhs} to make quantized {@code output}. + *

{@code lhs} and {@code rhs} must be Tensors of same rank, and meet following shape conditions. + *

    + *
  • {@code lhs_feature} % {@code feature_group_count} == 0
  • + *
  • {@code lhs_feature} % {@code rhs_input_feature} == 0
  • + *
  • {@code lhs_feature} / {@code feature_group_count} == {@code rhs_input_feature}
  • + *
  • {@code rhs_output_feature} % {@code feature_group_count} == 0
  • + *
  • {@code lhs_batch} % {@code batch_group_count} == 0
  • + *
  • {@code rhs_output_feature} % {@code batch_group_count} == 0
  • + *
+ *

{@code lhs} and {@code rhs} must be quantized Tensor, where data value is quantized using the formula: + *

+   *  quantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val)
+   *  
+ *

{@code output} is also quantized, using the same formula. + * If {@code rhs} is per-tensor quantized, {@code output} must be also per-tensor quantized. + * + * @param lhs Must be a quantized tensor, rank >= 3. + * @param rhs Must be a quantized tensor, same rank as {@code lhs}. + * @param lhsScales The float value(s) used as scale factors when quantizing the original data that {@code lhs} represents. + * Must be a scalar {@code Tensor} ({@code lhs} supports only per-tensor quantization). + * @param lhsZeroPoints The int32 value(s) used as zero points when quantizing original data that {@code lhs} represents. + * Same shape condition as {@code lhs_scales}. + * @param rhsScales The float value(s) used as scale factors when quantizing the original data that {@code rhs} represents. + * Must be a scalar {@code Tensor} for per-tensor quantization, + * or 1D {@code Tensor} of size {@code rhs.dim_size(kernel_output_feature_dimension)}, for per-channel quantization. + * @param rhsZeroPoints The int32 value(s) used as zero points when quantizing original data that {@code rhs} represents. + * Same shape condition as {@code rhs_scales}. + * @param outputScales The float value(s) to use as scale factors when quantizing original data that {@code output} represents. + * Must be a scalar {@code Tensor} for per-tensor quantization, + * or 1D {@code Tensor} of size {@code rhs.dim_size(kernel_output_feature_dimension)} + *

    + *
  • which is equal to {@code output.dim_size(output_feature_dimension)}, + * for per-channel quantization. + * If {@code rhs} is per-tensor quantized, output must be also per-tensor quantized. + * This means that if {@code rhs_scales} and {@code rhs_zero_points} are scalar {@code Tensor}s, {@code output_scales} and {@code output_zero_points} must be scalar {@code Tensor}s as well.
  • + *
+ * @param outputZeroPoints The int32 value(s) used as zero points when quantizing original data that output represents. + * Same shape condition as {@code output_scales}. + * @param Tout The type of {@code output} {@code Tensor}. + * @param padding string from: {@code "SAME"}, {@code "VALID"}, or {@code "EXPLICIT"}, indicating the type of padding algorithm to use. + * @param lhsQuantizationMinVal The min value of the quantized data stored in {@code lhs}. + * For example, if {@code Tin} is {@code qint8}, this must be set to -127 if narrow range quantized or -128 if not. + * @param lhsQuantizationMaxVal The max value of the quantized data stored in {@code lhs}. + * For example, if {@code Tin} is {@code qint8}, this must be set to 127. + * @param rhsQuantizationMinVal The min value of the quantized data stored in {@code rhs}. + * For example, if {@code Tin} is {@code qint8}, this must be set to -127 if narrow range quantized or -128 if not. + * @param rhsQuantizationMaxVal The max value of the quantized data stored in {@code rhs}. + * For example, if {@code Tin} is {@code qint8}, this must be set to 127. + * @param outputQuantizationMinVal The min value of the quantized data stored in {@code output}. + * For example, if {@code Tout} is {@code qint8}, this must be set to -127 if narrow range quantized or -128 if not. + * @param outputQuantizationMaxVal The max value of the quantized data stored in {@code output}. + * For example, if {@code Tout} is {@code qint8}, this must be set to 127. 
+ * @param options carries optional attribute values + * @param data type for {@code UniformQuantizedConvolution} output and operands + * @param data type for {@code UniformQuantizedConvolution} output and operands + * @return a new instance of UniformQuantizedConvolution + */ + public UniformQuantizedConvolution uniformQuantizedConvolution( + Operand lhs, Operand rhs, Operand lhsScales, Operand lhsZeroPoints, + Operand rhsScales, Operand rhsZeroPoints, Operand outputScales, + Operand outputZeroPoints, Class Tout, String padding, Long lhsQuantizationMinVal, + Long lhsQuantizationMaxVal, Long rhsQuantizationMinVal, Long rhsQuantizationMaxVal, + Long outputQuantizationMinVal, Long outputQuantizationMaxVal, + UniformQuantizedConvolution.Options... options) { + return UniformQuantizedConvolution.create(scope, lhs, rhs, lhsScales, lhsZeroPoints, rhsScales, rhsZeroPoints, outputScales, outputZeroPoints, Tout, padding, lhsQuantizationMinVal, lhsQuantizationMaxVal, rhsQuantizationMinVal, rhsQuantizationMaxVal, outputQuantizationMinVal, outputQuantizationMaxVal, options); + } + + /** + * Perform hybrid quantized convolution of float Tensor {@code lhs} and quantized Tensor {@code rhs}. + * Given float {@code lhs} and quantized {@code rhs}, internally performs quantization on {@code lhs}, + * and then performs quantized convolution on quantized {@code lhs} and {@code rhs}. + *

The internal quantization on {@code lhs} is a quantization to {@code Trhs}, dynamic range, + * per-batch (per-axis along axis {@code dimension_numbers.input_batch_dimension}), asymmetric, + * and not narrow range (the range is [Trhs_MIN, Trhs_MAX]). + *

{@code lhs} and {@code rhs} must be Tensors of same rank, and meet following shape conditions. + *

    + *
  • lhs_feature % feature_group_count == 0
  • + *
  • lhs_feature % rhs_input_feature == 0
  • + *
  • lhs_feature / feature_group_count == rhs_input_feature
  • + *
  • rhs_output_feature % feature_group_count == 0
  • + *
  • lhs_batch % batch_group_count == 0
  • + *
  • rhs_output_feature % batch_group_count == 0
  • + *
+ *

{@code rhs} must be quantized Tensor, where its data value is quantized using the formula: + * quantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val). + * + * @param lhs Must be a non-quantized Tensor of {@code Tlhs}, rank >= 3. + * @param rhs Must be a quantized Tensor of {@code Trhs}, same rank as {@code lhs}. + * @param rhsScales The float value(s) used as scale factors when quantizing the original data that {@code rhs} represents. + * Must be a scalar Tensor for per-tensor quantization, + * or 1D Tensor of size {@code rhs.dim_size(kernel_output_feature_dimension)}, for per-channel quantization. + * @param rhsZeroPoints The int32 value(s) used as zero_point when quantizing original data that {@code rhs} represents. + * Same shape condition as {@code rhs_scales}. + * @param Tout The type of output Tensor. + * @param padding string from: {@code "SAME"}, {@code "VALID"}, or {@code "EXPLICIT"}, indicating the type of padding algorithm to use. + * @param rhsQuantizationMinVal The min value of the quantized data stored in {@code rhs}. + * For example, if {@code Trhs} is qint8, this must be set to -127 if narrow range quantized or -128 if not. + * @param rhsQuantizationMaxVal The max value of the quantized data stored in {@code rhs}. + * For example, if {@code Trhs} is qint8, this must be set to 127. + * @param options carries optional attribute values + * @param data type for {@code UniformQuantizedConvolutionHybrid} output and operands + * @return a new instance of UniformQuantizedConvolutionHybrid + */ + public UniformQuantizedConvolutionHybrid uniformQuantizedConvolutionHybrid( + Operand lhs, Operand rhs, Operand rhsScales, + Operand rhsZeroPoints, Class Tout, String padding, Long rhsQuantizationMinVal, + Long rhsQuantizationMaxVal, UniformQuantizedConvolutionHybrid.Options... 
options) { + return UniformQuantizedConvolutionHybrid.create(scope, lhs, rhs, rhsScales, rhsZeroPoints, Tout, padding, rhsQuantizationMinVal, rhsQuantizationMaxVal, options); + } + /** * Get the parent {@link Ops} object. */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java index 5dd7f842b47..b9f5cd836f6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java @@ -27,6 +27,7 @@ import org.tensorflow.ExecutionEnvironment; import org.tensorflow.Operand; import org.tensorflow.Operation; +import org.tensorflow.Tensor; import org.tensorflow.ndarray.BooleanNdArray; import org.tensorflow.ndarray.ByteNdArray; import org.tensorflow.ndarray.DoubleNdArray; @@ -45,6 +46,10 @@ import org.tensorflow.ndarray.index.Index; import org.tensorflow.op.core.Abort; import org.tensorflow.op.core.All; +import org.tensorflow.op.core.AnonymousHashTable; +import org.tensorflow.op.core.AnonymousMutableDenseHashTable; +import org.tensorflow.op.core.AnonymousMutableHashTable; +import org.tensorflow.op.core.AnonymousMutableHashTableOfTensors; import org.tensorflow.op.core.Any; import org.tensorflow.op.core.ApproxTopK; import org.tensorflow.op.core.AssertThat; @@ -68,11 +73,16 @@ import org.tensorflow.op.core.BooleanMask; import org.tensorflow.op.core.BooleanMaskUpdate; import org.tensorflow.op.core.BroadcastDynamicShape; +import org.tensorflow.op.core.BroadcastGradientArgs; import org.tensorflow.op.core.BroadcastTo; import org.tensorflow.op.core.Bucketize; import org.tensorflow.op.core.Case; +import org.tensorflow.op.core.CheckPinned; import org.tensorflow.op.core.ClipByValue; +import org.tensorflow.op.core.CompositeTensorVariantFromComponents; +import org.tensorflow.op.core.CompositeTensorVariantToComponents; import 
org.tensorflow.op.core.Concat; +import org.tensorflow.op.core.ConcatOffset; import org.tensorflow.op.core.Constant; import org.tensorflow.op.core.ConsumeMutexLock; import org.tensorflow.op.core.ControlTrigger; @@ -84,6 +94,8 @@ import org.tensorflow.op.core.DeleteSessionTensor; import org.tensorflow.op.core.DestroyResourceOp; import org.tensorflow.op.core.DestroyTemporaryVariable; +import org.tensorflow.op.core.DeviceIndex; +import org.tensorflow.op.core.DummyMemoryCache; import org.tensorflow.op.core.DynamicPartition; import org.tensorflow.op.core.DynamicStitch; import org.tensorflow.op.core.EditDistance; @@ -92,14 +104,20 @@ import org.tensorflow.op.core.EmptyTensorMap; import org.tensorflow.op.core.EncodeProto; import org.tensorflow.op.core.EnsureShape; +import org.tensorflow.op.core.Enter; +import org.tensorflow.op.core.Exit; import org.tensorflow.op.core.ExpandDims; import org.tensorflow.op.core.ExtractVolumePatches; +import org.tensorflow.op.core.FakeParam; +import org.tensorflow.op.core.FileSystemSetConfiguration; import org.tensorflow.op.core.Fill; import org.tensorflow.op.core.Fingerprint; import org.tensorflow.op.core.For; import org.tensorflow.op.core.Function; import org.tensorflow.op.core.Gather; import org.tensorflow.op.core.GatherNd; +import org.tensorflow.op.core.GetElementAtIndex; +import org.tensorflow.op.core.GetOptions; import org.tensorflow.op.core.GetSessionHandle; import org.tensorflow.op.core.GetSessionTensor; import org.tensorflow.op.core.Gradients; @@ -107,6 +125,7 @@ import org.tensorflow.op.core.HashTable; import org.tensorflow.op.core.Helpers; import org.tensorflow.op.core.HistogramFixedWidth; +import org.tensorflow.op.core.HostConst; import org.tensorflow.op.core.Identity; import org.tensorflow.op.core.IdentityN; import org.tensorflow.op.core.If; @@ -118,14 +137,18 @@ import org.tensorflow.op.core.InplaceUpdate; import org.tensorflow.op.core.IsVariableInitialized; import org.tensorflow.op.core.KthOrderStatistic; +import 
org.tensorflow.op.core.LinSpace; import org.tensorflow.op.core.LookupTableExport; import org.tensorflow.op.core.LookupTableFind; import org.tensorflow.op.core.LookupTableImport; import org.tensorflow.op.core.LookupTableInsert; +import org.tensorflow.op.core.LookupTableRemove; import org.tensorflow.op.core.LookupTableSize; import org.tensorflow.op.core.LoopCond; +import org.tensorflow.op.core.LowerBound; import org.tensorflow.op.core.MakeUnique; import org.tensorflow.op.core.MapClear; +import org.tensorflow.op.core.MapDefun; import org.tensorflow.op.core.MapIncompleteSize; import org.tensorflow.op.core.MapPeek; import org.tensorflow.op.core.MapSize; @@ -136,12 +159,16 @@ import org.tensorflow.op.core.Merge; import org.tensorflow.op.core.Min; import org.tensorflow.op.core.MirrorPad; +import org.tensorflow.op.core.MirrorPadGrad; import org.tensorflow.op.core.MlirPassthroughOp; import org.tensorflow.op.core.MutableDenseHashTable; import org.tensorflow.op.core.MutableHashTable; import org.tensorflow.op.core.MutableHashTableOfTensors; import org.tensorflow.op.core.Mutex; import org.tensorflow.op.core.MutexLock; +import org.tensorflow.op.core.NcclAllReduce; +import org.tensorflow.op.core.NcclBroadcast; +import org.tensorflow.op.core.NcclReduce; import org.tensorflow.op.core.NextIteration; import org.tensorflow.op.core.NoOp; import org.tensorflow.op.core.OneHot; @@ -167,12 +194,17 @@ import org.tensorflow.op.core.Range; import org.tensorflow.op.core.Rank; import org.tensorflow.op.core.ReadVariableOp; +import org.tensorflow.op.core.Recv; import org.tensorflow.op.core.ReduceAll; import org.tensorflow.op.core.ReduceAny; import org.tensorflow.op.core.ReduceMax; import org.tensorflow.op.core.ReduceMin; import org.tensorflow.op.core.ReduceProd; import org.tensorflow.op.core.ReduceSum; +import org.tensorflow.op.core.RefEnter; +import org.tensorflow.op.core.RefExit; +import org.tensorflow.op.core.RefIdentity; +import org.tensorflow.op.core.RefMerge; import 
org.tensorflow.op.core.RefNextIteration; import org.tensorflow.op.core.RefSelect; import org.tensorflow.op.core.RefSwitch; @@ -206,12 +238,15 @@ import org.tensorflow.op.core.ScatterMul; import org.tensorflow.op.core.ScatterNd; import org.tensorflow.op.core.ScatterNdAdd; +import org.tensorflow.op.core.ScatterNdMax; +import org.tensorflow.op.core.ScatterNdMin; import org.tensorflow.op.core.ScatterNdNonAliasingAdd; import org.tensorflow.op.core.ScatterNdSub; import org.tensorflow.op.core.ScatterNdUpdate; import org.tensorflow.op.core.ScatterSub; import org.tensorflow.op.core.ScatterUpdate; import org.tensorflow.op.core.Select; +import org.tensorflow.op.core.Send; import org.tensorflow.op.core.SetDiff1d; import org.tensorflow.op.core.SetSize; import org.tensorflow.op.core.ShapeN; @@ -224,6 +259,10 @@ import org.tensorflow.op.core.SplitV; import org.tensorflow.op.core.Squeeze; import org.tensorflow.op.core.Stack; +import org.tensorflow.op.core.StackClose; +import org.tensorflow.op.core.StackCreate; +import org.tensorflow.op.core.StackPop; +import org.tensorflow.op.core.StackPush; import org.tensorflow.op.core.Stage; import org.tensorflow.op.core.StageClear; import org.tensorflow.op.core.StagePeek; @@ -232,8 +271,10 @@ import org.tensorflow.op.core.StatefulIf; import org.tensorflow.op.core.StatefulPartitionedCall; import org.tensorflow.op.core.StatefulWhile; +import org.tensorflow.op.core.StatelessCase; import org.tensorflow.op.core.StatelessIf; import org.tensorflow.op.core.StatelessWhile; +import org.tensorflow.op.core.StochasticCastToInt; import org.tensorflow.op.core.StopGradient; import org.tensorflow.op.core.StridedSlice; import org.tensorflow.op.core.StridedSliceAssign; @@ -241,6 +282,7 @@ import org.tensorflow.op.core.StridedSliceHelper; import org.tensorflow.op.core.Sum; import org.tensorflow.op.core.SwitchCond; +import org.tensorflow.op.core.SyncDevice; import org.tensorflow.op.core.TemporaryVariable; import org.tensorflow.op.core.TensorArray; import 
org.tensorflow.op.core.TensorArrayClose; @@ -290,11 +332,13 @@ import org.tensorflow.op.core.TopKWithUnique; import org.tensorflow.op.core.Unbatch; import org.tensorflow.op.core.UnbatchGrad; +import org.tensorflow.op.core.UniformQuantizedClipByValue; import org.tensorflow.op.core.Unique; import org.tensorflow.op.core.UniqueWithCounts; import org.tensorflow.op.core.UnravelIndex; import org.tensorflow.op.core.Unstack; import org.tensorflow.op.core.Unstage; +import org.tensorflow.op.core.UpperBound; import org.tensorflow.op.core.VarHandleOp; import org.tensorflow.op.core.VarIsInitializedOp; import org.tensorflow.op.core.Variable; @@ -348,35 +392,23 @@ public final class Ops { public final NnOps nn; - public final SummaryOps summary; - - public final ImageOps image; - - public final RaggedOps ragged; + public final ClusterOps cluster; public final DataOps data; - public final ShapeOps shape; - - public final IoOps io; - - public final DtypesOps dtypes; - - public final XlaOps xla; - - public final LinalgOps linalg; + public final MathOps math; public final RandomOps random; public final StringsOps strings; - public final SparseOps sparse; + public final BitwiseOps bitwise; - public final TpuOps tpu; + public final DebuggingOps debugging; - public final BitwiseOps bitwise; + public final CollectiveOps collective; - public final MathOps math; + public final DistributeOps distribute; public final AudioOps audio; @@ -386,30 +418,54 @@ public final class Ops { public final QuantizationOps quantization; + public final SummaryOps summary; + + public final ImageOps image; + + public final RaggedOps ragged; + + public final ShapeOps shape; + + public final IoOps io; + + public final DtypesOps dtypes; + + public final LinalgOps linalg; + + public final XlaOps xla; + + public final SparseOps sparse; + + public final TpuOps tpu; + private final Scope scope; Ops(Scope scope) { this.scope = scope; nn = new NnOps(this); + cluster = new ClusterOps(this); + data = new DataOps(this); + 
math = new MathOps(this); + random = new RandomOps(this); + strings = new StringsOps(this); + bitwise = new BitwiseOps(this); + debugging = new DebuggingOps(this); + collective = new CollectiveOps(this); + distribute = new DistributeOps(this); + audio = new AudioOps(this); + signal = new SignalOps(this); + train = new TrainOps(this); + quantization = new QuantizationOps(this); summary = new SummaryOps(this); image = new ImageOps(this); ragged = new RaggedOps(this); - data = new DataOps(this); shape = new ShapeOps(this); io = new IoOps(this); dtypes = new DtypesOps(this); - xla = new XlaOps(this); linalg = new LinalgOps(this); - random = new RandomOps(this); - strings = new StringsOps(this); + xla = new XlaOps(this); sparse = new SparseOps(this); tpu = new TpuOps(this); - bitwise = new BitwiseOps(this); - math = new MathOps(this); - audio = new AudioOps(this); - signal = new SignalOps(this); - train = new TrainOps(this); - quantization = new QuantizationOps(this); } /** @@ -442,6 +498,105 @@ public All all(Operand input, Operand axis, All.Option return All.create(scope, input, axis, options); } + /** + * Creates a uninitialized anonymous hash table. + * This op creates a new anonymous hash table (as a resource) everytime + * it is executed, with the specified dtype of its keys and values, + * returning the resource handle. Before using the table you will have + * to initialize it. After initialization the table will be + * immutable. The table is anonymous in the sense that it can only be + * accessed by the returned resource handle (e.g. it cannot be looked up + * by a name in a resource manager). The table will be automatically + * deleted when all resource handles pointing to it are gone. + * + * @param keyDtype Type of the table keys. + * @param valueDtype Type of the table values. 
+ * @param data type for {@code AnonymousHashTable} output and operands + * @param data type for {@code AnonymousHashTable} output and operands + * @return a new instance of AnonymousHashTable + */ + public AnonymousHashTable anonymousHashTable(Class keyDtype, + Class valueDtype) { + return AnonymousHashTable.create(scope, keyDtype, valueDtype); + } + + /** + * Creates an empty anonymous mutable hash table that uses tensors as the backing store. + * This op creates a new anonymous mutable hash table (as a resource) everytime + * it is executed, with the specified dtype of its keys and values, + * returning the resource handle. Each value must be a scalar. + * Data can be inserted into the table using + * the insert operations. It does not support the initialization operation. + *

It uses "open addressing" with quadratic reprobing to resolve + * collisions. + *

The table is anonymous in the sense that it can only be + * accessed by the returned resource handle (e.g. it cannot be looked up + * by a name in a resource manager). The table will be automatically + * deleted when all resource handles pointing to it are gone. + * + * @param emptyKey The key used to represent empty key buckets internally. Must not + * be used in insert or lookup operations. + * @param deletedKey The deletedKey value + * @param valueDtype Type of the table values. + * @param options carries optional attribute values + * @param data type for {@code AnonymousMutableDenseHashTable} output and operands + * @param data type for {@code AnonymousMutableDenseHashTable} output and operands + * @return a new instance of AnonymousMutableDenseHashTable + */ + public AnonymousMutableDenseHashTable anonymousMutableDenseHashTable( + Operand emptyKey, Operand deletedKey, Class valueDtype, + AnonymousMutableDenseHashTable.Options... options) { + return AnonymousMutableDenseHashTable.create(scope, emptyKey, deletedKey, valueDtype, options); + } + + /** + * Creates an empty anonymous mutable hash table. + * This op creates a new anonymous mutable hash table (as a resource) everytime + * it is executed, with the specified dtype of its keys and values, + * returning the resource handle. Each value must be a scalar. + * Data can be inserted into the table using + * the insert operations. It does not support the initialization operation. + * The table is anonymous in the sense that it can only be + * accessed by the returned resource handle (e.g. it cannot be looked up + * by a name in a resource manager). The table will be automatically + * deleted when all resource handles pointing to it are gone. + * + * @param keyDtype Type of the table keys. + * @param valueDtype Type of the table values. 
+ * @param data type for {@code AnonymousMutableHashTable} output and operands + * @param data type for {@code AnonymousMutableHashTable} output and operands + * @return a new instance of AnonymousMutableHashTable + */ + public AnonymousMutableHashTable anonymousMutableHashTable( + Class keyDtype, Class valueDtype) { + return AnonymousMutableHashTable.create(scope, keyDtype, valueDtype); + } + + /** + * Creates an empty anonymous mutable hash table of vector values. + * This op creates a new anonymous mutable hash table (as a resource) everytime + * it is executed, with the specified dtype of its keys and values, + * returning the resource handle. Each value must be a vector. + * Data can be inserted into the table using + * the insert operations. It does not support the initialization operation. + * The table is anonymous in the sense that it can only be + * accessed by the returned resource handle (e.g. it cannot be looked up + * by a name in a resource manager). The table will be automatically + * deleted when all resource handles pointing to it are gone. + * + * @param keyDtype Type of the table keys. + * @param valueDtype Type of the table values. + * @param options carries optional attribute values + * @param data type for {@code AnonymousMutableHashTableOfTensors} output and operands + * @param data type for {@code AnonymousMutableHashTableOfTensors} output and operands + * @return a new instance of AnonymousMutableHashTableOfTensors + */ + public AnonymousMutableHashTableOfTensors anonymousMutableHashTableOfTensors( + Class keyDtype, Class valueDtype, + AnonymousMutableHashTableOfTensors.Options... options) { + return AnonymousMutableHashTableOfTensors.create(scope, keyDtype, valueDtype, options); + } + /** * Computes the "logical or" of elements across dimensions of a tensor. * Reduces {@code input} along the dimensions given in {@code axis}. 
Unless @@ -464,7 +619,6 @@ public Any any(Operand input, Operand axis, Any.Option * See https://arxiv.org/abs/2206.14286 for the algorithm details. * This op is only optimized on TPU currently. * - * @param data type for {@code values} output * @param input Array to search. Must be at least 1-D of the floating type * @param k Specifies the number of min/max-k. * @param options carries optional attribute values @@ -477,72 +631,72 @@ public ApproxTopK approxTopK(Operand input, Long k, } /** - * Creates a constant of {@code String} elements, using the default UTF-8 charset. + * Creates a constant of {@code long} elements. * * @param data An array containing the values to put into the new constant. - * @return the {@code String} constant + * @return a long constant */ - public Constant array(String... data) { + public Constant array(long... data) { return Constant.arrayOf(scope, data); } /** - * Creates a constant of {@code int} elements. + * Creates a constant of {@code float} elements. * * @param data An array containing the values to put into the new constant. * @return a float constant */ - public Constant array(int... data) { + public Constant array(float... data) { return Constant.arrayOf(scope, data); } /** - * Creates a constant of {@code double} elements. + * Creates a constant of {@code String} elements, using the default UTF-8 charset. * * @param data An array containing the values to put into the new constant. - * @return a double constant + * @return the {@code String} constant */ - public Constant array(double... data) { + public Constant array(String... data) { return Constant.arrayOf(scope, data); } /** - * Creates a constant of {@code long} elements. + * Creates a constant of {@code int} elements. * * @param data An array containing the values to put into the new constant. - * @return a long constant + * @return a float constant */ - public Constant array(long... data) { + public Constant array(int... 
data) { return Constant.arrayOf(scope, data); } /** - * Creates a constant of {@code byte} elements. + * Creates a constant of {@code double} elements. * * @param data An array containing the values to put into the new constant. - * @return a byte constant + * @return a double constant */ - public Constant array(byte... data) { + public Constant array(double... data) { return Constant.arrayOf(scope, data); } /** - * Creates a constant of {@code boolean} elements. + * Creates a constant of {@code byte} elements. * * @param data An array containing the values to put into the new constant. - * @return a boolean constant + * @return a byte constant */ - public Constant array(boolean... data) { + public Constant array(byte... data) { return Constant.arrayOf(scope, data); } /** - * Creates a constant of {@code float} elements. + * Creates a constant of {@code boolean} elements. * * @param data An array containing the values to put into the new constant. - * @return a float constant + * @return a boolean constant */ - public Constant array(float... data) { + public Constant array(boolean... data) { return Constant.arrayOf(scope, data); } @@ -578,7 +732,6 @@ public AssertThat assertThat(Operand condition, Iterable> data * This operation outputs "ref" after the assignment is done. * This makes it easier to chain operations that need to use the reset value. * - * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. May be uninitialized. * @param value The value to be assigned to the variable. * @param options carries optional attribute values @@ -595,7 +748,6 @@ public Assign assign(Operand ref, Operand value, * This operation outputs "ref" after the update is done. * This makes it easier to chain operations that need to use the reset value. * - * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. * @param value The value to be added to the variable. 
* @param options carries optional attribute values @@ -626,7 +778,6 @@ public AssignAddVariableOp assignAddVariableOp(Operand resource * This operation outputs "ref" after the update is done. * This makes it easier to chain operations that need to use the reset value. * - * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. * @param value The value to be subtracted to the variable. * @param options carries optional attribute values @@ -873,7 +1024,6 @@ public BatchFunction batchFunction(Iterable> inTensors, * dimension are moved in spatial blocks to the {@code height} and {@code width} dimensions, * followed by cropping along the {@code height} and {@code width} dimensions. * - * @param data type for {@code output} output * @param input 4-D tensor with shape * {@code [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth]}. Note that the batch size of the input tensor must be divisible by * {@code block_size * block_size}. @@ -901,7 +1051,6 @@ public BatchToSpace batchToSpace(Operand input, * optionally cropped according to {@code crops} to produce the output. This is the * reverse of SpaceToBatch. See below for a precise description. * - * @param data type for {@code output} output * @param input N-D with shape {@code input_shape = [batch] + spatial_shape + remaining_shape}, * where spatial_shape has M dimensions. * @param blockShape 1-D with shape {@code [M]}, all values must be >= 1. @@ -1067,7 +1216,6 @@ public BatchToSpaceNd batchToSpaceNd(Operand input, * buffer is made on BE machines when types are of different sizes in order to get * the same casting results as on LE machines. 
* - * @param data type for {@code output} output * @param input The input value * @param type The value of the type attribute * @param data type for {@code Bitcast} output and operands @@ -1138,7 +1286,6 @@ public Operand booleanMaskUpdate(Operand tensor, Operand * Given {@code s0} and {@code s1}, tensors that represent shapes, compute {@code r0}, the * broadcasted shape. {@code s0}, {@code s1} and {@code r0} are all integer vectors. * - * @param data type for {@code r0} output * @param s0 The s0 value * @param s1 The s1 value * @param data type for {@code BroadcastArgs} output and operands @@ -1149,6 +1296,20 @@ public BroadcastDynamicShape broadcastDynamicShape(Operan return BroadcastDynamicShape.create(scope, s0, s1); } + /** + * Return the reduction indices for computing gradients of s0 op s1 with broadcast. + * This is typically used by gradient computations for a broadcasting operation. + * + * @param s0 The s0 value + * @param s1 The s1 value + * @param data type for {@code BroadcastGradientArgs} output and operands + * @return a new instance of BroadcastGradientArgs + */ + public BroadcastGradientArgs broadcastGradientArgs(Operand s0, + Operand s1) { + return BroadcastGradientArgs.create(scope, s0, s1); + } + /** * Broadcast an array for a compatible shape. * Broadcasting is the process of making arrays to have compatible shapes @@ -1188,7 +1349,6 @@ public BroadcastDynamicShape broadcastDynamicShape(Operan * shape. (In a graph context, {@code broadcast_to} might be fused to * subsequent operation and then be optimized away, however.) * - * @param data type for {@code output} output * @param input A Tensor to broadcast. * @param shape An 1-D {@code int} Tensor. The shape of the desired output. 
* @param data type for {@code BroadcastTo} output and operands @@ -1282,6 +1442,22 @@ public Case caseOp(Operand branchIndex, Iterable> input, return Case.create(scope, branchIndex, input, Tout, branches, options); } + /** + * Checks whether a tensor is located in host memory pinned for GPU. + * When run: + *

    + *
  • Reports an {@code InvalidArgument} error if {@code tensor} is not in pinned memory.
  • + *
  • Reports a {@code FailedPrecondition} error if not built with CUDA.
  • + *
+ * + * @param tensor The tensor value + * @param data type for {@code CheckPinned} output and operands + * @return a new instance of CheckPinned + */ + public CheckPinned checkPinned(Operand tensor) { + return CheckPinned.create(scope, tensor); + } + /** * Clips tensor values to a specified min and max. * Given a tensor {@code t}, this operation returns a tensor of the same type and @@ -1289,7 +1465,6 @@ public Case caseOp(Operand branchIndex, Iterable> input, * Any values less than {@code clip_value_min} are set to {@code clip_value_min}. Any values * greater than {@code clip_value_max} are set to {@code clip_value_max}. * - * @param data type for {@code output} output * @param t A {@code Tensor}. * @param clipValueMin A 0-D (scalar) {@code Tensor}, or a {@code Tensor} with the same shape * as {@code t}. The minimum value to clip by. @@ -1303,10 +1478,42 @@ public ClipByValue clipByValue(Operand t, Operand cli return ClipByValue.create(scope, t, clipValueMin, clipValueMax); } + /** + * Encodes an {@code ExtensionType} value into a {@code variant} scalar Tensor. + * Returns a scalar variant tensor containing a single {@code CompositeTensorVariant} + * with the specified Tensor components and TypeSpec. + * + * @param components The component tensors for the extension type value. + * @param metadata String serialization for the TypeSpec. (Note: the encoding for the TypeSpec + * may change in future versions of TensorFlow.) + * @return a new instance of CompositeTensorVariantFromComponents + */ + public CompositeTensorVariantFromComponents compositeTensorVariantFromComponents( + Iterable> components, String metadata) { + return CompositeTensorVariantFromComponents.create(scope, components, metadata); + } + + /** + * Decodes a {@code variant} scalar Tensor into an {@code ExtensionType} value. + * Returns the Tensor components encoded in a {@code CompositeTensorVariant}. + *

Raises an error if {@code type_spec_proto} doesn't match the TypeSpec + * in {@code encoded}. + * + * @param encoded A scalar {@code variant} Tensor containing an encoded ExtensionType value. + * @param metadata String serialization for the TypeSpec. Must be compatible with the + * {@code TypeSpec} contained in {@code encoded}. (Note: the encoding for the TypeSpec + * may change in future versions of TensorFlow.) + * @param Tcomponents Expected dtypes for components. + * @return a new instance of CompositeTensorVariantToComponents + */ + public CompositeTensorVariantToComponents compositeTensorVariantToComponents( + Operand encoded, String metadata, List> Tcomponents) { + return CompositeTensorVariantToComponents.create(scope, encoded, metadata, Tcomponents); + } + /** * Concatenates tensors along one dimension. * - * @param data type for {@code output} output * @param values List of {@code N} Tensors to concatenate. Their ranks and types must match, * and their sizes must match in all dimensions except {@code concat_dim}. * @param axis 0-D. The dimension along which to concatenate. Must be in the @@ -1319,6 +1526,33 @@ public Concat concat(Iterable> values, return Concat.create(scope, values, axis); } + /** + * Computes offsets of concat inputs within its output. + * For example: + *

+ *
+ *
+ *

x = [2, 2, 7] + * y = [2, 3, 7] + * z = [2, 9, 7] + * offsets = concat_offset(1, [x, y, z]) + * [[a.item() for a in list(off.numpy())] for off in offsets] + * [[0, 0, 0], [0, 2, 0], [0, 5, 0]] + *

+ *
+ *
+ *

This is typically used by gradient computations for a concat operation. + * + * @param concatDim The dimension along which to concatenate. + * @param shape The {@code N} int32 or int64 vectors representing shape of tensors being concatenated. + * @param data type for {@code ConcatOffset} output and operands + * @return a new instance of ConcatOffset + */ + public ConcatOffset concatOffset(Operand concatDim, + Iterable> shape) { + return ConcatOffset.create(scope, concatDim, shape); + } + /** * Creates a constant containing a single {@code int} element. * @@ -1351,17 +1585,6 @@ public Constant constant(byte[][][][][] data) { return Constant.tensorOf(scope, data); } - /** - * Creates a constant of {@code String} elements that is a copy of a given n-dimensional array, - * using the default UTF-8 encoding. - * - * @param data an n-dimensional array of {@code String} elements. - * @return a string constant - */ - public Constant constant(NdArray data) { - return Constant.tensorOf(scope, data); - } - /** * Creates a rank-4 constant of {@code int} elements. * @@ -1383,17 +1606,6 @@ public Constant constant(byte data) { return Constant.scalarOf(scope, data); } - /** - * Creates a rank-2 constant of {@code long} elements. - * - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. - * @return a long constant - */ - public Constant constant(long[][] data) { - return Constant.tensorOf(scope, data); - } - /** * Creates a rank-6 constant of {@code float} elements. * @@ -1427,28 +1639,6 @@ public Constant constant(boolean[][][][] data) { return Constant.tensorOf(scope, data); } - /** - * Creates a rank-3 constant of {@code float} elements. - * - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. 
- * @return a float constant - */ - public Constant constant(float[][][] data) { - return Constant.tensorOf(scope, data); - } - - /** - * Creates a rank-5 constant of {@code float} elements. - * - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. - * @return a float constant - */ - public Constant constant(float[][][][][] data) { - return Constant.tensorOf(scope, data); - } - /** * Creates a rank-5 constant of {@code long} elements. * @@ -1472,217 +1662,304 @@ public Constant constant(int[] data) { } /** - * Creates a rank-2 constant of {@code float} elements. + * Creates a rank-2 constant of {@code boolean} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a float constant + * @return a boolean constant */ - public Constant constant(float[][] data) { + public Constant constant(boolean[][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-2 constant of {@code boolean} elements. + * Creates a constant of {@code boolean} elements that is a copy of a given n-dimensional array. * - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param data an n-dimensional array of {@code boolean} elements. * @return a boolean constant */ - public Constant constant(boolean[][] data) { + public Constant constant(BooleanNdArray data) { return Constant.tensorOf(scope, data); } /** - * Creates a constant containing a single {@code double} element. + * Creates a rank-1 constant of {@code double} elements. * - * @param data The value to put into the new constant. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. 
* @return a double constant */ - public Constant constant(double data) { - return Constant.scalarOf(scope, data); + public Constant constant(double[] data) { + return Constant.vectorOf(scope, data); } /** - * Creates a constant containing a single {@code boolean} element. + * Creates a constant of {@code long} elements that is a copy of a given n-dimensional array. * - * @param data The value to put into the new constant. - * @return a boolean constant + * @param data an n-dimensional array of {@code long} elements. + * @return a long constant */ - public Constant constant(boolean data) { - return Constant.scalarOf(scope, data); + public Constant constant(LongNdArray data) { + return Constant.tensorOf(scope, data); } /** - * Creates a constant containing a single {@code long} element. + * Creates a rank-3 constant of {@code long} elements. * - * @param data The value to put into the new constant. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. * @return a long constant */ - public Constant constant(long data) { - return Constant.scalarOf(scope, data); + public Constant constant(long[][][] data) { + return Constant.tensorOf(scope, data); } /** - * Creates a {@code String} constant using the default, UTF-8 encoding. + * Creates a rank-1 constant of {@code byte} elements. * - * @param data The string to put into the new constant. - * @return a string constant + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a byte constant */ - public Constant constant(String data) { - return Constant.scalarOf(scope, data); + public Constant constant(byte[] data) { + return Constant.vectorOf(scope, data); } /** - * Creates a constant of {@code boolean} elements that is a copy of a given n-dimensional array. 
+ * Creates a constant of {@code float} elements that is a copy of a given n-dimensional array. * - * @param data an n-dimensional array of {@code boolean} elements. - * @return a boolean constant + * @param data an n-dimensional array of {@code float} elements. + * @return a float constant */ - public Constant constant(BooleanNdArray data) { + public Constant constant(FloatNdArray data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-1 constant of {@code double} elements. + * Creates a rank-5 constant of {@code int} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a double constant + * @return an integer constant */ - public Constant constant(double[] data) { - return Constant.vectorOf(scope, data); + public Constant constant(int[][][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Creates a constant of {@code long} elements that is a copy of a given n-dimensional array. + * Creates a rank-5 constant of {@code double} elements. * - * @param data an n-dimensional array of {@code long} elements. - * @return a long constant + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a double constant */ - public Constant constant(LongNdArray data) { + public Constant constant(double[][][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-1 constant of {@code float} elements. + * Creates a rank-5 constant of {@code boolean} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. + * @return a boolean constant + */ + public Constant constant(boolean[][][][][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a constant containing a single {@code float} element. 
+ * + * @param data The value to put into the new constant. * @return a float constant */ - public Constant constant(float[] data) { - return Constant.vectorOf(scope, data); + public Constant constant(float data) { + return Constant.scalarOf(scope, data); } /** - * Creates a rank-3 constant of {@code long} elements. + * Creates a rank-2 constant of {@code byte} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a long constant + * @return a byte constant */ - public Constant constant(long[][][] data) { + public Constant constant(byte[][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-3 constant of {@code boolean} elements. + * Creates a rank-2 constant of {@code double} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a boolean constant + * @return a double constant */ - public Constant constant(boolean[][][] data) { + public Constant constant(double[][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-1 constant of {@code byte} elements. + * Creates a rank-3 constant of {@code byte} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a byte constant */ - public Constant constant(byte[] data) { - return Constant.vectorOf(scope, data); + public Constant constant(byte[][][] data) { + return Constant.tensorOf(scope, data); } /** - * Creates a rank-3 constant of {@code int} elements. + * Creates a constant of {@code String} elements that is a copy of a given n-dimensional array, + * using the default UTF-8 encoding. + * + * @param data an n-dimensional array of {@code String} elements. 
+ * @return a string constant + */ + public Constant constant(NdArray data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a rank-2 constant of {@code long} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return an integer constant + * @return a long constant */ - public Constant constant(int[][][] data) { + public Constant constant(long[][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a constant of {@code int} elements that is a copy of a given n-dimensional array. + * Creates a rank-3 constant of {@code float} elements. * - * @param data an n-dimensional array of {@code int} elements. - * @return an integer constant + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a float constant */ - public Constant constant(IntNdArray data) { + public Constant constant(float[][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-1 constant of {@code long} elements. + * Creates a rank-5 constant of {@code float} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a long constant + * @return a float constant */ - public Constant constant(long[] data) { - return Constant.vectorOf(scope, data); + public Constant constant(float[][][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Creates a constant of {@code float} elements that is a copy of a given n-dimensional array. + * Creates a rank-2 constant of {@code float} elements. * - * @param data an n-dimensional array of {@code float} elements. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. 
* @return a float constant */ - public Constant constant(FloatNdArray data) { + public Constant constant(float[][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-5 constant of {@code int} elements. + * Creates a constant containing a single {@code double} element. + * + * @param data The value to put into the new constant. + * @return a double constant + */ + public Constant constant(double data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a constant containing a single {@code boolean} element. + * + * @param data The value to put into the new constant. + * @return a boolean constant + */ + public Constant constant(boolean data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a constant containing a single {@code long} element. + * + * @param data The value to put into the new constant. + * @return a long constant + */ + public Constant constant(long data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a {@code String} constant using the default, UTF-8 encoding. + * + * @param data The string to put into the new constant. + * @return a string constant + */ + public Constant constant(String data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a rank-1 constant of {@code float} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return an integer constant + * @return a float constant */ - public Constant constant(int[][][][][] data) { - return Constant.tensorOf(scope, data); + public Constant constant(float[] data) { + return Constant.vectorOf(scope, data); } /** - * Creates a rank-5 constant of {@code double} elements. + * Creates a rank-3 constant of {@code boolean} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
- * @return a double constant + * @return a boolean constant */ - public Constant constant(double[][][][][] data) { + public Constant constant(boolean[][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-5 constant of {@code boolean} elements. + * Creates a rank-3 constant of {@code int} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a boolean constant + * @return an integer constant */ - public Constant constant(boolean[][][][][] data) { + public Constant constant(int[][][] data) { return Constant.tensorOf(scope, data); } + /** + * Creates a constant of {@code int} elements that is a copy of a given n-dimensional array. + * + * @param data an n-dimensional array of {@code int} elements. + * @return an integer constant + */ + public Constant constant(IntNdArray data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a rank-1 constant of {@code long} elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a long constant + */ + public Constant constant(long[] data) { + return Constant.vectorOf(scope, data); + } + /** * Creates a rank-6 constant of {@code int} elements. * @@ -1748,16 +2025,6 @@ public Constant constant(boolean[] data) { return Constant.vectorOf(scope, data); } - /** - * Creates a constant containing a single {@code float} element. - * - * @param data The value to put into the new constant. - * @return a float constant - */ - public Constant constant(float data) { - return Constant.scalarOf(scope, data); - } - /** * Creates a rank-4 constant of {@code byte} elements. * @@ -1812,39 +2079,6 @@ public Constant constant(long[][][][] data) { return Constant.tensorOf(scope, data); } - /** - * Creates a rank-2 constant of {@code byte} elements. 
- * - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. - * @return a byte constant - */ - public Constant constant(byte[][] data) { - return Constant.tensorOf(scope, data); - } - - /** - * Creates a rank-2 constant of {@code double} elements. - * - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. - * @return a double constant - */ - public Constant constant(double[][] data) { - return Constant.tensorOf(scope, data); - } - - /** - * Creates a rank-3 constant of {@code byte} elements. - * - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. - * @return a byte constant - */ - public Constant constant(byte[][][] data) { - return Constant.tensorOf(scope, data); - } - /** * Creates a rank-4 constant of {@code double} elements. * @@ -1952,26 +2186,26 @@ public Constant constant(Shape shape, IntDataBuffer data) { } /** - * Create a {@link TInt64} constant with data from the given buffer. + * Create a {@link TFloat64} constant with data from the given buffer. * * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return a long constant + * @return a double constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer */ - public Constant constant(Shape shape, LongDataBuffer data) { + public Constant constant(Shape shape, DoubleDataBuffer data) { return Constant.tensorOf(scope, shape, data); } /** - * Create a {@link TFloat64} constant with data from the given buffer. + * Create a {@link TInt64} constant with data from the given buffer. * * @param shape the tensor shape. * @param data a buffer containing the tensor data. 
- * @return a double constant + * @return a long constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer */ - public Constant constant(Shape shape, DoubleDataBuffer data) { + public Constant constant(Shape shape, LongDataBuffer data) { return Constant.tensorOf(scope, shape, data); } @@ -2032,11 +2266,7 @@ public Constant constant(Class type, Shape shape, ByteDa /** * Create a constant by making an immutable copy of {@code tensor}. {@code tensor} may be closed - * afterwards without issue. - * - *

Note: this endpoint cannot be simply called {@code constant} since it will conflict with - * other endpoints accepting an NdArray in parameter {e.g. {@link #tensorOf(Scope, - * FloatNdArray)}}. + * afterward without issue. * * @param tensor a Tensor holding the constant value * @return a constant of the same data type as `tensor` @@ -2088,7 +2318,6 @@ public ControlTrigger controlTrigger() { /** * The CopyToMesh operation * - * @param data type for {@code output} output * @param input The input value * @param mesh The value of the mesh attribute * @param data type for {@code CopyToMesh} output and operands @@ -2101,7 +2330,6 @@ public CopyToMesh copyToMesh(Operand input, String mesh) /** * The CopyToMeshGrad operation * - * @param data type for {@code output} output * @param input The input value * @param forwardInput The forwardInput value * @param data type for {@code CopyToMeshGrad} output and operands @@ -2115,7 +2343,6 @@ public CopyToMeshGrad copyToMeshGrad(Operand input, /** * Increments 'ref' until it reaches 'limit'. * - * @param data type for {@code output} output * @param ref Should be from a scalar {@code Variable} node. * @param limit If incrementing ref would bring it above limit, instead generates an * 'OutOfRange' error. @@ -2210,7 +2437,6 @@ public DecodeProto decodeProto(Operand bytes, String messageType, /** * Makes a copy of {@code x}. * - * @param data type for {@code y} output * @param x The source tensor of type {@code T}. * @param data type for {@code DeepCopy} output and operands * @return a new instance of DeepCopy @@ -2252,7 +2478,6 @@ public DestroyResourceOp destroyResourceOp(Operand resource, * using control dependencies. *

Outputs the final value of the tensor pointed to by 'ref'. * - * @param data type for {@code value} output * @param ref A reference to the temporary variable tensor. * @param varName Name of the temporary variable, usually the name of the matching * 'TemporaryVariable' op. @@ -2264,6 +2489,29 @@ public DestroyTemporaryVariable destroyTemporaryVariable(Op return DestroyTemporaryVariable.create(scope, ref, varName); } + /** + * Return the index of device the op runs. + * Given a list of device names, this operation returns the index of the device + * this op runs. The length of the list is returned in two cases: + * (1) Device does not exist in the given device list. + * (2) It is in XLA compilation. + * + * @param deviceNames The value of the deviceNames attribute + * @return a new instance of DeviceIndex + */ + public DeviceIndex deviceIndex(List deviceNames) { + return DeviceIndex.create(scope, deviceNames); + } + + /** + * The DummyMemoryCache operation + * + * @return a new instance of DummyMemoryCache + */ + public DummyMemoryCache dummyMemoryCache() { + return DummyMemoryCache.create(scope); + } + /** * Partitions {@code data} into {@code num_partitions} tensors using indices from {@code partitions}. * For each index tuple {@code js} of size {@code partitions.ndim}, the slice {@code data[js, ...]} @@ -2307,7 +2555,6 @@ public DestroyTemporaryVariable destroyTemporaryVariable(Op * * * - * @param data type for {@code outputs} output * @param data The data value * @param partitions Any shape. Indices in the range {@code [0, num_partitions)}. * @param numPartitions The number of partitions to output. @@ -2338,7 +2585,7 @@ public DynamicPartition dynamicPartition(Operand data, * must have {@code data[i].shape = indices[i].shape + constant}. In terms of this * {@code constant}, the output shape is *

-   *  merged.shape = [max(indices)] + constant
+   *  merged.shape = [max(indices) + 1] + constant
    *  
*

Values are merged in order, so if an index appears in both {@code indices[m][i]} and * {@code indices[n][j]} for {@code (m,i) < (n,j)} the slice {@code data[n][j]} will appear in the @@ -2375,7 +2622,6 @@ public DynamicPartition dynamicPartition(Operand data, * * * - * @param data type for {@code merged} output * @param indices The indices value * @param data The data value * @param data type for {@code DynamicStitch} output and operands @@ -2419,7 +2665,6 @@ public EditDistance editDistance(Operand hypothesisInd * Creates a tensor with the given shape. *

This operation creates a tensor of {@code shape} and {@code dtype}. * - * @param data type for {@code output} output * @param shape 1-D. Represents the shape of the output tensor. * @param dtype The value of the dtype attribute * @param options carries optional attribute values @@ -2521,18 +2766,48 @@ public EncodeProto encodeProto(Operand sizes, Iterable> value } /** - * Ensures that the tensor's shape matches the expected shape. - * Raises an error if the input tensor's shape does not match the specified shape. - * Returns the input tensor otherwise. + * Ensures that the tensor's shape matches the expected shape. + * Raises an error if the input tensor's shape does not match the specified shape. + * Returns the input tensor otherwise. + * + * @param input A tensor, whose shape is to be validated. + * @param shape The expected (possibly partially specified) shape of the input tensor. + * @param data type for {@code EnsureShape} output and operands + * @return a new instance of EnsureShape + */ + public EnsureShape ensureShape(Operand input, Shape shape) { + return EnsureShape.create(scope, input, shape); + } + + /** + * Creates or finds a child frame, and makes {@code data} available to the child frame. + * This op is used together with {@code Exit} to create loops in the graph. + * The unique {@code frame_name} is used by the {@code Executor} to identify frames. If + * {@code is_constant} is true, {@code output} is a constant in the child frame; otherwise + * it may be changed in the child frame. At most {@code parallel_iterations} iterations + * are run in parallel in the child frame. + * + * @param data The tensor to be made available to the child frame. + * @param frameName The name of the child frame. + * @param options carries optional attribute values + * @param data type for {@code Enter} output and operands + * @return a new instance of Enter + */ + public Enter enter(Operand data, String frameName, + Enter.Options... 
options) { + return Enter.create(scope, data, frameName, options); + } + + /** + * Exits the current frame to its parent frame. + * Exit makes its input {@code data} available to the parent frame. * - * @param data type for {@code output} output - * @param input A tensor, whose shape is to be validated. - * @param shape The expected (possibly partially specified) shape of the input tensor. - * @param data type for {@code EnsureShape} output and operands - * @return a new instance of EnsureShape + * @param data The tensor to be made available to the parent frame. + * @param data type for {@code Exit} output and operands + * @return a new instance of Exit */ - public EnsureShape ensureShape(Operand input, Shape shape) { - return EnsureShape.create(scope, input, shape); + public Exit exit(Operand data) { + return Exit.create(scope, data); } /** @@ -2561,7 +2836,6 @@ public EnsureShape ensureShape(Operand input, Shape shap *

This operation is related to {@code squeeze()}, which removes dimensions of * size 1. * - * @param data type for {@code output} output * @param input The input value * @param axis 0-D (scalar). Specifies the dimension index at which to * expand the shape of {@code input}. Must be in the range @@ -2577,7 +2851,6 @@ public ExpandDims expandDims(Operand input, /** * Extract {@code patches} from {@code input} and put them in the {@code "depth"} output dimension. 3D extension of {@code extract_image_patches}. * - * @param data type for {@code patches} output * @param input 5-D Tensor with shape {@code [batch, in_planes, in_rows, in_cols, depth]}. * @param ksizes The size of the sliding window for each dimension of {@code input}. * @param strides 1-D of length 5. How far the centers of two consecutive patches are in @@ -2596,6 +2869,37 @@ public ExtractVolumePatches extractVolumePatches(Operand< return ExtractVolumePatches.create(scope, input, ksizes, strides, padding); } + /** + * This op is used as a placeholder in If branch functions. It doesn't provide a + * valid output when run, so must either be removed (e.g. replaced with a + * function input) or guaranteed not to be used (e.g. if mirroring an + * intermediate output needed for the gradient computation of the other branch). + * + * @param dtype The type of the output. + * @param shape

+   *  The purported shape of the output. This is only used for shape inference;
+   *  the output will not necessarily have this shape. Can be a partial shape.
+   *  
+ * @param data type for {@code FakeParam} output and operands + * @return a new instance of FakeParam + */ + public FakeParam fakeParam(Class dtype, Shape shape) { + return FakeParam.create(scope, dtype, shape); + } + + /** + * Set configuration of the file system. + * + * @param scheme File system scheme. + * @param key The name of the configuration option. + * @param value The value of the configuration option. + * @return a new instance of FileSystemSetConfiguration + */ + public FileSystemSetConfiguration fileSystemSetConfiguration(Operand scheme, + Operand key, Operand value) { + return FileSystemSetConfiguration.create(scope, scheme, key, value); + } + /** * Creates a tensor filled with a scalar value. * This operation creates a tensor of shape {@code dims} and fills it with {@code value}. @@ -2616,7 +2920,6 @@ public ExtractVolumePatches extractVolumePatches(Operand< * based on other runtime Tensors, unlike {@code tf.constant}. * * - * @param data type for {@code output} output * @param dims 1-D. Represents the shape of the output tensor. * @param value 0-D (scalar). Value to fill the returned tensor. *

{@literal @}compatibility(numpy)
@@ -2710,9 +3013,11 @@ public For forOp(Operand start, Operand limit, Operand d *

Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, a 0 is stored in the * corresponding output value. + *

Note that on TPU, if any dimension of {@code params} is of size 0 then the output will + * be the expected shape filled with zeros. On CPU and GPU an error will be + * returned. *

See also {@code tf.batch_gather} and {@code tf.gather_nd}. * - * @param data type for {@code output} output * @param params The tensor from which to gather values. Must be at least rank * {@code axis + 1}. * @param indices Index tensor. Must be in range {@code [0, params.shape[axis])}. @@ -2750,9 +3055,17 @@ public Gather gather(Operand params, Operand * indices.shape[:-1] + params.shape[indices.shape[-1]:] * - *

Note that on CPU, if an out of bound index is found, an error is returned. - * On GPU, if an out of bound index is found, a 0 is stored in the - * corresponding output value. + *

If {@code indices} contains any out-of-bound indices, depending on + * {@code bad_indices_policy}, the op will either return an error or ignore the + * out-of-bound indices. {@code bad_indices_policy} can be one of the following values: + *

    + *
  1. "" or "DEFAULT": raises on CPU and ignore on GPU. This is because + * historically on CPU and GPU we handle errors in different ways, and for + * backward compatibility we keep the default behavior.
  2. + *
  3. "ERROR": raises error; GPU does not support this value.
  4. + *
  5. "IGNORE": ignore error and set the corresponding output to 0; + * supported on both CPU and GPU.
  6. + *
*

Some examples below. *

Simple indexing into a matrix: *

@@ -2819,15 +3132,39 @@ public  Gather gather(Operand params, Operand
    *  

See also {@code tf.gather} and {@code tf.batch_gather}. * - * @param data type for {@code output} output * @param params The tensor from which to gather values. * @param indices Index tensor. + * @param options carries optional attribute values * @param data type for {@code GatherNd} output and operands * @return a new instance of GatherNd */ public GatherNd gatherNd(Operand params, - Operand indices) { - return GatherNd.create(scope, params, indices); + Operand indices, GatherNd.Options... options) { + return GatherNd.create(scope, params, indices, options); + } + + /** + * Gets the element at the specified index in a dataset. + * + * @param dataset The dataset value + * @param index The index value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of GetElementAtIndex + */ + public GetElementAtIndex getElementAtIndex(Operand dataset, + Operand index, List> outputTypes, List outputShapes) { + return GetElementAtIndex.create(scope, dataset, index, outputTypes, outputShapes); + } + + /** + * Returns the {@code tf.data.Options} attached to {@code input_dataset}. + * + * @param inputDataset A variant tensor representing the input dataset. + * @return a new instance of GetOptions + */ + public GetOptions getOptions(Operand inputDataset) { + return GetOptions.create(scope, inputDataset); } /** @@ -2843,7 +3180,6 @@ public GetSessionHandle getSessionHandle(Operand value) { /** * Get the value of the tensor specified by its handle. * - * @param data type for {@code value} output * @param handle The handle for a tensor stored in the session state. * @param dtype The type of the output value. * @param data type for {@code GetSessionTensor} output and operands @@ -2908,7 +3244,6 @@ public Gradients gradients(Iterable> y, IterableReturns the input tensor without modification. 
* - * @param data type for {@code output} output * @param input The input value * @param data type for {@code GuaranteeConst} output and operands * @return a new instance of GuaranteeConst @@ -2952,7 +3287,6 @@ public HashTable hashTable(Class keyDtype, * sess.run(hist) => [2, 1, 1, 0, 2] *

* - * @param data type for {@code out} output * @param values Numeric {@code Tensor}. * @param valueRange Shape [2] {@code Tensor} of same {@code dtype} as {@code values}. * values <= value_range[0] will be mapped to hist[0], @@ -2983,7 +3317,6 @@ public HistogramFixedWidth histogramFixedWidth(Opera * sess.run(hist) => [2, 1, 1, 0, 2] * * - * @param data type for {@code out} output * @param values Numeric {@code Tensor}. * @param valueRange Shape [2] {@code Tensor} of same {@code dtype} as {@code values}. * values <= value_range[0] will be mapped to hist[0], @@ -2999,10 +3332,21 @@ public HistogramFixedWidth histogramFi return HistogramFixedWidth.create(scope, values, valueRange, nbins, dtype); } + /** + * Returns a constant tensor on the host. Only for writing C++ tests. + * + * @param value Attr {@code value} is the tensor to return. + * @param dtype The value of the dtype attribute + * @param data type for {@code HostConst} output and operands + * @return a new instance of HostConst + */ + public HostConst hostConst(Tensor value, Class dtype) { + return HostConst.create(scope, value, dtype); + } + /** * Return a tensor with the same shape and contents as the input tensor or value. * - * @param data type for {@code output} output * @param input The input value * @param data type for {@code Identity} output and operands * @return a new instance of Identity @@ -3070,7 +3414,6 @@ public If ifOp(Operand cond, Iterable> input, * Returns immutable tensor from memory region. * The current implementation memmaps the tensor from a file. * - * @param data type for {@code tensor} output * @param dtype Type of the returned tensor. * @param shape Shape of the returned tensor. * @param memoryRegionName Name of readonly memory region used by the tensor, see @@ -3130,7 +3473,6 @@ public InitializeTableFromTextFile initializeTableFromTextFile( * Computes y = x; y[i, :] += v; return y. * * - * @param data type for {@code y} output * @param x A {@code Tensor} of type T. 
* @param i A vector. Indices into the left-most dimension of {@code x}. * @param v A {@code Tensor} of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. @@ -3148,7 +3490,6 @@ public InplaceAdd inplaceAdd(Operand x, Operand * Computes y = x; y[i, :] -= v; return y. * * - * @param data type for {@code y} output * @param x A {@code Tensor} of type T. * @param i A vector. Indices into the left-most dimension of {@code x}. * @param v A {@code Tensor} of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. @@ -3165,7 +3506,6 @@ public InplaceSub inplaceSub(Operand x, Operand *

Originally this function is mutative however for compilation we make this * operation create / operate on a copy of {@code x}. * - * @param data type for {@code y} output * @param x A tensor of type {@code T}. * @param i A vector. Indices into the left-most dimension of {@code x}. * @param v A {@code Tensor} of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. @@ -3213,11 +3553,30 @@ public KthOrderStatistic kthOrderStatistic(Operand input, Long k) { return KthOrderStatistic.create(scope, input, k); } + /** + * Generates values in an interval. + * A sequence of {@code num} evenly-spaced values are generated beginning at {@code start}. + * If {@code num > 1}, the values in the sequence increase by + * {@code (stop - start) / (num - 1)}, so that the last one is exactly {@code stop}. + *

For example: + *

+   *  tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
+   *  
+ * + * @param start 0-D tensor. First entry in the range. + * @param stop 0-D tensor. Last entry in the range. + * @param num 0-D tensor. Number of values to generate. + * @param data type for {@code LinSpace} output and operands + * @return a new instance of LinSpace + */ + public LinSpace linSpace(Operand start, Operand stop, + Operand num) { + return LinSpace.create(scope, start, stop, num); + } + /** * Outputs all keys and values in the table. * - * @param data type for {@code keys} output - * @param data type for {@code values} output * @param tableHandle Handle to the table. * @param Tkeys The value of the Tkeys attribute * @param Tvalues The value of the Tvalues attribute @@ -3237,7 +3596,6 @@ public LookupTableExport lookupTableExp *

The scalar {@code default_value} is the value output for keys not present in the * table. It must also be of the same type as the table values. * - * @param data type for {@code values} output * @param tableHandle Handle to the table. * @param keys Any shape. Keys to look up. * @param defaultValue The defaultValue value @@ -3279,6 +3637,20 @@ public LookupTableInsert lookupTableInsert(Operand tableHandle, return LookupTableInsert.create(scope, tableHandle, keys, values); } + /** + * Removes keys and its associated values from a table. + * The tensor {@code keys} must of the same type as the keys of the table. Keys not + * already in the table are silently ignored. + * + * @param tableHandle Handle to the table. + * @param keys Any shape. Keys of the elements to remove. + * @return a new instance of LookupTableRemove + */ + public LookupTableRemove lookupTableRemove(Operand tableHandle, + Operand keys) { + return LookupTableRemove.create(scope, tableHandle, keys); + } + /** * Computes the number of elements in the given table. * @@ -3301,6 +3673,62 @@ public LoopCond loopCond(Operand input) { return LoopCond.create(scope, input); } + /** + * Applies lower_bound(sorted_search_values, values) along each row. + * Each set of rows with the same index in (sorted_inputs, values) is treated + * independently. The resulting row is the equivalent of calling + * {@code np.searchsorted(sorted_inputs, values, side='left')}. + *

The result is not a global index to the entire + * {@code Tensor}, but rather just the index in the last dimension. + *

A 2-D example: + * sorted_sequence = [[0, 3, 9, 9, 10], + * [1, 2, 3, 4, 5]] + * values = [[2, 4, 9], + * [0, 2, 6]] + *

result = LowerBound(sorted_sequence, values) + *

result == [[1, 2, 2], + * [0, 1, 5]] + * + * @param sortedInputs 2-D Tensor where each row is ordered. + * @param values 2-D Tensor with the same numbers of rows as {@code sorted_search_values}. Contains + * the values that will be searched for in {@code sorted_search_values}. + * @param data type for {@code LowerBound} output and operands + * @return a new instance of LowerBound, with default output types + */ + public LowerBound lowerBound(Operand sortedInputs, + Operand values) { + return LowerBound.create(scope, sortedInputs, values); + } + + /** + * Applies lower_bound(sorted_search_values, values) along each row. + * Each set of rows with the same index in (sorted_inputs, values) is treated + * independently. The resulting row is the equivalent of calling + * {@code np.searchsorted(sorted_inputs, values, side='left')}. + *

The result is not a global index to the entire + * {@code Tensor}, but rather just the index in the last dimension. + *

A 2-D example: + * sorted_sequence = [[0, 3, 9, 9, 10], + * [1, 2, 3, 4, 5]] + * values = [[2, 4, 9], + * [0, 2, 6]] + *

result = LowerBound(sorted_sequence, values) + *

result == [[1, 2, 2], + * [0, 1, 5]] + * + * @param sortedInputs 2-D Tensor where each row is ordered. + * @param values 2-D Tensor with the same numbers of rows as {@code sorted_search_values}. Contains + * the values that will be searched for in {@code sorted_search_values}. + * @param outType The value of the outType attribute + * @param data type for {@code LowerBound} output and operands + * @param data type for {@code LowerBound} output and operands + * @return a new instance of LowerBound + */ + public LowerBound lowerBound(Operand sortedInputs, + Operand values, Class outType) { + return LowerBound.create(scope, sortedInputs, values, outType); + } + /** * Make all elements in the non-Batch dimension unique, but "close" to * their initial value. Never returns a sub-normal number. Never returns @@ -3326,6 +3754,38 @@ public MapClear mapClear(List> dtypes, MapClear.Options.. return MapClear.create(scope, dtypes, options); } + /** + * Maps a function on the list of tensors unpacked from arguments on dimension 0. + * The function given by {@code f} is assumed to be stateless, and is executed + * concurrently on all the slices; up to batch_size (i.e. the size of the 0th + * dimension of each argument) functions will be scheduled at once. + *

The {@code max_intra_op_parallelism} attr, which defaults to 1, can be used to + * limit the intra op parallelism. To limit inter-op parallelism, a user can + * set a private threadpool on the dataset using {@code tf.data.Options}'s + * {@code ThreadingOptions}. + *

Note that this op is not exposed to users directly, but is invoked in tf.data + * rewrites. + * + * @param arguments

+   *  A list of tensors whose types are `Targuments`, corresponding to the inputs
+   *  the function should be mapped over.
+   *  
+ * @param capturedInputs
+   *  A list of tensors whose types are `Tcaptured`, corresponding to the captured
+   *  inputs of the defun.
+   *  
+ * @param outputTypes A list of types. + * @param outputShapes A list of shapes. + * @param f The value of the f attribute + * @param options carries optional attribute values + * @return a new instance of MapDefun + */ + public MapDefun mapDefun(Iterable> arguments, Iterable> capturedInputs, + List> outputTypes, List outputShapes, ConcreteFunction f, + MapDefun.Options... options) { + return MapDefun.create(scope, arguments, capturedInputs, outputTypes, outputShapes, f, options); + } + /** * Op returns the number of incomplete elements in the underlying container. * @@ -3420,7 +3880,6 @@ public MapUnstageNoKey mapUnstageNoKey(Operand indices, * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * - * @param data type for {@code output} output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. @@ -3440,7 +3899,6 @@ public Max max(Operand input, Operand{@code Merge} forwards the first tensor to become available to {@code output}, and sets * {@code value_index} to its index in {@code inputs}. * - * @param data type for {@code output} output * @param inputs The input tensors, exactly one of which will become available. * @param data type for {@code Merge} output and operands * @return a new instance of Merge @@ -3456,7 +3914,6 @@ public Merge merge(Iterable> inputs) { * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * - * @param data type for {@code output} output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. @@ -3493,7 +3950,6 @@ public Min min(Operand input, Operand * - * @param data type for {@code output} output * @param input The input tensor to be padded. * @param paddings A two-column matrix specifying the padding sizes. The number of * rows must be the same as the rank of {@code input}. 
@@ -3510,6 +3966,35 @@ public MirrorPad mirrorPad(Operand input, return MirrorPad.create(scope, input, paddings, mode); } + /** + * Gradient op for {@code MirrorPad} op. This op folds a mirror-padded tensor. + * This operation folds the padded areas of {@code input} by {@code MirrorPad} according to the + * {@code paddings} you specify. {@code paddings} must be the same as {@code paddings} argument + * given to the corresponding {@code MirrorPad} op. + *

The folded size of each dimension D of the output is: + *

{@code input.dim_size(D) - paddings(D, 0) - paddings(D, 1)} + *

For example: + *

+   *  # 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
+   *  # 'paddings' is [[0, 1]], [0, 1]].
+   *  # 'mode' is SYMMETRIC.
+   *  # rank of 't' is 2.
+   *  pad(t, paddings) ==> [[ 1,  5]
+   *                        [11, 28]]
+   *  
+ * + * @param input The input tensor to be folded. + * @param paddings A two-column matrix specifying the padding sizes. The number of + * rows must be the same as the rank of {@code input}. + * @param mode The mode used in the {@code MirrorPad} op. + * @param data type for {@code MirrorPadGrad} output and operands + * @return a new instance of MirrorPadGrad + */ + public MirrorPadGrad mirrorPadGrad(Operand input, + Operand paddings, String mode) { + return MirrorPadGrad.create(scope, input, paddings, mode); + } + /** * Wraps an arbitrary MLIR computation expressed as a module with a main() function. * This operation does not have an associated kernel and is not intended to be @@ -3663,10 +4148,77 @@ public MutexLock mutexLock(Operand mutex) { return MutexLock.create(scope, mutex); } + /** + * Outputs a tensor containing the reduction across all input tensors. + * Outputs a tensor containing the reduction across all input tensors passed to ops + * within the same `shared_name. + *

The graph should be constructed so if one op runs with shared_name value {@code c}, + * then {@code num_devices} ops will run with shared_name value {@code c}. Failure to do so + * will cause the graph execution to fail to complete. + *

input: the input to the reduction + * data: the value of the reduction across all {@code num_devices} devices. + * reduction: the reduction operation to perform. + * num_devices: The number of devices participating in this reduction. + * shared_name: Identifier that shared between ops of the same reduction. + * + * @deprecated use {@link org.tensorflow.op.distribute.NcclAllReduce} instead + * @param input The input value + * @param reduction The value of the reduction attribute + * @param numDevices The value of the numDevices attribute + * @param sharedName The value of the sharedName attribute + * @param data type for {@code NcclAllReduce} output and operands + * @return a new instance of NcclAllReduce + */ + @Deprecated + public NcclAllReduce ncclAllReduce(Operand input, String reduction, + Long numDevices, String sharedName) { + return NcclAllReduce.create(scope, input, reduction, numDevices, sharedName); + } + + /** + * Sends {@code input} to all devices that are connected to the output. + * Sends {@code input} to all devices that are connected to the output. + *

The graph should be constructed so that all ops connected to the output have a + * valid device assignment, and the op itself is assigned one of these devices. + *

input: The input to the broadcast. + * output: The same as input. + * shape: The shape of the input tensor. + * + * @deprecated use {@link org.tensorflow.op.distribute.NcclBroadcast} instead + * @param input The input value + * @param shape The value of the shape attribute + * @param data type for {@code NcclBroadcast} output and operands + * @return a new instance of NcclBroadcast + */ + @Deprecated + public NcclBroadcast ncclBroadcast(Operand input, Shape shape) { + return NcclBroadcast.create(scope, input, shape); + } + + /** + * Reduces {@code input} from {@code num_devices} using {@code reduction} to a single device. + * Reduces {@code input} from {@code num_devices} using {@code reduction} to a single device. + *

The graph should be constructed so that all inputs have a valid device + * assignment, and the op itself is assigned one of these devices. + *

input: The input to the reduction. + * data: the value of the reduction across all {@code num_devices} devices. + * reduction: the reduction operation to perform. + * + * @deprecated use {@link org.tensorflow.op.distribute.NcclReduce} instead + * @param input The input value + * @param reduction The value of the reduction attribute + * @param data type for {@code NcclReduce} output and operands + * @return a new instance of NcclReduce + */ + @Deprecated + public NcclReduce ncclReduce(Iterable> input, + String reduction) { + return NcclReduce.create(scope, input, reduction); + } + /** * Makes its input available to the next iteration. * - * @param data type for {@code output} output * @param data The tensor to be made available to the next iteration. * @param data type for {@code NextIteration} output and operands * @return a new instance of NextIteration @@ -3761,7 +4313,6 @@ public NoOp noOp() { * ] * * - * @param data type for {@code output} output * @param indices A tensor of indices. * @param depth A scalar defining the depth of the one hot dimension. * @param onValue A scalar defining the value to fill in output when {@code indices[j] = i}. @@ -3790,7 +4341,6 @@ public Ones ones(Operand dims, Class /** * Returns a tensor of ones with the same shape and type as x. * - * @param data type for {@code y} output * @param x a tensor of type T. * @param data type for {@code OnesLike} output and operands * @return a new instance of OnesLike @@ -3924,7 +4474,6 @@ public OrderedMapUnstageNoKey orderedMapUnstageNoKey(Operand indices, * [0, 0, 0, 0, 0, 0]] * * - * @param data type for {@code output} output * @param input The input value * @param paddings The paddings value * @param constantValues The constantValues value @@ -3952,7 +4501,6 @@ public Pad pad(Operand input, Operand * will copy pieces of the input into the output as they become available, in * some situations this can provide a performance benefit. 
* - * @param data type for {@code output} output * @param values Tensors to be concatenated. All must have size 1 in the first dimension * and same shape. * @param shape the final shape of the result; should be equal to the shapes of any input @@ -4020,7 +4568,6 @@ public ParallelConcat parallelConcat(Iterable> v * * * - * @param data type for {@code merged} output * @param indices The indices value * @param data The data value * @param data type for {@code ParallelDynamicStitch} output and operands @@ -4059,7 +4606,6 @@ public PartitionedCall partitionedCall(Iterable> args, * intended as a way to represent a value that will always be fed, and to * provide attrs that enable the fed value to be checked at runtime. * - * @param data type for {@code output} output * @param dtype The type of elements in the tensor. * @param options carries optional attribute values * @param data type for {@code Placeholder} output and operands @@ -4073,7 +4619,6 @@ public Placeholder placeholder(Class dtype, /** * A placeholder op that passes through {@code input} when its output is not fed. * - * @param data type for {@code output} output * @param input The default value to produce when {@code output} is not fed. * @param shape The (possibly partial) shape of the tensor. * @param data type for {@code PlaceholderWithDefault} output and operands @@ -4103,7 +4648,6 @@ public Print print(Operand input, Print.Options... options) { * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * - * @param data type for {@code output} output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. @@ -4119,7 +4663,6 @@ public Prod prod(Operand input, Operand data type for {@code output} output * @param tensor The tensor value * @param shape Defines the shape of the output tensor. * @param inputMin The minimum value of the input. 
@@ -4139,7 +4682,6 @@ public QuantizedReshape quantizedReshape(Operand tensor, * first dimension must match. *

The outputs are deterministic. * - * @param data type for {@code output} output * @param index A scalar tensor or a vector of dtype {@code dtype}. The index (or indices) to be shuffled. Must be within [0, max_index]. * @param seed A tensor of dtype {@code Tseed} and shape [3] or [n, 3]. The random seed. * @param maxIndex A scalar tensor or vector of dtype {@code dtype}. The upper bound(s) of the interval (inclusive). @@ -4164,7 +4706,6 @@ public RandomIndexShuffle randomIndexShuffle(Operand i * tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] * * - * @param data type for {@code output} output * @param start 0-D (scalar). First entry in the sequence. * @param limit 0-D (scalar). Upper limit of sequence, exclusive. * @param delta 0-D (scalar). Optional. Default is 1. Number that increments {@code start}. @@ -4203,7 +4744,6 @@ public Rank rank(Operand input) { * influenced by any of the writes which depend directly or indirectly on this * operation. * - * @param data type for {@code value} output * @param resource handle to the resource in which to store the variable. * @param dtype the dtype of the value. * @param data type for {@code ReadVariableOp} output and operands @@ -4214,6 +4754,23 @@ public ReadVariableOp readVariableOp(Operand data type for {@code Recv} output and operands + * @return a new instance of Recv + */ + public Recv recv(Class tensorType, String tensorName, String sendDevice, + Long sendDeviceIncarnation, String recvDevice, Recv.Options... options) { + return Recv.create(scope, tensorType, tensorName, sendDevice, sendDeviceIncarnation, recvDevice, options); + } + /** * Computes the "logical and" of elements across dimensions of a tensor. * Reduces {@code input} along the dimensions given in {@code axis}. Unless @@ -4257,7 +4814,6 @@ public ReduceAny reduceAny(Operand input, Operand axis * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. 
* - * @param data type for {@code output} output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. @@ -4277,7 +4833,6 @@ public ReduceMax reduceMax(Operand input, * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * - * @param data type for {@code output} output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. @@ -4297,7 +4852,6 @@ public ReduceMin reduceMin(Operand input, * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * - * @param data type for {@code output} output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. @@ -4317,7 +4871,6 @@ public ReduceProd reduceProd(Operand input, * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * - * @param data type for {@code output} output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. @@ -4330,10 +4883,65 @@ public ReduceSum reduceSum(Operand input, Operand data type for {@code RefEnter} output and operands + * @return a new instance of RefEnter + */ + public RefEnter refEnter(Operand data, String frameName, + RefEnter.Options... options) { + return RefEnter.create(scope, data, frameName, options); + } + + /** + * Exits the current frame to its parent frame. + * Exit makes its input {@code data} available to the parent frame. + * + * @param data The tensor to be made available to the parent frame. 
+ * @param data type for {@code RefExit} output and operands + * @return a new instance of RefExit + */ + public RefExit refExit(Operand data) { + return RefExit.create(scope, data); + } + + /** + * Return the same ref tensor as the input ref tensor. + * + * @param input The input value + * @param data type for {@code RefIdentity} output and operands + * @return a new instance of RefIdentity + */ + public RefIdentity refIdentity(Operand input) { + return RefIdentity.create(scope, input); + } + + /** + * Forwards the value of an available tensor from {@code inputs} to {@code output}. + * {@code Merge} waits for at least one of the tensors in {@code inputs} to become available. + * It is usually combined with {@code Switch} to implement branching. + *

{@code Merge} forwards the first tensor for become available to {@code output}, and sets + * {@code value_index} to its index in {@code inputs}. + * + * @param inputs The input tensors, exactly one of which will become available. + * @param data type for {@code RefMerge} output and operands + * @return a new instance of RefMerge + */ + public RefMerge refMerge(Iterable> inputs) { + return RefMerge.create(scope, inputs); + } + /** * Makes its input available to the next iteration. * - * @param data type for {@code output} output * @param data The tensor to be made available to the next iteration. * @param data type for {@code RefNextIteration} output and operands * @return a new instance of RefNextIteration @@ -4345,7 +4953,6 @@ public RefNextIteration refNextIteration(Operand data) { /** * Forwards the {@code index}th element of {@code inputs} to {@code output}. * - * @param data type for {@code output} output * @param index A scalar that determines the input that gets selected. * @param inputs A list of ref tensors, one of which will be forwarded to {@code output}. * @param data type for {@code RefSelect} output and operands @@ -4362,7 +4969,6 @@ public RefSelect refSelect(Operand index, * the data goes to {@code output_false}. *

See also {@code Switch} and {@code Merge}. * - * @param data type for {@code output_false} output * @param data The ref tensor to be forwarded to the appropriate output. * @param pred A scalar that specifies which output port will receive data. * @param data type for {@code RefSwitch} output and operands @@ -4375,7 +4981,6 @@ public RefSwitch refSwitch(Operand data, Operand /** * The Relayout operation * - * @param data type for {@code output} output * @param input The input value * @param layout The value of the layout attribute * @param data type for {@code Relayout} output and operands @@ -4388,7 +4993,6 @@ public Relayout relayout(Operand input, String layout) { /** * The RelayoutLike operation * - * @param data type for {@code output} output * @param input The input value * @param layoutInput The layoutInput value * @param data type for {@code RelayoutLike} output and operands @@ -4470,7 +5074,6 @@ public RemoteCall remoteCall(Operand target, Iterable> args, * reshape(t, []) ==> 7 * * - * @param data type for {@code output} output * @param tensor The tensor value * @param shape Defines the shape of the output tensor. * @param data type for {@code Reshape} output and operands @@ -4483,7 +5086,6 @@ public Reshape reshape(Operand tensor, Operand data type for {@code output} output * @param resource Should be from a scalar {@code Variable} node. * @param limit If incrementing ref would bring it above limit, instead generates an * 'OutOfRange' error. @@ -4511,7 +5113,6 @@ public ResourceCountUpTo resourceCountUpTo( * output[i, ..., j, :, ... 
:] = params[indices[i, ..., j], :, ..., :] * * - * @param data type for {@code output} output * @param resource The resource value * @param indices The indices value * @param dtype The value of the dtype attribute @@ -4527,7 +5128,6 @@ public ResourceGather resourceGather(Operand data type for {@code output} output * @param resource The resource value * @param indices The indices value * @param dtype The value of the dtype attribute @@ -4973,7 +5573,6 @@ public ResourceStridedSliceAssign resourceStridedSliceAssign * [12, 13, 14, 15]]]] * * - * @param data type for {@code output} output * @param tensor Up to 8-D. * @param axis 1-D. The indices of the dimensions to reverse. Must be in the range * {@code [-rank(tensor), rank(tensor))}. @@ -5035,7 +5634,6 @@ public Reverse reverse(Operand tensor, Operand * - * @param data type for {@code output} output * @param input The input to reverse. * @param seqLengths 1-D with length {@code input.dims(batch_dim)} and * {@code max(seq_lengths) <= input.dims(seq_dim)} @@ -5070,7 +5668,6 @@ public ReverseSequence reverseSequence(Operand input, * roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]] * * - * @param data type for {@code output} output * @param input The input value * @param shift Dimension must be 0-D or 1-D. {@code shift[i]} specifies the number of places by which * elements are shifted positively (towards larger indices) along the dimension @@ -5110,7 +5707,6 @@ public Roll roll(Operand input, Operand * * - * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. * @param indices A tensor of indices into the first dimension of {@code ref}. * @param updates A tensor of updated values to add to {@code ref}. @@ -5142,7 +5738,6 @@ public ScatterAdd scatterAdd(Operand ref, * the same location, their contributions divide. *

Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}. * - * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. * @param indices A tensor of indices into the first dimension of {@code ref}. * @param updates A tensor of values that {@code ref} is divided by. @@ -5177,7 +5772,6 @@ public ScatterDiv scatterDiv(Operand ref, * * * - * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. * @param indices A tensor of indices into the first dimension of {@code ref}. * @param updates A tensor of updated values to reduce into {@code ref}. @@ -5212,7 +5806,6 @@ public ScatterMax scatterMax(Operand ref, * * * - * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. * @param indices A tensor of indices into the first dimension of {@code ref}. * @param updates A tensor of updated values to reduce into {@code ref}. @@ -5244,7 +5837,6 @@ public ScatterMin scatterMin(Operand ref, * the same location, their contributions multiply. *

Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}. * - * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. * @param indices A tensor of indices into the first dimension of {@code ref}. * @param updates A tensor of updated values to multiply to {@code ref}. @@ -5330,20 +5922,28 @@ public ScatterMul scatterMul(Operand ref, * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], * [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]] * - *

Note that on CPU, if an out of bound index is found, an error is returned. - * On GPU, if an out of bound index is found, the index is ignored. + *

If {@code indices} contains any out-of-bound indices, depending on + * {@code bad_indices_policy}, the op will either return an error or ignore the + * out-of-bound indices. {@code bad_indices_policy} can be one of the following values: + *

    + *
  1. "" or "DEFAULT": raises on CPU and ignore on GPU. This is because + * historically on CPU and GPU we handle errors in different ways, and for + * backward compatibility we keep the default behavior.
  2. + *
  3. "ERROR": raises error; GPU does not support this value.
  4. + *
  5. "IGNORE": ignore the bad indices; supported on both CPU and GPU.
  6. + *
* - * @param data type for {@code output} output * @param indices Tensor of indices. * @param updates Values to scatter into the output tensor. * @param shape 1-D. The shape of the output tensor. + * @param options carries optional attribute values * @param data type for {@code ScatterNd} output and operands * @param data type for {@code ScatterNd} output and operands * @return a new instance of ScatterNd */ public ScatterNd scatterNd(Operand indices, - Operand updates, Operand shape) { - return ScatterNd.create(scope, indices, updates, shape); + Operand updates, Operand shape, ScatterNd.Options... options) { + return ScatterNd.create(scope, indices, updates, shape, options); } /** @@ -5375,7 +5975,6 @@ public ScatterNd scatterNd(Operand in *

See {@code tf.scatter_nd} for more details about how to make updates to * slices. * - * @param data type for {@code output_ref} output * @param ref A mutable Tensor. Should be from a Variable node. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. @@ -5390,6 +5989,40 @@ public ScatterNdAdd scatterNdAdd(Operand ref, return ScatterNdAdd.create(scope, ref, indices, updates, options); } + /** + * Computes element-wise maximum. + * + * @param ref A mutable Tensor. Should be from a Variable node. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated values + * to add to ref. + * @param options carries optional attribute values + * @param data type for {@code ScatterNdMax} output and operands + * @return a new instance of ScatterNdMax + */ + public ScatterNdMax scatterNdMax(Operand ref, + Operand indices, Operand updates, ScatterNdMax.Options... options) { + return ScatterNdMax.create(scope, ref, indices, updates, options); + } + + /** + * Computes element-wise minimum. + * + * @param ref A mutable Tensor. Should be from a Variable node. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated values + * to add to ref. + * @param options carries optional attribute values + * @param data type for {@code ScatterNdMin} output and operands + * @return a new instance of ScatterNdMin + */ + public ScatterNdMin scatterNdMin(Operand ref, + Operand indices, Operand updates, ScatterNdMin.Options... options) { + return ScatterNdMin.create(scope, ref, indices, updates, options); + } + /** * Applies sparse addition to {@code input} using individual values or slices * from {@code updates} according to indices {@code indices}. 
The updates are non-aliasing: @@ -5420,18 +6053,19 @@ public ScatterNdAdd scatterNdAdd(Operand ref, * *

See {@code tf.scatter_nd} for more details about how to make updates to slices. * - * @param data type for {@code output} output * @param input A Tensor. * @param indices A Tensor. Must be one of the following types: {@code int32}, {@code int64}. * A tensor of indices into {@code input}. * @param updates A Tensor. Must have the same type as ref. A tensor of updated values * to add to {@code input}. + * @param options carries optional attribute values * @param data type for {@code ScatterNdNonAliasingAdd} output and operands * @return a new instance of ScatterNdNonAliasingAdd */ public ScatterNdNonAliasingAdd scatterNdNonAliasingAdd(Operand input, - Operand indices, Operand updates) { - return ScatterNdNonAliasingAdd.create(scope, input, indices, updates); + Operand indices, Operand updates, + ScatterNdNonAliasingAdd.Options... options) { + return ScatterNdNonAliasingAdd.create(scope, input, indices, updates, options); } /** @@ -5464,7 +6098,6 @@ public ScatterNdNonAliasingAdd scatterNdNonAliasingAdd(Oper *

See {@code tf.scatter_nd} for more details about how to make updates to * slices. * - * @param data type for {@code output_ref} output * @param ref A mutable Tensor. Should be from a Variable node. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. @@ -5508,7 +6141,6 @@ public ScatterNdSub scatterNdSub(Operand ref, * slices. *

See also {@code tf.scatter_update} and {@code tf.batch_scatter_update}. * - * @param data type for {@code output_ref} output * @param ref A mutable Tensor. Should be from a Variable node. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. @@ -5544,7 +6176,6 @@ public ScatterNdUpdate scatterNdUpdate(Operand ref, * * * - * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. * @param indices A tensor of indices into the first dimension of {@code ref}. * @param updates A tensor of updated values to subtract from {@code ref}. @@ -5581,7 +6212,6 @@ public ScatterSub scatterSub(Operand ref, * *

See also {@code tf.batch_scatter_update} and {@code tf.scatter_nd_update}. * - * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. * @param indices A tensor of indices into the first dimension of {@code ref}. * @param updates A tensor of updated values to store in {@code ref}. @@ -5597,7 +6227,6 @@ public ScatterUpdate scatterUpdate(Operand ref, /** * The SelectV2 operation * - * @param data type for {@code output} output * @param condition The condition value * @param t The t value * @param e The e value @@ -5608,6 +6237,22 @@ public Select select(Operand condition, Operand t return Select.create(scope, condition, t, e); } + /** + * Sends the named tensor from send_device to recv_device. + * + * @param tensor The tensor to send. + * @param tensorName The name of the tensor to send. + * @param sendDevice The name of the device sending the tensor. + * @param sendDeviceIncarnation The current incarnation of send_device. + * @param recvDevice The name of the device receiving the tensor. + * @param options carries optional attribute values + * @return a new instance of Send + */ + public Send send(Operand tensor, String tensorName, String sendDevice, + Long sendDeviceIncarnation, String recvDevice, Send.Options... options) { + return Send.create(scope, tensor, tensorName, sendDevice, sendDeviceIncarnation, recvDevice, options); + } + /** * Computes the difference between two lists of numbers or strings. * Given a list {@code x} and a list {@code y}, this operation returns a list {@code out} that @@ -5627,8 +6272,6 @@ public Select select(Operand condition, Operand t * idx ==> [1, 3, 5] * * - * @param data type for {@code out} output - * @param data type for {@code idx} output * @param x 1-D. Values to keep. * @param y 1-D. Values to remove. 
* @param data type for {@code ListDiff} output and operands @@ -5657,8 +6300,6 @@ public SetDiff1d setDiff1d(Operand x, Operand * idx ==> [1, 3, 5] * * - * @param data type for {@code out} output - * @param data type for {@code idx} output * @param x 1-D. Values to keep. * @param y 1-D. Values to remove. * @param outIdx The value of the outIdx attribute @@ -5700,7 +6341,6 @@ public SetSize setSize(Operand setIndices, Operand setV * shape(t) ==> [2, 2, 3] * * - * @param data type for {@code output} output * @param input The input value * @return a new instance of Shape, with default output types */ @@ -5717,7 +6357,6 @@ public org.tensorflow.op.core.Shape shape(Operand input * shape(t) ==> [2, 2, 3] * * - * @param data type for {@code output} output * @param input The input value * @param outType The value of the outType attribute * @param data type for {@code Shape} output and operands @@ -5732,7 +6371,6 @@ public org.tensorflow.op.core.Shape shape(Operand data type for {@code output} output * @param input The input value * @return a new instance of ShapeN, with default output types */ @@ -5744,7 +6382,6 @@ public ShapeN shapeN(Iterable> input) { * Returns shape of tensors. * This operation returns N 1-D integer tensors representing shape of {@code input[i]s}. 
* - * @param data type for {@code output} output * @param input The input value * @param outType The value of the outType attribute * @param data type for {@code ShapeN} output and operands @@ -5765,7 +6402,6 @@ public ShapeN shapeN(Iterable> i * size(t) ==> 12 * * - * @param data type for {@code output} output * @param input The input value * @return a new instance of Size, with default output types */ @@ -5783,7 +6419,6 @@ public Size size(Operand input) { * size(t) ==> 12 * * - * @param data type for {@code output} output * @param input The input value * @param outType The value of the outType attribute * @param data type for {@code Size} output and operands @@ -5813,7 +6448,6 @@ public Skipgram skipgram(String filename, Long batchSize, Skipgram.Options... op *

Requirements: * 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) * - * @param data type for {@code output} output * @param input The input value * @param begin begin[i] specifies the offset into the 'i'th dimension of * 'input' to slice from. @@ -5833,7 +6467,6 @@ public Slice slice(Operand input, Ope /** * Returns a copy of the input tensor. * - * @param data type for {@code output} output * @param input The input value * @param data type for {@code Snapshot} output and operands * @return a new instance of Snapshot @@ -5941,7 +6574,6 @@ public Snapshot snapshot(Operand input) { *

Among others, this operation is useful for reducing atrous convolution into * regular convolution. * - * @param data type for {@code output} output * @param input N-D with shape {@code input_shape = [batch] + spatial_shape + remaining_shape}, * where spatial_shape has {@code M} dimensions. * @param blockShape 1-D with shape {@code [M]}, all values must be >= 1. @@ -5960,7 +6592,6 @@ public SpaceToBatchNd spaceToBatchNd(Operand input, /** * Splits a tensor into {@code num_split} tensors along one dimension. * - * @param data type for {@code output} output * @param axis 0-D. The dimension along which to split. Must be in the range * {@code [-rank(value), rank(value))}. * @param value The tensor to split. @@ -5976,7 +6607,6 @@ public Split split(Operand axis, Operand value, /** * Splits a tensor into {@code num_split} tensors along one dimension. * - * @param data type for {@code output} output * @param value The tensor to split. * @param sizeSplits list containing the sizes of each output tensor along the split * dimension. Must sum to the dimension of value along split_dim. @@ -6009,7 +6639,6 @@ public SplitV splitV(Operand value, Operand * - * @param data type for {@code output} output * @param input The {@code input} to squeeze. * @param options carries optional attribute values * @param data type for {@code Squeeze} output and operands @@ -6037,7 +6666,6 @@ public Squeeze squeeze(Operand input, Squeeze.Options... * *

This is the opposite of {@code unpack}. * - * @param data type for {@code output} output * @param values Must be of same shape and type. * @param options carries optional attribute values * @param data type for {@code Pack} output and operands @@ -6047,6 +6675,58 @@ public Stack stack(Iterable> values, Stack.Optio return Stack.create(scope, values, options); } + /** + * Delete the stack from its resource container. + * + * @param handle The handle to a stack. + * @return a new instance of StackClose + */ + public StackClose stackClose(Operand handle) { + return StackClose.create(scope, handle); + } + + /** + * A stack that produces elements in first-in last-out order. + * + * @param maxSize The maximum size of the stack if non-negative. If negative, the stack + * size is unlimited. + * @param elemType The type of the elements on the stack. + * @param options carries optional attribute values + * @param data type for {@code StackV2} output and operands + * @return a new instance of StackCreate + */ + public StackCreate stackCreate(Operand maxSize, Class elemType, + StackCreate.Options... options) { + return StackCreate.create(scope, maxSize, elemType, options); + } + + /** + * Pop the element at the top of the stack. + * + * @param handle The handle to a stack. + * @param elemType The type of the elem that is popped. + * @param data type for {@code StackPopV2} output and operands + * @return a new instance of StackPop + */ + public StackPop stackPop(Operand handle, + Class elemType) { + return StackPop.create(scope, handle, elemType); + } + + /** + * Push an element onto the stack. + * + * @param handle The handle to a stack. + * @param elem The tensor to be pushed onto the stack. + * @param options carries optional attribute values + * @param data type for {@code StackPushV2} output and operands + * @return a new instance of StackPush + */ + public StackPush stackPush(Operand handle, Operand elem, + StackPush.Options... 
options) { + return StackPush.create(scope, handle, elem, options); + } + /** * Stage values similar to a lightweight Enqueue. * The basic functionality of this Op is similar to a queue with many @@ -6212,6 +6892,44 @@ public StatefulWhile statefulWhile(Iterable> input, ConcreteFunction return StatefulWhile.create(scope, input, cond, body, options); } + /** + * An n-way switch statement which calls a single branch function. + *

+   *  An n-way switch statement, implementing the following:
+   *  ```
+   *  switch (branch_index) {
+   *    case 0:
+   *      output = branches[0](input);
+   *      break;
+   *    case 1:
+   *      output = branches[1](input);
+   *      break;
+   *    ...
+   *    case [[nbranches-1]]:
+   *    default:
+   *      output = branches[nbranches-1](input);
+   *      break;
+   *  }
+   *  ```
+   *
+   *  This should only be used when the none of branches has stateful ops.
+   *  
+ * + * @param branchIndex The branch selector, an int32 Tensor. + * @param input A list of input tensors passed to the branch function. + * @param Tout A list of output types. + * @param branches
+   *    A list of functions each of which takes 'inputs' and returns a list of
+   *    tensors, whose types are the same as what every other branch returns.
+   *  
+ * @param options carries optional attribute values + * @return a new instance of StatelessCase + */ + public StatelessCase statelessCase(Operand branchIndex, Iterable> input, + List> Tout, List branches, Case.Options... options) { + return StatelessCase.create(scope, branchIndex, input, Tout, branches, options); + } + /** * output = cond ? then_branch(input) : else_branch(input) * @@ -6274,6 +6992,25 @@ public StatelessWhile statelessWhile(Iterable> input, ConcreteFunctio return StatelessWhile.create(scope, input, cond, body, options); } + /** + * Stochastically cast a given tensor from floats to ints. + * The values are cast with a deterministic pseudo-random tensor from a uniform distribution generated from user given key, counter, algorithm. Values will saturate if out of the specified integer type range, and will become zero if inputs are NaN. + *

The outputs are a deterministic function of {@code input}, {@code key}, {@code counter}, {@code alg}. + * + * @param input The operand to stochastically cast to int. + * @param key Key for the counter-based RNG algorithm (shape uint64[1]). + * @param counter Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. + * @param alg The RNG algorithm (shape int32[]). + * @param Tout The type of the output. + * @param data type for {@code StochasticCastToInt} output and operands + * @return a new instance of StochasticCastToInt + */ + public StochasticCastToInt stochasticCastToInt( + Operand input, Operand key, + Operand counter, Operand alg, Class Tout) { + return StochasticCastToInt.create(scope, input, key, counter, alg, Tout); + } + /** * Stops gradient computation. * When executed in a graph, this op outputs its input tensor as-is. @@ -6327,7 +7064,6 @@ public StatelessWhile statelessWhile(Iterable> input, ConcreteFunctio * example generation process. * * - * @param data type for {@code output} output * @param input The input value * @param data type for {@code StopGradient} output and operands * @return a new instance of StopGradient @@ -6345,16 +7081,17 @@ public StopGradient stopGradient(Operand input) { * equal to `n`, but this need not be the case. Each range specification entry can be one of the * following: * - *

- An ellipsis (...) using {@link Indices#ellipsis()}. Ellipses are used to imply zero or - * more dimensions of full-dimension selection. For example, {@code stridedSlice(foo, - * Indices.ellipsis()} is the identity slice. + *

- An ellipsis (...) using {@link org.tensorflow.ndarray.index.Indices#ellipsis()}. Ellipses + * are used to imply zero or more dimensions of full-dimension selection. For example, {@code + * stridedSlice(foo, Indices.ellipsis()} is the identity slice. * - *

- A new axis using {@link Indices#newAxis()}. This is used to insert a new shape=1 - * dimension. For example, `{@code stridedSlice(foo, Indices.newAxis())} where {@code foo} is - * shape {@code (3, 4)} produces a {@code (1, 3, 4)} tensor. + *

- A new axis using {@link org.tensorflow.ndarray.index.Indices#newAxis()}. This is used to + * insert a new shape=1 dimension. For example, `{@code stridedSlice(foo, Indices.newAxis())} + * where {@code foo} is shape {@code (3, 4)} produces a {@code (1, 3, 4)} tensor. * - *

- A range {@code begin:end:stride} using {@link Indices#slice(Long, Long, long)} - * Index.slice()} or {@link Indices#all()}. This is used to specify how much to choose from a + *

- A range {@code begin:end:stride} using {@link + * org.tensorflow.ndarray.index.Indices#slice(Long, Long, long)} Index.slice()} or {@link + * org.tensorflow.ndarray.index.Indices#all()}. This is used to specify how much to choose from a * given dimension. {@code stride} can be any integer but 0. {@code begin} is an integer which * represents the index of the first value to select while {@code end} represents the index of the * last value to select (exclusive). Begin and end can be null, in which case the index begins or @@ -6371,10 +7108,11 @@ public StopGradient stopGradient(Operand input) { * elements). For example {@code foo = [1,2,3,4]; stridedSlice(foo, Indices.slice(-2, null, -1)} * is {@code [4,3]}. * - *

- A single index using {@link Indices#at(long)}. This is used to keep only elements that - * have a given index. For example ({@code stridedSlice(foo, Indices.at(2))} on a shape {@code - * (5,6)} tensor produces a shape {@code (6,)} tensor. The dimension can be kept with size one - * using {@link Indices#at(long, boolean)}. + *

- A single index using {@link org.tensorflow.ndarray.index.Indices#at(long)}. This is used + * to keep only elements that have a given index. For example ({@code stridedSlice(foo, + * Indices.at(2))} on a shape {@code (5,6)} tensor produces a shape {@code (6,)} tensor. The + * dimension can be kept with size one using {@link org.tensorflow.ndarray.index.Indices#at(long, + * boolean)}. * *

These semantics generally follow NumPy's indexing semantics, which can be found here: https://numpy.org/doc/stable/reference/arrays.indexing.html @@ -6382,9 +7120,9 @@ public StopGradient stopGradient(Operand input) { *

Requirements: `0 != strides[i] for i in [0, m)` Only one ellipsis. * * @param data type for {@code output()} output - * @param indices The indices to slice. See {@link Indices}. + * @param indices The indices to slice. See {@link org.tensorflow.ndarray.index.Indices}. * @return a new instance of StridedSlice - * @see Indices + * @see org.tensorflow.ndarray.index.Indices */ public StridedSlice stridedSlice(Operand input, Index... indices) { return StridedSliceHelper.stridedSlice(scope, input, indices); @@ -6490,7 +7228,6 @@ public StridedSlice stridedSlice(Operand input, Index... * {@code 0 != strides[i] for i in [0, m)} * {@code ellipsis_mask must be a power of two (only one ellipsis)} * - * @param data type for {@code output} output * @param input The input value * @param begin {@code begin[k]} specifies the offset into the {@code k}th range specification. * The exact dimension this corresponds to will be determined by context. @@ -6527,9 +7264,10 @@ public StridedSlice stridedSlice(Operand * @param data type for {@code outputRef()} output * @param ref the tensor to assign to. * @param value the value to assign. - * @param indices The indices to slice. See {@link Indices}. + * @param indices The indices to slice. See {@link org.tensorflow.ndarray.index.Indices}. * @return a new instance of StridedSliceAssign - * @see org.tensorflow.op.Ops#stridedSlice(Operand, Index...) + * @see org.tensorflow.op.Ops#stridedSlice(org.tensorflow.Operand, + * org.tensorflow.ndarray.index.Index...) */ public StridedSliceAssign stridedSliceAssign(Operand ref, Operand value, Index... indices) { @@ -6544,7 +7282,6 @@ public StridedSliceAssign stridedSliceAssign(Operand ref *

NOTE this op currently does not support broadcasting and so {@code value}'s * shape must be exactly the shape produced by the slice of {@code ref}. * - * @param data type for {@code output_ref} output * @param ref The ref value * @param begin The begin value * @param end The end value @@ -6571,7 +7308,6 @@ public StridedSliceAssign stridedSliceAs * {@code dy} is the input gradient to be propagated and {@code shape} is the * shape of {@code StridedSlice}'s {@code input}. * - * @param data type for {@code output} output * @param shape The shape value * @param begin The begin value * @param end The end value @@ -6595,7 +7331,6 @@ public StridedSliceGrad stridedSliceGrad * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * - * @param data type for {@code output} output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. @@ -6614,7 +7349,6 @@ public Sum sum(Operand input, Operand * the data goes to {@code output_false}. *

See also {@code RefSwitch} and {@code Merge}. * - * @param data type for {@code output_false} output * @param data The tensor to be forwarded to the appropriate output. * @param pred A scalar that specifies which output port will receive data. * @param data type for {@code Switch} output and operands @@ -6624,6 +7358,17 @@ public SwitchCond switchCond(Operand data, Operand SwitchCond switchCond(Operand data, Operand data type for {@code ref} output * @param shape The shape of the variable tensor. * @param dtype The type of elements in the variable tensor. * @param options carries optional attribute values @@ -6689,7 +7433,6 @@ public TensorArrayClose tensorArrayClose(Operand handle) { * *

All elements must have the same shape (excepting the first dimension). * - * @param data type for {@code value} output * @param handle The handle to a TensorArray. * @param flowIn A float scalar that enforces proper chaining of operations. * @param dtype The type of the elem that is returned. @@ -6706,7 +7449,6 @@ public TensorArrayConcat tensorArrayConcat(Operand data type for {@code value} output * @param handle The handle to a TensorArray. * @param indices The locations in the TensorArray from which to read tensor elements. * @param flowIn A float scalar that enforces proper chaining of operations. @@ -6787,7 +7529,6 @@ public TensorArrayGradWithShape tensorArrayGradWithShape(Operand data type for {@code value} output * @param handle The handle value * @param flowIn The flowIn value * @param dtype The value of the dtype attribute @@ -6803,7 +7544,6 @@ public TensorArrayPack tensorArrayPack(Operand han /** * Read an element from the TensorArray into output {@code value}. * - * @param data type for {@code value} output * @param handle The handle to a TensorArray. * @param index The index value * @param flowIn A float scalar that enforces proper chaining of operations. @@ -6915,7 +7655,6 @@ public TensorArrayWrite tensorArrayWrite(Operand handle, Operan * tensor: The concated result. * lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient. 
* - * @param data type for {@code tensor} output * @param inputHandle The inputHandle value * @param elementShape The elementShape value * @param leadingDims The leadingDims value @@ -6948,7 +7687,6 @@ public TensorListConcatLists tensorListConcatLists( * input_handle: the list * element_shape: the shape of elements of the list * - * @param data type for {@code element_shape} output * @param inputHandle The inputHandle value * @param shapeType The value of the shapeType attribute * @param data type for {@code TensorListElementShape} output and operands @@ -6982,7 +7720,6 @@ public TensorListFromTensor tensorListFromTensor(Operand tensor * indices: The indices used to index into the list. * values: The tensor. * - * @param data type for {@code values} output * @param inputHandle The inputHandle value * @param indices The indices value * @param elementShape The elementShape value @@ -7002,7 +7739,6 @@ public TensorListGather tensorListGather( * index: the position in the list from which an element will be retrieved * item: the element at that position * - * @param data type for {@code item} output * @param inputHandle The inputHandle value * @param index The index value * @param elementShape The elementShape value @@ -7036,7 +7772,6 @@ public TensorListLength tensorListLength(Operand inputHandle) { * element_dtype: the type of elements in the list * element_shape: the shape of the output tensor * - * @param data type for {@code tensor} output * @param inputHandle The inputHandle value * @param elementShape The elementShape value * @param elementDtype The value of the elementDtype attribute @@ -7198,7 +7933,6 @@ public TensorListSplit tensorListSplit(Operand tensor, * tensor: the gathered result * num_elements: optional. If not -1, the number of elements in the list. 
* - * @param data type for {@code tensor} output * @param inputHandle The inputHandle value * @param elementShape The elementShape value * @param elementDtype The value of the elementDtype attribute @@ -7266,7 +8000,6 @@ public TensorMapInsert tensorMapInsert(Operand inputHandle, * key: the key to be looked up * value: the value found from the given key * - * @param data type for {@code value} output * @param inputHandle The inputHandle value * @param key The key value * @param valueDtype The value of the valueDtype attribute @@ -7295,7 +8028,6 @@ public TensorMapSize tensorMapSize(Operand inputHandle) { * input_handle: the input map * keys: the returned Tensor of all keys in the map * - * @param data type for {@code keys} output * @param inputHandle The inputHandle value * @param keyDtype The value of the keyDtype attribute * @param data type for {@code TensorMapStackKeys} output and operands @@ -7366,19 +8098,28 @@ public TensorMapStackKeys tensorMapStackKeys( * * * - *

Note: on CPU, if an out of bound index is found, an error is returned. - * On GPU, if an out of bound index is found, the index is ignored. + *

If {@code indices} contains any out-of-bound indices, depending on + * {@code bad_indices_policy}, the op will either return an error or ignore the + * out-of-bound indices. {@code bad_indices_policy} can be one of the following values: + *

    + *
  1. "" or "DEFAULT": raises on CPU and ignore on GPU. This is because + * historically on CPU and GPU we handle errors in different ways, and for + * backward compatibility we keep the default behavior.
  2. + *
  3. "ERROR": raises error; GPU does not support this value.
  4. + *
  5. "IGNORE": ignore the bad indices; supported on both CPU and GPU.
  6. + *
* - * @param data type for {@code output} output * @param tensor Tensor to copy/update. * @param indices Index tensor. * @param updates Updates to scatter into output. + * @param options carries optional attribute values * @param data type for {@code TensorScatterAdd} output and operands * @return a new instance of TensorScatterNdAdd */ public TensorScatterNdAdd tensorScatterNdAdd(Operand tensor, - Operand indices, Operand updates) { - return TensorScatterNdAdd.create(scope, tensor, indices, updates); + Operand indices, Operand updates, + TensorScatterNdAdd.Options... options) { + return TensorScatterNdAdd.create(scope, tensor, indices, updates, options); } /** @@ -7398,31 +8139,33 @@ public TensorScatterNdAdd tensorScatterNdAdd(Operand ten * *

Refer to {@code tf.tensor_scatter_nd_update} for more details. * - * @param data type for {@code output} output * @param tensor Tensor to update. * @param indices Index tensor. * @param updates Updates to scatter into output. + * @param options carries optional attribute values * @param data type for {@code TensorScatterMax} output and operands * @return a new instance of TensorScatterNdMax */ public TensorScatterNdMax tensorScatterNdMax(Operand tensor, - Operand indices, Operand updates) { - return TensorScatterNdMax.create(scope, tensor, indices, updates); + Operand indices, Operand updates, + TensorScatterNdMax.Options... options) { + return TensorScatterNdMax.create(scope, tensor, indices, updates, options); } /** * The TensorScatterMin operation * - * @param data type for {@code output} output * @param tensor Tensor to update. * @param indices Index tensor. * @param updates Updates to scatter into output. + * @param options carries optional attribute values * @param data type for {@code TensorScatterMin} output and operands * @return a new instance of TensorScatterNdMin */ public TensorScatterNdMin tensorScatterNdMin(Operand tensor, - Operand indices, Operand updates) { - return TensorScatterNdMin.create(scope, tensor, indices, updates); + Operand indices, Operand updates, + TensorScatterNdMin.Options... options) { + return TensorScatterNdMin.create(scope, tensor, indices, updates, options); } /** @@ -7483,16 +8226,17 @@ public TensorScatterNdMin tensorScatterNdMin(Operand ten *

Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, the index is ignored. * - * @param data type for {@code output} output * @param tensor Tensor to copy/update. * @param indices Index tensor. * @param updates Updates to scatter into output. + * @param options carries optional attribute values * @param data type for {@code TensorScatterSub} output and operands * @return a new instance of TensorScatterNdSub */ public TensorScatterNdSub tensorScatterNdSub(Operand tensor, - Operand indices, Operand updates) { - return TensorScatterNdSub.create(scope, tensor, indices, updates); + Operand indices, Operand updates, + TensorScatterNdSub.Options... options) { + return TensorScatterNdSub.create(scope, tensor, indices, updates, options); } /** @@ -7503,7 +8247,6 @@ public TensorScatterNdSub tensorScatterNdSub(Operand ten * scattered onto an existing tensor (as opposed to a zero-tensor). If the memory * for the existing tensor cannot be re-used, a copy is made and updated. *

If {@code indices} contains duplicates, then we pick the last update for the index. - *

If an out of bound index is found on CPU, an error is returned. *

WARNING: There are some GPU specific semantics for this operation. *

    *
  • If an out of bound index is found, the index is ignored.
  • @@ -7525,18 +8268,29 @@ public TensorScatterNdSub tensorScatterNdSub(Operand ten *
        *  indices.shape[:-1] + tensor.shape[indices.shape[-1]:]
        *  
    + *

    If {@code indices} contains any out-of-bound indices, depending on + * {@code bad_indices_policy}, the op will either return an error or ignore the + * out-of-bound indices. {@code bad_indices_policy} can be one of the following values: + *

      + *
    1. "" or "DEFAULT": raises on CPU and ignore on GPU. This is because + * historically on CPU and GPU we handle errors in different ways, and for + * backward compatibility we keep the default behavior.
    2. + *
    3. "ERROR": raises error; GPU does not support this value.
    4. + *
    5. "IGNORE": ignore the bad indices; supported on both CPU and GPU.
    6. + *
    *

    For usage examples see the python tf.tensor_scatter_nd_update {@link org.tensorflow.op.Ops#tensorScatterNdUpdate} function * - * @param data type for {@code output} output * @param tensor Tensor to copy/update. * @param indices Index tensor. * @param updates Updates to scatter into output. + * @param options carries optional attribute values * @param data type for {@code TensorScatterUpdate} output and operands * @return a new instance of TensorScatterNdUpdate */ public TensorScatterNdUpdate tensorScatterNdUpdate(Operand tensor, - Operand indices, Operand updates) { - return TensorScatterNdUpdate.create(scope, tensor, indices, updates); + Operand indices, Operand updates, + TensorScatterNdUpdate.Options... options) { + return TensorScatterNdUpdate.create(scope, tensor, indices, updates, options); } /** @@ -7547,7 +8301,6 @@ public TensorScatterNdUpdate tensorScatterNdUpdate(Operand< *

    NOTE this op currently does not support broadcasting and so {@code value}'s shape * must be exactly the shape produced by the slice of {@code input}. * - * @param data type for {@code output} output * @param input The input value * @param begin The begin value * @param end The end value @@ -7598,8 +8351,7 @@ public TensorStridedSliceUpdate tensorSt * * * - * @param data type for {@code output} output - * @param input 1-D or higher. + * @param input Can be of any rank. * @param multiples 1-D. Length must be the same as the number of dimensions in {@code input} * @param data type for {@code Tile} output and operands * @return a new instance of Tile @@ -7687,7 +8439,6 @@ public TopKWithUnique topKWithUnique(Operand input, Long k) { * assumed to possibly belong to the same batch. If left empty, the op name will * be used as the shared name. * - * @param data type for {@code unbatched_tensor} output * @param batchedTensor The batchedTensor value * @param batchIndex The batchIndex value * @param id The id value @@ -7717,7 +8468,6 @@ public Unbatch unbatch(Operand batchedTensor, Operand data type for {@code batched_grad} output * @param originalInput The originalInput value * @param batchIndex The batchIndex value * @param grad The grad value @@ -7732,6 +8482,34 @@ public UnbatchGrad unbatchGrad(Operand originalInput, return UnbatchGrad.create(scope, originalInput, batchIndex, grad, id, options); } + /** + * Perform clip by value on the quantized Tensor {@code operand}. + * Given quantized {@code operand} which was quantized using {@code scales} and {@code zero_points}, performs clip by value using {@code min} and {@code max} values. + * If quantization_axis is -1 (per-tensor quantized), the entire operand is clipped using scalar min, max. + * Otherwise (per-channel quantized), the clipping is also done per-channel. + * + * @param operand Must be a Tensor of T. + * @param min The min value(s) to clip operand. Must be a Tensor of T. 
+ * Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (operand.dim_size(quantization_axis),) (per-axis quantization). + * @param max The min value(s) to clip operand. Must be a Tensor of T. + * Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (operand.dim_size(quantization_axis),) (per-axis quantization). + * @param scales The float value(s) used as scale(s) when quantizing {@code operand}, {@code min} and {@code max}. + * Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (operand.dim_size(quantization_axis),) (per-axis quantization). + * @param zeroPoints The int32 value(s) used as zero_point(s) when quantizing {@code operand}, {@code min} and {@code max}. + * Same shape condition as scales. + * @param quantizationMinVal The quantization min value that was used when operand was quantized. + * @param quantizationMaxVal The quantization max value that was used when operand was quantized. + * @param options carries optional attribute values + * @param data type for {@code UniformQuantizedClipByValue} output and operands + * @return a new instance of UniformQuantizedClipByValue + */ + public UniformQuantizedClipByValue uniformQuantizedClipByValue( + Operand operand, Operand min, Operand max, Operand scales, + Operand zeroPoints, Long quantizationMinVal, Long quantizationMaxVal, + UniformQuantizedClipByValue.Options... options) { + return UniformQuantizedClipByValue.create(scope, operand, min, max, scales, zeroPoints, quantizationMinVal, quantizationMaxVal, options); + } + /** * Finds unique elements along an axis of a tensor. 
* This operation either returns a tensor {@code y} containing unique elements @@ -7771,8 +8549,6 @@ public UnbatchGrad unbatchGrad(Operand originalInput, * idx ==> [0, 1, 1] * * - * @param data type for {@code y} output - * @param data type for {@code idx} output * @param x A {@code Tensor}. * @param axis A {@code Tensor} of type {@code int32} (default: None). The axis of the Tensor to * find the unique elements. @@ -7822,8 +8598,6 @@ public Unique unique(Operand x, Operand * - * @param data type for {@code y} output - * @param data type for {@code idx} output * @param x A {@code Tensor}. * @param axis A {@code Tensor} of type {@code int32} (default: None). The axis of the Tensor to * find the unique elements. @@ -7880,8 +8654,6 @@ public Unique unique(Operand x, * count ==> [1, 2] * * - * @param data type for {@code y} output - * @param data type for {@code idx} output * @param x A {@code Tensor}. * @param axis A {@code Tensor} of type {@code int32} (default: None). The axis of the Tensor to * find the unique elements. @@ -7936,8 +8708,6 @@ public UniqueWithCounts uniqueWithCounts(Operand * count ==> [1, 2] * * - * @param data type for {@code y} output - * @param data type for {@code idx} output * @param x A {@code Tensor}. * @param axis A {@code Tensor} of type {@code int32} (default: None). The axis of the Tensor to * find the unique elements. @@ -7971,7 +8741,6 @@ public UniqueWithCounts uniqueWithCou * Equivalent to np.unravel_index *
    {@literal @}end_compatibility * - * @param data type for {@code output} output * @param indices An 0-D or 1-D {@code int} Tensor whose elements are indices into the * flattened version of an array of dimensions dims. * @param dims An 1-D {@code int} Tensor. The shape of the array to use for unraveling @@ -7995,7 +8764,6 @@ public UnravelIndex unravelIndex(Operand indices, Oper * Etc. *

    This is the opposite of {@code pack}. * - * @param data type for {@code output} output * @param value 1-D or higher, with {@code axis} dimension size equal to {@code num}. * @param num The value of the num attribute * @param options carries optional attribute values @@ -8020,6 +8788,62 @@ public Unstage unstage(List> dtypes, Unstage.Options... o return Unstage.create(scope, dtypes, options); } + /** + * Applies upper_bound(sorted_search_values, values) along each row. + * Each set of rows with the same index in (sorted_inputs, values) is treated + * independently. The resulting row is the equivalent of calling + * {@code np.searchsorted(sorted_inputs, values, side='right')}. + *

    The result is not a global index to the entire + * {@code Tensor}, but rather just the index in the last dimension. + *

    A 2-D example: + * sorted_sequence = [[0, 3, 9, 9, 10], + * [1, 2, 3, 4, 5]] + * values = [[2, 4, 9], + * [0, 2, 6]] + *

    result = UpperBound(sorted_sequence, values) + *

    result == [[1, 2, 4], + * [0, 2, 5]] + * + * @param sortedInputs 2-D Tensor where each row is ordered. + * @param values 2-D Tensor with the same numbers of rows as {@code sorted_search_values}. Contains + * the values that will be searched for in {@code sorted_search_values}. + * @param data type for {@code UpperBound} output and operands + * @return a new instance of UpperBound, with default output types + */ + public UpperBound upperBound(Operand sortedInputs, + Operand values) { + return UpperBound.create(scope, sortedInputs, values); + } + + /** + * Applies upper_bound(sorted_search_values, values) along each row. + * Each set of rows with the same index in (sorted_inputs, values) is treated + * independently. The resulting row is the equivalent of calling + * {@code np.searchsorted(sorted_inputs, values, side='right')}. + *

    The result is not a global index to the entire + * {@code Tensor}, but rather just the index in the last dimension. + *

    A 2-D example: + * sorted_sequence = [[0, 3, 9, 9, 10], + * [1, 2, 3, 4, 5]] + * values = [[2, 4, 9], + * [0, 2, 6]] + *

    result = UpperBound(sorted_sequence, values) + *

    result == [[1, 2, 4], + * [0, 2, 5]] + * + * @param sortedInputs 2-D Tensor where each row is ordered. + * @param values 2-D Tensor with the same numbers of rows as {@code sorted_search_values}. Contains + * the values that will be searched for in {@code sorted_search_values}. + * @param outType The value of the outType attribute + * @param data type for {@code UpperBound} output and operands + * @param data type for {@code UpperBound} output and operands + * @return a new instance of UpperBound + */ + public UpperBound upperBound(Operand sortedInputs, + Operand values, Class outType) { + return UpperBound.create(scope, sortedInputs, values, outType); + } + /** * Creates a handle to a Variable resource. * @@ -8066,7 +8890,6 @@ public Variable variable(Operand init, Variable.Options. * TODO(zhifengc/mrry): Adds a pointer to a more detail document * about sharing states in tensorflow. * - * @param data type for {@code ref} output * @param shape The shape of the variable tensor. * @param dtype The type of elements in the variable tensor. * @param options carries optional attribute values @@ -8087,7 +8910,6 @@ public Variable variable(Shape shape, Class dtype, * shape(t) ==> [2, 2, 3] * * - * @param data type for {@code output} output * @param input The input value * @return a new instance of VariableShape, with default output types */ @@ -8104,7 +8926,6 @@ public VariableShape variableShape(Operand input) { * shape(t) ==> [2, 2, 3] * * - * @param data type for {@code output} output * @param input The input value * @param outType The value of the outType attribute * @param data type for {@code VariableShape} output and operands @@ -8225,7 +9046,6 @@ public Zeros zeros(Operand dims, Class data type for {@code y} output * @param x a tensor of type T. 
* @param data type for {@code ZerosLike} output and operands * @return a new instance of ZerosLike @@ -8237,7 +9057,7 @@ public ZerosLike zerosLike(Operand x) { /** * Returns an API that builds operations with the provided name prefix. * - * @see {@link Scope#withSubScope(String)} + * @see Scope#withSubScope(String) */ public Ops withSubScope(String childScopeName) { return new Ops(scope.withSubScope(childScopeName)); @@ -8259,7 +9079,7 @@ public Ops withInitScope() { /** * Returns an API that uses the provided name for an op. * - * @see {@link Scope#withName(String)} + * @see Scope#withName(String) */ public Ops withName(String opName) { return new Ops(scope.withName(opName)); @@ -8268,7 +9088,7 @@ public Ops withName(String opName) { /** * Returns an API that places the created operations on the device(s) matching the provided spec. * - * @see {@link Scope#withDevice(DeviceSpec)} + * @see Scope#withDevice(DeviceSpec) */ public Ops withDevice(DeviceSpec deviceSpec) { return new Ops(scope.withDevice(deviceSpec)); @@ -8277,7 +9097,7 @@ public Ops withDevice(DeviceSpec deviceSpec) { /** * Returns an API that adds operations to the graph with the provided control dependencies. * - * @see {@link Scope#withControlDependencies(Iterable>)} + * @see Scope#withControlDependencies(Iterable) */ public Ops withControlDependencies(Iterable controls) { return new Ops(scope.withControlDependencies(controls)); @@ -8286,7 +9106,7 @@ public Ops withControlDependencies(Iterable controls) { /** * Returns an API that adds operations to the graph with the provided control dependencies. * - * @see {@link Scope#withControlDependencies(Iterable>)} + * @see Scope#withControlDependencies(Iterable) */ public Ops withControlDependencies(Op... controls) { return withControlDependencies(Arrays.asList(controls)); @@ -8295,7 +9115,7 @@ public Ops withControlDependencies(Op... controls) { /** * Returns an API that adds operations to the graph with the provided control dependencies. 
* - * @see {@link Scope#withControlDependencyOps(Iterable)} + * @see Scope#withControlDependencyOps(Iterable) */ public Ops withControlDependencyOps(Iterable controls) { return new Ops(scope.withControlDependencyOps(controls)); @@ -8304,7 +9124,7 @@ public Ops withControlDependencyOps(Iterable controls) { /** * Returns an API that adds operations to the graph with the provided control dependencies. * - * @see {@link Scope#withControlDependencyOps(Iterable)} + * @see Scope#withControlDependencyOps(Iterable) */ public Ops withControlDependencyOps(Operation... controls) { return withControlDependencyOps(Arrays.asList(controls)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/QuantizationOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/QuantizationOps.java index e0302dd0252..8bd174ba427 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/QuantizationOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/QuantizationOps.java @@ -32,8 +32,15 @@ import org.tensorflow.op.quantization.QuantizeAndDequantizeV4Grad; import org.tensorflow.op.quantization.QuantizeDownAndShrinkRange; import org.tensorflow.op.quantization.QuantizedConcat; +import org.tensorflow.op.quantization.QuantizedMatMulWithBiasAndDequantize; +import org.tensorflow.op.quantization.QuantizedMatMulWithBiasAndRequantize; import org.tensorflow.op.quantization.RequantizationRange; import org.tensorflow.op.quantization.Requantize; +import org.tensorflow.op.quantization.UniformDequantize; +import org.tensorflow.op.quantization.UniformQuantize; +import org.tensorflow.op.quantization.UniformQuantizedDot; +import org.tensorflow.op.quantization.UniformQuantizedDotHybrid; +import org.tensorflow.op.quantization.UniformRequantize; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -42,7 +49,7 @@ /** * An API for 
building {@code quantization} operations as {@link Op Op}s * - * @see {@link Ops} + * @see Ops */ public final class QuantizationOps { private final Scope scope; @@ -100,7 +107,6 @@ public final class QuantizationOps { * max_range / max_expected_T); * * - * @param data type for {@code output} output * @param input The input value * @param minRange The minimum scalar value possibly produced for the input. * @param maxRange The maximum scalar value possibly produced for the input. @@ -158,7 +164,6 @@ public Dequantize dequantize(Operand input, * max_range / max_expected_T); * * - * @param data type for {@code output} output * @param input The input value * @param minRange The minimum scalar value possibly produced for the input. * @param maxRange The maximum scalar value possibly produced for the input. @@ -268,6 +273,28 @@ public FakeQuantWithMinMaxArgsGradient fakeQuantWithMinMaxArgsGradient( *

*

This operation has a gradient and thus allows for training {@code min} and {@code max} * values. + *

+ *
+ *
+ *

constant_input = tf.constant([[1.2, -0.3, 0.7], [2.1, 0.5, -1.0]], dtype=tf.float32) + *

min_val = -0.5 + * max_val = 0.8 + * num_bits = 8 + * narrow_range = False #False:for the quantization range [0; 2^num_bits - 1] + *

quantized_data = tf.quantization.fake_quant_with_min_max_vars( + * ... inputs=constant_input, min=min_val, max=max_val, num_bits=num_bits, narrow_range=narrow_range + * ... ) + *

print("Input:\n", constant_input.numpy()) + * Input: + * [[ 1.2 -0.3 0.7] + * [ 2.1 0.5 -1. ]] + * print("Output:\n", quantized_data.numpy()) + * Output: + * [[ 0.8003921 -0.3007843 0.6984313] + * [ 0.8003921 0.4996078 -0.4996078]] + *

+ *
+ *
* * @param inputs The inputs value * @param min The min value @@ -449,7 +476,6 @@ public FakeQuantWithMinMaxVarsPerChannelGradient fakeQuantWithMinMaxVarsPerChann * The legacy default value for this is 0.01, but it is strongly suggested to * set it to 0 for new uses. * - * @param data type for {@code output} output * @param input The input value * @param minRange The minimum value of the quantization range. This value may be adjusted by the * op depending on other parameters. The adjusted value is written to {@code output_min}. @@ -475,7 +501,6 @@ public Quantize quantize(Operand input, * This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a * tensor, so its value can change during training. * - * @param data type for {@code output} output * @param input The input value * @param inputMin The inputMin value * @param inputMax The inputMax value @@ -495,7 +520,6 @@ public QuantizeAndDequantize quantizeAndDequantize(Operan * This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a * tensor, so its value can change during training. * - * @param data type for {@code output} output * @param input The input value * @param inputMin The inputMin value * @param inputMax The inputMax value @@ -515,7 +539,6 @@ public QuantizeAndDequantizeV3 quantizeAndDequantizeV3(Op * This is almost identical to QuantizeAndDequantizeV2, except that it returns a * gradient of 1 for inputs that are within the quantization range, or 0 otherwise. * - * @param data type for {@code output} output * @param input Tensor to quantize and then dequantize. * @param inputMin If {@code range_given == True}, this specifies the minimum input value that needs to * be represented, otherwise it is determined from the min value of the {@code input} @@ -537,7 +560,6 @@ public QuantizeAndDequantizeV4 quantizeAndDequantizeV4(Op * Returns a gradient of 1 for inputs that are within the quantization range, * or 0 otherwise. 
* - * @param data type for {@code input_backprop} output * @param gradients The gradients value * @param input The input value * @param inputMin The inputMin value @@ -574,7 +596,6 @@ public QuantizeAndDequantizeV4Grad quantizeAndDequantizeV * that output into this operator, we can reduce it from 32 bits down to 8 with * minimal loss of accuracy. * - * @param data type for {@code output} output * @param input The input value * @param inputMin The float value that the minimum quantized input value represents. * @param inputMax The float value that the maximum quantized input value represents. @@ -591,7 +612,6 @@ public QuantizeDownAndShrinkRange quantizeDownAndShrinkRa /** * Concatenates quantized tensors along one dimension. * - * @param data type for {@code output} output * @param concatDim 0-D. The dimension along which to concatenate. Must be in the * range [0, rank(values)). * @param values The {@code N} Tensors to concatenate. Their ranks and types must match, @@ -607,6 +627,58 @@ public QuantizedConcat quantizedConcat(Operand conc return QuantizedConcat.create(scope, concatDim, values, inputMins, inputMaxes); } + /** + * The QuantizedMatMulWithBiasAndDequantize operation + * + * @param a The a value + * @param b The b value + * @param bias The bias value + * @param minA The minA value + * @param maxA The maxA value + * @param minB The minB value + * @param maxB The maxB value + * @param minFreezedOutput The minFreezedOutput value + * @param maxFreezedOutput The maxFreezedOutput value + * @param Toutput The value of the Toutput attribute + * @param options carries optional attribute values + * @param data type for {@code QuantizedMatMulWithBiasAndDequantize} output and operands + * @return a new instance of QuantizedMatMulWithBiasAndDequantize + */ + public QuantizedMatMulWithBiasAndDequantize quantizedMatMulWithBiasAndDequantize( + Operand a, Operand b, Operand bias, + Operand minA, Operand maxA, Operand minB, + Operand maxB, Operand minFreezedOutput, + 
Operand maxFreezedOutput, Class Toutput, + QuantizedMatMulWithBiasAndDequantize.Options... options) { + return QuantizedMatMulWithBiasAndDequantize.create(scope, a, b, bias, minA, maxA, minB, maxB, minFreezedOutput, maxFreezedOutput, Toutput, options); + } + + /** + * The QuantizedMatMulWithBiasAndRequantize operation + * + * @param a The a value + * @param b The b value + * @param bias The bias value + * @param minA The minA value + * @param maxA The maxA value + * @param minB The minB value + * @param maxB The maxB value + * @param minFreezedOutput The minFreezedOutput value + * @param maxFreezedOutput The maxFreezedOutput value + * @param Toutput The value of the Toutput attribute + * @param options carries optional attribute values + * @param data type for {@code QuantizedMatMulWithBiasAndRequantize} output and operands + * @return a new instance of QuantizedMatMulWithBiasAndRequantize + */ + public QuantizedMatMulWithBiasAndRequantize quantizedMatMulWithBiasAndRequantize( + Operand a, Operand b, Operand bias, + Operand minA, Operand maxA, Operand minB, + Operand maxB, Operand minFreezedOutput, + Operand maxFreezedOutput, Class Toutput, + QuantizedMatMulWithBiasAndRequantize.Options... options) { + return QuantizedMatMulWithBiasAndRequantize.create(scope, a, b, bias, minA, maxA, minB, maxB, minFreezedOutput, maxFreezedOutput, Toutput, options); + } + /** * Computes a range that covers the actual values present in a quantized tensor. * Given a quantized tensor described by {@code (input, input_min, input_max)}, outputs a @@ -633,7 +705,6 @@ public RequantizationRange requantizationRange(Operand input, * {@code input_max} is 1.0f, and we are dealing with {@code quint16} quantized data, then a 0 * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. * - * @param data type for {@code output} output * @param input The input value * @param inputMin The float value that the minimum quantized input value represents. 
* @param inputMax The float value that the maximum quantized input value represents. @@ -649,6 +720,198 @@ public Requantize requantize(Operand i return Requantize.create(scope, input, inputMin, inputMax, requestedOutputMin, requestedOutputMax, outType); } + /** + * Perform dequantization on the quantized Tensor {@code input}. + * Given quantized {@code input} which was quantized using {@code scales} and {@code zero_points}, performs dequantization using the formula: + * dequantized_data = (quantized_data - zero_point) * scale. + * + * @param input Must be a Tensor of Tin. + * @param scales The float value(s) used as scale(s) when quantizing original data that input represents. + * Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (input.dim_size(quantization_axis),) (per-axis quantization). + * @param zeroPoints The int32 value(s) used as zero_point(s) when quantizing original data that input represents. + * Same shape condition as scales. + * @param Tout The type of output Tensor. A tf.DType from: tf.qint8, tf.qint32 + * @param quantizationMinVal The quantization min value that was used when input was quantized. + * The purpose of this attribute is typically (but not limited to) to indicate narrow range, where this is set to: + * {@code (Tin lowest) + 1} if narrow range, and {@code (Tin lowest)} otherwise. + * For example, if Tin is qint8, this is set to -127 if narrow range quantized or -128 if not. + * @param quantizationMaxVal The quantization max value that was used when input was quantized. + * The purpose of this attribute is typically (but not limited to) indicate narrow range, where this is set to: + * {@code (Tout max)} for both narrow range and not narrow range. + * For example, if Tin is qint8, this is set to 127. 
+ * @param options carries optional attribute values + * @param data type for {@code UniformDequantize} output and operands + * @return a new instance of UniformDequantize + */ + public UniformDequantize uniformDequantize( + Operand input, Operand scales, Operand zeroPoints, + Class Tout, Long quantizationMinVal, Long quantizationMaxVal, + UniformDequantize.Options... options) { + return UniformDequantize.create(scope, input, scales, zeroPoints, Tout, quantizationMinVal, quantizationMaxVal, options); + } + + /** + * Perform quantization on Tensor {@code input}. + * Given {@code input}, {@code scales} and {@code zero_points}, performs quantization using the formula: + * quantized_data = floor(input_data * (1.0f / scale) + 0.5f) + zero_point + * + * @param input Must be a Tensor of Tin. + * @param scales The float value(s) to use as scale(s) to quantize {@code input}. + * Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (input.dim_size(quantization_axis),) (per-axis quantization). + * @param zeroPoints The int32 value(s) to use as zero_point(s) to quantize {@code input}. + * Same shape condition as scales. + * @param Tout The type of output Tensor. A tf.DType from: tf.float32 + * @param quantizationMinVal The quantization min value to quantize {@code input}. + * The purpose of this attribute is typically (but not limited to) to indicate narrow range, where this is set to: + * {@code (Tin lowest) + 1} if narrow range, and {@code (Tin lowest)} otherwise. + * For example, if Tin is qint8, this is set to -127 if narrow range quantized or -128 if not. + * @param quantizationMaxVal The quantization max value to quantize {@code input}. + * The purpose of this attribute is typically (but not limited to) indicate narrow range, where this is set to: + * {@code (Tout max)} for both narrow range and not narrow range. + * For example, if Tin is qint8, this is set to 127. 
+ * @param options carries optional attribute values + * @param data type for {@code UniformQuantize} output and operands + * @return a new instance of UniformQuantize + */ + public UniformQuantize uniformQuantize(Operand input, + Operand scales, Operand zeroPoints, Class Tout, Long quantizationMinVal, + Long quantizationMaxVal, UniformQuantize.Options... options) { + return UniformQuantize.create(scope, input, scales, zeroPoints, Tout, quantizationMinVal, quantizationMaxVal, options); + } + + /** + * Perform quantized dot of quantized Tensor {@code lhs} and quantized Tensor {@code rhs} to make quantized {@code output}. + * Given quantized {@code lhs} and quantized {@code rhs}, performs quantized dot on {@code lhs} and {@code rhs} to make quantized {@code output}. + * {@code lhs} and {@code rhs} must be 2D Tensors and the lhs.dim_size(1) must match rhs.dim_size(0). + * {@code lhs} and {@code rhs} must be quantized Tensor, where data value is quantized using the formula: + * quantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val). + * {@code output} is also quantized, using the same formula. + * If {@code rhs} is per-tensor quantized, {@code output} must be also per-tensor quantized. + * + * @param lhs Must be a 2D Tensor of Tin. + * @param rhs Must be a 2D Tensor of Tin. + * @param lhsScales The float value(s) used as scale when quantizing original data that lhs represents. + * Must be a scalar Tensor (lhs supports only per-tensor quantization). + * @param lhsZeroPoints The int32 value(s) used as zero_point when quantizing original data that lhs represents. + * Same shape condition as lhs_scales. + * @param rhsScales The float value(s) used as scale when quantizing original data that rhs represents. + * Must be a scalar Tensor (per-tensor quantization) or 1D Tensor of size (rhs.dim_size(1),) (per-channel quantization). 
+ * @param rhsZeroPoints The int32 value(s) used as zero_point when quantizing original data that rhs represents. + * Same shape condition as rhs_scales. + * @param outputScales The float value(s) to use as scales when quantizing original data that output represents. + * Must be a scalar Tensor (per-tensor quantization) or 1D Tensor of size (output.dim_size(1),) (per-channel quantization). + * If rhs is per-tensor quantized, output must be also per-tensor quantized. + * This means that if rhs_scales and rhs_zero_points are scalar Tensors, output_scales and output_zero_points must be scalar Tensors as well. + * @param outputZeroPoints The int32 value(s) used as zero_point when quantizing original data that output represents. + * Same shape condition as rhs_scales. + * @param Tout The type of output Tensor. + * @param lhsQuantizationMinVal The min value of the quantized data stored in lhs. + * For example, if Tin is qint8, this must be set to -127 if narrow range quantized or -128 if not. + * @param lhsQuantizationMaxVal The max value of the quantized data stored in rhs. + * For example, if Tin is qint8, this must be set to 127. + * @param rhsQuantizationMinVal The min value of the quantized data stored in rhs. + * For example, if Trhs is qint8, this must be set to -127 if narrow range quantized or -128 if not. + * @param rhsQuantizationMaxVal The max value of the quantized data stored in rhs. + * For example, if Trhs is qint8, this must be set to 127. + * @param outputQuantizationMinVal The min value of the quantized data stored in output. + * For example, if Tout is qint8, this must be set to -127 if narrow range quantized or -128 if not. + * @param outputQuantizationMaxVal The max value of the quantized data stored in output. + * For example, if Tout is qint8, this must be set to 127. 
+ * @param options carries optional attribute values + * @param data type for {@code UniformQuantizedDot} output and operands + * @param data type for {@code UniformQuantizedDot} output and operands + * @return a new instance of UniformQuantizedDot + */ + public UniformQuantizedDot uniformQuantizedDot( + Operand lhs, Operand rhs, Operand lhsScales, Operand lhsZeroPoints, + Operand rhsScales, Operand rhsZeroPoints, Operand outputScales, + Operand outputZeroPoints, Class Tout, Long lhsQuantizationMinVal, + Long lhsQuantizationMaxVal, Long rhsQuantizationMinVal, Long rhsQuantizationMaxVal, + Long outputQuantizationMinVal, Long outputQuantizationMaxVal, + UniformQuantizedDot.Options... options) { + return UniformQuantizedDot.create(scope, lhs, rhs, lhsScales, lhsZeroPoints, rhsScales, rhsZeroPoints, outputScales, outputZeroPoints, Tout, lhsQuantizationMinVal, lhsQuantizationMaxVal, rhsQuantizationMinVal, rhsQuantizationMaxVal, outputQuantizationMinVal, outputQuantizationMaxVal, options); + } + + /** + * Perform hybrid quantized dot of float Tensor {@code lhs} and quantized Tensor {@code rhs}. + * Given float {@code lhs} and quantized {@code rhs}, internally performs quantization on {@code lhs}, and then performs quantized dot on quantized lhs and {@code rhs}. + * The internal quantization on {@code lhs} is a quantization to qint8, dynamic range, per-batch (per-axis along axis 0), asymmetric, and not narrow range (the range is [-128, 127]). + * {@code lhs} and {@code rhs} must be 2D Tensors and the lhs.dim_size(1) must match rhs.dim_size(0). + * {@code rhs} must be quantized Tensor, where its data value is quantized using the formula: + * quantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val). + * + * @param lhs Must be a 2D Tensor of Tlhs. + * @param rhs Must be a 2D Tensor of Trhs. + * @param rhsScales The float value(s) used as scale when quantizing original data that rhs represents. 
+ * Must be a scalar Tensor (per-tensor quantization) or 1D Tensor of size (rhs.dim_size(1),) (per-channel quantization). + * @param rhsZeroPoints The int32 value(s) used as zero_point when quantizing original data that rhs represents. + * Same shape condition as rhs_scales. + * @param Tout The type of output Tensor. + * @param rhsQuantizationMinVal The min value of the quantized data stored in rhs. + * For example, if Trhs is qint8, this must be set to -127 if narrow range quantized or -128 if not. + * @param rhsQuantizationMaxVal The max value of the quantized data stored in rhs. + * For example, if Trhs is qint8, this must be set to 127. + * @param options carries optional attribute values + * @param data type for {@code UniformQuantizedDotHybrid} output and operands + * @return a new instance of UniformQuantizedDotHybrid + */ + public UniformQuantizedDotHybrid uniformQuantizedDotHybrid( + Operand lhs, Operand rhs, Operand rhsScales, + Operand rhsZeroPoints, Class Tout, Long rhsQuantizationMinVal, + Long rhsQuantizationMaxVal, UniformQuantizedDotHybrid.Options... options) { + return UniformQuantizedDotHybrid.create(scope, lhs, rhs, rhsScales, rhsZeroPoints, Tout, rhsQuantizationMinVal, rhsQuantizationMaxVal, options); + } + + /** + * Given quantized tensor {@code input}, requantize it with new quantization parameters. + * Given quantized tensor {@code input}, which was quantized using {input_scales, input_zero_points, input_quantization_axis, input_quantization_min_val, input_quantization_max_val}, + * requantize it to a tensor, which is quantized using {output_scales, output_zero_points, output_quantization_axis, output_quantization_min_val, output_quantization_max_val}. + * The requantization is done by using the formula: + * output_quantized_data = clip( + * (input_quantized_data - input_zero_point) * (input_scale / output_scale) + output_zero_point, + * output_quantization_min_val, + * output_quantization_max_val) + *

Per-tensor and per-axis quantization supported cases are followings: + *

    + *
  • per-tensor -> per-tensor
  • + *
  • per-tensor -> per-axis
  • + *
  • per-axis -> per-axis where input_quantization_axis equals output_quantization_axis. + * i.e. At least one among input_quantization_axis and output_quantization_axis must be -1, or two must be equal.
  • + *
+ * + * @param input Must be a Tensor of Tin. + * @param inputScales The float value(s) used as scale(s) when quantizing original data that {@code input} represents. + * Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (input.dim_size(quantization_axis),) (per-axis quantization). + * @param inputZeroPoints The int32 value(s) used as zero_point(s) when quantizing original data that {@code input} represents. + * Same shape condition as scales. + * @param outputScales The float value(s) to use as new scale(s) to quantize original data that {@code input} represents. + * Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (input.dim_size(quantization_axis),) (per-axis quantization). + * @param outputZeroPoints The int32 value(s) to use as new zero_point(s) to quantize original data that {@code input} represents. + * Same shape condition as scales. + * @param Tout The type of output Tensor. A tf.DType from: tf.qint8, tf.qint32 + * @param inputQuantizationMinVal The quantization min value that was used when quantizing original data that {@code input} represents. + * The purpose of this attribute is typically (but not limited to) to indicate narrow range, where this is set to: + * {@code (Tin lowest) + 1} if narrow range, and {@code (Tin lowest)} otherwise. + * For example, if Tin is qint8, this is set to -127 if narrow range quantized or -128 if not. + * @param inputQuantizationMaxVal The quantization max value that was used when quantizing original data that {@code input} represents. + * The purpose of this attribute is typically (but not limited to) indicate narrow range, where this is set to: + * {@code (Tout max)} for both narrow range and not narrow range. + * For example, if Tin is qint8, this is set to 127. + * @param outputQuantizationMinVal The new quantization min value to quantize original data that {@code input} represents. 
+ * @param outputQuantizationMaxVal The new quantization max value to quantize original data that {@code input} represents. + * @param options carries optional attribute values + * @param data type for {@code UniformRequantize} output and operands + * @return a new instance of UniformRequantize + */ + public UniformRequantize uniformRequantize( + Operand input, Operand inputScales, + Operand inputZeroPoints, Operand outputScales, + Operand outputZeroPoints, Class Tout, Long inputQuantizationMinVal, + Long inputQuantizationMaxVal, Long outputQuantizationMinVal, Long outputQuantizationMaxVal, + UniformRequantize.Options... options) { + return UniformRequantize.create(scope, input, inputScales, inputZeroPoints, outputScales, outputZeroPoints, Tout, inputQuantizationMinVal, inputQuantizationMaxVal, outputQuantizationMinVal, outputQuantizationMaxVal, options); + } + /** * Get the parent {@link Ops} object. */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/RaggedOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/RaggedOps.java index a1cd7bbdc4b..43b18f0cf57 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/RaggedOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/RaggedOps.java @@ -17,10 +17,21 @@ // package org.tensorflow.op; +import java.util.List; import org.tensorflow.Operand; import org.tensorflow.op.ragged.RaggedBincount; +import org.tensorflow.op.ragged.RaggedCountSparseOutput; +import org.tensorflow.op.ragged.RaggedCross; import org.tensorflow.op.ragged.RaggedFillEmptyRows; import org.tensorflow.op.ragged.RaggedFillEmptyRowsGrad; +import org.tensorflow.op.ragged.RaggedGather; +import org.tensorflow.op.ragged.RaggedRange; +import org.tensorflow.op.ragged.RaggedTensorFromVariant; +import org.tensorflow.op.ragged.RaggedTensorToSparse; +import org.tensorflow.op.ragged.RaggedTensorToTensor; +import 
org.tensorflow.op.ragged.RaggedTensorToVariant; +import org.tensorflow.op.ragged.RaggedTensorToVariantGradient; +import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -28,7 +39,7 @@ /** * An API for building {@code ragged} operations as {@link Op Op}s * - * @see {@link Ops} + * @see Ops */ public final class RaggedOps { private final Scope scope; @@ -49,7 +60,6 @@ public final class RaggedOps { * {@code i}. *

Values in {@code arr} outside of the range [0, size) are ignored. * - * @param data type for {@code output} output * @param splits 1D int64 {@code Tensor}. * @param values 2D int {@code Tensor}. * @param sizeOutput non-negative int scalar {@code Tensor}. @@ -67,10 +77,60 @@ public RaggedBincount raggedBincount( return RaggedBincount.create(scope, splits, values, sizeOutput, weights, options); } + /** + * Performs sparse-output bin counting for a ragged tensor input. + * Counts the number of times each value occurs in the input. + * + * @param splits Tensor containing the row splits of the ragged tensor to count. + * @param values Tensor containing values of the sparse tensor to count. + * @param weights A Tensor of the same shape as indices containing per-index weight values. + * May also be the empty tensor if no weights are used. + * @param binaryOutput Whether to output the number of occurrences of each value or 1. + * @param options carries optional attribute values + * @param data type for {@code RaggedCountSparseOutput} output and operands + * @return a new instance of RaggedCountSparseOutput + */ + public RaggedCountSparseOutput raggedCountSparseOutput( + Operand splits, Operand values, Operand weights, + Boolean binaryOutput, RaggedCountSparseOutput.Options... options) { + return RaggedCountSparseOutput.create(scope, splits, values, weights, binaryOutput, options); + } + + /** + * Generates a feature cross from a list of tensors, and returns it as a + * RaggedTensor. See {@code tf.ragged.cross} for more details. + * + * @param raggedValues The values tensor for each RaggedTensor input. + * @param raggedRowSplits The row_splits tensor for each RaggedTensor input. + * @param sparseIndices The indices tensor for each SparseTensor input. + * @param sparseValues The values tensor for each SparseTensor input. + * @param sparseShape The dense_shape tensor for each SparseTensor input. + * @param denseInputs The tf.Tensor inputs. 
+ * @param inputOrder String specifying the tensor type for each input. The {@code i}th character in + * this string specifies the type of the {@code i}th input, and is one of: 'R' (ragged), + * 'D' (dense), or 'S' (sparse). This attr is used to ensure that the crossed + * values are combined in the order of the inputs from the call to tf.ragged.cross. + * @param hashedOutput The value of the hashedOutput attribute + * @param numBuckets The value of the numBuckets attribute + * @param hashKey The value of the hashKey attribute + * @param outValuesType The value of the outValuesType attribute + * @param outRowSplitsType The value of the outRowSplitsType attribute + * @param data type for {@code RaggedCross} output and operands + * @param data type for {@code RaggedCross} output and operands + * @return a new instance of RaggedCross + */ + public RaggedCross raggedCross( + Iterable> raggedValues, Iterable> raggedRowSplits, + Iterable> sparseIndices, Iterable> sparseValues, + Iterable> sparseShape, Iterable> denseInputs, String inputOrder, + Boolean hashedOutput, Long numBuckets, Long hashKey, Class outValuesType, + Class outRowSplitsType) { + return RaggedCross.create(scope, raggedValues, raggedRowSplits, sparseIndices, sparseValues, sparseShape, denseInputs, inputOrder, hashedOutput, numBuckets, hashKey, outValuesType, outRowSplitsType); + } + /** * The RaggedFillEmptyRows operation * - * @param data type for {@code output_values} output * @param valueRowids The valueRowids value * @param values The values value * @param nrows The nrows value @@ -86,7 +146,6 @@ public RaggedFillEmptyRows raggedFillEmptyRows(Operand data type for {@code d_values} output * @param reverseIndexMap The reverseIndexMap value * @param gradValues The gradValues value * @param data type for {@code RaggedFillEmptyRowsGrad} output and operands @@ -97,6 +156,284 @@ public RaggedFillEmptyRowsGrad raggedFillEmptyRowsGrad( return RaggedFillEmptyRowsGrad.create(scope, reverseIndexMap, gradValues); 
} + /** + * Gather ragged slices from {@code params} axis {@code 0} according to {@code indices}. + * Outputs a {@code RaggedTensor} output composed from {@code output_dense_values} and + * {@code output_nested_splits}, such that: + *

+   *  output.shape = indices.shape + params.shape[1:]
+   *  output.ragged_rank = indices.shape.ndims + params.ragged_rank
+   *  output[i...j, d0...dn] = params[indices[i...j], d0...dn]
+   *  
+ *

where + *

    + *
  • {@code params = ragged.from_nested_row_splits(params_dense_values, params_nested_splits)} + * provides the values that should be gathered.
  • + *
  • {@code indices} ia a dense tensor with dtype {@code int32} or {@code int64}, indicating which + * values should be gathered.
  • + *
  • {@code output = ragged.from_nested_row_splits(output_dense_values, output_nested_splits)} + * is the output tensor.
  • + *
+ *

(Note: This c++ op is used to implement the higher-level python + * {@code tf.ragged.gather} op, which also supports ragged indices.) + * + * @param paramsNestedSplits The {@code nested_row_splits} tensors that define the row-partitioning for the + * {@code params} RaggedTensor input. + * @param paramsDenseValues The {@code flat_values} for the {@code params} RaggedTensor. There was a terminology change + * at the python level from dense_values to flat_values, so dense_values is the + * deprecated name. + * @param indices Indices in the outermost dimension of {@code params} of the values that should be + * gathered. + * @param OUTPUTRAGGEDRANK The ragged rank of the output RaggedTensor. {@code output_nested_splits} will contain + * this number of {@code row_splits} tensors. This value should equal + * {@code indices.shape.ndims + params.ragged_rank - 1}. + * @param data type for {@code RaggedGather} output and operands + * @param data type for {@code RaggedGather} output and operands + * @return a new instance of RaggedGather + */ + public RaggedGather raggedGather( + Iterable> paramsNestedSplits, Operand paramsDenseValues, + Operand indices, Long OUTPUTRAGGEDRANK) { + return RaggedGather.create(scope, paramsNestedSplits, paramsDenseValues, indices, OUTPUTRAGGEDRANK); + } + + /** + * Returns a {@code RaggedTensor} containing the specified sequences of numbers. + * Returns a {@code RaggedTensor} {@code result} composed from {@code rt_dense_values} and + * {@code rt_nested_splits}, such that + * {@code result[i] = range(starts[i], limits[i], deltas[i])}. + *

+   *  (rt_nested_splits, rt_dense_values) = ragged_range(
+   *        starts=[2, 5, 8], limits=[3, 5, 12], deltas=1)
+   *  result = tf.ragged.from_row_splits(rt_dense_values, rt_nested_splits)
+   *  print(result)
+   *  <tf.RaggedTensor [[2], [], [8, 9, 10, 11]] >
+   *  
+ *

The input tensors {@code starts}, {@code limits}, and {@code deltas} may be scalars or vectors. + * The vector inputs must all have the same size. Scalar inputs are broadcast + * to match the size of the vector inputs. + * + * @param starts The starts of each range. + * @param limits The limits of each range. + * @param deltas The deltas of each range. + * @param data type for {@code RaggedRange} output and operands + * @return a new instance of RaggedRange, with default output types + */ + public RaggedRange raggedRange(Operand starts, + Operand limits, Operand deltas) { + return RaggedRange.create(scope, starts, limits, deltas); + } + + /** + * Returns a {@code RaggedTensor} containing the specified sequences of numbers. + * Returns a {@code RaggedTensor} {@code result} composed from {@code rt_dense_values} and + * {@code rt_nested_splits}, such that + * {@code result[i] = range(starts[i], limits[i], deltas[i])}. + *

+   *  (rt_nested_splits, rt_dense_values) = ragged_range(
+   *        starts=[2, 5, 8], limits=[3, 5, 12], deltas=1)
+   *  result = tf.ragged.from_row_splits(rt_dense_values, rt_nested_splits)
+   *  print(result)
+   *  <tf.RaggedTensor [[2], [], [8, 9, 10, 11]] >
+   *  
+ *

The input tensors {@code starts}, {@code limits}, and {@code deltas} may be scalars or vectors. + * The vector inputs must all have the same size. Scalar inputs are broadcast + * to match the size of the vector inputs. + * + * @param starts The starts of each range. + * @param limits The limits of each range. + * @param deltas The deltas of each range. + * @param Tsplits The value of the Tsplits attribute + * @param data type for {@code RaggedRange} output and operands + * @param data type for {@code RaggedRange} output and operands + * @return a new instance of RaggedRange + */ + public RaggedRange raggedRange(Operand starts, + Operand limits, Operand deltas, Class Tsplits) { + return RaggedRange.create(scope, starts, limits, deltas, Tsplits); + } + + /** + * Decodes a {@code variant} Tensor into a {@code RaggedTensor}. + * Decodes the given {@code variant} Tensor and returns a {@code RaggedTensor}. The input + * could be a scalar, meaning it encodes a single {@code RaggedTensor} with ragged_rank + * {@code output_ragged_rank}. It could also have an arbitrary rank, in which case each + * element is decoded into a {@code RaggedTensor} with ragged_rank {@code input_ragged_rank} + * and these are then stacked according to the input shape to output a single + * {@code RaggedTensor} with ragged_rank {@code output_ragged_rank}. Each {@code variant} element in + * the input Tensor is decoded by retrieving from the element a 1-D {@code variant} + * Tensor with {@code input_ragged_rank + 1} Tensors, corresponding to the splits and + * values of the decoded {@code RaggedTensor}. If {@code input_ragged_rank} is -1, then it is + * inferred as {@code output_ragged_rank} - {@code rank(encoded_ragged)}. See + * {@code RaggedTensorToVariant} for the corresponding encoding logic. + * + * @param encodedRagged A {@code variant} Tensor containing encoded {@code RaggedTensor}s. + * @param inputRaggedRank The ragged rank of each encoded {@code RaggedTensor} component in the input. 
If set to + * -1, this is inferred as {@code output_ragged_rank} - {@code rank(encoded_ragged)} + * @param outputRaggedRank The expected ragged rank of the output {@code RaggedTensor}. The following must hold: + * {@code output_ragged_rank = rank(encoded_ragged) + input_ragged_rank}. + * @param Tvalues The value of the Tvalues attribute + * @param data type for {@code RaggedTensorFromVariant} output and operands + * @return a new instance of RaggedTensorFromVariant, with default output types + */ + public RaggedTensorFromVariant raggedTensorFromVariant( + Operand encodedRagged, Long inputRaggedRank, Long outputRaggedRank, + Class Tvalues) { + return RaggedTensorFromVariant.create(scope, encodedRagged, inputRaggedRank, outputRaggedRank, Tvalues); + } + + /** + * Decodes a {@code variant} Tensor into a {@code RaggedTensor}. + * Decodes the given {@code variant} Tensor and returns a {@code RaggedTensor}. The input + * could be a scalar, meaning it encodes a single {@code RaggedTensor} with ragged_rank + * {@code output_ragged_rank}. It could also have an arbitrary rank, in which case each + * element is decoded into a {@code RaggedTensor} with ragged_rank {@code input_ragged_rank} + * and these are then stacked according to the input shape to output a single + * {@code RaggedTensor} with ragged_rank {@code output_ragged_rank}. Each {@code variant} element in + * the input Tensor is decoded by retrieving from the element a 1-D {@code variant} + * Tensor with {@code input_ragged_rank + 1} Tensors, corresponding to the splits and + * values of the decoded {@code RaggedTensor}. If {@code input_ragged_rank} is -1, then it is + * inferred as {@code output_ragged_rank} - {@code rank(encoded_ragged)}. See + * {@code RaggedTensorToVariant} for the corresponding encoding logic. + * + * @param encodedRagged A {@code variant} Tensor containing encoded {@code RaggedTensor}s. + * @param inputRaggedRank The ragged rank of each encoded {@code RaggedTensor} component in the input. 
If set to + * -1, this is inferred as {@code output_ragged_rank} - {@code rank(encoded_ragged)} + * @param outputRaggedRank The expected ragged rank of the output {@code RaggedTensor}. The following must hold: + * {@code output_ragged_rank = rank(encoded_ragged) + input_ragged_rank}. + * @param Tvalues The value of the Tvalues attribute + * @param Tsplits The value of the Tsplits attribute + * @param data type for {@code RaggedTensorFromVariant} output and operands + * @param data type for {@code RaggedTensorFromVariant} output and operands + * @return a new instance of RaggedTensorFromVariant + */ + public RaggedTensorFromVariant raggedTensorFromVariant( + Operand encodedRagged, Long inputRaggedRank, Long outputRaggedRank, + Class Tvalues, Class Tsplits) { + return RaggedTensorFromVariant.create(scope, encodedRagged, inputRaggedRank, outputRaggedRank, Tvalues, Tsplits); + } + + /** + * Converts a {@code RaggedTensor} into a {@code SparseTensor} with the same values. + * input=ragged.from_nested_row_splits(rt_dense_values, rt_nested_splits) + * output=SparseTensor(indices=sparse_indices, values=sparse_values, + * dense_shape=sparse_dense_shape) + * + * @param rtNestedSplits The {@code row_splits} for the {@code RaggedTensor}. + * @param rtDenseValues The {@code flat_values} for the {@code RaggedTensor}. + * @param data type for {@code RaggedTensorToSparse} output and operands + * @return a new instance of RaggedTensorToSparse + */ + public RaggedTensorToSparse raggedTensorToSparse( + Iterable> rtNestedSplits, Operand rtDenseValues) { + return RaggedTensorToSparse.create(scope, rtNestedSplits, rtDenseValues); + } + + /** + * Create a dense tensor from a ragged tensor, possibly altering its shape. + * The {@code ragged_to_dense} op creates a dense tensor from a list of row partition + * tensors, a value vector, and default values. 
If the shape is unspecified, the + * minimal shape required to contain all the elements in the ragged tensor (the + * natural shape) will be used. If some dimensions are left unspecified, then the + * size of the natural shape is used in that dimension. + *

The default_value will be broadcast to the output shape. After that, the values + * from the ragged tensor overwrite the default values. Note that the default_value + * must have less dimensions than the value. + *

The row partition tensors are in the order of the dimensions. + * At present, the types can be: + *

    + *
  • "ROW_SPLITS": the row_splits tensor from the ragged tensor.
  • + *
  • "VALUE_ROWIDS": the value_rowids tensor from the ragged tensor.
  • + *
  • "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then it + * is preceded by "FIRST_DIM_SIZE".
  • + *
+ * + * @param shape The desired shape of the output tensor. If left unspecified (empty), + * the minimal shape required to contain all the elements in the ragged tensor + * (the natural shape) will be used. If some dimensions are left unspecified, then + * the size of the natural shape is used in that dimension. + *

Note that dense dimensions cannot be modified by the shape argument. Trying to + * change the size of a dense dimension will cause the op to fail. + * Examples: + * natural shape: [4, 5, 6] + * shape: -1 + * output shape: [4, 5, 6] + *

natural shape: [4, 5, 6] + * shape: [3, -1, 2] + * output shape: [3, 5, 2] + *

natural shape: [4, 5, 6] + * shape: [3, 7, 2] + * output shape: [3, 7, 2] + * @param values A 1D tensor representing the values of the ragged tensor. + * @param defaultValue The default_value when the shape is larger than the ragged tensor. The + * default_value is broadcast until it is the shape of the output tensor, and + * then overwritten by values in the ragged tensor. The default value must be + * compatible with this broadcast operation, and must have fewer dimensions than + * the value tensor. + * @param rowPartitionTensors The rowPartitionTensors value + * @param rowPartitionTypes The types of the row partition tensors. At present, these can be: + *

    + *
  • "ROW_SPLITS": the row_splits tensor from the ragged tensor.
  • + *
  • "VALUE_ROWIDS": the value_rowids tensor from the ragged tensor.
  • + *
  • "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then it + * is preceeded by "FIRST_DIM_SIZE". + * The tensors are in the order of the dimensions.
  • + *
+ * @param data type for {@code RaggedTensorToTensor} output and operands + * @return a new instance of RaggedTensorToTensor + */ + public RaggedTensorToTensor raggedTensorToTensor( + Operand shape, Operand values, Operand defaultValue, + Iterable> rowPartitionTensors, List rowPartitionTypes) { + return RaggedTensorToTensor.create(scope, shape, values, defaultValue, rowPartitionTensors, rowPartitionTypes); + } + + /** + * Encodes a {@code RaggedTensor} into a {@code variant} Tensor. + * Encodes the given {@code RaggedTensor} and returns a {@code variant} Tensor. If + * {@code batched_input} is True, then input {@code RaggedTensor} is unbatched along the + * zero-th dimension, each component {@code RaggedTensor} is encoded into a scalar + * {@code variant} Tensor, and these are stacked to return a 1-D {@code variant} Tensor. + * If {@code batched_input} is False, then the input {@code RaggedTensor} is encoded as is and + * a scalar {@code variant} Tensor is returned. A {@code RaggedTensor} is encoded by first + * creating a 1-D {@code variant} Tensor with {@code ragged_rank + 1} elements, containing the + * splits and values Tensors of the {@code RaggedTensor}. Then the 1-D {@code variant} Tensor + * is wrapped in a scalar {@code variant} Tensor. See {@code RaggedTensorFromVariant} for the + * corresponding decoding logic. + * + * @param rtNestedSplits A list of one or more Tensors representing the splits of the input + * {@code RaggedTensor}. + * @param rtDenseValues A Tensor representing the values of the input {@code RaggedTensor}. + * @param batchedInput A {@code bool} denoting whether the input is a batched {@code RaggedTensor}. 
+ * @return a new instance of RaggedTensorToVariant + */ + public RaggedTensorToVariant raggedTensorToVariant( + Iterable> rtNestedSplits, Operand rtDenseValues, + Boolean batchedInput) { + return RaggedTensorToVariant.create(scope, rtNestedSplits, rtDenseValues, batchedInput); + } + + /** + * Helper used to compute the gradient for {@code RaggedTensorToVariant}. + * Computes the gradient for the dense_values input to the RaggedTensorToVariant + * op, given the variant-encoded ragged gradients of the outputs, along with + * the outer row-splits and the shape of the dense-values that were provided as + * inputs to the RaggedTensorToVariant op. + * + * @param encodedRaggedGrad A {@code variant} Tensor containing encoded {@code RaggedTensor} gradients. + * @param rowSplits Outermost row-splits that were used as input to the RaggedTensorToVariant op. + * @param denseValuesShape Shape of the dense_values that was used as an input to the + * RaggedTensorToVariant op. + * @param Tvalues The value of the Tvalues attribute + * @param data type for {@code RaggedTensorToVariantGradient} output and operands + * @return a new instance of RaggedTensorToVariantGradient + */ + public RaggedTensorToVariantGradient raggedTensorToVariantGradient( + Operand encodedRaggedGrad, Operand rowSplits, + Operand denseValuesShape, Class Tvalues) { + return RaggedTensorToVariantGradient.create(scope, encodedRaggedGrad, rowSplits, denseValuesShape, Tvalues); + } + /** * Get the parent {@link Ops} object. */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/RandomExperimentalOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/RandomExperimentalOps.java new file mode 100644 index 00000000000..34d3585f270 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/RandomExperimentalOps.java @@ -0,0 +1,70 @@ +// Copyright 2020-2022 The TensorFlow Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op; + +import org.tensorflow.Operand; +import org.tensorflow.op.random.experimental.StatelessShuffle; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.family.TType; + +/** + * An API for building {@code random.experimental} operations as {@link Op Op}s + * + * @see Ops + */ +public final class RandomExperimentalOps { + private final Scope scope; + + private final Ops ops; + + RandomExperimentalOps(Ops ops) { + this.scope = ops.scope(); + this.ops = ops; + } + + /** + * Randomly and deterministically shuffles a tensor along its first dimension. + * The tensor is shuffled along dimension 0, such that each {@code value[j]} is mapped + * to one and only one {@code output[i]}. For example, a mapping that might occur for a + * 3x2 tensor is: + *
+   *  [[1, 2],       [[5, 6],
+   *   [3, 4],  ==>   [1, 2],
+   *   [5, 6]]        [3, 4]]
+   *  
+ *

The outputs are a deterministic function of {@code value}, {@code key}, {@code counter} and {@code alg}. + * + * @param value The tensor to be shuffled. + * @param key Key for the counter-based RNG algorithm (shape uint64[1]). + * @param counter Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. + * @param alg The RNG algorithm (shape int32[]). + * @param data type for {@code StatelessShuffle} output and operands + * @return a new instance of StatelessShuffle + */ + public StatelessShuffle statelessShuffle(Operand value, + Operand key, Operand counter, Operand alg) { + return StatelessShuffle.create(scope, value, key, counter, alg); + } + + /** + * Get the parent {@link Ops} object. + */ + public final Ops ops() { + return ops; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/RandomOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/RandomOps.java index 9f2e4f7f288..c5ff9a489a0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/RandomOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/RandomOps.java @@ -19,24 +19,53 @@ import org.tensorflow.Operand; import org.tensorflow.op.random.AllCandidateSampler; +import org.tensorflow.op.random.AnonymousRandomSeedGenerator; +import org.tensorflow.op.random.AnonymousSeedGenerator; +import org.tensorflow.op.random.DeleteRandomSeedGenerator; +import org.tensorflow.op.random.DeleteSeedGenerator; +import org.tensorflow.op.random.DummySeedGenerator; import org.tensorflow.op.random.LogUniformCandidateSampler; import org.tensorflow.op.random.Multinomial; +import org.tensorflow.op.random.NonDeterministicInts; import org.tensorflow.op.random.ParameterizedTruncatedNormal; import org.tensorflow.op.random.RandomGamma; +import 
org.tensorflow.op.random.RandomGammaGrad; import org.tensorflow.op.random.RandomPoisson; import org.tensorflow.op.random.RandomShuffle; import org.tensorflow.op.random.RandomStandardNormal; import org.tensorflow.op.random.RandomUniform; import org.tensorflow.op.random.RandomUniformInt; import org.tensorflow.op.random.RecordInput; +import org.tensorflow.op.random.RngReadAndSkip; +import org.tensorflow.op.random.RngSkip; import org.tensorflow.op.random.StatefulRandomBinomial; import org.tensorflow.op.random.StatefulStandardNormal; +import org.tensorflow.op.random.StatefulTruncatedNormal; +import org.tensorflow.op.random.StatefulUniform; +import org.tensorflow.op.random.StatefulUniformFullInt; +import org.tensorflow.op.random.StatefulUniformInt; import org.tensorflow.op.random.StatelessMultinomial; +import org.tensorflow.op.random.StatelessParameterizedTruncatedNormal; +import org.tensorflow.op.random.StatelessRandomBinomial; +import org.tensorflow.op.random.StatelessRandomGamma; +import org.tensorflow.op.random.StatelessRandomGetAlg; +import org.tensorflow.op.random.StatelessRandomGetKeyCounter; +import org.tensorflow.op.random.StatelessRandomGetKeyCounterAlg; import org.tensorflow.op.random.StatelessRandomNormal; +import org.tensorflow.op.random.StatelessRandomNormalV2; +import org.tensorflow.op.random.StatelessRandomPoisson; import org.tensorflow.op.random.StatelessRandomUniform; +import org.tensorflow.op.random.StatelessRandomUniformFullInt; +import org.tensorflow.op.random.StatelessRandomUniformFullIntV2; +import org.tensorflow.op.random.StatelessRandomUniformInt; +import org.tensorflow.op.random.StatelessRandomUniformIntV2; +import org.tensorflow.op.random.StatelessRandomUniformV2; import org.tensorflow.op.random.StatelessTruncatedNormal; +import org.tensorflow.op.random.StatelessTruncatedNormalV2; +import org.tensorflow.op.random.ThreadUnsafeUnigramCandidateSampler; import org.tensorflow.op.random.TruncatedNormal; import 
org.tensorflow.op.random.UniformCandidateSampler; +import org.tensorflow.types.TBool; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; @@ -46,9 +75,11 @@ /** * An API for building {@code random} operations as {@link Op Op}s * - * @see {@link Ops} + * @see Ops */ public final class RandomOps { + public final RandomExperimentalOps experimental; + private final Scope scope; private final Ops ops; @@ -56,6 +87,7 @@ public final class RandomOps { RandomOps(Ops ops) { this.scope = ops.scope(); this.ops = ops; + experimental = new RandomExperimentalOps(ops); } /** @@ -83,6 +115,64 @@ public AllCandidateSampler allCandidateSampler(Operand trueClasses, Long return AllCandidateSampler.create(scope, trueClasses, numTrue, numSampled, unique, options); } + /** + * The AnonymousRandomSeedGenerator operation + * + * @param seed The seed value + * @param seed2 The seed2 value + * @return a new instance of AnonymousRandomSeedGenerator + */ + public AnonymousRandomSeedGenerator anonymousRandomSeedGenerator(Operand seed, + Operand seed2) { + return AnonymousRandomSeedGenerator.create(scope, seed, seed2); + } + + /** + * The AnonymousSeedGenerator operation + * + * @param seed The seed value + * @param seed2 The seed2 value + * @param reshuffle The reshuffle value + * @return a new instance of AnonymousSeedGenerator + */ + public AnonymousSeedGenerator anonymousSeedGenerator(Operand seed, Operand seed2, + Operand reshuffle) { + return AnonymousSeedGenerator.create(scope, seed, seed2, reshuffle); + } + + /** + * The DeleteRandomSeedGenerator operation + * + * @param handle The handle value + * @param deleter The deleter value + * @return a new instance of DeleteRandomSeedGenerator + */ + public DeleteRandomSeedGenerator deleteRandomSeedGenerator(Operand handle, + Operand deleter) { + return DeleteRandomSeedGenerator.create(scope, handle, deleter); + } + + /** + * The DeleteSeedGenerator operation + * + * @param handle The 
handle value + * @param deleter The deleter value + * @return a new instance of DeleteSeedGenerator + */ + public DeleteSeedGenerator deleteSeedGenerator(Operand handle, + Operand deleter) { + return DeleteSeedGenerator.create(scope, handle, deleter); + } + + /** + * The DummySeedGenerator operation + * + * @return a new instance of DummySeedGenerator + */ + public DummySeedGenerator dummySeedGenerator() { + return DummySeedGenerator.create(scope); + } + /** * Generates labels for candidate sampling with a log-uniform distribution. * See explanations of candidate sampling and the data formats at @@ -113,7 +203,6 @@ public LogUniformCandidateSampler logUniformCandidateSampler(Operand tru /** * Draws samples from a multinomial distribution. * - * @param data type for {@code output} output * @param logits 2-D Tensor with shape {@code [batch_size, num_classes]}. Each slice {@code [i, :]} * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. @@ -128,7 +217,6 @@ public Multinomial multinomial(Operand logits, /** * Draws samples from a multinomial distribution. * - * @param data type for {@code output} output * @param logits 2-D Tensor with shape {@code [batch_size, num_classes]}. Each slice {@code [i, :]} * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. @@ -142,12 +230,36 @@ public Multinomial multinomial(Operand return Multinomial.create(scope, logits, numSamples, outputDtype, options); } + /** + * Non-deterministically generates some integers. + * This op may use some OS-provided source of non-determinism (e.g. an RNG), so each execution will give different results. + * + * @param shape The shape of the output tensor. 
+ * @return a new instance of NonDeterministicInts, with default output types + */ + public NonDeterministicInts nonDeterministicInts(Operand shape) { + return NonDeterministicInts.create(scope, shape); + } + + /** + * Non-deterministically generates some integers. + * This op may use some OS-provided source of non-determinism (e.g. an RNG), so each execution will give different results. + * + * @param shape The shape of the output tensor. + * @param dtype The type of the output. + * @param data type for {@code NonDeterministicInts} output and operands + * @return a new instance of NonDeterministicInts + */ + public NonDeterministicInts nonDeterministicInts( + Operand shape, Class dtype) { + return NonDeterministicInts.create(scope, shape, dtype); + } + /** * Outputs random values from a normal distribution. The parameters may each be a * scalar which applies to the entire output, or a vector of length shape[0] which * stores the parameters for each batch. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. Batches are indexed by the 0th dimension. * @param means The mean parameter of each batch. * @param stdevs The standard deviation parameter of each batch. Must be greater than 0. @@ -170,7 +282,6 @@ public ParameterizedTruncatedNormal parameterizedTruncate * transformation-rejection from pairs of uniform and normal random variables. * See http://dl.acm.org/citation.cfm?id=358414 * - * @param data type for {@code output} output * @param shape 1-D integer tensor. Shape of independent samples to draw from each * distribution described by the shape parameters given in alpha. * @param alpha A tensor in which each scalar is a "shape" parameter describing the @@ -184,6 +295,19 @@ public RandomGamma randomGamma(Operand return RandomGamma.create(scope, shape, alpha, options); } + /** + * Computes the derivative of a Gamma random sample w.r.t. {@code alpha}. 
+ * + * @param alpha The alpha value + * @param sample The sample value + * @param data type for {@code RandomGammaGrad} output and operands + * @return a new instance of RandomGammaGrad + */ + public RandomGammaGrad randomGammaGrad(Operand alpha, + Operand sample) { + return RandomGammaGrad.create(scope, alpha, sample); + } + /** * Outputs random values from the Poisson distribution(s) described by rate. * This op uses two algorithms, depending on rate. If rate >= 10, then @@ -195,7 +319,6 @@ public RandomGamma randomGamma(Operand * See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer * Programming, Volume 2. Addison Wesley * - * @param data type for {@code output} output * @param shape 1-D integer tensor. Shape of independent samples to draw from each * distribution described by the shape parameters given in rate. * @param rate A tensor in which each scalar is a "rate" parameter describing the @@ -219,7 +342,6 @@ public RandomPoisson randomPoisson(Operand shape, * See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer * Programming, Volume 2. Addison Wesley * - * @param data type for {@code output} output * @param shape 1-D integer tensor. Shape of independent samples to draw from each * distribution described by the shape parameters given in rate. * @param rate A tensor in which each scalar is a "rate" parameter describing the @@ -245,7 +367,6 @@ public RandomPoisson randomPoisson(Operand * - * @param data type for {@code output} output * @param value The tensor to be shuffled. * @param options carries optional attribute values * @param data type for {@code RandomShuffle} output and operands @@ -260,7 +381,6 @@ public RandomShuffle randomShuffle(Operand value, * Outputs random values from a normal distribution. * The generated values will have mean 0 and standard deviation 1. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param dtype The type of the output. 
* @param options carries optional attribute values @@ -277,7 +397,6 @@ public RandomStandardNormal randomStandardNormal( * The generated values follow a uniform distribution in the range {@code [0, 1)}. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param dtype The type of the output. * @param options carries optional attribute values @@ -298,7 +417,6 @@ public RandomUniform randomUniform(Operand data type for {@code output} output * @param shape The shape of the output tensor. * @param minval 0-D. Inclusive lower bound on the generated integers. * @param maxval 0-D. Exclusive upper bound on the generated integers. @@ -322,10 +440,44 @@ public RecordInput recordInput(String filePattern, RecordInput.Options... option return RecordInput.create(scope, filePattern, options); } + /** + * Advance the counter of a counter-based RNG. + * The state of the RNG after + * {@code rng_read_and_skip(n)} will be the same as that after {@code uniform([n])} + * (or any other distribution). The actual increment added to the + * counter is an unspecified implementation choice. + *

In the case that the input algorithm is RNG_ALG_AUTO_SELECT, the counter in the state needs to be of size int64[2], the current maximal counter size among algorithms. In this case, this op will manage the counter as if it is an 128-bit integer with layout [lower_64bits, higher_64bits]. If an algorithm needs less than 128 bits for the counter, it should use the left portion of the int64[2]. In this way, the int64[2] is compatible with all current RNG algorithms (Philox, ThreeFry and xla::RandomAlgorithm::RNG_DEFAULT). Downstream RNG ops can thus use this counter with any RNG algorithm. + * + * @param resource The handle of the resource variable that stores the state of the RNG. The state consists of the counter followed by the key. + * @param alg The RNG algorithm. + * @param delta The amount of advancement. + * @return a new instance of RngReadAndSkip + */ + public RngReadAndSkip rngReadAndSkip(Operand resource, Operand alg, + Operand delta) { + return RngReadAndSkip.create(scope, resource, alg, delta); + } + + /** + * Advance the counter of a counter-based RNG. + * The state of the RNG after + * {@code rng_skip(n)} will be the same as that after {@code stateful_uniform([n])} + * (or any other distribution). The actual increment added to the + * counter is an unspecified implementation detail. + * + * @param resource The handle of the resource variable that stores the state of the RNG. + * @param algorithm The RNG algorithm. + * @param delta The amount of advancement. 
+ * @return a new instance of RngSkip + */ + public RngSkip rngSkip(Operand resource, Operand algorithm, + Operand delta) { + return RngSkip.create(scope, resource, algorithm, delta); + } + /** * The StatefulRandomBinomial operation * - * @param data type for {@code output} output * @param resource The resource value * @param algorithm The algorithm value * @param shape The shape value @@ -343,7 +495,6 @@ public StatefulRandomBinomial statefulRandomBinomial /** * The StatefulRandomBinomial operation * - * @param data type for {@code output} output * @param resource The resource value * @param algorithm The algorithm value * @param shape The shape value @@ -364,7 +515,6 @@ public StatefulRandomBinomial stateful * Outputs random values from a normal distribution. * The generated values will have mean 0 and standard deviation 1. * - * @param data type for {@code output} output * @param resource The handle of the resource variable that stores the state of the RNG. * @param algorithm The RNG algorithm. * @param shape The shape of the output tensor. @@ -379,7 +529,6 @@ public StatefulStandardNormal statefulStandardNormal(Operand data type for {@code output} output * @param resource The handle of the resource variable that stores the state of the RNG. * @param algorithm The RNG algorithm. * @param shape The shape of the output tensor. @@ -393,10 +542,117 @@ public StatefulStandardNormal statefulStandardNormal( return StatefulStandardNormal.create(scope, resource, algorithm, shape, dtype); } + /** + * Outputs random values from a truncated normal distribution. + * The generated values follow a normal distribution with mean 0 and standard + * deviation 1, except that values whose magnitude is more than 2 standard + * deviations from the mean are dropped and re-picked. + * + * @param resource The handle of the resource variable that stores the state of the RNG. + * @param algorithm The RNG algorithm. + * @param shape The shape of the output tensor. 
+ * @return a new instance of StatefulTruncatedNormal, with default output types + */ + public StatefulTruncatedNormal statefulTruncatedNormal( + Operand resource, Operand algorithm, + Operand shape) { + return StatefulTruncatedNormal.create(scope, resource, algorithm, shape); + } + + /** + * Outputs random values from a truncated normal distribution. + * The generated values follow a normal distribution with mean 0 and standard + * deviation 1, except that values whose magnitude is more than 2 standard + * deviations from the mean are dropped and re-picked. + * + * @param resource The handle of the resource variable that stores the state of the RNG. + * @param algorithm The RNG algorithm. + * @param shape The shape of the output tensor. + * @param dtype The type of the output. + * @param data type for {@code StatefulTruncatedNormal} output and operands + * @return a new instance of StatefulTruncatedNormal + */ + public StatefulTruncatedNormal statefulTruncatedNormal( + Operand resource, Operand algorithm, Operand shape, + Class dtype) { + return StatefulTruncatedNormal.create(scope, resource, algorithm, shape, dtype); + } + + /** + * Outputs random values from a uniform distribution. + * The generated values follow a uniform distribution in the range {@code [0, 1)}. The + * lower bound 0 is included in the range, while the upper bound 1 is excluded. + * + * @param resource The handle of the resource variable that stores the state of the RNG. + * @param algorithm The RNG algorithm. + * @param shape The shape of the output tensor. + * @return a new instance of StatefulUniform, with default output types + */ + public StatefulUniform statefulUniform(Operand resource, + Operand algorithm, Operand shape) { + return StatefulUniform.create(scope, resource, algorithm, shape); + } + + /** + * Outputs random values from a uniform distribution. + * The generated values follow a uniform distribution in the range {@code [0, 1)}. 
The + * lower bound 0 is included in the range, while the upper bound 1 is excluded. + * + * @param resource The handle of the resource variable that stores the state of the RNG. + * @param algorithm The RNG algorithm. + * @param shape The shape of the output tensor. + * @param dtype The type of the output. + * @param data type for {@code StatefulUniform} output and operands + * @return a new instance of StatefulUniform + */ + public StatefulUniform statefulUniform(Operand resource, + Operand algorithm, Operand shape, Class dtype) { + return StatefulUniform.create(scope, resource, algorithm, shape, dtype); + } + + /** + * Outputs random integers from a uniform distribution. + * The generated values are uniform integers covering the whole range of {@code dtype}. + * + * @param resource The handle of the resource variable that stores the state of the RNG. + * @param algorithm The RNG algorithm. + * @param shape The shape of the output tensor. + * @param dtype The type of the output. + * @param data type for {@code StatefulUniformFullInt} output and operands + * @return a new instance of StatefulUniformFullInt + */ + public StatefulUniformFullInt statefulUniformFullInt( + Operand resource, Operand algorithm, Operand shape, + Class dtype) { + return StatefulUniformFullInt.create(scope, resource, algorithm, shape, dtype); + } + + /** + * Outputs random integers from a uniform distribution. + * The generated values are uniform integers in the range {@code [minval, maxval)}. + * The lower bound {@code minval} is included in the range, while the upper bound + * {@code maxval} is excluded. + *

The random integers are slightly biased unless {@code maxval - minval} is an exact + * power of two. The bias is small for values of {@code maxval - minval} significantly + * smaller than the range of the output (either {@code 2^32} or {@code 2^64}). + * + * @param resource The handle of the resource variable that stores the state of the RNG. + * @param algorithm The RNG algorithm. + * @param shape The shape of the output tensor. + * @param minval Minimum value (inclusive, scalar). + * @param maxval Maximum value (exclusive, scalar). + * @param data type for {@code StatefulUniformInt} output and operands + * @return a new instance of StatefulUniformInt + */ + public StatefulUniformInt statefulUniformInt( + Operand resource, Operand algorithm, Operand shape, + Operand minval, Operand maxval) { + return StatefulUniformInt.create(scope, resource, algorithm, shape, minval, maxval); + } + /** * Draws samples from a multinomial distribution. * - * @param data type for {@code output} output * @param logits 2-D Tensor with shape {@code [batch_size, num_classes]}. Each slice {@code [i, :]} * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. @@ -411,7 +667,6 @@ public StatelessMultinomial statelessMultinomial(Operand data type for {@code output} output * @param logits 2-D Tensor with shape {@code [batch_size, num_classes]}. Each slice {@code [i, :]} * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. @@ -426,12 +681,126 @@ public StatelessMultinomial statelessMultinomial( return StatelessMultinomial.create(scope, logits, numSamples, seed, outputDtype); } + /** + * The StatelessParameterizedTruncatedNormal operation + * + * @param shape The shape of the output tensor. + * @param seed 2 seeds (shape [2]). + * @param means The mean parameter of each batch. 
+ * @param stddevs The standard deviation parameter of each batch. Must be greater than 0. + * @param minvals The minimum cutoff. May be -infinity. + * @param maxvals The maximum cutoff. May be +infinity, and must be more than the minval + * for each batch. + * @param data type for {@code StatelessParameterizedTruncatedNormal} output and operands + * @return a new instance of StatelessParameterizedTruncatedNormal + */ + public StatelessParameterizedTruncatedNormal statelessParameterizedTruncatedNormal( + Operand shape, Operand seed, Operand means, + Operand stddevs, Operand minvals, Operand maxvals) { + return StatelessParameterizedTruncatedNormal.create(scope, shape, seed, means, stddevs, minvals, maxvals); + } + + /** + * Outputs deterministic pseudorandom random numbers from a binomial distribution. + * Outputs random values from a binomial distribution. + *

The outputs are a deterministic function of {@code shape}, {@code seed}, {@code counts}, and {@code probs}. + * + * @param shape The shape of the output tensor. + * @param seed 2 seeds (shape [2]). + * @param counts The counts of the binomial distribution. Must be broadcastable with {@code probs}, + * and broadcastable with the rightmost dimensions of {@code shape}. + * @param probs The probability of success for the binomial distribution. Must be broadcastable + * with {@code counts} and broadcastable with the rightmost dimensions of {@code shape}. + * @param data type for {@code StatelessRandomBinomial} output and operands + * @return a new instance of StatelessRandomBinomial, with default output types + */ + public StatelessRandomBinomial statelessRandomBinomial( + Operand shape, Operand seed, Operand counts, + Operand probs) { + return StatelessRandomBinomial.create(scope, shape, seed, counts, probs); + } + + /** + * Outputs deterministic pseudorandom random numbers from a binomial distribution. + * Outputs random values from a binomial distribution. + *

The outputs are a deterministic function of {@code shape}, {@code seed}, {@code counts}, and {@code probs}. + * + * @param shape The shape of the output tensor. + * @param seed 2 seeds (shape [2]). + * @param counts The counts of the binomial distribution. Must be broadcastable with {@code probs}, + * and broadcastable with the rightmost dimensions of {@code shape}. + * @param probs The probability of success for the binomial distribution. Must be broadcastable + * with {@code counts} and broadcastable with the rightmost dimensions of {@code shape}. + * @param dtype The type of the output. + * @param data type for {@code StatelessRandomBinomial} output and operands + * @param data type for {@code StatelessRandomBinomial} output and operands + * @return a new instance of StatelessRandomBinomial + */ + public StatelessRandomBinomial statelessRandomBinomial( + Operand shape, Operand seed, Operand counts, + Operand probs, Class dtype) { + return StatelessRandomBinomial.create(scope, shape, seed, counts, probs, dtype); + } + + /** + * Outputs deterministic pseudorandom random numbers from a gamma distribution. + * Outputs random values from a gamma distribution. + *

The outputs are a deterministic function of the inputs. + * + * @param shape The shape of the output tensor. + * @param key Key for the counter-based RNG algorithm (shape uint64[1]). + * @param counter Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. + * @param alg The RNG algorithm (shape int32[]). + * @param alpha The concentration of the gamma distribution. Shape must match the rightmost + * dimensions of {@code shape}. + * @param data type for {@code StatelessRandomGammaV3} output and operands + * @return a new instance of StatelessRandomGamma + */ + public StatelessRandomGamma statelessRandomGamma( + Operand shape, Operand key, + Operand counter, Operand alg, Operand alpha) { + return StatelessRandomGamma.create(scope, shape, key, counter, alg, alpha); + } + + /** + * Picks the best counter-based RNG algorithm based on device. + * This op picks the best counter-based RNG algorithm based on device. + * + * @return a new instance of StatelessRandomGetAlg + */ + public StatelessRandomGetAlg statelessRandomGetAlg() { + return StatelessRandomGetAlg.create(scope); + } + + /** + * Scrambles seed into key and counter, using the best algorithm based on device. + * This op scrambles a shape-[2] seed into a key and a counter, both needed by counter-based RNG algorithms. The scrambing uses the best algorithm based on device. The scrambling is opaque but approximately satisfies the property that different seed results in different key/counter pair (which will in turn result in different random numbers). + * + * @param seed 2 seeds (shape [2]). 
+ * @return a new instance of StatelessRandomGetKeyCounter + */ + public StatelessRandomGetKeyCounter statelessRandomGetKeyCounter( + Operand seed) { + return StatelessRandomGetKeyCounter.create(scope, seed); + } + + /** + * Picks the best algorithm based on device, and scrambles seed into key and counter. + * This op picks the best counter-based RNG algorithm based on device, and scrambles a shape-[2] seed into a key and a counter, both needed by the counter-based algorithm. The scrambling is opaque but approximately satisfies the property that different seed results in different key/counter pair (which will in turn result in different random numbers). + * + * @param seed 2 seeds (shape [2]). + * @return a new instance of StatelessRandomGetKeyCounterAlg + */ + public StatelessRandomGetKeyCounterAlg statelessRandomGetKeyCounterAlg( + Operand seed) { + return StatelessRandomGetKeyCounterAlg.create(scope, seed); + } + /** * Outputs deterministic pseudorandom values from a normal distribution. * The generated values will have mean 0 and standard deviation 1. *

The outputs are a deterministic function of {@code shape} and {@code seed}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). * @return a new instance of StatelessRandomNormal, with default output types @@ -446,7 +815,6 @@ public StatelessRandomNormal statelessRandomNormal(OperandThe outputs are a deterministic function of {@code shape} and {@code seed}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). * @param dtype The type of the output. @@ -458,13 +826,66 @@ public StatelessRandomNormal statelessRandomNormal( return StatelessRandomNormal.create(scope, shape, seed, dtype); } + /** + * Outputs deterministic pseudorandom values from a normal distribution. + * The generated values will have mean 0 and standard deviation 1. + *

The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter} and {@code alg}. + * + * @param shape The shape of the output tensor. + * @param key Key for the counter-based RNG algorithm (shape uint64[1]). + * @param counter Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. + * @param alg The RNG algorithm (shape int32[]). + * @return a new instance of StatelessRandomNormalV2, with default output types + */ + public StatelessRandomNormalV2 statelessRandomNormalV2(Operand shape, + Operand key, Operand counter, Operand alg) { + return StatelessRandomNormalV2.create(scope, shape, key, counter, alg); + } + + /** + * Outputs deterministic pseudorandom values from a normal distribution. + * The generated values will have mean 0 and standard deviation 1. + *

The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter} and {@code alg}. + * + * @param shape The shape of the output tensor. + * @param key Key for the counter-based RNG algorithm (shape uint64[1]). + * @param counter Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. + * @param alg The RNG algorithm (shape int32[]). + * @param dtype The type of the output. + * @param data type for {@code StatelessRandomNormalV2} output and operands + * @return a new instance of StatelessRandomNormalV2 + */ + public StatelessRandomNormalV2 statelessRandomNormalV2( + Operand shape, Operand key, + Operand counter, Operand alg, Class dtype) { + return StatelessRandomNormalV2.create(scope, shape, key, counter, alg, dtype); + } + + /** + * Outputs deterministic pseudorandom random numbers from a Poisson distribution. + * Outputs random values from a Poisson distribution. + *

The outputs are a deterministic function of {@code shape}, {@code seed}, and {@code lam}. + * + * @param shape The shape of the output tensor. + * @param seed 2 seeds (shape [2]). + * @param lam The rate of the Poisson distribution. Shape must match the rightmost dimensions + * of {@code shape}. + * @param dtype The type of the output. + * @param data type for {@code StatelessRandomPoisson} output and operands + * @return a new instance of StatelessRandomPoisson + */ + public StatelessRandomPoisson statelessRandomPoisson( + Operand shape, Operand seed, + Operand lam, Class dtype) { + return StatelessRandomPoisson.create(scope, shape, seed, lam, dtype); + } + /** * Outputs deterministic pseudorandom random values from a uniform distribution. * The generated values follow a uniform distribution in the range {@code [0, 1)}. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. *

The outputs are a deterministic function of {@code shape} and {@code seed}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). * @return a new instance of StatelessRandomUniform, with default output types @@ -480,7 +901,6 @@ public StatelessRandomUniform statelessRandomUniform(OperandThe outputs are a deterministic function of {@code shape} and {@code seed}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). * @param dtype The type of the output. @@ -492,6 +912,117 @@ public StatelessRandomUniform statelessRandomUniform( return StatelessRandomUniform.create(scope, shape, seed, dtype); } + /** + * Outputs deterministic pseudorandom random integers from a uniform distribution. + * The generated values are uniform integers covering the whole range of {@code dtype}. + *

The outputs are a deterministic function of {@code shape} and {@code seed}. + * + * @param shape The shape of the output tensor. + * @param seed 2 seeds (shape [2]). + * @param dtype The type of the output. + * @param data type for {@code StatelessRandomUniformFullInt} output and operands + * @return a new instance of StatelessRandomUniformFullInt + */ + public StatelessRandomUniformFullInt statelessRandomUniformFullInt( + Operand shape, Operand seed, Class dtype) { + return StatelessRandomUniformFullInt.create(scope, shape, seed, dtype); + } + + /** + * Outputs deterministic pseudorandom random integers from a uniform distribution. + * The generated values are uniform integers covering the whole range of {@code dtype}. + *

The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter} and {@code alg}. + * + * @param shape The shape of the output tensor. + * @param key Key for the counter-based RNG algorithm (shape uint64[1]). + * @param counter Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. + * @param alg The RNG algorithm (shape int32[]). + * @param dtype The type of the output. + * @param data type for {@code StatelessRandomUniformFullIntV2} output and operands + * @return a new instance of StatelessRandomUniformFullIntV2 + */ + public StatelessRandomUniformFullIntV2 statelessRandomUniformFullIntV2( + Operand shape, Operand key, + Operand counter, Operand alg, Class dtype) { + return StatelessRandomUniformFullIntV2.create(scope, shape, key, counter, alg, dtype); + } + + /** + * Outputs deterministic pseudorandom random integers from a uniform distribution. + * The generated values follow a uniform distribution in the range {@code [minval, maxval)}. + *

The outputs are a deterministic function of {@code shape}, {@code seed}, {@code minval}, and {@code maxval}. + * + * @param shape The shape of the output tensor. + * @param seed 2 seeds (shape [2]). + * @param minval Minimum value (inclusive, scalar). + * @param maxval Maximum value (exclusive, scalar). + * @param data type for {@code StatelessRandomUniformInt} output and operands + * @return a new instance of StatelessRandomUniformInt + */ + public StatelessRandomUniformInt statelessRandomUniformInt( + Operand shape, Operand seed, Operand minval, + Operand maxval) { + return StatelessRandomUniformInt.create(scope, shape, seed, minval, maxval); + } + + /** + * Outputs deterministic pseudorandom random integers from a uniform distribution. + * The generated values follow a uniform distribution in the range {@code [minval, maxval)}. + *

The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter}, {@code alg}, {@code minval} and {@code maxval}. + * + * @param shape The shape of the output tensor. + * @param key Key for the counter-based RNG algorithm (shape uint64[1]). + * @param counter Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. + * @param alg The RNG algorithm (shape int32[]). + * @param minval Minimum value (inclusive, scalar). + * @param maxval Maximum value (exclusive, scalar). + * @param data type for {@code StatelessRandomUniformIntV2} output and operands + * @return a new instance of StatelessRandomUniformIntV2 + */ + public StatelessRandomUniformIntV2 statelessRandomUniformIntV2( + Operand shape, Operand key, + Operand counter, Operand alg, Operand minval, Operand maxval) { + return StatelessRandomUniformIntV2.create(scope, shape, key, counter, alg, minval, maxval); + } + + /** + * Outputs deterministic pseudorandom random values from a uniform distribution. + * The generated values follow a uniform distribution in the range {@code [0, 1)}. The + * lower bound 0 is included in the range, while the upper bound 1 is excluded. + *

The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter} and {@code alg}. + * + * @param shape The shape of the output tensor. + * @param key Key for the counter-based RNG algorithm (shape uint64[1]). + * @param counter Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. + * @param alg The RNG algorithm (shape int32[]). + * @return a new instance of StatelessRandomUniformV2, with default output types + */ + public StatelessRandomUniformV2 statelessRandomUniformV2( + Operand shape, Operand key, + Operand counter, Operand alg) { + return StatelessRandomUniformV2.create(scope, shape, key, counter, alg); + } + + /** + * Outputs deterministic pseudorandom random values from a uniform distribution. + * The generated values follow a uniform distribution in the range {@code [0, 1)}. The + * lower bound 0 is included in the range, while the upper bound 1 is excluded. + *

The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter} and {@code alg}. + * + * @param shape The shape of the output tensor. + * @param key Key for the counter-based RNG algorithm (shape uint64[1]). + * @param counter Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. + * @param alg The RNG algorithm (shape int32[]). + * @param dtype The type of the output. + * @param data type for {@code StatelessRandomUniformV2} output and operands + * @return a new instance of StatelessRandomUniformV2 + */ + public StatelessRandomUniformV2 statelessRandomUniformV2( + Operand shape, Operand key, + Operand counter, Operand alg, Class dtype) { + return StatelessRandomUniformV2.create(scope, shape, key, counter, alg, dtype); + } + /** * Outputs deterministic pseudorandom values from a truncated normal distribution. * The generated values follow a normal distribution with mean 0 and standard @@ -499,7 +1030,6 @@ public StatelessRandomUniform statelessRandomUniform( * deviations from the mean are dropped and re-picked. *

The outputs are a deterministic function of {@code shape} and {@code seed}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). * @return a new instance of StatelessTruncatedNormal, with default output types @@ -516,7 +1046,6 @@ public StatelessTruncatedNormal statelessTruncatedNormal( * deviations from the mean are dropped and re-picked. *

The outputs are a deterministic function of {@code shape} and {@code seed}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). * @param dtype The type of the output. @@ -528,13 +1057,79 @@ public StatelessTruncatedNormal statelessTruncatedNormal( return StatelessTruncatedNormal.create(scope, shape, seed, dtype); } + /** + * Outputs deterministic pseudorandom values from a truncated normal distribution. + * The generated values follow a normal distribution with mean 0 and standard + * deviation 1, except that values whose magnitude is more than 2 standard + * deviations from the mean are dropped and re-picked. + *

The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter} and {@code alg}. + * + * @param shape The shape of the output tensor. + * @param key Key for the counter-based RNG algorithm (shape uint64[1]). + * @param counter Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. + * @param alg The RNG algorithm (shape int32[]). + * @return a new instance of StatelessTruncatedNormalV2, with default output types + */ + public StatelessTruncatedNormalV2 statelessTruncatedNormalV2( + Operand shape, Operand key, + Operand counter, Operand alg) { + return StatelessTruncatedNormalV2.create(scope, shape, key, counter, alg); + } + + /** + * Outputs deterministic pseudorandom values from a truncated normal distribution. + * The generated values follow a normal distribution with mean 0 and standard + * deviation 1, except that values whose magnitude is more than 2 standard + * deviations from the mean are dropped and re-picked. + *

The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter} and {@code alg}. + * + * @param shape The shape of the output tensor. + * @param key Key for the counter-based RNG algorithm (shape uint64[1]). + * @param counter Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. + * @param alg The RNG algorithm (shape int32[]). + * @param dtype The type of the output. + * @param data type for {@code StatelessTruncatedNormalV2} output and operands + * @return a new instance of StatelessTruncatedNormalV2 + */ + public StatelessTruncatedNormalV2 statelessTruncatedNormalV2( + Operand shape, Operand key, + Operand counter, Operand alg, Class dtype) { + return StatelessTruncatedNormalV2.create(scope, shape, key, counter, alg, dtype); + } + + /** + * Generates labels for candidate sampling with a learned unigram distribution. + * See explanations of candidate sampling and the data formats at + * go/candidate-sampling. + *

For each batch, this op picks a single set of sampled candidate labels. + *

The advantages of sampling candidates per-batch are simplicity and the + * possibility of efficient dense matrix multiplication. The disadvantage is that + * the sampled candidates must be chosen independently of the context and of the + * true labels. + * + * @param trueClasses A batch_size * num_true matrix, in which each row contains the + * IDs of the num_true target_classes in the corresponding original label. + * @param numTrue Number of true labels per context. + * @param numSampled Number of candidates to randomly sample. + * @param unique If unique is true, we sample with rejection, so that all sampled + * candidates in a batch are unique. This requires some approximation to + * estimate the post-rejection sampling probabilities. + * @param rangeMax The sampler will sample integers from the interval [0, range_max). + * @param options carries optional attribute values + * @return a new instance of ThreadUnsafeUnigramCandidateSampler + */ + public ThreadUnsafeUnigramCandidateSampler threadUnsafeUnigramCandidateSampler( + Operand trueClasses, Long numTrue, Long numSampled, Boolean unique, Long rangeMax, + ThreadUnsafeUnigramCandidateSampler.Options... options) { + return ThreadUnsafeUnigramCandidateSampler.create(scope, trueClasses, numTrue, numSampled, unique, rangeMax, options); + } + /** * Outputs random values from a truncated normal distribution. * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param dtype The type of the output. 
* @param options carries optional attribute values diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ShapeOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ShapeOps.java index 4b00d892c94..68cb802f86d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ShapeOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ShapeOps.java @@ -28,7 +28,7 @@ /** * An API for building {@code shape} operations as {@link Op Op}s * - * @see {@link Ops} + * @see Ops */ public final class ShapeOps { private final Scope scope; @@ -388,7 +388,8 @@ public Operand tail(Shape shape, Class type) { * shape. * * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() + * @param n the number of leading dimensions to get, must be less than or equal to the shape's + * numDimensions() * @return a 1-dimensional operand with the dimensions matching the first n dimensions of the * shape */ @@ -401,7 +402,8 @@ public Operand take(Shape shape, Operand n) { * shape. * * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() + * @param n the number of leading dimensions to get, must be less than or equal to the shape's + * numDimensions() * @param type the shape datatype. * @param the shape datatype. * @return a 1-dimensional operand with the dimensions matching * the first n dimensions of the @@ -416,7 +418,8 @@ public Operand take(Shape shape, Operand n, Class Operand takeLast(Shape shape, Operand * shape. * * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() + * @param n the number of leading dimensions to get, must be less than or equal to the shape's + * numDimensions() * @param type the shape datatype. * @param the shape datatype. 
* @return a 1-dimensional operand containing the dimensions matching the last n dimensions of the diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/SignalOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/SignalOps.java index 0fb3e8edde0..ac5703c264a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/SignalOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/SignalOps.java @@ -48,7 +48,7 @@ /** * An API for building {@code signal} operations as {@link Op Op}s * - * @see {@link Ops} + * @see Ops */ public final class SignalOps { private final Scope scope; @@ -125,7 +125,6 @@ public BatchIfft3d batchIfft3d(Operand input) { * Computes the 1-dimensional discrete Fourier transform over the inner-most * dimension of {@code input}. * - * @param data type for {@code output} output * @param input A complex tensor. * @param data type for {@code FFT} output and operands * @return a new instance of Fft @@ -139,7 +138,6 @@ public Fft fft(Operand input) { * Computes the 2-dimensional discrete Fourier transform over the inner-most * 2 dimensions of {@code input}. * - * @param data type for {@code output} output * @param input A complex tensor. * @param data type for {@code FFT2D} output and operands * @return a new instance of Fft2d @@ -153,7 +151,6 @@ public Fft2d fft2d(Operand input) { * Computes the 3-dimensional discrete Fourier transform over the inner-most 3 * dimensions of {@code input}. * - * @param data type for {@code output} output * @param input A complex tensor. * @param data type for {@code FFT3D} output and operands * @return a new instance of Fft3d @@ -173,7 +170,6 @@ public Fft3d fft3d(Operand input) { *

Axes mean the dimensions to perform the transform on. Default is to perform on * all axes. * - * @param data type for {@code output} output * @param input A complex tensor. * @param fftLength An int32 tensor. The FFT length for each dimension. * @param axes An int32 tensor with a same shape as fft_length. Axes to perform the transform. @@ -190,7 +186,6 @@ public FftNd fftNd(Operand input, Operand fftLen * Computes the inverse 1-dimensional discrete Fourier transform over the * inner-most dimension of {@code input}. * - * @param data type for {@code output} output * @param input A complex tensor. * @param data type for {@code IFFT} output and operands * @return a new instance of Ifft @@ -204,7 +199,6 @@ public Ifft ifft(Operand input) { * Computes the inverse 2-dimensional discrete Fourier transform over the * inner-most 2 dimensions of {@code input}. * - * @param data type for {@code output} output * @param input A complex tensor. * @param data type for {@code IFFT2D} output and operands * @return a new instance of Ifft2d @@ -218,7 +212,6 @@ public Ifft2d ifft2d(Operand input) { * Computes the inverse 3-dimensional discrete Fourier transform over the * inner-most 3 dimensions of {@code input}. * - * @param data type for {@code output} output * @param input A complex tensor. * @param data type for {@code IFFT3D} output and operands * @return a new instance of Ifft3d @@ -238,7 +231,6 @@ public Ifft3d ifft3d(Operand input) { *

Axes mean the dimensions to perform the transform on. Default is to perform on * all axes. * - * @param data type for {@code output} output * @param input A complex tensor. * @param fftLength An int32 tensor. The FFT length for each dimension. * @param axes An int32 tensor with a same shape as fft_length. Axes to perform the transform. @@ -264,7 +256,6 @@ public IfftNd ifftNd(Operand input, Operand fftL * than the corresponding dimension of {@code input}, the dimension is cropped. If it is * larger, the dimension is padded with zeros. * - * @param data type for {@code output} output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [1]. The FFT length. * @return a new instance of Irfft, with default output types @@ -287,7 +278,6 @@ public Irfft irfft(Operand input, Operand fft * than the corresponding dimension of {@code input}, the dimension is cropped. If it is * larger, the dimension is padded with zeros. * - * @param data type for {@code output} output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [1]. The FFT length. * @param Treal The value of the Treal attribute @@ -314,7 +304,6 @@ public Irfft irfft(Operand input, * corresponding dimension of {@code input}, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param data type for {@code output} output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. * @return a new instance of Irfft2d, with default output types @@ -338,7 +327,6 @@ public Irfft2d irfft2d(Operand input, Operand * corresponding dimension of {@code input}, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param data type for {@code output} output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. 
* @param Treal The value of the Treal attribute @@ -365,7 +353,6 @@ public Irfft2d irfft2d(Operand input, * corresponding dimension of {@code input}, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param data type for {@code output} output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. * @return a new instance of Irfft3d, with default output types @@ -389,7 +376,6 @@ public Irfft3d irfft3d(Operand input, Operand * corresponding dimension of {@code input}, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param data type for {@code output} output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. * @param Treal The value of the Treal attribute @@ -413,7 +399,6 @@ public Irfft3d irfft3d(Operand input, *

Axes mean the dimensions to perform the transform on. Default is to perform on * all axes. * - * @param data type for {@code output} output * @param input A complex tensor. * @param fftLength An int32 tensor. The FFT length for each dimension. * @param axes An int32 tensor with a same shape as fft_length. Axes to perform the transform. @@ -436,7 +421,6 @@ public IrfftNd irfftNd(Operand input, Operand *

Axes mean the dimensions to perform the transform on. Default is to perform on * all axes. * - * @param data type for {@code output} output * @param input A complex tensor. * @param fftLength An int32 tensor. The FFT length for each dimension. * @param axes An int32 tensor with a same shape as fft_length. Axes to perform the transform. @@ -460,7 +444,6 @@ public IrfftNd irfftNd(Operand input, * corresponding dimension of {@code input}, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param data type for {@code output} output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [1]. The FFT length. * @param Tcomplex The value of the Tcomplex attribute @@ -484,7 +467,6 @@ public Rfft rfft(Operand input, Operand< * corresponding dimension of {@code input}, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param data type for {@code output} output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. * @param Tcomplex The value of the Tcomplex attribute @@ -508,7 +490,6 @@ public Rfft2d rfft2d(Operand input, * corresponding dimension of {@code input}, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param data type for {@code output} output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. * @param Tcomplex The value of the Tcomplex attribute @@ -532,7 +513,6 @@ public Rfft3d rfft3d(Operand input, *

Axes mean the dimensions to perform the transform on. Default is to perform on * all axes. * - * @param data type for {@code output} output * @param input A complex tensor. * @param fftLength An int32 tensor. The FFT length for each dimension. * @param axes An int32 tensor with a same shape as fft_length. Axes to perform the transform. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/SparseOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/SparseOps.java index c2b253fe29e..f6f83acce58 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/SparseOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/SparseOps.java @@ -17,13 +17,18 @@ // package org.tensorflow.op; +import java.util.List; import org.tensorflow.Operand; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.sparse.AddManySparseToTensorsMap; import org.tensorflow.op.sparse.AddSparseToTensorsMap; +import org.tensorflow.op.sparse.ConvertToListOfSparseCoreCooTensors; +import org.tensorflow.op.sparse.ConvertToSparseCoreCsrWrappedCooTensor; +import org.tensorflow.op.sparse.DenseCountSparseOutput; import org.tensorflow.op.sparse.DenseToDenseSetOperation; import org.tensorflow.op.sparse.DenseToSparseSetOperation; import org.tensorflow.op.sparse.DeserializeSparse; +import org.tensorflow.op.sparse.GetStatsFromListOfSparseCoreCooTensors; import org.tensorflow.op.sparse.SparseAccumulatorApplyGradient; import org.tensorflow.op.sparse.SparseAccumulatorTakeGradient; import org.tensorflow.op.sparse.SparseAdd; @@ -31,6 +36,7 @@ import org.tensorflow.op.sparse.SparseBincount; import org.tensorflow.op.sparse.SparseConcat; import org.tensorflow.op.sparse.SparseConditionalAccumulator; +import org.tensorflow.op.sparse.SparseCountSparseOutput; import org.tensorflow.op.sparse.SparseCross; import org.tensorflow.op.sparse.SparseCrossHashed; import 
org.tensorflow.op.sparse.SparseDenseCwiseAdd; @@ -66,6 +72,7 @@ import org.tensorflow.op.sparse.SparseToSparseSetOperation; import org.tensorflow.op.sparse.TakeManySparseFromTensorsMap; import org.tensorflow.types.TBool; +import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -75,7 +82,7 @@ /** * An API for building {@code sparse} operations as {@link Op Op}s * - * @see {@link Ops} + * @see Ops */ public final class SparseOps { private final Scope scope; @@ -149,6 +156,74 @@ public AddSparseToTensorsMap addSparseToTensorsMap(Operand sparseIndices return AddSparseToTensorsMap.create(scope, sparseIndices, sparseValues, sparseShape, options); } + /** + * The ConvertToListOfSparseCoreCooTensors operation + * + * @param indicesOrRowSplits The indicesOrRowSplits value + * @param values The values value + * @param weights The weights value + * @param sampleCount The value of the sampleCount attribute + * @param numScPerChip The value of the numScPerChip attribute + * @param rowOffset The value of the rowOffset attribute + * @param colOffset The value of the colOffset attribute + * @param colShift The value of the colShift attribute + * @param numScShards The value of the numScShards attribute + * @param stackedTableSampleCount The value of the stackedTableSampleCount attribute + * @param combiner The value of the combiner attribute + * @return a new instance of ConvertToListOfSparseCoreCooTensors + */ + public ConvertToListOfSparseCoreCooTensors convertToListOfSparseCoreCooTensors( + Operand indicesOrRowSplits, Operand values, Operand weights, + Long sampleCount, Long numScPerChip, Long rowOffset, Long colOffset, Long colShift, + Long numScShards, Long stackedTableSampleCount, String combiner) { + return ConvertToListOfSparseCoreCooTensors.create(scope, indicesOrRowSplits, values, weights, sampleCount, numScPerChip, rowOffset, colOffset, colShift, numScShards, 
stackedTableSampleCount, combiner); + } + + /** + * The ConvertToSparseCoreCsrWrappedCooTensor operation + * + * @param sortedRowIdsList The sortedRowIdsList value + * @param sortedColIdsList The sortedColIdsList value + * @param sortedGainsList The sortedGainsList value + * @param idCountsList The idCountsList value + * @param splits The splits value + * @param sampleCountPerSc The value of the sampleCountPerSc attribute + * @param numReplica The value of the numReplica attribute + * @param maxMinibatchesPerSc The value of the maxMinibatchesPerSc attribute + * @param maxIdsPerChipPerSample The value of the maxIdsPerChipPerSample attribute + * @param tableVocabSize The value of the tableVocabSize attribute + * @param featureWidth The value of the featureWidth attribute + * @param tableName The value of the tableName attribute + * @param allowIdDropping The value of the allowIdDropping attribute + * @return a new instance of ConvertToSparseCoreCsrWrappedCooTensor + */ + public ConvertToSparseCoreCsrWrappedCooTensor convertToSparseCoreCsrWrappedCooTensor( + Iterable> sortedRowIdsList, Iterable> sortedColIdsList, + Iterable> sortedGainsList, Iterable> idCountsList, + Operand splits, Long sampleCountPerSc, Long numReplica, Long maxMinibatchesPerSc, + Long maxIdsPerChipPerSample, Long tableVocabSize, Long featureWidth, String tableName, + Boolean allowIdDropping) { + return ConvertToSparseCoreCsrWrappedCooTensor.create(scope, sortedRowIdsList, sortedColIdsList, sortedGainsList, idCountsList, splits, sampleCountPerSc, numReplica, maxMinibatchesPerSc, maxIdsPerChipPerSample, tableVocabSize, featureWidth, tableName, allowIdDropping); + } + + /** + * Performs sparse-output bin counting for a tf.tensor input. + * Counts the number of times each value occurs in the input. + * + * @param values Tensor containing data to count. + * @param weights A Tensor of the same shape as indices containing per-index weight values. May + * also be the empty tensor if no weights are used. 
+ * @param binaryOutput Whether to output the number of occurrences of each value or 1. + * @param options carries optional attribute values + * @param data type for {@code DenseCountSparseOutput} output and operands + * @return a new instance of DenseCountSparseOutput + */ + public DenseCountSparseOutput denseCountSparseOutput( + Operand values, Operand weights, Boolean binaryOutput, + DenseCountSparseOutput.Options... options) { + return DenseCountSparseOutput.create(scope, values, weights, binaryOutput, options); + } + /** * Applies set operation along last dimension of 2 {@code Tensor} inputs. * See SetOperationOp::SetOperationFromContext for values of {@code set_operation}. @@ -158,7 +233,6 @@ public AddSparseToTensorsMap addSparseToTensorsMap(Operand sparseIndices * dimension contains the result of {@code set_operation} applied to the corresponding * {@code [0...n-1]} dimension of {@code set}. * - * @param data type for {@code result_values} output * @param set1 {@code Tensor} with rank {@code n}. 1st {@code n-1} dimensions must be the same as {@code set2}. * Dimension {@code n} contains values in a set, duplicates are allowed but ignored. * @param set2 {@code Tensor} with rank {@code n}. 1st {@code n-1} dimensions must be the same as {@code set1}. @@ -188,7 +262,6 @@ public DenseToDenseSetOperation denseToDenseSetOperation(Op * dimension contains the result of {@code set_operation} applied to the corresponding * {@code [0...n-1]} dimension of {@code set}. * - * @param data type for {@code result_values} output * @param set1 {@code Tensor} with rank {@code n}. 1st {@code n-1} dimensions must be the same as {@code set2}. * Dimension {@code n} contains values in a set, duplicates are allowed but ignored. * @param set2Indices 2D {@code Tensor}, indices of a {@code SparseTensor}. 
Must be in row-major @@ -251,7 +324,6 @@ public DenseToSparseSetOperation denseToSparseSetOperation( * shape = [2 50] * * - * @param data type for {@code sparse_values} output * @param serializedSparse The serialized {@code SparseTensor} objects. The last dimension * must have 3 columns. * @param dtype The {@code dtype} of the serialized {@code SparseTensor} objects. @@ -263,6 +335,29 @@ public DeserializeSparse deserializeSparse( return DeserializeSparse.create(scope, serializedSparse, dtype); } + /** + * The GetStatsFromListOfSparseCoreCooTensors operation + * + * @param rowIdsList The rowIdsList value + * @param colIdsList The colIdsList value + * @param gainsList The gainsList value + * @param sampleCountList The value of the sampleCountList attribute + * @param colOffsetList The value of the colOffsetList attribute + * @param numReplica The value of the numReplica attribute + * @param tableVocabSize The value of the tableVocabSize attribute + * @param featureWidth The value of the featureWidth attribute + * @param numScPerChip The value of the numScPerChip attribute + * @param tableName The value of the tableName attribute + * @return a new instance of GetStatsFromListOfSparseCoreCooTensors + */ + public GetStatsFromListOfSparseCoreCooTensors getStatsFromListOfSparseCoreCooTensors( + Iterable> rowIdsList, Iterable> colIdsList, + Iterable> gainsList, List sampleCountList, List colOffsetList, + Long numReplica, Long tableVocabSize, Long featureWidth, Long numScPerChip, + String tableName) { + return GetStatsFromListOfSparseCoreCooTensors.create(scope, rowIdsList, colIdsList, gainsList, sampleCountList, colOffsetList, numReplica, tableVocabSize, featureWidth, numScPerChip, tableName); + } + /** * Applies a sparse gradient to a given accumulator. 
* Does not add if local_step is smaller than the accumulator's @@ -296,7 +391,6 @@ public SparseAccumulatorApplyGradient sparseAccumulatorApplyGradient(Operand data type for {@code values} output * @param handle The handle to a SparseConditionalAccumulator. * @param numRequired Number of gradients required before we return an aggregate. * @param dtype The data type of accumulated gradients. Needs to correspond to the type @@ -323,7 +417,6 @@ public SparseAccumulatorTakeGradient sparseAccumulatorTakeG * only for a positive value. *

In the following shapes, {@code nnz} is the count after taking {@code thresh} into account. * - * @param data type for {@code sum_values} output * @param aIndices 2-D. The {@code indices} of the first {@code SparseTensor}, size {@code [nnz, ndims]} Matrix. * @param aValues 1-D. The {@code values} of the first {@code SparseTensor}, size {@code [nnz]} Vector. * @param aShape 1-D. The {@code shape} of the first {@code SparseTensor}, size {@code [ndims]} Vector. @@ -348,7 +441,6 @@ public SparseAdd sparseAdd(Operand aIndices, Operan * non-empty values of the sum, and outputs the gradients w.r.t. the non-empty * values of A and B. * - * @param data type for {@code a_val_grad} output * @param backpropValGrad 1-D with shape {@code [nnz(sum)]}. The gradient with respect to * the non-empty values of the sum. * @param aIndices 2-D. The {@code indices} of the {@code SparseTensor} A, size {@code [nnz(A), ndims]}. @@ -372,7 +464,6 @@ public SparseAddGrad sparseAddGrad(Operand backpropValGr * {@code i}. *

Values in {@code arr} outside of the range [0, size) are ignored. * - * @param data type for {@code output} output * @param indices 2D int64 {@code Tensor}. * @param values 1D int {@code Tensor}. * @param denseShape 1D int64 {@code Tensor}. @@ -431,7 +522,6 @@ public SparseBincount sparseBincount( * [b c ] [ ] [b c ] * * - * @param data type for {@code output_values} output * @param indices 2-D. Indices of each input {@code SparseTensor}. * @param values 1-D. Non-empty values of each {@code SparseTensor}. * @param shapes 1-D. Shapes of each {@code SparseTensor}. @@ -465,6 +555,26 @@ public SparseConditionalAccumulator sparseConditionalAccumulat return SparseConditionalAccumulator.create(scope, dtype, shape, options); } + /** + * Performs sparse-output bin counting for a sparse tensor input. + * Counts the number of times each value occurs in the input. + * + * @param indices Tensor containing the indices of the sparse tensor to count. + * @param values Tensor containing values of the sparse tensor to count. + * @param denseShape Tensor containing the dense shape of the sparse tensor to count. + * @param weights A Tensor of the same shape as indices containing per-index weight values. + * May also be the empty tensor if no weights are used. + * @param binaryOutput Whether to output the number of occurrences of each value or 1. + * @param options carries optional attribute values + * @param data type for {@code SparseCountSparseOutput} output and operands + * @return a new instance of SparseCountSparseOutput + */ + public SparseCountSparseOutput sparseCountSparseOutput( + Operand indices, Operand values, Operand denseShape, + Operand weights, Boolean binaryOutput, SparseCountSparseOutput.Options... options) { + return SparseCountSparseOutput.create(scope, indices, values, denseShape, weights, binaryOutput, options); + } + /** * Generates sparse cross from a list of sparse and dense tensors. 
* The op takes two lists, one of 2D {@code SparseTensor} and one of 2D {@code Tensor}, each @@ -582,7 +692,6 @@ public SparseCrossHashed sparseCrossHashed(Iterable> indices, * indices and shape, but possibly with different non-zero values. The output of * this Op is the resultant non-zero values. * - * @param data type for {@code output} output * @param spIndices 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. * @param spValues 1-D. {@code N} non-empty values corresponding to {@code sp_indices}. @@ -601,7 +710,6 @@ public SparseDenseCwiseAdd sparseDenseCwiseAdd(OperandLimitation: this Op only broadcasts the dense side to the sparse side, but not * the other direction. * - * @param data type for {@code output} output * @param spIndices 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. * @param spValues 1-D. {@code N} non-empty values corresponding to {@code sp_indices}. @@ -623,7 +731,6 @@ public SparseDenseCwiseDiv sparseDenseCwiseDiv(OperandLimitation: this Op only broadcasts the dense side to the sparse side, but not * the other direction. * - * @param data type for {@code output} output * @param spIndices 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. * @param spValues 1-D. {@code N} non-empty values corresponding to {@code sp_indices}. @@ -674,7 +781,6 @@ public SparseDenseCwiseMul sparseDenseCwiseMul(Operand * - * @param data type for {@code output_values} output * @param indices 2-D. the indices of the sparse tensor. * @param values 1-D. the values of the sparse tensor. * @param denseShape 1-D. the shape of the sparse tensor. @@ -699,7 +805,6 @@ public SparseFillEmptyRows sparseFillEmptyRows(Operand data type for {@code d_values} output * @param reverseIndexMap 1-D. The reverse index map from SparseFillEmptyRows. * @param gradValues 1-D. 
The gradients from backprop. * @param data type for {@code SparseFillEmptyRowsGrad} output and operands @@ -744,7 +849,6 @@ public SparseMatMul sparseMatMul(Operand a, Operand data type for {@code output} output * @param inputIndices 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. * @param inputValues 1-D. {@code N} non-empty values corresponding to {@code input_indices}. @@ -773,7 +877,6 @@ public SparseReduceMax sparseReduceMax(Operand in * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. * - * @param data type for {@code output_values} output * @param inputIndices 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. * @param inputValues 1-D. {@code N} non-empty values corresponding to {@code input_indices}. @@ -802,7 +905,6 @@ public SparseReduceMaxSparse sparseReduceMaxSparse( * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. * - * @param data type for {@code output} output * @param inputIndices 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. * @param inputValues 1-D. {@code N} non-empty values corresponding to {@code input_indices}. @@ -831,7 +933,6 @@ public SparseReduceSum sparseReduceSum(Operand inpu * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. * - * @param data type for {@code output_values} output * @param inputIndices 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. * @param inputValues 1-D. {@code N} non-empty values corresponding to {@code input_indices}. 
@@ -856,7 +957,6 @@ public SparseReduceSumSparse sparseReduceSumSparse( *

If the tensor has rank {@code R} and {@code N} non-empty values, {@code input_indices} has * shape {@code [N, R]}, input_values has length {@code N}, and input_shape has length {@code R}. * - * @param data type for {@code output_values} output * @param inputIndices 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. * @param inputValues 1-D. {@code N} non-empty values corresponding to {@code input_indices}. @@ -901,7 +1001,6 @@ public SparseReshape sparseReshape(Operand inputIndices, Operand *

Like {@code SegmentMean}, but {@code segment_ids} can have rank less than {@code data}'s first * dimension, selecting a subset of dimension 0, specified by {@code indices}. * - * @param data type for {@code output} output * @param data The data value * @param indices A 1-D tensor. Has same rank as {@code segment_ids}. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. @@ -921,8 +1020,6 @@ public SparseSegmentMean sparseSegmentMean(Operand dat * value is the number of unique indexes in "indices". Also returns vector * "sorted_unique_indices" containing the corresponding indexes from "indices". * - * @param data type for {@code output} output - * @param data type for {@code sorted_unique_indices} output * @param grad gradient propagated to the SparseSegmentMean op. * @param indices indices passed to the corresponding SparseSegmentMean op. * @param segmentIds segment_ids passed to the corresponding SparseSegmentMean op. @@ -945,7 +1042,6 @@ public SparseSegmentMeanGrad sparse * the section on segmentation * for an explanation of segments. * - * @param data type for {@code output} output * @param data The data value * @param indices A 1-D tensor. Has same rank as {@code segment_ids}. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. @@ -965,7 +1061,6 @@ public SparseSegmentMeanWithNumSegments sparseSegmentMean * N is the size of the segment being reduced. *

See {@code tf.sparse.segment_sum} for usage examples. * - * @param data type for {@code output} output * @param data The data value * @param indices A 1-D tensor. Has same rank as {@code segment_ids}. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. @@ -985,8 +1080,6 @@ public SparseSegmentSqrtN sparseSegmentSqrtN(Operand d * value is the number of unique indexes in "indices". Also returns vector * "sorted_unique_indices" containing the corresponding indexes from "indices". * - * @param data type for {@code output} output - * @param data type for {@code sorted_unique_indices} output * @param grad gradient propagated to the SparseSegmentSqrtN op. * @param indices indices passed to the corresponding SparseSegmentSqrtN op. * @param segmentIds segment_ids passed to the corresponding SparseSegmentSqrtN op. @@ -1010,7 +1103,6 @@ public SparseSegmentSqrtNGrad spars * the section on segmentation * for an explanation of segments. * - * @param data type for {@code output} output * @param data The data value * @param indices A 1-D tensor. Has same rank as {@code segment_ids}. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. @@ -1055,7 +1147,6 @@ public SparseSegmentSqrtNWithNumSegments sparseSegmentSqr * tf.segment_sum(c, tf.constant([0, 0, 1])) * * - * @param data type for {@code output} output * @param data The data value * @param indices A 1-D tensor. Has same rank as {@code segment_ids}. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. @@ -1075,8 +1166,6 @@ public SparseSegmentSum sparseSegmentSum(Operand data, * value is the number of unique indexes in "indices". Also returns vector * "sorted_unique_indices" containing the corresponding indexes from "indices". * - * @param data type for {@code output} output - * @param data type for {@code sorted_unique_indices} output * @param grad gradient propagated to the SparseSegmentSum op. 
* @param indices indices passed to the corresponding SparseSegmentSum op. * @param segmentIds segment_ids passed to the corresponding SparseSegmentSum op. @@ -1118,7 +1207,6 @@ public SparseSegmentSumGrad sparseS * # [ 0 0 0 0]] * * - * @param data type for {@code output} output * @param data The data value * @param indices A 1-D tensor. Has same rank as {@code segment_ids}. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. @@ -1152,7 +1240,6 @@ public SparseSegmentSumWithNumSegments sparseSegmentSumWi * [ ] * * - * @param data type for {@code output_values} output * @param indices 2-D tensor represents the indices of the sparse tensor. * @param values 1-D tensor represents the values of the sparse tensor. * @param shape 1-D. tensor represents the shape of the sparse tensor. @@ -1174,7 +1261,6 @@ public SparseSlice sparseSlice(Operand indices, Ope * the sliced {@code SparseTensor}, and outputs the gradients w.r.t. * the non-empty values of input {@code SparseTensor}. * - * @param data type for {@code val_grad} output * @param backpropValGrad 1-D. The gradient with respect to * the non-empty values of the sliced {@code SparseTensor}. * @param inputIndices 2-D. The {@code indices} of the input {@code SparseTensor}. @@ -1203,7 +1289,6 @@ public SparseSliceGrad sparseSliceGrad(Operand backpropV *

Hence, the {@code SparseTensor} result has exactly the same non-zero indices and * shape. * - * @param data type for {@code output} output * @param spIndices 2-D. {@code NNZ x R} matrix with the indices of non-empty values in a * SparseTensor, in canonical ordering. * @param spValues 1-D. {@code NNZ} non-empty values corresponding to {@code sp_indices}. @@ -1220,7 +1305,6 @@ public SparseSoftmax sparseSoftmax(Operand spIndi * Returns the element-wise max of two SparseTensors. * Assumes the two SparseTensors have the same shape, i.e., no broadcasting. * - * @param data type for {@code output_values} output * @param aIndices 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, in the canonical lexicographic ordering. * @param aValues 1-D. {@code N} non-empty values corresponding to {@code a_indices}. @@ -1241,7 +1325,6 @@ public SparseSparseMaximum sparseSparseMaximum(Operand data type for {@code output_values} output * @param aIndices 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, in the canonical lexicographic ordering. * @param aValues 1-D. {@code N} non-empty values corresponding to {@code a_indices}. @@ -1279,7 +1362,6 @@ public SparseSparseMinimum sparseSparseMinimum(Operand * - * @param data type for {@code output_values} output * @param splitDim 0-D. The dimension along which to split. Must be in the range * {@code [0, rank(shape))}. * @param indices 2-D tensor represents the indices of the sparse tensor. @@ -1300,7 +1382,6 @@ public SparseSplit sparseSplit(Operand splitDim, * Adds up a {@code SparseTensor} and a dense {@code Tensor}, producing a dense {@code Tensor}. * This Op does not require {@code a_indices} be sorted in standard lexicographic order. * - * @param data type for {@code output} output * @param aIndices 2-D. The {@code indices} of the {@code SparseTensor}, with shape {@code [nnz, ndims]}. * @param aValues 1-D. 
The {@code values} of the {@code SparseTensor}, with shape {@code [nnz]}. * @param aShape 1-D. The {@code shape} of the {@code SparseTensor}, with shape {@code [ndims]}. @@ -1325,7 +1406,6 @@ public SparseTensorDenseAdd sparseTensor * A should be sorted in order of increasing dimension 1 (i.e., "column major" * order instead of "row major" order). * - * @param data type for {@code product} output * @param aIndices 2-D. The {@code indices} of the {@code SparseTensor}, size {@code [nnz, 2]} Matrix. * @param aValues 1-D. The {@code values} of the {@code SparseTensor}, size {@code [nnz]} Vector. * @param aShape 1-D. The {@code shape} of the {@code SparseTensor}, size {@code [2]} Vector. @@ -1359,7 +1439,6 @@ public SparseTensorDenseMatMul sparseTensorDenseMatMul( * contain any repeats. If {@code validate_indices} is true, these properties * are checked during execution. * - * @param data type for {@code dense} output * @param sparseIndices 0-D, 1-D, or 2-D. {@code sparse_indices[i]} contains the complete * index where {@code sparse_values[i]} will be placed. * @param outputShape 1-D. Shape of the dense output tensor. @@ -1399,7 +1478,6 @@ public SparseToDense sparseToDense( * dimension contains the result of {@code set_operation} applied to the corresponding * {@code [0...n-1]} dimension of {@code set}. * - * @param data type for {@code result_values} output * @param set1Indices 2D {@code Tensor}, indices of a {@code SparseTensor}. Must be in row-major * order. * @param set1Values 1D {@code Tensor}, values of a {@code SparseTensor}. Must be in row-major @@ -1469,7 +1547,6 @@ public SparseToSparseSetOperation sparseToSparseSetOperatio * shape = [2 50] * * - * @param data type for {@code sparse_values} output * @param sparseHandles 1-D, The {@code N} serialized {@code SparseTensor} objects. * Shape: {@code [N]}. 
* @param dtype The {@code dtype} of the {@code SparseTensor} objects stored in the diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/StringsOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/StringsOps.java index 99c49b96686..56a82c2dbf6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/StringsOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/StringsOps.java @@ -24,6 +24,8 @@ import org.tensorflow.op.strings.ReduceJoin; import org.tensorflow.op.strings.RegexFullMatch; import org.tensorflow.op.strings.RegexReplace; +import org.tensorflow.op.strings.StaticRegexFullMatch; +import org.tensorflow.op.strings.StaticRegexReplace; import org.tensorflow.op.strings.StringFormat; import org.tensorflow.op.strings.StringLength; import org.tensorflow.op.strings.StringNGrams; @@ -34,18 +36,23 @@ import org.tensorflow.op.strings.ToHashBucketFast; import org.tensorflow.op.strings.ToHashBucketStrong; import org.tensorflow.op.strings.ToNumber; +import org.tensorflow.op.strings.UnicodeDecode; +import org.tensorflow.op.strings.UnicodeDecodeWithOffsets; +import org.tensorflow.op.strings.UnicodeEncode; import org.tensorflow.op.strings.UnicodeScript; import org.tensorflow.op.strings.UnicodeTranscode; +import org.tensorflow.op.strings.UnsortedSegmentJoin; import org.tensorflow.op.strings.Upper; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; +import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; import org.tensorflow.types.family.TNumber; /** * An API for building {@code strings} operations as {@link Op Op}s * - * @see {@link Ops} + * @see Ops */ public final class StringsOps { private final Scope scope; @@ -181,6 +188,37 @@ public RegexReplace regexReplace(Operand input, Operand patter return RegexReplace.create(scope, input, pattern, rewrite, options); } + /** + * Check if the input matches the regex 
pattern. + * The input is a string tensor of any shape. The pattern is the + * regular expression to be matched with every element of the input tensor. + * The boolean values (True or False) of the output tensor indicate + * if the input matches the regex pattern provided. + *

The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) + * + * @param input A string tensor of the text to be processed. + * @param pattern The regular expression to match the input. + * @return a new instance of StaticRegexFullMatch + */ + public StaticRegexFullMatch staticRegexFullMatch(Operand input, String pattern) { + return StaticRegexFullMatch.create(scope, input, pattern); + } + + /** + * Replaces the match of pattern in input with rewrite. + * It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) + * + * @param input The text to be processed. + * @param pattern The regular expression to match the input. + * @param rewrite The rewrite to be applied to the matched expression. + * @param options carries optional attribute values + * @return a new instance of StaticRegexReplace + */ + public StaticRegexReplace staticRegexReplace(Operand input, String pattern, + String rewrite, StaticRegexReplace.Options... options) { + return StaticRegexReplace.create(scope, input, pattern, rewrite, options); + } + /** * Formats a string template using a list of tensors. * Formats a string template using a list of tensors, pretty-printing tensor summaries. @@ -222,7 +260,6 @@ public StringLength stringLength(Operand input, StringLength.Options... * strings and outputs a ragged tensor with 1 ragged dimension containing ngrams * of that string, joined along the innermost axis. * - * @param data type for {@code ngrams_splits} output * @param data The values tensor of the ragged string tensor to make ngrams out of. Must be a * 1D string tensor. * @param dataSplits The splits tensor of the ragged string tensor to make ngrams out of. 
@@ -472,7 +509,6 @@ public ToHashBucketStrong toHashBucketStrong(Operand input, Long numBuc * * * - * @param data type for {@code output} output * @param stringTensor The stringTensor value * @return a new instance of ToNumber, with default output types */ @@ -495,7 +531,6 @@ public ToNumber toNumber(Operand stringTensor) { * * * - * @param data type for {@code output} output * @param stringTensor The stringTensor value * @param outType The numeric type to interpret each string in {@code string_tensor} as. * @param data type for {@code StringToNumber} output and operands @@ -505,6 +540,160 @@ public ToNumber toNumber(Operand stringTensor, C return ToNumber.create(scope, stringTensor, outType); } + /** + * Decodes each string in {@code input} into a sequence of Unicode code points. + * The character codepoints for all strings are returned using a single vector + * {@code char_values}, with strings expanded to characters in row-major order. + *

The {@code row_splits} tensor indicates where the codepoints for + * each input string begin and end within the {@code char_values} tensor. + * In particular, the values for the {@code i}th + * string (in row-major order) are stored in the slice + * {@code [row_splits[i]:row_splits[i+1]]}. Thus: + *

    + *
  • {@code char_values[row_splits[i]+j]} is the Unicode codepoint for the {@code j}th + * character in the {@code i}th string (in row-major order).
  • + *
  • {@code row_splits[i+1] - row_splits[i]} is the number of characters in the {@code i}th + * string (in row-major order).
  • + *
+ * + * @param input The text to be decoded. Can have any shape. Note that the output is flattened + * to a vector of char values. + * @param inputEncoding Text encoding of the input strings. This is any of the encodings supported + * by ICU ucnv algorithmic converters. Examples: {@code "UTF-16", "US ASCII", "UTF-8"}. + * @param options carries optional attribute values + * @return a new instance of UnicodeDecode, with default output types + */ + public UnicodeDecode unicodeDecode(Operand input, String inputEncoding, + UnicodeDecode.Options[] options) { + return UnicodeDecode.create(scope, input, inputEncoding, options); + } + + /** + * Decodes each string in {@code input} into a sequence of Unicode code points. + * The character codepoints for all strings are returned using a single vector + * {@code char_values}, with strings expanded to characters in row-major order. + *

The {@code row_splits} tensor indicates where the codepoints for + * each input string begin and end within the {@code char_values} tensor. + * In particular, the values for the {@code i}th + * string (in row-major order) are stored in the slice + * {@code [row_splits[i]:row_splits[i+1]]}. Thus: + *

    + *
  • {@code char_values[row_splits[i]+j]} is the Unicode codepoint for the {@code j}th + * character in the {@code i}th string (in row-major order).
  • + *
  • {@code row_splits[i+1] - row_splits[i]} is the number of characters in the {@code i}th + * string (in row-major order).
  • + *
+ * + * @param input The text to be decoded. Can have any shape. Note that the output is flattened + * to a vector of char values. + * @param inputEncoding Text encoding of the input strings. This is any of the encodings supported + * by ICU ucnv algorithmic converters. Examples: {@code "UTF-16", "US ASCII", "UTF-8"}. + * @param Tsplits The value of the Tsplits attribute + * @param options carries optional attribute values + * @param data type for {@code UnicodeDecode} output and operands + * @return a new instance of UnicodeDecode + */ + public UnicodeDecode unicodeDecode(Operand input, + String inputEncoding, Class Tsplits, UnicodeDecode.Options... options) { + return UnicodeDecode.create(scope, input, inputEncoding, Tsplits, options); + } + + /** + * Decodes each string in {@code input} into a sequence of Unicode code points. + * The character codepoints for all strings are returned using a single vector + * {@code char_values}, with strings expanded to characters in row-major order. + * Similarly, the character start byte offsets are returned using a single vector + * {@code char_to_byte_starts}, with strings expanded in row-major order. + *

The {@code row_splits} tensor indicates where the codepoints and start offsets for + * each input string begin and end within the {@code char_values} and + * {@code char_to_byte_starts} tensors. In particular, the values for the {@code i}th + * string (in row-major order) are stored in the slice + * {@code [row_splits[i]:row_splits[i+1]]}. Thus: + *

    + *
  • {@code char_values[row_splits[i]+j]} is the Unicode codepoint for the {@code j}th + * character in the {@code i}th string (in row-major order).
  • + *
  • {@code char_to_bytes_starts[row_splits[i]+j]} is the start byte offset for the {@code j}th + * character in the {@code i}th string (in row-major order).
  • + *
  • {@code row_splits[i+1] - row_splits[i]} is the number of characters in the {@code i}th + * string (in row-major order).
  • + *
+ * + * @param input The text to be decoded. Can have any shape. Note that the output is flattened + * to a vector of char values. + * @param inputEncoding Text encoding of the input strings. This is any of the encodings supported + * by ICU ucnv algorithmic converters. Examples: {@code "UTF-16", "US ASCII", "UTF-8"}. + * @param options carries optional attribute values + * @return a new instance of UnicodeDecodeWithOffsets, with default output types + */ + public UnicodeDecodeWithOffsets unicodeDecodeWithOffsets(Operand input, + String inputEncoding, UnicodeDecodeWithOffsets.Options[] options) { + return UnicodeDecodeWithOffsets.create(scope, input, inputEncoding, options); + } + + /** + * Decodes each string in {@code input} into a sequence of Unicode code points. + * The character codepoints for all strings are returned using a single vector + * {@code char_values}, with strings expanded to characters in row-major order. + * Similarly, the character start byte offsets are returned using a single vector + * {@code char_to_byte_starts}, with strings expanded in row-major order. + *

The {@code row_splits} tensor indicates where the codepoints and start offsets for + * each input string begin and end within the {@code char_values} and + * {@code char_to_byte_starts} tensors. In particular, the values for the {@code i}th + * string (in row-major order) are stored in the slice + * {@code [row_splits[i]:row_splits[i+1]]}. Thus: + *

    + *
  • {@code char_values[row_splits[i]+j]} is the Unicode codepoint for the {@code j}th + * character in the {@code i}th string (in row-major order).
  • + *
  • {@code char_to_bytes_starts[row_splits[i]+j]} is the start byte offset for the {@code j}th + * character in the {@code i}th string (in row-major order).
  • + *
  • {@code row_splits[i+1] - row_splits[i]} is the number of characters in the {@code i}th + * string (in row-major order).
  • + *
+ * + * @param input The text to be decoded. Can have any shape. Note that the output is flattened + * to a vector of char values. + * @param inputEncoding Text encoding of the input strings. This is any of the encodings supported + * by ICU ucnv algorithmic converters. Examples: {@code "UTF-16", "US ASCII", "UTF-8"}. + * @param Tsplits The value of the Tsplits attribute + * @param options carries optional attribute values + * @param data type for {@code UnicodeDecodeWithOffsets} output and operands + * @return a new instance of UnicodeDecodeWithOffsets + */ + public UnicodeDecodeWithOffsets unicodeDecodeWithOffsets( + Operand input, String inputEncoding, Class Tsplits, + UnicodeDecodeWithOffsets.Options... options) { + return UnicodeDecodeWithOffsets.create(scope, input, inputEncoding, Tsplits, options); + } + + /** + * Encode a tensor of ints into unicode strings. + * Returns a vector of strings, where {@code output[i]} is constructed by encoding the + * Unicode codepoints in {@code input_values[input_splits[i]:input_splits[i+1]]} + * using {@code output_encoding}. + *
+ *

Example: + *

+   *  input_values = [72, 101, 108, 108, 111, 87, 111, 114, 108, 100]
+   *  input_splits = [0, 5, 10]
+   *  output_encoding = 'UTF-8'
+   *
+   *  output = ['Hello', 'World']
+   *  
+ * + * @param inputValues A 1D tensor containing the unicode codepoints that should be encoded. + * @param inputSplits A 1D tensor specifying how the unicode codepoints should be split into strings. + * In particular, {@code output[i]} is constructed by encoding the codepoints in the + * slice {@code input_values[input_splits[i]:input_splits[i+1]]}. + * @param outputEncoding Unicode encoding of the output strings. Valid encodings are: {@code "UTF-8", "UTF-16-BE", and "UTF-32-BE"}. + * @param options carries optional attribute values + * @return a new instance of UnicodeEncode + */ + public UnicodeEncode unicodeEncode(Operand inputValues, + Operand inputSplits, String outputEncoding, + UnicodeEncode.Options... options) { + return UnicodeEncode.create(scope, inputValues, inputSplits, outputEncoding, options); + } + /** * Determine the script codes of a given tensor of Unicode integer code points. * This operation converts Unicode code points to script codes corresponding to @@ -585,6 +774,21 @@ public UnicodeTranscode unicodeTranscode(Operand input, String inputEnc return UnicodeTranscode.create(scope, input, inputEncoding, outputEncoding, options); } + /** + * The UnsortedSegmentJoin operation + * + * @param inputs The inputs value + * @param segmentIds The segmentIds value + * @param numSegments The numSegments value + * @param options carries optional attribute values + * @return a new instance of UnsortedSegmentJoin + */ + public UnsortedSegmentJoin unsortedSegmentJoin(Operand inputs, + Operand segmentIds, Operand numSegments, + UnsortedSegmentJoin.Options... options) { + return UnsortedSegmentJoin.create(scope, inputs, segmentIds, numSegments, options); + } + /** * Converts all lowercase characters into their respective uppercase replacements. 
* Example: diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/SummaryOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/SummaryOps.java index eae152ec2a8..d7690d11d71 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/SummaryOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/SummaryOps.java @@ -19,20 +19,37 @@ import org.tensorflow.Operand; import org.tensorflow.op.summary.AudioSummary; +import org.tensorflow.op.summary.CloseSummaryWriter; +import org.tensorflow.op.summary.CreateSummaryDbWriter; +import org.tensorflow.op.summary.CreateSummaryFileWriter; +import org.tensorflow.op.summary.FlushSummaryWriter; import org.tensorflow.op.summary.HistogramSummary; import org.tensorflow.op.summary.ImageSummary; +import org.tensorflow.op.summary.ImportEvent; import org.tensorflow.op.summary.MergeSummary; import org.tensorflow.op.summary.ScalarSummary; +import org.tensorflow.op.summary.StatsAggregatorSummary; +import org.tensorflow.op.summary.SummaryWriter; import org.tensorflow.op.summary.TensorSummary; +import org.tensorflow.op.summary.WriteAudioSummary; +import org.tensorflow.op.summary.WriteGraphSummary; +import org.tensorflow.op.summary.WriteHistogramSummary; +import org.tensorflow.op.summary.WriteImageSummary; +import org.tensorflow.op.summary.WriteRawProtoSummary; +import org.tensorflow.op.summary.WriteScalarSummary; +import org.tensorflow.op.summary.WriteSummary; import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; +import org.tensorflow.types.TUint8; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; /** * An API for building {@code summary} operations as {@link Op Op}s * - * @see {@link Ops} + * @see Ops */ public final class SummaryOps { private final Scope scope; @@ -68,6 +85,58 @@ public 
AudioSummary audioSummary(Operand tag, Operand tensor, return AudioSummary.create(scope, tag, tensor, sampleRate, options); } + /** + * The CloseSummaryWriter operation + * + * @param writer The writer value + * @return a new instance of CloseSummaryWriter + */ + public CloseSummaryWriter closeSummaryWriter(Operand writer) { + return CloseSummaryWriter.create(scope, writer); + } + + /** + * The CreateSummaryDbWriter operation + * + * @param writer The writer value + * @param dbUri The dbUri value + * @param experimentName The experimentName value + * @param runName The runName value + * @param userName The userName value + * @return a new instance of CreateSummaryDbWriter + */ + public CreateSummaryDbWriter createSummaryDbWriter(Operand writer, + Operand dbUri, Operand experimentName, Operand runName, + Operand userName) { + return CreateSummaryDbWriter.create(scope, writer, dbUri, experimentName, runName, userName); + } + + /** + * The CreateSummaryFileWriter operation + * + * @param writer The writer value + * @param logdir The logdir value + * @param maxQueue The maxQueue value + * @param flushMillis The flushMillis value + * @param filenameSuffix The filenameSuffix value + * @return a new instance of CreateSummaryFileWriter + */ + public CreateSummaryFileWriter createSummaryFileWriter(Operand writer, + Operand logdir, Operand maxQueue, Operand flushMillis, + Operand filenameSuffix) { + return CreateSummaryFileWriter.create(scope, writer, logdir, maxQueue, flushMillis, filenameSuffix); + } + + /** + * The FlushSummaryWriter operation + * + * @param writer The writer value + * @return a new instance of FlushSummaryWriter + */ + public FlushSummaryWriter flushSummaryWriter(Operand writer) { + return FlushSummaryWriter.create(scope, writer); + } + /** * Outputs a {@code Summary} protocol buffer with a histogram. 
* The generated @@ -133,6 +202,17 @@ public ImageSummary imageSummary(Operand tag, Operand writer, Operand event) { + return ImportEvent.create(scope, writer, event); + } + /** * Merges summaries. * This op creates a @@ -163,6 +243,26 @@ public ScalarSummary scalarSummary(Operand tags, Operand iterator) { + return StatsAggregatorSummary.create(scope, iterator); + } + + /** + * The SummaryWriter operation + * + * @param options carries optional attribute values + * @return a new instance of SummaryWriter + */ + public SummaryWriter summaryWriter(SummaryWriter.Options... options) { + return SummaryWriter.create(scope, options); + } + /** * Outputs a {@code Summary} protocol buffer with a tensor and per-plugin data. * @@ -177,6 +277,118 @@ public TensorSummary tensorSummary(Operand tag, Operand writer, Operand step, + Operand tag, Operand tensor, Operand sampleRate, + WriteAudioSummary.Options... options) { + return WriteAudioSummary.create(scope, writer, step, tag, tensor, sampleRate, options); + } + + /** + * Writes a graph summary. + * Writes TensorFlow graph {@code tensor} at {@code step} using summary {@code writer}. + * + * @param writer The writer value + * @param step The step value + * @param tensor The tensor value + * @return a new instance of WriteGraphSummary + */ + public WriteGraphSummary writeGraphSummary(Operand writer, Operand step, + Operand tensor) { + return WriteGraphSummary.create(scope, writer, step, tensor); + } + + /** + * Writes a histogram summary. + * Writes histogram {@code values} at {@code step} with {@code tag} using summary {@code writer}. 
+ * + * @param writer The writer value + * @param step The step value + * @param tag The tag value + * @param values The values value + * @return a new instance of WriteHistogramSummary + */ + public WriteHistogramSummary writeHistogramSummary(Operand writer, + Operand step, Operand tag, Operand values) { + return WriteHistogramSummary.create(scope, writer, step, tag, values); + } + + /** + * Writes an image summary. + * Writes image {@code tensor} at {@code step} with {@code tag} using summary {@code writer}. + * {@code tensor} is image with shape [height, width, channels]. + * + * @param writer The writer value + * @param step The step value + * @param tag The tag value + * @param tensor The tensor value + * @param badColor The badColor value + * @param options carries optional attribute values + * @return a new instance of WriteImageSummary + */ + public WriteImageSummary writeImageSummary(Operand writer, Operand step, + Operand tag, Operand tensor, Operand badColor, + WriteImageSummary.Options... options) { + return WriteImageSummary.create(scope, writer, step, tag, tensor, badColor, options); + } + + /** + * Writes a serialized proto summary. + * Writes {@code tensor}, a serialized proto at {@code step} using summary {@code writer}. + * + * @param writer The writer value + * @param step The step value + * @param tensor The tensor value + * @return a new instance of WriteRawProtoSummary + */ + public WriteRawProtoSummary writeRawProtoSummary(Operand writer, + Operand step, Operand tensor) { + return WriteRawProtoSummary.create(scope, writer, step, tensor); + } + + /** + * Writes a scalar summary. + * Writes scalar {@code value} at {@code step} with {@code tag} using summary {@code writer}. 
+ * + * @param writer The writer value + * @param step The step value + * @param tag The tag value + * @param value The value value + * @return a new instance of WriteScalarSummary + */ + public WriteScalarSummary writeScalarSummary(Operand writer, + Operand step, Operand tag, Operand value) { + return WriteScalarSummary.create(scope, writer, step, tag, value); + } + + /** + * Writes a tensor summary. + * Writes {@code tensor} at {@code step} with {@code tag} using summary {@code writer}. + * + * @param writer The writer value + * @param step The step value + * @param tensor The tensor value + * @param tag The tag value + * @param summaryMetadata The summaryMetadata value + * @return a new instance of WriteSummary + */ + public WriteSummary writeSummary(Operand writer, Operand step, + Operand tensor, Operand tag, Operand summaryMetadata) { + return WriteSummary.create(scope, writer, step, tensor, tag, summaryMetadata); + } + /** * Get the parent {@link Ops} object. */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/TpuOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/TpuOps.java index 83e6f9d60b0..f6ea8e12178 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/TpuOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/TpuOps.java @@ -21,30 +21,115 @@ import org.tensorflow.ConcreteFunction; import org.tensorflow.Operand; import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.tpu.AllToAll; import org.tensorflow.op.tpu.CollateTPUEmbeddingMemory; +import org.tensorflow.op.tpu.CompilationResult; import org.tensorflow.op.tpu.Compile; import org.tensorflow.op.tpu.CompileSucceededAssert; import org.tensorflow.op.tpu.ConfigureAndInitializeGlobalTPU; +import org.tensorflow.op.tpu.ConfigureDistributedTPU; +import org.tensorflow.op.tpu.ConfigureTPUEmbedding; import org.tensorflow.op.tpu.ConfigureTPUEmbeddingHost; import 
org.tensorflow.op.tpu.ConfigureTPUEmbeddingMemory; import org.tensorflow.op.tpu.ConnectTPUEmbeddingHosts; +import org.tensorflow.op.tpu.ConvertToCooTensor; +import org.tensorflow.op.tpu.CrossReplicaSum; import org.tensorflow.op.tpu.DTensorRestore; +import org.tensorflow.op.tpu.DynamicEnqueueTPUEmbeddingArbitraryTensorBatch; +import org.tensorflow.op.tpu.DynamicEnqueueTPUEmbeddingRaggedTensorBatch; +import org.tensorflow.op.tpu.EmbeddingActivations; +import org.tensorflow.op.tpu.EnqueueTPUEmbeddingArbitraryTensorBatch; +import org.tensorflow.op.tpu.EnqueueTPUEmbeddingBatch; +import org.tensorflow.op.tpu.EnqueueTPUEmbeddingIntegerBatch; +import org.tensorflow.op.tpu.EnqueueTPUEmbeddingRaggedTensorBatch; +import org.tensorflow.op.tpu.EnqueueTPUEmbeddingSparseBatch; +import org.tensorflow.op.tpu.EnqueueTPUEmbeddingSparseTensorBatch; import org.tensorflow.op.tpu.Execute; import org.tensorflow.op.tpu.ExecuteAndUpdateVariables; import org.tensorflow.op.tpu.ExecuteTPUEmbeddingPartitioner; import org.tensorflow.op.tpu.FinalizeTPUEmbedding; +import org.tensorflow.op.tpu.GetMinibatchSplitsWithPhysicalReplica; +import org.tensorflow.op.tpu.GetMinibatchesInCsrWithPhysicalReplica; +import org.tensorflow.op.tpu.GetTpuTaskId; +import org.tensorflow.op.tpu.GlobalIterId; +import org.tensorflow.op.tpu.InfeedDequeue; +import org.tensorflow.op.tpu.InfeedDequeueTuple; +import org.tensorflow.op.tpu.InfeedEnqueue; +import org.tensorflow.op.tpu.InfeedEnqueuePrelinearizedBuffer; +import org.tensorflow.op.tpu.InfeedEnqueueTuple; +import org.tensorflow.op.tpu.IsTPUEmbeddingInitialized; +import org.tensorflow.op.tpu.LoadAllTPUEmbeddingParameters; +import org.tensorflow.op.tpu.LoadTPUEmbeddingADAMParameters; +import org.tensorflow.op.tpu.LoadTPUEmbeddingAdadeltaParameters; +import org.tensorflow.op.tpu.LoadTPUEmbeddingAdagradMomentumParameters; +import org.tensorflow.op.tpu.LoadTPUEmbeddingAdagradParameters; +import org.tensorflow.op.tpu.LoadTPUEmbeddingCenteredRMSPropParameters; +import 
org.tensorflow.op.tpu.LoadTPUEmbeddingFTRLParameters; +import org.tensorflow.op.tpu.LoadTPUEmbeddingFrequencyEstimatorParameters; +import org.tensorflow.op.tpu.LoadTPUEmbeddingMDLAdagradLightParameters; +import org.tensorflow.op.tpu.LoadTPUEmbeddingMomentumParameters; +import org.tensorflow.op.tpu.LoadTPUEmbeddingProximalAdagradParameters; +import org.tensorflow.op.tpu.LoadTPUEmbeddingProximalYogiParameters; +import org.tensorflow.op.tpu.LoadTPUEmbeddingRMSPropParameters; +import org.tensorflow.op.tpu.LoadTPUEmbeddingStochasticGradientDescentParameters; +import org.tensorflow.op.tpu.MergeDedupData; +import org.tensorflow.op.tpu.OrdinalSelector; +import org.tensorflow.op.tpu.OutfeedDequeue; +import org.tensorflow.op.tpu.OutfeedDequeueTuple; +import org.tensorflow.op.tpu.OutfeedDequeueTupleV2; +import org.tensorflow.op.tpu.OutfeedDequeueV2; +import org.tensorflow.op.tpu.OutfeedEnqueue; +import org.tensorflow.op.tpu.OutfeedEnqueueTuple; +import org.tensorflow.op.tpu.PartitionedCall; +import org.tensorflow.op.tpu.PartitionedInput; import org.tensorflow.op.tpu.PartitionedOutput; +import org.tensorflow.op.tpu.Prelinearize; +import org.tensorflow.op.tpu.PrelinearizeTuple; +import org.tensorflow.op.tpu.RecvTPUEmbeddingActivations; +import org.tensorflow.op.tpu.ReplicateMetadata; +import org.tensorflow.op.tpu.ReplicatedInput; +import org.tensorflow.op.tpu.ReplicatedOutput; +import org.tensorflow.op.tpu.RetrieveAllTPUEmbeddingParameters; +import org.tensorflow.op.tpu.RetrieveTPUEmbeddingADAMParameters; +import org.tensorflow.op.tpu.RetrieveTPUEmbeddingAdadeltaParameters; +import org.tensorflow.op.tpu.RetrieveTPUEmbeddingAdagradMomentumParameters; +import org.tensorflow.op.tpu.RetrieveTPUEmbeddingAdagradParameters; +import org.tensorflow.op.tpu.RetrieveTPUEmbeddingCenteredRMSPropParameters; +import org.tensorflow.op.tpu.RetrieveTPUEmbeddingFTRLParameters; +import org.tensorflow.op.tpu.RetrieveTPUEmbeddingFrequencyEstimatorParameters; +import 
org.tensorflow.op.tpu.RetrieveTPUEmbeddingMDLAdagradLightParameters; +import org.tensorflow.op.tpu.RetrieveTPUEmbeddingMomentumParameters; +import org.tensorflow.op.tpu.RetrieveTPUEmbeddingProximalAdagradParameters; +import org.tensorflow.op.tpu.RetrieveTPUEmbeddingProximalYogiParameters; +import org.tensorflow.op.tpu.RetrieveTPUEmbeddingRMSPropParameters; +import org.tensorflow.op.tpu.RetrieveTPUEmbeddingStochasticGradientDescentParameters; +import org.tensorflow.op.tpu.SendTPUEmbeddingGradients; +import org.tensorflow.op.tpu.ShutdownDistributedTPU; import org.tensorflow.op.tpu.ShutdownTPUSystem; +import org.tensorflow.op.tpu.SplitDedupData; +import org.tensorflow.op.tpu.StoreMinibatchStatisticsInFdo; +import org.tensorflow.op.tpu.TPUAnnotateTensorsWithDynamicShape; +import org.tensorflow.op.tpu.TPUCompilationResult; +import org.tensorflow.op.tpu.TPUCopyWithDynamicShape; +import org.tensorflow.op.tpu.TPUEmbeddingActivations; +import org.tensorflow.op.tpu.TPUReplicateMetadata; +import org.tensorflow.op.tpu.TPUReplicatedInput; +import org.tensorflow.op.tpu.TPUReplicatedOutput; +import org.tensorflow.op.tpu.TPUReshardVariables; import org.tensorflow.op.tpu.TPURoundRobin; import org.tensorflow.op.tpu.TpuHandleToProtoKey; +import org.tensorflow.op.tpu.WorkerHeartbeat; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; +import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; /** * An API for building {@code tpu} operations as {@link Op Op}s * - * @see {@link Ops} + * @see Ops */ public final class TpuOps { private final Scope scope; @@ -56,6 +141,38 @@ public final class TpuOps { this.ops = ops; } + /** + * An Op to exchange data across TPU replicas. + * On each replica, the input is split into {@code split_count} blocks along + * {@code split_dimension} and send to the other replicas given group_assignment. 
After + * receiving {@code split_count} - 1 blocks from other replicas, we concatenate the + * blocks along {@code concat_dimension} as the output. + *

For example, suppose there are 2 TPU replicas: + * replica 0 receives input: {@code [[A, B]]} + * replica 1 receives input: {@code [[C, D]]} + *

group_assignment={@code [[0, 1]]} + * concat_dimension=0 + * split_dimension=1 + * split_count=2 + *

replica 0's output: {@code [[A], [C]]} + * replica 1's output: {@code [[B], [D]]} + * + * @param input The local input to the sum. + * @param groupAssignment An int32 tensor with shape + * [num_groups, num_replicas_per_group]. {@code group_assignment[i]} represents the + * replica ids in the ith subgroup. + * @param concatDimension The dimension number to concatenate. + * @param splitDimension The dimension number to split. + * @param splitCount The number of splits, this number must equal to the sub-group + * size(group_assignment.get_shape()[1]) + * @param data type for {@code AllToAll} output and operands + * @return a new instance of AllToAll + */ + public AllToAll allToAll(Operand input, Operand groupAssignment, + Long concatDimension, Long splitDimension, Long splitCount) { + return AllToAll.create(scope, input, groupAssignment, concatDimension, splitDimension, splitCount); + } + /** * An op that merges the string-encoded memory config protos from all hosts. * @@ -68,6 +185,18 @@ public CollateTPUEmbeddingMemory collateTPUEmbeddingMemory( return CollateTPUEmbeddingMemory.create(scope, memoryConfigs); } + /** + * Returns the result of a TPU compilation. + * This operation returns the result of a TPU compilation as a serialized + * CompilationResultProto, which holds a status and an error message if an error + * occurred during compilation. + * + * @return a new instance of CompilationResult + */ + public CompilationResult compilationResult() { + return CompilationResult.create(scope); + } + /** * Compiles a computations for execution on one or more TPU devices. * For the internal use of the distributed TPU compiler. @@ -122,6 +251,28 @@ public ConfigureAndInitializeGlobalTPU configureAndInitializeGlobalTPU( return ConfigureAndInitializeGlobalTPU.create(scope, options); } + /** + * Sets up the centralized structures for a distributed TPU system. 
+ * + * @param options carries optional attribute values + * @return a new instance of ConfigureDistributedTPU + */ + public ConfigureDistributedTPU configureDistributedTPU( + ConfigureDistributedTPU.Options... options) { + return ConfigureDistributedTPU.create(scope, options); + } + + /** + * Sets up TPUEmbedding in a distributed TPU system. + * + * @param config Serialized tensorflow.tpu.TPUEmbeddingConfiguration that + * describes the embedding lookups of the program. + * @return a new instance of ConfigureTPUEmbedding + */ + public ConfigureTPUEmbedding configureTPUEmbedding(String config) { + return ConfigureTPUEmbedding.create(scope, config); + } + /** * An op that configures the TPUEmbedding software on a host. * @@ -163,6 +314,41 @@ public ConnectTPUEmbeddingHosts connectTPUEmbeddingHosts( return ConnectTPUEmbeddingHosts.create(scope, networkConfigs); } + /** + * The ConvertToCooTensor operation + * + * @param indicesOrRowSplits The indicesOrRowSplits value + * @param values The values value + * @param weights The weights value + * @param sampleCount The value of the sampleCount attribute + * @param combiner The value of the combiner attribute + * @return a new instance of ConvertToCooTensor + */ + public ConvertToCooTensor convertToCooTensor(Operand indicesOrRowSplits, + Operand values, Operand weights, Long sampleCount, String combiner) { + return ConvertToCooTensor.create(scope, indicesOrRowSplits, values, weights, sampleCount, combiner); + } + + /** + * An Op to sum inputs across replicated TPU instances. + * Each instance supplies its own input. + *

For example, suppose there are 8 TPU instances: {@code [A, B, C, D, E, F, G, H]}. + * Passing group_assignment={@code [[0,2,4,6],[1,3,5,7]]} sets {@code A, C, E, G} as group 0, + * and {@code B, D, F, H} as group 1. Thus we get the outputs: + * {@code [A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H]}. + * + * @param input The local input to the sum. + * @param groupAssignment An int32 tensor with shape + * [num_groups, num_replicas_per_group]. {@code group_assignment[i]} represents the + * replica ids in the ith subgroup. + * @param data type for {@code CrossReplicaSum} output and operands + * @return a new instance of CrossReplicaSum + */ + public CrossReplicaSum crossReplicaSum(Operand input, + Operand groupAssignment) { + return CrossReplicaSum.create(scope, input, groupAssignment); + } + /** * The DTensorRestoreV2 operation * @@ -180,6 +366,285 @@ public DTensorRestore dTensorRestore(Operand prefix, Operand t return DTensorRestore.create(scope, prefix, tensorNames, shapeAndSlices, inputShapes, inputLayouts, dtypes); } + /** + * Eases the porting of code that uses tf.nn.embedding_lookup_sparse(). + * embedding_indices[i] and aggregation_weights[i] correspond + * to the ith feature. + *

The tensors at corresponding positions in the three input lists (sample_indices, + * embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1 + * with dim_size() equal to the total number of lookups into the table described by + * the corresponding feature. + * + * @param sampleIndicesOrRowSplits A list of rank 2 Tensors specifying the training example to which the + * corresponding embedding_indices and aggregation_weights values belong. + * If the size of its first dimension is 0, we assume each embedding_indices + * belongs to a different sample. Both int32 and int64 are allowed and will + * be converted to int32 internally. + *

Or a list of rank 1 Tensors specifying the row splits for splitting + * embedding_indices and aggregation_weights into rows. It corresponds to + * ids.row_splits in embedding_lookup(), when ids is a RaggedTensor. When + * enqueuing N-D ragged tensor, only the last dimension is allowed to be ragged. + * the row splits is 1-D dense tensor. When empty, we assume a dense tensor is + * passed to the op Both int32 and int64 are allowed and will be converted to + * int32 internally. + * @param embeddingIndices A list of rank 1 Tensors, indices into the embedding + * tables. Both int32 and int64 are allowed and will be converted to + * int32 internally. + * @param aggregationWeights A list of rank 1 Tensors containing per training + * example aggregation weights. Both float32 and float64 are allowed and will + * be converted to float32 internally. + * @param modeOverride A string input that overrides the mode specified in the + * TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', + * 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set + * in TPUEmbeddingConfiguration is used, otherwise mode_override is used. + * @param deviceOrdinal The TPU device to use. Should be >= 0 and less than the number + * of TPU cores in the task on which the node is placed. + * @param options carries optional attribute values + * @return a new instance of DynamicEnqueueTPUEmbeddingArbitraryTensorBatch + */ + public DynamicEnqueueTPUEmbeddingArbitraryTensorBatch dynamicEnqueueTPUEmbeddingArbitraryTensorBatch( + Iterable> sampleIndicesOrRowSplits, + Iterable> embeddingIndices, + Iterable> aggregationWeights, Operand modeOverride, + Operand deviceOrdinal, + DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.Options... 
options) { + return DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.create(scope, sampleIndicesOrRowSplits, embeddingIndices, aggregationWeights, modeOverride, deviceOrdinal, options); + } + + /** + * The DynamicEnqueueTPUEmbeddingRaggedTensorBatch operation + * + * @param sampleSplits The sampleSplits value + * @param embeddingIndices The embeddingIndices value + * @param aggregationWeights The aggregationWeights value + * @param modeOverride The modeOverride value + * @param deviceOrdinal The deviceOrdinal value + * @param tableIds The value of the tableIds attribute + * @param options carries optional attribute values + * @return a new instance of DynamicEnqueueTPUEmbeddingRaggedTensorBatch + */ + public DynamicEnqueueTPUEmbeddingRaggedTensorBatch dynamicEnqueueTPUEmbeddingRaggedTensorBatch( + Iterable> sampleSplits, + Iterable> embeddingIndices, + Iterable> aggregationWeights, Operand modeOverride, + Operand deviceOrdinal, List tableIds, + DynamicEnqueueTPUEmbeddingRaggedTensorBatch.Options... options) { + return DynamicEnqueueTPUEmbeddingRaggedTensorBatch.create(scope, sampleSplits, embeddingIndices, aggregationWeights, modeOverride, deviceOrdinal, tableIds, options); + } + + /** + * An op enabling differentiation of TPU Embeddings. + * This op simply returns its first input, which is assumed to have been sliced + * from the Tensors returned by TPUEmbeddingDequeueActivations. The presence of + * this op, and its first argument being a trainable Variable, enables automatic + * differentiation of graphs containing embeddings via the TPU Embedding Python + * libraries. + * + * @param embeddingVariable A trainable variable, enabling optimizers to find this op. + * @param slicedActivations The embedding activations Tensor to return. + * @param tableId The id of the table in the embedding layer configuration from which + * these activations were computed. + * @param lookupId Identifier of the set of embedding indices which produced these + * activations. 
+ * @return a new instance of EmbeddingActivations + */ + public EmbeddingActivations embeddingActivations(Operand embeddingVariable, + Operand slicedActivations, Long tableId, Long lookupId) { + return EmbeddingActivations.create(scope, embeddingVariable, slicedActivations, tableId, lookupId); + } + + /** + * Eases the porting of code that uses tf.nn.embedding_lookup_sparse(). + * embedding_indices[i] and aggregation_weights[i] correspond + * to the ith feature. + *

The tensors at corresponding positions in the three input lists (sample_indices, + * embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1 + * with dim_size() equal to the total number of lookups into the table described by + * the corresponding feature. + * + * @param sampleIndicesOrRowSplits A list of rank 2 Tensors specifying the training example to which the + * corresponding embedding_indices and aggregation_weights values belong. + * If the size of its first dimension is 0, we assume each embedding_indices + * belongs to a different sample. Both int32 and int64 are allowed and will + * be converted to int32 internally. + *

Or a list of rank 1 Tensors specifying the row splits for splitting + * embedding_indices and aggregation_weights into rows. It corresponds to + * ids.row_splits in embedding_lookup(), when ids is a RaggedTensor. When + * enqueuing N-D ragged tensor, only the last dimension is allowed to be ragged. + * the row splits is 1-D dense tensor. When empty, we assume a dense tensor is + * passed to the op Both int32 and int64 are allowed and will be converted to + * int32 internally. + * @param embeddingIndices A list of rank 1 Tensors, indices into the embedding + * tables. Both int32 and int64 are allowed and will be converted to + * int32 internally. + * @param aggregationWeights A list of rank 1 Tensors containing per training + * example aggregation weights. Both float32 and float64 are allowed and will + * be converted to float32 internally. + * @param modeOverride A string input that overrides the mode specified in the + * TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', + * 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set + * in TPUEmbeddingConfiguration is used, otherwise mode_override is used. + * @param options carries optional attribute values + * @return a new instance of EnqueueTPUEmbeddingArbitraryTensorBatch + */ + public EnqueueTPUEmbeddingArbitraryTensorBatch enqueueTPUEmbeddingArbitraryTensorBatch( + Iterable> sampleIndicesOrRowSplits, + Iterable> embeddingIndices, + Iterable> aggregationWeights, Operand modeOverride, + EnqueueTPUEmbeddingArbitraryTensorBatch.Options... options) { + return EnqueueTPUEmbeddingArbitraryTensorBatch.create(scope, sampleIndicesOrRowSplits, embeddingIndices, aggregationWeights, modeOverride, options); + } + + /** + * An op that enqueues a list of input batch tensors to TPUEmbedding. + * An op that enqueues a list of input batch tensors to TPUEmbedding. 
+ * + * @param batch A list of 1D tensors, one for each embedding table, containing the + * batch inputs encoded as dist_belief.SparseFeatures protos. If the weight + * field in the SparseFeatures proto is not populated for an ID, a weight of + * 1.0 is assumed. + * @param modeOverride A string input that overrides the mode specified in the + * TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', + * 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set + * in TPUEmbeddingConfiguration is used, otherwise mode_override is used. + * @param options carries optional attribute values + * @return a new instance of EnqueueTPUEmbeddingBatch + */ + public EnqueueTPUEmbeddingBatch enqueueTPUEmbeddingBatch(Iterable> batch, + Operand modeOverride, EnqueueTPUEmbeddingBatch.Options... options) { + return EnqueueTPUEmbeddingBatch.create(scope, batch, modeOverride, options); + } + + /** + * An op that enqueues a list of input batch tensors to TPUEmbedding. + * + * @param batch A list of 1D tensors, one for each embedding table, containing the + * indices into the tables. + * @param modeOverride A string input that overrides the mode specified in the + * TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', + * 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set + * in TPUEmbeddingConfiguration is used, otherwise mode_override is used. + * @param options carries optional attribute values + * @return a new instance of EnqueueTPUEmbeddingIntegerBatch + */ + public EnqueueTPUEmbeddingIntegerBatch enqueueTPUEmbeddingIntegerBatch( + Iterable> batch, Operand modeOverride, + EnqueueTPUEmbeddingIntegerBatch.Options... options) { + return EnqueueTPUEmbeddingIntegerBatch.create(scope, batch, modeOverride, options); + } + + /** + * Eases the porting of code that uses tf.nn.embedding_lookup(). + * sample_splits[i], embedding_indices[i] and aggregation_weights[i] correspond + * to the ith feature. 
table_ids[i] indicates which embedding table to look up ith + * feature. + *

The tensors at corresponding positions in two of the input lists, + * embedding_indices and aggregation_weights, must have the same shape, i.e. rank 1 + * with dim_size() equal to the total number of lookups into the table described by + * the corresponding feature. + * + * @param sampleSplits A list of rank 1 Tensors specifying the break points for splitting + * embedding_indices and aggregation_weights into rows. + * It corresponds to ids.row_splits in embedding_lookup(), when ids is a + * RaggedTensor. + * @param embeddingIndices A list of rank 1 Tensors, indices into the embedding tables. + * It corresponds to ids.values in embedding_lookup(), when ids is a RaggedTensor. + * @param aggregationWeights A list of rank 1 Tensors containing per training example + * aggregation weights. It corresponds to the values field of a RaggedTensor + * with the same row_splits as ids in embedding_lookup(), when ids is a + * RaggedTensor. + * @param modeOverride A string input that overrides the mode specified in the + * TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', + * 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set + * in TPUEmbeddingConfiguration is used, otherwise mode_override is used. + * @param tableIds A list of integers specifying the identifier of the embedding table + * (offset of TableDescriptor in the TPUEmbeddingConfiguration) to lookup the + * corresponding input. The ith input is looked up using table_ids[i]. The size + * of the table_ids list must be equal to that of sample_indices, + * embedding_indices and aggregation_weights. 
+ * @param options carries optional attribute values + * @return a new instance of EnqueueTPUEmbeddingRaggedTensorBatch + */ + public EnqueueTPUEmbeddingRaggedTensorBatch enqueueTPUEmbeddingRaggedTensorBatch( + Iterable> sampleSplits, + Iterable> embeddingIndices, + Iterable> aggregationWeights, Operand modeOverride, + List tableIds, EnqueueTPUEmbeddingRaggedTensorBatch.Options... options) { + return EnqueueTPUEmbeddingRaggedTensorBatch.create(scope, sampleSplits, embeddingIndices, aggregationWeights, modeOverride, tableIds, options); + } + + /** + * An op that enqueues TPUEmbedding input indices from a SparseTensor. + * This Op eases the porting of code that uses embedding_lookup_sparse(), + * although some Python preprocessing of the SparseTensor arguments to + * embedding_lookup_sparse() is required to produce the arguments to this Op, + * since only a single EnqueueTPUEmbeddingSparseBatch Op is allowed per training + * step. + *

The tensors at corresponding positions in the three input lists + * must have the same shape, i.e. rank 1 with dim_size() equal to the total + * number of lookups into the table described by the corresponding table_id. + * + * @param sampleIndices A list of rank 1 Tensors specifying the training example and + * feature to which the corresponding embedding_indices and aggregation_weights + * values belong. sample_indices[i] must equal b * nf + f, where nf is the + * number of features from the corresponding table, f is in [0, nf), and + * b is in [0, batch size). + * @param embeddingIndices A list of rank 1 Tensors, indices into the embedding tables. + * @param aggregationWeights A list of rank 1 Tensors containing per sample -- i.e. per + * (training example, feature) -- aggregation weights. + * @param modeOverride A string input that overrides the mode specified in the + * TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', + * 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set + * in TPUEmbeddingConfiguration is used, otherwise mode_override is used. + * @param options carries optional attribute values + * @return a new instance of EnqueueTPUEmbeddingSparseBatch + */ + public EnqueueTPUEmbeddingSparseBatch enqueueTPUEmbeddingSparseBatch( + Iterable> sampleIndices, + Iterable> embeddingIndices, + Iterable> aggregationWeights, Operand modeOverride, + EnqueueTPUEmbeddingSparseBatch.Options... options) { + return EnqueueTPUEmbeddingSparseBatch.create(scope, sampleIndices, embeddingIndices, aggregationWeights, modeOverride, options); + } + + /** + * Eases the porting of code that uses tf.nn.embedding_lookup_sparse(). + * sample_indices[i], embedding_indices[i] and aggregation_weights[i] correspond + * to the ith feature. table_ids[i] indicates which embedding table to look up ith + * feature. + *

The tensors at corresponding positions in the three input lists (sample_indices, + * embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1 + * with dim_size() equal to the total number of lookups into the table described by + * the corresponding feature. + * + * @param sampleIndices A list of rank 1 Tensors specifying the training example to + * which the corresponding embedding_indices and aggregation_weights values + * belong. It corresponds to sp_ids.indices[:,0] in embedding_lookup_sparse(). + * @param embeddingIndices A list of rank 1 Tensors, indices into the embedding tables. + * It corresponds to sp_ids.values in embedding_lookup_sparse(). + * @param aggregationWeights A list of rank 1 Tensors containing per training example + * aggregation weights. It corresponds to sp_weights.values in + * embedding_lookup_sparse(). + * @param modeOverride A string input that overrides the mode specified in the + * TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', + * 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set + * in TPUEmbeddingConfiguration is used, otherwise mode_override is used. + * @param tableIds A list of integers specifying the identifier of the embedding table + * (offset of TableDescriptor in the TPUEmbeddingConfiguration) to lookup the + * corresponding input. The ith input is looked up using table_ids[i]. The size + * of the table_ids list must be equal to that of sample_indices, + * embedding_indices and aggregation_weights. + * @param options carries optional attribute values + * @return a new instance of EnqueueTPUEmbeddingSparseTensorBatch + */ + public EnqueueTPUEmbeddingSparseTensorBatch enqueueTPUEmbeddingSparseTensorBatch( + Iterable> sampleIndices, + Iterable> embeddingIndices, + Iterable> aggregationWeights, Operand modeOverride, + List tableIds, EnqueueTPUEmbeddingSparseTensorBatch.Options... 
options) { + return EnqueueTPUEmbeddingSparseTensorBatch.create(scope, sampleIndices, embeddingIndices, aggregationWeights, modeOverride, tableIds, options); + } + /** * Op that loads and executes a TPU program on a TPU device. * For the internal use of the distributed TPU compiler. @@ -244,11 +709,646 @@ public FinalizeTPUEmbedding finalizeTPUEmbedding(Operand commonConfig, return FinalizeTPUEmbedding.create(scope, commonConfig, memoryConfig); } + /** + * The GetMinibatchSplitsWithPhysicalReplica operation + * + * @param programKey The programKey value + * @param rowIds The rowIds value + * @param colIds The colIds value + * @param gains The gains value + * @param sampleCount The value of the sampleCount attribute + * @param numReplica The value of the numReplica attribute + * @param tableVocabSize The value of the tableVocabSize attribute + * @param featureWidth The value of the featureWidth attribute + * @param numScPerChip The value of the numScPerChip attribute + * @param tableName The value of the tableName attribute + * @param miniBatchSplits The value of the miniBatchSplits attribute + * @return a new instance of GetMinibatchSplitsWithPhysicalReplica + */ + public GetMinibatchSplitsWithPhysicalReplica getMinibatchSplitsWithPhysicalReplica( + Operand programKey, Operand rowIds, Operand colIds, + Operand gains, Long sampleCount, Long numReplica, Long tableVocabSize, + Long featureWidth, Long numScPerChip, String tableName, String miniBatchSplits) { + return GetMinibatchSplitsWithPhysicalReplica.create(scope, programKey, rowIds, colIds, gains, sampleCount, numReplica, tableVocabSize, featureWidth, numScPerChip, tableName, miniBatchSplits); + } + + /** + * The GetMinibatchesInCsrWithPhysicalReplica operation + * + * @param programKey The programKey value + * @param rowIds The rowIds value + * @param colIds The colIds value + * @param gains The gains value + * @param splits The splits value + * @param idCounts The idCounts value + * @param sampleCount The value 
of the sampleCount attribute + * @param numReplica The value of the numReplica attribute + * @param maxMinibatchesPerSc The value of the maxMinibatchesPerSc attribute + * @param maxIdsPerChipPerSample The value of the maxIdsPerChipPerSample attribute + * @param tableVocabSize The value of the tableVocabSize attribute + * @param featureWidth The value of the featureWidth attribute + * @param numScPerChip The value of the numScPerChip attribute + * @param tableName The value of the tableName attribute + * @param miniBatchInCsr The value of the miniBatchInCsr attribute + * @return a new instance of GetMinibatchesInCsrWithPhysicalReplica + */ + public GetMinibatchesInCsrWithPhysicalReplica getMinibatchesInCsrWithPhysicalReplica( + Operand programKey, Operand rowIds, Operand colIds, + Operand gains, Operand splits, Operand idCounts, Long sampleCount, + Long numReplica, Long maxMinibatchesPerSc, Long maxIdsPerChipPerSample, Long tableVocabSize, + Long featureWidth, Long numScPerChip, String tableName, String miniBatchInCsr) { + return GetMinibatchesInCsrWithPhysicalReplica.create(scope, programKey, rowIds, colIds, gains, splits, idCounts, sampleCount, numReplica, maxMinibatchesPerSc, maxIdsPerChipPerSample, tableVocabSize, featureWidth, numScPerChip, tableName, miniBatchInCsr); + } + + /** + * An op returns the TPU task ID from TPU topology. + * This op is to return the TPU task ID from TPU topology. + * + * @return a new instance of GetTpuTaskId + */ + public GetTpuTaskId getTpuTaskId() { + return GetTpuTaskId.create(scope); + } + + /** + * The GlobalIterId operation + * + * @return a new instance of GlobalIterId + */ + public GlobalIterId globalIterId() { + return GlobalIterId.create(scope); + } + + /** + * A placeholder op for a value that will be fed into the computation. + * + * @param dtype The type of elements in the tensor. + * @param shape The shape of the tensor. 
+ * @param data type for {@code InfeedDequeue} output and operands + * @return a new instance of InfeedDequeue + */ + public InfeedDequeue infeedDequeue(Class dtype, Shape shape) { + return InfeedDequeue.create(scope, dtype, shape); + } + + /** + * Fetches multiple values from infeed as an XLA tuple. + * + * @param dtypes The element types of each element in {@code outputs}. + * @param shapes The shapes of each tensor in {@code outputs}. + * @return a new instance of InfeedDequeueTuple + */ + public InfeedDequeueTuple infeedDequeueTuple(List> dtypes, + List shapes) { + return InfeedDequeueTuple.create(scope, dtypes, shapes); + } + + /** + * An op which feeds a single Tensor value into the computation. + * + * @param input A tensor that will be provided using the infeed mechanism. + * @param options carries optional attribute values + * @return a new instance of InfeedEnqueue + */ + public InfeedEnqueue infeedEnqueue(Operand input, + InfeedEnqueue.Options... options) { + return InfeedEnqueue.create(scope, input, options); + } + + /** + * An op which enqueues prelinearized buffer into TPU infeed. + * + * @param input A variant tensor representing linearized output. + * @param options carries optional attribute values + * @return a new instance of InfeedEnqueuePrelinearizedBuffer + */ + public InfeedEnqueuePrelinearizedBuffer infeedEnqueuePrelinearizedBuffer( + Operand input, InfeedEnqueuePrelinearizedBuffer.Options... options) { + return InfeedEnqueuePrelinearizedBuffer.create(scope, input, options); + } + + /** + * Feeds multiple Tensor values into the computation as an XLA tuple. + * + * @param inputs A list of tensors that will be provided using the infeed mechanism. + * @param shapes The shapes of each tensor in {@code inputs}. + * @param options carries optional attribute values + * @return a new instance of InfeedEnqueueTuple + */ + public InfeedEnqueueTuple infeedEnqueueTuple(Iterable> inputs, List shapes, + InfeedEnqueueTuple.Options... 
options) { + return InfeedEnqueueTuple.create(scope, inputs, shapes, options); + } + + /** + * Whether TPU Embedding is initialized in a distributed TPU system. + * + * @param options carries optional attribute values + * @return a new instance of IsTPUEmbeddingInitialized + */ + public IsTPUEmbeddingInitialized isTPUEmbeddingInitialized( + IsTPUEmbeddingInitialized.Options... options) { + return IsTPUEmbeddingInitialized.create(scope, options); + } + + /** + * An op that loads optimization parameters into embedding memory. + * An op that loads optimization parameters into embedding memory. Must be + * preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding + * table configuration. For example, this op is used to install parameters that are + * loaded from a checkpoint before a training loop is executed. For Adagrad, + * auxiliary1 should be the accumulators. For SGD, all of the auxiliary* values + * should be empty. For FTRL, auxiliary1 should be the accumulators and auxiliary2 + * should be the linear terms. For ADAM, auxiliary1 should be the momenta and + * auxiliary2 should be the velocities. + * + * @param parameters A list of tensors, one for each embedding table, + * containing the initial embedding table parameters to use in embedding + * lookups. + * @param auxiliary1 A list of tensors, one for each embedding table, containing the + * initial values of the first auxiliary optimization parameter to use in embedding + * training loop updates. The shape of each entry is ignored (and thus can be + * empty) for those tables whose optimization algorithms do not have at least one + * auxiliary parameter. + * @param auxiliary2 A list of tensors, one for each embedding table, containing the + * initial values of the second auxiliary optimization parameter to use in + * embedding training loop updates. 
The shape of each entry is ignored (and thus + * can be empty) for those tables whose optimization algorithms do not have at + * least two auxiliary + * @param auxiliary3 A list of tensors, one for each embedding table, containing the + * initial values of the third auxiliary optimization parameter to use in embedding + * training loop updates. The shape of each entry is ignored (and thus can be + * empty) for those tables whose optimization algorithms do not have three + * auxiliary parameters. + * @param auxiliary4 A list of tensors, one for each embedding table, containing the + * initial values of the second auxiliary optimization parameter to use in + * embedding training loop updates. The shape of each entry is ignored (and thus + * can be empty) for those tables whose optimization algorithms do not have at + * least four auxiliary + * @param auxiliary5 A list of tensors, one for each embedding table, containing the + * initial values of the third auxiliary optimization parameter to use in embedding + * training loop updates. The shape of each entry is ignored (and thus can be + * empty) for those tables whose optimization algorithms do not have five + * auxiliary parameters. + * @param auxiliary6 A list of tensors, one for each embedding table, containing the + * initial values of the second auxiliary optimization parameter to use in + * embedding training loop updates. The shape of each entry is ignored (and thus + * can be empty) for those tables whose optimization algorithms do not have at + * least six auxiliary + * @param auxiliary7 A list of tensors, one for each embedding table, containing the + * initial values of the third auxiliary optimization parameter to use in embedding + * training loop updates. The shape of each entry is ignored (and thus can be + * empty) for those tables whose optimization algorithms do not have sevan + * auxiliary parameters. 
+ * @param config An TPUEmbeddingConfiguration proto describing the + * table parameters being loaded, serialized to a string. + * @param numShards Number of shards into which the embedding tables are divided. + * @param shardId Identifier of shard for this operation. + * @return a new instance of LoadAllTPUEmbeddingParameters + */ + public LoadAllTPUEmbeddingParameters loadAllTPUEmbeddingParameters( + Iterable> parameters, Iterable> auxiliary1, + Iterable> auxiliary2, Iterable> auxiliary3, + Iterable> auxiliary4, Iterable> auxiliary5, + Iterable> auxiliary6, Iterable> auxiliary7, String config, + Long numShards, Long shardId) { + return LoadAllTPUEmbeddingParameters.create(scope, parameters, auxiliary1, auxiliary2, auxiliary3, auxiliary4, auxiliary5, auxiliary6, auxiliary7, config, numShards, shardId); + } + + /** + * Load ADAM embedding parameters. + * An op that loads optimization parameters into HBM for embedding. Must be + * preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + * embedding table configuration. For example, this op is used to install + * parameters that are loaded from a checkpoint before a training loop is + * executed. + * + * @param parameters Value of parameters used in the ADAM optimization algorithm. + * @param momenta Value of momenta used in the ADAM optimization algorithm. + * @param velocities Value of velocities used in the ADAM optimization algorithm. + * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of LoadTPUEmbeddingADAMParameters + */ + public LoadTPUEmbeddingADAMParameters loadTPUEmbeddingADAMParameters(Operand parameters, + Operand momenta, Operand velocities, Long numShards, Long shardId, + LoadTPUEmbeddingADAMParameters.Options... 
options) { + return LoadTPUEmbeddingADAMParameters.create(scope, parameters, momenta, velocities, numShards, shardId, options); + } + + /** + * Load Adadelta embedding parameters. + * An op that loads optimization parameters into HBM for embedding. Must be + * preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + * embedding table configuration. For example, this op is used to install + * parameters that are loaded from a checkpoint before a training loop is + * executed. + * + * @param parameters Value of parameters used in the Adadelta optimization algorithm. + * @param accumulators Value of accumulators used in the Adadelta optimization algorithm. + * @param updates Value of updates used in the Adadelta optimization algorithm. + * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of LoadTPUEmbeddingAdadeltaParameters + */ + public LoadTPUEmbeddingAdadeltaParameters loadTPUEmbeddingAdadeltaParameters( + Operand parameters, Operand accumulators, Operand updates, + Long numShards, Long shardId, LoadTPUEmbeddingAdadeltaParameters.Options... options) { + return LoadTPUEmbeddingAdadeltaParameters.create(scope, parameters, accumulators, updates, numShards, shardId, options); + } + + /** + * Load Adagrad Momentum embedding parameters. + * An op that loads optimization parameters into HBM for embedding. Must be + * preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + * embedding table configuration. For example, this op is used to install + * parameters that are loaded from a checkpoint before a training loop is + * executed. + * + * @param parameters Value of parameters used in the Adagrad Momentum optimization algorithm. + * @param accumulators Value of accumulators used in the Adagrad Momentum optimization algorithm. 
+ * @param momenta Value of momenta used in the Adagrad Momentum optimization algorithm. + * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of LoadTPUEmbeddingAdagradMomentumParameters + */ + public LoadTPUEmbeddingAdagradMomentumParameters loadTPUEmbeddingAdagradMomentumParameters( + Operand parameters, Operand accumulators, Operand momenta, + Long numShards, Long shardId, LoadTPUEmbeddingAdagradMomentumParameters.Options... options) { + return LoadTPUEmbeddingAdagradMomentumParameters.create(scope, parameters, accumulators, momenta, numShards, shardId, options); + } + + /** + * Load Adagrad embedding parameters. + * An op that loads optimization parameters into HBM for embedding. Must be + * preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + * embedding table configuration. For example, this op is used to install + * parameters that are loaded from a checkpoint before a training loop is + * executed. + * + * @param parameters Value of parameters used in the Adagrad optimization algorithm. + * @param accumulators Value of accumulators used in the Adagrad optimization algorithm. + * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of LoadTPUEmbeddingAdagradParameters + */ + public LoadTPUEmbeddingAdagradParameters loadTPUEmbeddingAdagradParameters( + Operand parameters, Operand accumulators, Long numShards, Long shardId, + LoadTPUEmbeddingAdagradParameters.Options... options) { + return LoadTPUEmbeddingAdagradParameters.create(scope, parameters, accumulators, numShards, shardId, options); + } + + /** + * Load centered RMSProp embedding parameters. + * An op that loads optimization parameters into HBM for embedding. 
Must be + * preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + * embedding table configuration. For example, this op is used to install + * parameters that are loaded from a checkpoint before a training loop is + * executed. + * + * @param parameters Value of parameters used in the centered RMSProp optimization algorithm. + * @param ms Value of ms used in the centered RMSProp optimization algorithm. + * @param mom Value of mom used in the centered RMSProp optimization algorithm. + * @param mg Value of mg used in the centered RMSProp optimization algorithm. + * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of LoadTPUEmbeddingCenteredRMSPropParameters + */ + public LoadTPUEmbeddingCenteredRMSPropParameters loadTPUEmbeddingCenteredRMSPropParameters( + Operand parameters, Operand ms, Operand mom, + Operand mg, Long numShards, Long shardId, + LoadTPUEmbeddingCenteredRMSPropParameters.Options... options) { + return LoadTPUEmbeddingCenteredRMSPropParameters.create(scope, parameters, ms, mom, mg, numShards, shardId, options); + } + + /** + * Load FTRL embedding parameters. + * An op that loads optimization parameters into HBM for embedding. Must be + * preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + * embedding table configuration. For example, this op is used to install + * parameters that are loaded from a checkpoint before a training loop is + * executed. + * + * @param parameters Value of parameters used in the FTRL optimization algorithm. + * @param accumulators Value of accumulators used in the FTRL optimization algorithm. + * @param linears Value of linears used in the FTRL optimization algorithm. 
+ * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of LoadTPUEmbeddingFTRLParameters + */ + public LoadTPUEmbeddingFTRLParameters loadTPUEmbeddingFTRLParameters(Operand parameters, + Operand accumulators, Operand linears, Long numShards, Long shardId, + LoadTPUEmbeddingFTRLParameters.Options... options) { + return LoadTPUEmbeddingFTRLParameters.create(scope, parameters, accumulators, linears, numShards, shardId, options); + } + + /** + * Load frequency estimator embedding parameters. + * An op that loads optimization parameters into HBM for embedding. Must be + * preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + * embedding table configuration. For example, this op is used to install + * parameters that are loaded from a checkpoint before a training loop is + * executed. + * + * @param parameters Value of parameters used in the frequency estimator optimization algorithm. + * @param lastHitStep Value of last_hit_step used in the frequency estimator optimization algorithm. + * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of LoadTPUEmbeddingFrequencyEstimatorParameters + */ + public LoadTPUEmbeddingFrequencyEstimatorParameters loadTPUEmbeddingFrequencyEstimatorParameters( + Operand parameters, Operand lastHitStep, Long numShards, Long shardId, + LoadTPUEmbeddingFrequencyEstimatorParameters.Options... options) { + return LoadTPUEmbeddingFrequencyEstimatorParameters.create(scope, parameters, lastHitStep, numShards, shardId, options); + } + + /** + * Load MDL Adagrad Light embedding parameters. + * An op that loads optimization parameters into HBM for embedding. 
Must be + * preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + * embedding table configuration. For example, this op is used to install + * parameters that are loaded from a checkpoint before a training loop is + * executed. + * + * @param parameters Value of parameters used in the MDL Adagrad Light optimization algorithm. + * @param accumulators Value of accumulators used in the MDL Adagrad Light optimization algorithm. + * @param weights Value of weights used in the MDL Adagrad Light optimization algorithm. + * @param benefits Value of benefits used in the MDL Adagrad Light optimization algorithm. + * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of LoadTPUEmbeddingMDLAdagradLightParameters + */ + public LoadTPUEmbeddingMDLAdagradLightParameters loadTPUEmbeddingMDLAdagradLightParameters( + Operand parameters, Operand accumulators, Operand weights, + Operand benefits, Long numShards, Long shardId, + LoadTPUEmbeddingMDLAdagradLightParameters.Options... options) { + return LoadTPUEmbeddingMDLAdagradLightParameters.create(scope, parameters, accumulators, weights, benefits, numShards, shardId, options); + } + + /** + * Load Momentum embedding parameters. + * An op that loads optimization parameters into HBM for embedding. Must be + * preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + * embedding table configuration. For example, this op is used to install + * parameters that are loaded from a checkpoint before a training loop is + * executed. + * + * @param parameters Value of parameters used in the Momentum optimization algorithm. + * @param momenta Value of momenta used in the Momentum optimization algorithm. 
+ * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of LoadTPUEmbeddingMomentumParameters + */ + public LoadTPUEmbeddingMomentumParameters loadTPUEmbeddingMomentumParameters( + Operand parameters, Operand momenta, Long numShards, Long shardId, + LoadTPUEmbeddingMomentumParameters.Options... options) { + return LoadTPUEmbeddingMomentumParameters.create(scope, parameters, momenta, numShards, shardId, options); + } + + /** + * Load proximal Adagrad embedding parameters. + * An op that loads optimization parameters into HBM for embedding. Must be + * preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + * embedding table configuration. For example, this op is used to install + * parameters that are loaded from a checkpoint before a training loop is + * executed. + * + * @param parameters Value of parameters used in the proximal Adagrad optimization algorithm. + * @param accumulators Value of accumulators used in the proximal Adagrad optimization algorithm. + * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of LoadTPUEmbeddingProximalAdagradParameters + */ + public LoadTPUEmbeddingProximalAdagradParameters loadTPUEmbeddingProximalAdagradParameters( + Operand parameters, Operand accumulators, Long numShards, Long shardId, + LoadTPUEmbeddingProximalAdagradParameters.Options... 
options) { + return LoadTPUEmbeddingProximalAdagradParameters.create(scope, parameters, accumulators, numShards, shardId, options); + } + + /** + * The LoadTPUEmbeddingProximalYogiParameters operation + * + * @param parameters The parameters value + * @param v The v value + * @param m The m value + * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of LoadTPUEmbeddingProximalYogiParameters + */ + public LoadTPUEmbeddingProximalYogiParameters loadTPUEmbeddingProximalYogiParameters( + Operand parameters, Operand v, Operand m, Long numShards, + Long shardId, LoadTPUEmbeddingProximalYogiParameters.Options... options) { + return LoadTPUEmbeddingProximalYogiParameters.create(scope, parameters, v, m, numShards, shardId, options); + } + + /** + * Load RMSProp embedding parameters. + * An op that loads optimization parameters into HBM for embedding. Must be + * preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + * embedding table configuration. For example, this op is used to install + * parameters that are loaded from a checkpoint before a training loop is + * executed. + * + * @param parameters Value of parameters used in the RMSProp optimization algorithm. + * @param ms Value of ms used in the RMSProp optimization algorithm. + * @param mom Value of mom used in the RMSProp optimization algorithm. + * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of LoadTPUEmbeddingRMSPropParameters + */ + public LoadTPUEmbeddingRMSPropParameters loadTPUEmbeddingRMSPropParameters( + Operand parameters, Operand ms, Operand mom, Long numShards, + Long shardId, LoadTPUEmbeddingRMSPropParameters.Options... 
options) { + return LoadTPUEmbeddingRMSPropParameters.create(scope, parameters, ms, mom, numShards, shardId, options); + } + + /** + * Load SGD embedding parameters. + * An op that loads optimization parameters into HBM for embedding. Must be + * preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + * embedding table configuration. For example, this op is used to install + * parameters that are loaded from a checkpoint before a training loop is + * executed. + * + * @param parameters Value of parameters used in the stochastic gradient descent optimization algorithm. + * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of LoadTPUEmbeddingStochasticGradientDescentParameters + */ + public LoadTPUEmbeddingStochasticGradientDescentParameters loadTPUEmbeddingStochasticGradientDescentParameters( + Operand parameters, Long numShards, Long shardId, + LoadTPUEmbeddingStochasticGradientDescentParameters.Options... options) { + return LoadTPUEmbeddingStochasticGradientDescentParameters.create(scope, parameters, numShards, shardId, options); + } + + /** + * An op merges elements of integer and float tensors into deduplication data as + * XLA tuple. + * This op merges outputs of SplitDedupDataOp, which gives two 1-D tensors, integer + * and floating point. With respect to tuple_mask, this op merges values of these + * two tensors into an XLA tuple, which should be as same as input to + * SplitDedupDataOp. + * + * @param integerTensor A 1-D integer tensor, includes integer elements of deduplication data tuple. + * @param floatTensor A 1-D float tensor, includes float elements of deduplication data tuple. + * @param tupleMask A serialized TensorProto string of output tuple mask. This mask is a 2-D tensor, + * with first column as tuple element type, and second column as span of this type. 
+ * For example, an output tuple of (1, 2, 0.1, 3), its mask is [[0, 2], [1, 1], [0, + * 1]]. We expect only two types of elements: integer(0) and float(1). + * @param options carries optional attribute values + * @return a new instance of MergeDedupData + */ + public MergeDedupData mergeDedupData(Operand integerTensor, + Operand floatTensor, String tupleMask, MergeDedupData.Options... options) { + return MergeDedupData.create(scope, integerTensor, floatTensor, tupleMask, options); + } + + /** + * A TPU core selector Op. + * This Op produces a set of TPU cores (for warm-up) or a single TPU core + * (for regular inference) to execute the TPU program on. The output is + * consumed by TPUPartitionedCall. + * + * @return a new instance of OrdinalSelector + */ + public OrdinalSelector ordinalSelector() { + return OrdinalSelector.create(scope); + } + + /** + * Retrieves a single tensor from the computation outfeed. + * This operation will block indefinitely until data is available. + * + * @param dtype The type of elements in the tensor. + * @param shape The shape of the tensor. + * @param options carries optional attribute values + * @param data type for {@code OutfeedDequeue} output and operands + * @return a new instance of OutfeedDequeue + */ + public OutfeedDequeue outfeedDequeue(Class dtype, Shape shape, + OutfeedDequeue.Options... options) { + return OutfeedDequeue.create(scope, dtype, shape, options); + } + + /** + * Retrieve multiple values from the computation outfeed. + * This operation will block indefinitely until data is available. Output {@code i} + * corresponds to XLA tuple element {@code i}. + * + * @param dtypes The element types of each element in {@code outputs}. + * @param shapes The shapes of each tensor in {@code outputs}. + * @param options carries optional attribute values + * @return a new instance of OutfeedDequeueTuple + */ + public OutfeedDequeueTuple outfeedDequeueTuple(List> dtypes, + List shapes, OutfeedDequeueTuple.Options... 
options) { + return OutfeedDequeueTuple.create(scope, dtypes, shapes, options); + } + + /** + * Retrieve multiple values from the computation outfeed. Device ordinal is a + * tensor allowing dynamic outfeed. + * This operation will block indefinitely until data is available. Output {@code i} + * corresponds to XLA tuple element {@code i}. + * + * @param deviceOrdinal An int scalar tensor, representing the TPU device to use. This should be -1 when + * the Op is running on a TPU device, and >= 0 when the Op is running on the CPU + * device. + * @param dtypes The element types of each element in {@code outputs}. + * @param shapes The shapes of each tensor in {@code outputs}. + * @return a new instance of OutfeedDequeueTupleV2 + */ + public OutfeedDequeueTupleV2 outfeedDequeueTupleV2(Operand deviceOrdinal, + List> dtypes, List shapes) { + return OutfeedDequeueTupleV2.create(scope, deviceOrdinal, dtypes, shapes); + } + + /** + * Retrieves a single tensor from the computation outfeed. Device ordinal is a + * tensor allowing dynamic outfeed. + * This operation will block indefinitely until data is available. + * + * @param deviceOrdinal An int scalar tensor, representing the TPU device to use. This should be -1 when + * the Op is running on a TPU device, and >= 0 when the Op is running on the CPU + * device. + * @param dtype The type of elements in the tensor. + * @param shape The shape of the tensor. + * @param data type for {@code OutfeedDequeueV2} output and operands + * @return a new instance of OutfeedDequeueV2 + */ + public OutfeedDequeueV2 outfeedDequeueV2(Operand deviceOrdinal, + Class dtype, Shape shape) { + return OutfeedDequeueV2.create(scope, deviceOrdinal, dtype, shape); + } + + /** + * Enqueue a Tensor on the computation outfeed. + * + * @param input A tensor that will be inserted into the outfeed queue. 
+ * @return a new instance of OutfeedEnqueue + */ + public OutfeedEnqueue outfeedEnqueue(Operand input) { + return OutfeedEnqueue.create(scope, input); + } + + /** + * Enqueue multiple Tensor values on the computation outfeed. + * + * @param inputs A list of tensors that will be inserted into the outfeed queue as an + * XLA tuple. + * @return a new instance of OutfeedEnqueueTuple + */ + public OutfeedEnqueueTuple outfeedEnqueueTuple(Iterable> inputs) { + return OutfeedEnqueueTuple.create(scope, inputs); + } + + /** + * Calls a function placed on a specified TPU device. + * + * @param args The arguments to the function. + * @param deviceOrdinal The TPU device ordinal to run the function on. + * @param Tout The types of the outputs of the function. + * @param f The function to call. + * @param options carries optional attribute values + * @return a new instance of PartitionedCall + */ + public PartitionedCall partitionedCall(Iterable> args, Operand deviceOrdinal, + List> Tout, ConcreteFunction f, PartitionedCall.Options... options) { + return PartitionedCall.create(scope, args, deviceOrdinal, Tout, f, options); + } + + /** + * An op that groups a list of partitioned inputs together. Supports ND sharding. + * + * @param inputs A list of partitioned inputs which must have the same shape. + * @param partitionDims A list of integers describing how each dimension is partitioned. Emptiness + * indicates the inputs are replicated. + * @param options carries optional attribute values + * @param data type for {@code TPUPartitionedInputV2} output and operands + * @return a new instance of PartitionedInput + */ + public PartitionedInput partitionedInput(Iterable> inputs, + List partitionDims, PartitionedInput.Options... options) { + return PartitionedInput.create(scope, inputs, partitionDims, options); + } + /** * An op that demultiplexes a tensor to be sharded by XLA to a list of partitioned * outputs outside the XLA computation. Supports ND sharding. 
* - * @param data type for {@code output} output * @param inputs A tensor which represents the full shape of partitioned tensors. * @param numSplits The value of the numSplits attribute * @param partitionDims A list of integers describing how each dimension is partitioned. Emptiness @@ -261,6 +1361,387 @@ public PartitionedOutput partitionedOutput(Operand input return PartitionedOutput.create(scope, inputs, numSplits, partitionDims); } + /** + * An op which linearizes one Tensor value to an opaque variant tensor. + * + * @param input A tensor that will be linearized. + * @param options carries optional attribute values + * @return a new instance of Prelinearize + */ + public Prelinearize prelinearize(Operand input, + Prelinearize.Options... options) { + return Prelinearize.create(scope, input, options); + } + + /** + * An op which linearizes multiple Tensor values to an opaque variant tensor. + * + * @param inputs A list of tensors that will be provided using the infeed mechanism. + * @param shapes The shapes of each tensor in {@code inputs}. + * @param options carries optional attribute values + * @return a new instance of PrelinearizeTuple + */ + public PrelinearizeTuple prelinearizeTuple(Iterable> inputs, List shapes, + PrelinearizeTuple.Options... options) { + return PrelinearizeTuple.create(scope, inputs, shapes, options); + } + + /** + * An op that receives embedding activations on the TPU. + * The TPU system performs the embedding lookups and aggregations specified by + * the arguments to TPUEmbeddingEnqueue(Integer/Sparse/SparseTensor)Batch. The + * results of these aggregations are visible to the Tensorflow Graph as the + * outputs of a RecvTPUEmbeddingActivations op. This op returns a list containing + * one Tensor of activations per table specified in the model. There can be at + * most one RecvTPUEmbeddingActivations op in the TPU graph. 
+ * + * @param numOutputs The number of output activation tensors, equal to the number of + * embedding tables in the model. + * @param config Serialized TPUEmbeddingConfiguration proto. + * @return a new instance of RecvTPUEmbeddingActivations + */ + public RecvTPUEmbeddingActivations recvTPUEmbeddingActivations(Long numOutputs, String config) { + return RecvTPUEmbeddingActivations.create(scope, numOutputs, config); + } + + /** + * Metadata indicating how the TPU computation should be replicated. + * This operation holds the metadata common to operations of a {@code tpu.replicate()} computation subgraph. + * + * @param numReplicas Number of replicas of the computation + * @param options carries optional attribute values + * @return a new instance of ReplicateMetadata + */ + public ReplicateMetadata replicateMetadata(Long numReplicas, + ReplicateMetadata.Options... options) { + return ReplicateMetadata.create(scope, numReplicas, options); + } + + /** + * Connects N inputs to an N-way replicated TPU computation. + * This operation holds a replicated input to a {@code tpu.replicate()} computation subgraph. + * Each replicated input has the same shape and type alongside the output. + *

For example: + *

+   *  %a = "tf.opA"()
+   *  %b = "tf.opB"()
+   *  %replicated_input = "tf.TPUReplicatedInput"(%a, %b)
+   *  %computation = "tf.Computation"(%replicated_input)
+   *  
+ *

The above computation has a replicated input of two replicas. + * + * @param inputs The inputs value + * @param options carries optional attribute values + * @param data type for {@code TPUReplicatedInput} output and operands + * @return a new instance of ReplicatedInput + */ + public ReplicatedInput replicatedInput(Iterable> inputs, + ReplicatedInput.Options... options) { + return ReplicatedInput.create(scope, inputs, options); + } + + /** + * Connects N outputs from an N-way replicated TPU computation. + * This operation holds a replicated output from a {@code tpu.replicate()} computation subgraph. + * Each replicated output has the same shape and type alongside the input. + *

For example: + *

+   *  %computation = "tf.Computation"()
+   *  %replicated_output:2 = "tf.TPUReplicatedOutput"(%computation)
+   *  
+ *

The above computation has a replicated output of two replicas. + * + * @param input The input value + * @param numReplicas The value of the numReplicas attribute + * @param data type for {@code TPUReplicatedOutput} output and operands + * @return a new instance of ReplicatedOutput + */ + public ReplicatedOutput replicatedOutput(Operand input, + Long numReplicas) { + return ReplicatedOutput.create(scope, input, numReplicas); + } + + /** + * An op that retrieves optimization parameters from embedding to host memory. + * An op that retrieves optimization parameters from embedding to host memory. + * Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + * embedding table configuration. For example, this op is used to retrieve updated + * parameters before saving a checkpoint. For Adagrad, auxiliary1 will contain the + * accumulators after running this op. For SGD, all of the auxiliary* values will + * be empty (0x0 tensors for that table). For FTRL, auxiliary1 will contain the + * accumulators and auxiliary2 will contain the linear terms. For ADAM, auxiliary1 + * will contain the momenta and auxiliary2 will contain the velocities. + * + * @param NumTables The number of embedding tables. + * @param config An TPUEmbeddingConfiguration proto describing the + * table parameters being loaded, serialized to a string. + * @param numShards Number of shards into which the embedding tables are divided. + * @param shardId Identifier of shard for this operation. + * @return a new instance of RetrieveAllTPUEmbeddingParameters + */ + public RetrieveAllTPUEmbeddingParameters retrieveAllTPUEmbeddingParameters(Long NumTables, + String config, Long numShards, Long shardId) { + return RetrieveAllTPUEmbeddingParameters.create(scope, NumTables, config, numShards, shardId); + } + + /** + * Retrieve ADAM embedding parameters. + * An op that retrieves optimization parameters from embedding to host + * memory. 
Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + * the correct embedding table configuration. For example, this op is + * used to retrieve updated parameters before saving a checkpoint. + * + * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of RetrieveTPUEmbeddingADAMParameters + */ + public RetrieveTPUEmbeddingADAMParameters retrieveTPUEmbeddingADAMParameters(Long numShards, + Long shardId, RetrieveTPUEmbeddingADAMParameters.Options... options) { + return RetrieveTPUEmbeddingADAMParameters.create(scope, numShards, shardId, options); + } + + /** + * Retrieve Adadelta embedding parameters. + * An op that retrieves optimization parameters from embedding to host + * memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + * the correct embedding table configuration. For example, this op is + * used to retrieve updated parameters before saving a checkpoint. + * + * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of RetrieveTPUEmbeddingAdadeltaParameters + */ + public RetrieveTPUEmbeddingAdadeltaParameters retrieveTPUEmbeddingAdadeltaParameters( + Long numShards, Long shardId, RetrieveTPUEmbeddingAdadeltaParameters.Options... options) { + return RetrieveTPUEmbeddingAdadeltaParameters.create(scope, numShards, shardId, options); + } + + /** + * Retrieve Adagrad Momentum embedding parameters. + * An op that retrieves optimization parameters from embedding to host + * memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + * the correct embedding table configuration. For example, this op is + * used to retrieve updated parameters before saving a checkpoint. 
+ * + * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of RetrieveTPUEmbeddingAdagradMomentumParameters + */ + public RetrieveTPUEmbeddingAdagradMomentumParameters retrieveTPUEmbeddingAdagradMomentumParameters( + Long numShards, Long shardId, + RetrieveTPUEmbeddingAdagradMomentumParameters.Options... options) { + return RetrieveTPUEmbeddingAdagradMomentumParameters.create(scope, numShards, shardId, options); + } + + /** + * Retrieve Adagrad embedding parameters. + * An op that retrieves optimization parameters from embedding to host + * memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + * the correct embedding table configuration. For example, this op is + * used to retrieve updated parameters before saving a checkpoint. + * + * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of RetrieveTPUEmbeddingAdagradParameters + */ + public RetrieveTPUEmbeddingAdagradParameters retrieveTPUEmbeddingAdagradParameters(Long numShards, + Long shardId, RetrieveTPUEmbeddingAdagradParameters.Options... options) { + return RetrieveTPUEmbeddingAdagradParameters.create(scope, numShards, shardId, options); + } + + /** + * Retrieve centered RMSProp embedding parameters. + * An op that retrieves optimization parameters from embedding to host + * memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + * the correct embedding table configuration. For example, this op is + * used to retrieve updated parameters before saving a checkpoint. 
+ * + * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of RetrieveTPUEmbeddingCenteredRMSPropParameters + */ + public RetrieveTPUEmbeddingCenteredRMSPropParameters retrieveTPUEmbeddingCenteredRMSPropParameters( + Long numShards, Long shardId, + RetrieveTPUEmbeddingCenteredRMSPropParameters.Options... options) { + return RetrieveTPUEmbeddingCenteredRMSPropParameters.create(scope, numShards, shardId, options); + } + + /** + * Retrieve FTRL embedding parameters. + * An op that retrieves optimization parameters from embedding to host + * memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + * the correct embedding table configuration. For example, this op is + * used to retrieve updated parameters before saving a checkpoint. + * + * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of RetrieveTPUEmbeddingFTRLParameters + */ + public RetrieveTPUEmbeddingFTRLParameters retrieveTPUEmbeddingFTRLParameters(Long numShards, + Long shardId, RetrieveTPUEmbeddingFTRLParameters.Options... options) { + return RetrieveTPUEmbeddingFTRLParameters.create(scope, numShards, shardId, options); + } + + /** + * Retrieve frequency estimator embedding parameters. + * An op that retrieves optimization parameters from embedding to host + * memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + * the correct embedding table configuration. For example, this op is + * used to retrieve updated parameters before saving a checkpoint. 
+ * + * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of RetrieveTPUEmbeddingFrequencyEstimatorParameters + */ + public RetrieveTPUEmbeddingFrequencyEstimatorParameters retrieveTPUEmbeddingFrequencyEstimatorParameters( + Long numShards, Long shardId, + RetrieveTPUEmbeddingFrequencyEstimatorParameters.Options... options) { + return RetrieveTPUEmbeddingFrequencyEstimatorParameters.create(scope, numShards, shardId, options); + } + + /** + * Retrieve MDL Adagrad Light embedding parameters. + * An op that retrieves optimization parameters from embedding to host + * memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + * the correct embedding table configuration. For example, this op is + * used to retrieve updated parameters before saving a checkpoint. + * + * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of RetrieveTPUEmbeddingMDLAdagradLightParameters + */ + public RetrieveTPUEmbeddingMDLAdagradLightParameters retrieveTPUEmbeddingMDLAdagradLightParameters( + Long numShards, Long shardId, + RetrieveTPUEmbeddingMDLAdagradLightParameters.Options... options) { + return RetrieveTPUEmbeddingMDLAdagradLightParameters.create(scope, numShards, shardId, options); + } + + /** + * Retrieve Momentum embedding parameters. + * An op that retrieves optimization parameters from embedding to host + * memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + * the correct embedding table configuration. For example, this op is + * used to retrieve updated parameters before saving a checkpoint. 
+ * + * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of RetrieveTPUEmbeddingMomentumParameters + */ + public RetrieveTPUEmbeddingMomentumParameters retrieveTPUEmbeddingMomentumParameters( + Long numShards, Long shardId, RetrieveTPUEmbeddingMomentumParameters.Options... options) { + return RetrieveTPUEmbeddingMomentumParameters.create(scope, numShards, shardId, options); + } + + /** + * Retrieve proximal Adagrad embedding parameters. + * An op that retrieves optimization parameters from embedding to host + * memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + * the correct embedding table configuration. For example, this op is + * used to retrieve updated parameters before saving a checkpoint. + * + * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of RetrieveTPUEmbeddingProximalAdagradParameters + */ + public RetrieveTPUEmbeddingProximalAdagradParameters retrieveTPUEmbeddingProximalAdagradParameters( + Long numShards, Long shardId, + RetrieveTPUEmbeddingProximalAdagradParameters.Options... options) { + return RetrieveTPUEmbeddingProximalAdagradParameters.create(scope, numShards, shardId, options); + } + + /** + * The RetrieveTPUEmbeddingProximalYogiParameters operation + * + * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of RetrieveTPUEmbeddingProximalYogiParameters + */ + public RetrieveTPUEmbeddingProximalYogiParameters retrieveTPUEmbeddingProximalYogiParameters( + Long numShards, Long shardId, RetrieveTPUEmbeddingProximalYogiParameters.Options... 
options) { + return RetrieveTPUEmbeddingProximalYogiParameters.create(scope, numShards, shardId, options); + } + + /** + * Retrieve RMSProp embedding parameters. + * An op that retrieves optimization parameters from embedding to host + * memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + * the correct embedding table configuration. For example, this op is + * used to retrieve updated parameters before saving a checkpoint. + * + * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of RetrieveTPUEmbeddingRMSPropParameters + */ + public RetrieveTPUEmbeddingRMSPropParameters retrieveTPUEmbeddingRMSPropParameters(Long numShards, + Long shardId, RetrieveTPUEmbeddingRMSPropParameters.Options... options) { + return RetrieveTPUEmbeddingRMSPropParameters.create(scope, numShards, shardId, options); + } + + /** + * Retrieve SGD embedding parameters. + * An op that retrieves optimization parameters from embedding to host + * memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + * the correct embedding table configuration. For example, this op is + * used to retrieve updated parameters before saving a checkpoint. + * + * @param numShards The value of the numShards attribute + * @param shardId The value of the shardId attribute + * @param options carries optional attribute values + * @return a new instance of RetrieveTPUEmbeddingStochasticGradientDescentParameters + */ + public RetrieveTPUEmbeddingStochasticGradientDescentParameters retrieveTPUEmbeddingStochasticGradientDescentParameters( + Long numShards, Long shardId, + RetrieveTPUEmbeddingStochasticGradientDescentParameters.Options... options) { + return RetrieveTPUEmbeddingStochasticGradientDescentParameters.create(scope, numShards, shardId, options); + } + + /** + * Performs gradient updates of embedding tables. 
+ * + * @param inputs A TensorList of gradients with which to update embedding tables. + * This argument has the same length and shapes as the return value of + * RecvTPUEmbeddingActivations, but contains gradients of the model's loss + * with respect to the embedding activations. The embedding tables are updated + * from these gradients via the optimizer specified in the TPU embedding + * configuration given to tpu.initialize_system. + * @param learningRates A TensorList of float32 scalars, one for each dynamic learning + * rate tag: see the comments in + * //third_party/tensorflow/core/protobuf/tpu/optimization_parameters.proto. + * Multiple tables can share the same dynamic learning rate tag as specified + * in the configuration. If the learning rates for all tables are constant, + * this list should be empty. + * @param config Serialized TPUEmbeddingConfiguration proto. + * @param options carries optional attribute values + * @return a new instance of SendTPUEmbeddingGradients + */ + public SendTPUEmbeddingGradients sendTPUEmbeddingGradients(Iterable> inputs, + Iterable> learningRates, String config, + SendTPUEmbeddingGradients.Options... options) { + return SendTPUEmbeddingGradients.create(scope, inputs, learningRates, config, options); + } + + /** + * Shuts down a running distributed TPU system. + * The op returns an error if no system is running. + * + * @return a new instance of ShutdownDistributedTPU + */ + public ShutdownDistributedTPU shutdownDistributedTPU() { + return ShutdownDistributedTPU.create(scope); + } + /** * An op that shuts down the TPU system. * @@ -270,6 +1751,193 @@ public ShutdownTPUSystem shutdownTPUSystem() { return ShutdownTPUSystem.create(scope); } + /** + * An op splits input deduplication data XLA tuple into integer and floating point + * tensors. + * Deduplication data is an XLA tuple, which consists of integer and floating point + * values. 
This op is to split these values into two groups for two types, and + * construct each group as one tensor to return. + * + * @param input An XLA tuple including integer and float elements as deduplication data tuple. + * @param integerType integer_tensor type. Allowed types: int32, int64, uint32, uint64. + * @param floatType float_tensor type. Allowed types: half, bfloat16, float. + * @param tupleMask A serialized TensorProto string of output tuple mask. This mask is a 2-D tensor, + * with first column as tuple element type, and second column as span of this type. + * For example, an output tuple of (1, 2, 0.1, 3), its mask is [[0, 2], [1, 1], [0, + * 1]]. We expect only two types of elements: integer(0) and float(1). + * @param options carries optional attribute values + * @param data type for {@code SplitDedupData} output and operands + * @param data type for {@code SplitDedupData} output and operands + * @return a new instance of SplitDedupData + */ + public SplitDedupData splitDedupData( + Operand input, Class integerType, Class floatType, String tupleMask, + SplitDedupData.Options... 
options) { + return SplitDedupData.create(scope, input, integerType, floatType, tupleMask, options); + } + + /** + * The StoreMinibatchStatisticsInFdo operation + * + * @param programKey The programKey value + * @param maxIds The maxIds value + * @param maxUniques The maxUniques value + * @param sampleCount The value of the sampleCount attribute + * @param numReplica The value of the numReplica attribute + * @param featureWidth The value of the featureWidth attribute + * @param numScPerChip The value of the numScPerChip attribute + * @param tableName The value of the tableName attribute + * @param miniBatchSplits The value of the miniBatchSplits attribute + * @return a new instance of StoreMinibatchStatisticsInFdo + */ + public StoreMinibatchStatisticsInFdo storeMinibatchStatisticsInFdo(Operand programKey, + Operand maxIds, Operand maxUniques, Long sampleCount, Long numReplica, + Long featureWidth, Long numScPerChip, String tableName, String miniBatchSplits) { + return StoreMinibatchStatisticsInFdo.create(scope, programKey, maxIds, maxUniques, sampleCount, numReplica, featureWidth, numScPerChip, tableName, miniBatchSplits); + } + + /** + * The TPUAnnotateTensorsWithDynamicShape operation + * + * @param tensors The tensors value + * @return a new instance of TPUAnnotateTensorsWithDynamicShape + */ + public TPUAnnotateTensorsWithDynamicShape tPUAnnotateTensorsWithDynamicShape( + Iterable> tensors) { + return TPUAnnotateTensorsWithDynamicShape.create(scope, tensors); + } + + /** + * Returns the result of a TPU compilation. + * This operation returns the result of a TPU compilation as a serialized + * CompilationResultProto, which holds a status and an error message if an error + * occurred during compilation. 
+ * + * @deprecated use {@link org.tensorflow.op.tpu.CompilationResult} instead + * @return a new instance of TPUCompilationResult + */ + @Deprecated + public TPUCompilationResult tPUCompilationResult() { + return TPUCompilationResult.create(scope); + } + + /** + * Op that copies host tensor to device with dynamic shape support. + * For internal use only. + * + * @param tensors The tensors value + * @param unpaddedSizes The unpaddedSizes value + * @return a new instance of TPUCopyWithDynamicShape + */ + public TPUCopyWithDynamicShape tPUCopyWithDynamicShape(Iterable> tensors, + Iterable> unpaddedSizes) { + return TPUCopyWithDynamicShape.create(scope, tensors, unpaddedSizes); + } + + /** + * An op enabling differentiation of TPU Embeddings. + * This op simply returns its first input, which is assumed to have been sliced + * from the Tensors returned by TPUEmbeddingDequeueActivations. The presence of + * this op, and its first argument being a trainable Variable, enables automatic + * differentiation of graphs containing embeddings via the TPU Embedding Python + * libraries. + * + * @deprecated use {@link org.tensorflow.op.tpu.EmbeddingActivations} instead + * @param embeddingVariable A trainable variable, enabling optimizers to find this op. + * @param slicedActivations The embedding activations Tensor to return. + * @param tableId The id of the table in the embedding layer configuration from which + * these activations were computed. + * @param lookupId Identifier of the set of embedding indices which produced these + * activations. + * @return a new instance of TPUEmbeddingActivations + */ + @Deprecated + public TPUEmbeddingActivations tPUEmbeddingActivations(Operand embeddingVariable, + Operand slicedActivations, Long tableId, Long lookupId) { + return TPUEmbeddingActivations.create(scope, embeddingVariable, slicedActivations, tableId, lookupId); + } + + /** + * Metadata indicating how the TPU computation should be replicated. 
+ * This operation holds the metadata common to operations of a {@code tpu.replicate()} computation subgraph. + * + * @deprecated use {@link org.tensorflow.op.tpu.ReplicateMetadata} instead + * @param numReplicas Number of replicas of the computation + * @param options carries optional attribute values + * @return a new instance of TPUReplicateMetadata + */ + @Deprecated + public TPUReplicateMetadata tPUReplicateMetadata(Long numReplicas, + TPUReplicateMetadata.Options... options) { + return TPUReplicateMetadata.create(scope, numReplicas, options); + } + + /** + * Connects N inputs to an N-way replicated TPU computation. + * This operation holds a replicated input to a {@code tpu.replicate()} computation subgraph. + * Each replicated input has the same shape and type alongside the output. + *

For example: + *

+   *  %a = "tf.opA"()
+   *  %b = "tf.opB"()
+   *  %replicated_input = "tf.TPUReplicatedInput"(%a, %b)
+   *  %computation = "tf.Computation"(%replicated_input)
+   *  
+ *

The above computation has a replicated input of two replicas. + * + * @deprecated use {@link org.tensorflow.op.tpu.ReplicatedInput} instead + * @param inputs The inputs value + * @param options carries optional attribute values + * @param data type for {@code TPUReplicatedInput} output and operands + * @return a new instance of TPUReplicatedInput + */ + @Deprecated + public TPUReplicatedInput tPUReplicatedInput(Iterable> inputs, + TPUReplicatedInput.Options... options) { + return TPUReplicatedInput.create(scope, inputs, options); + } + + /** + * Connects N outputs from an N-way replicated TPU computation. + * This operation holds a replicated output from a {@code tpu.replicate()} computation subgraph. + * Each replicated output has the same shape and type alongside the input. + *

For example: + *

+   *  %computation = "tf.Computation"()
+   *  %replicated_output:2 = "tf.TPUReplicatedOutput"(%computation)
+   *  
+ *

The above computation has a replicated output of two replicas. + * + * @deprecated use {@link org.tensorflow.op.tpu.ReplicatedOutput} instead + * @param input The input value + * @param numReplicas The value of the numReplicas attribute + * @param data type for {@code TPUReplicatedOutput} output and operands + * @return a new instance of TPUReplicatedOutput + */ + @Deprecated + public TPUReplicatedOutput tPUReplicatedOutput(Operand input, + Long numReplicas) { + return TPUReplicatedOutput.create(scope, input, numReplicas); + } + + /** + * Op that reshards on-device TPU variables to specified state. + * Op that reshards on-device TPU variables to specified state. Internal use only. + *

The sharding state is represented as the key of the compilation that generated + * the sharding/unsharding programs along with the main program. new_format_key + * specifies the desired state, and format_state_var is the current state of the + * variables. + * + * @param vars The vars value + * @param newFormatKey The newFormatKey value + * @param formatStateVar The formatStateVar value + * @return a new instance of TPUReshardVariables + */ + public TPUReshardVariables tPUReshardVariables(Iterable> vars, + Operand newFormatKey, Operand formatStateVar) { + return TPUReshardVariables.create(scope, vars, newFormatKey, formatStateVar); + } + /** * Round-robin load balancing on TPU cores. * A load balancing op that round-robins among TPU cores. @@ -300,6 +1968,18 @@ public TpuHandleToProtoKey tpuHandleToProtoKey(Operand uid) { return TpuHandleToProtoKey.create(scope, uid); } + /** + * Worker heartbeat op. + * Heartbeats may be sent periodically to indicate the coordinator is still active, + * to retrieve the current worker status and to expedite shutdown when necessary. + * + * @param request A string tensor containing a serialized WorkerHeartbeatRequest + * @return a new instance of WorkerHeartbeat + */ + public WorkerHeartbeat workerHeartbeat(Operand request) { + return WorkerHeartbeat.create(scope, request); + } + /** * Get the parent {@link Ops} object. 
*/ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/TrainOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/TrainOps.java index a4333717ba5..3ee5b8de813 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/TrainOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/TrainOps.java @@ -25,9 +25,11 @@ import org.tensorflow.op.train.AccumulatorNumAccumulated; import org.tensorflow.op.train.AccumulatorSetGlobalStep; import org.tensorflow.op.train.AccumulatorTakeGradient; +import org.tensorflow.op.train.ApplyAdaMax; import org.tensorflow.op.train.ApplyAdadelta; import org.tensorflow.op.train.ApplyAdagrad; import org.tensorflow.op.train.ApplyAdagradDa; +import org.tensorflow.op.train.ApplyAdagradV2; import org.tensorflow.op.train.ApplyAdam; import org.tensorflow.op.train.ApplyAddSign; import org.tensorflow.op.train.ApplyCenteredRmsProp; @@ -39,12 +41,20 @@ import org.tensorflow.op.train.ApplyProximalGradientDescent; import org.tensorflow.op.train.ApplyRmsProp; import org.tensorflow.op.train.BatchMatMul; +import org.tensorflow.op.train.ComputeBatchSize; import org.tensorflow.op.train.ConditionalAccumulator; +import org.tensorflow.op.train.DistributedSave; import org.tensorflow.op.train.GenerateVocabRemapping; import org.tensorflow.op.train.MergeV2Checkpoints; import org.tensorflow.op.train.NegTrain; import org.tensorflow.op.train.PreventGradient; +import org.tensorflow.op.train.ResourceAccumulatorApplyGradient; +import org.tensorflow.op.train.ResourceAccumulatorNumAccumulated; +import org.tensorflow.op.train.ResourceAccumulatorSetGlobalStep; +import org.tensorflow.op.train.ResourceAccumulatorTakeGradient; +import org.tensorflow.op.train.ResourceApplyAdaMax; import org.tensorflow.op.train.ResourceApplyAdadelta; +import org.tensorflow.op.train.ResourceApplyAdagrad; import org.tensorflow.op.train.ResourceApplyAdagradDa; import 
org.tensorflow.op.train.ResourceApplyAdam; import org.tensorflow.op.train.ResourceApplyAdamWithAmsgrad; @@ -58,9 +68,11 @@ import org.tensorflow.op.train.ResourceApplyProximalAdagrad; import org.tensorflow.op.train.ResourceApplyProximalGradientDescent; import org.tensorflow.op.train.ResourceApplyRmsProp; +import org.tensorflow.op.train.ResourceConditionalAccumulator; import org.tensorflow.op.train.ResourceSparseApplyAdadelta; import org.tensorflow.op.train.ResourceSparseApplyAdagrad; import org.tensorflow.op.train.ResourceSparseApplyAdagradDa; +import org.tensorflow.op.train.ResourceSparseApplyAdagradV2; import org.tensorflow.op.train.ResourceSparseApplyCenteredRmsProp; import org.tensorflow.op.train.ResourceSparseApplyFtrl; import org.tensorflow.op.train.ResourceSparseApplyKerasMomentum; @@ -73,8 +85,10 @@ import org.tensorflow.op.train.Save; import org.tensorflow.op.train.SaveSlices; import org.tensorflow.op.train.SdcaFprint; +import org.tensorflow.op.train.SdcaOptimizer; import org.tensorflow.op.train.SdcaShrinkL1; import org.tensorflow.op.train.SparseApplyAdadelta; +import org.tensorflow.op.train.SparseApplyAdagrad; import org.tensorflow.op.train.SparseApplyAdagradDa; import org.tensorflow.op.train.SparseApplyCenteredRmsProp; import org.tensorflow.op.train.SparseApplyFtrl; @@ -94,7 +108,7 @@ /** * An API for building {@code train} operations as {@link Op Op}s * - * @see {@link Ops} + * @see Ops */ public final class TrainOps { private final Scope scope; @@ -152,7 +166,6 @@ public AccumulatorSetGlobalStep accumulatorSetGlobalStep(Operand handle * the accumulated gradients. Also automatically increments the recorded * global_step in the accumulator by 1, and resets the aggregate to 0. * - * @param data type for {@code average} output * @param handle The handle to an accumulator. * @param numRequired Number of gradients required before we return an aggregate. * @param dtype The data type of accumulated gradients. 
Needs to correspond to the type @@ -165,6 +178,31 @@ public AccumulatorTakeGradient accumulatorTakeGradient( return AccumulatorTakeGradient.create(scope, handle, numRequired, dtype); } + /** + * Update '*var' according to the AdaMax algorithm. + * m_t <- beta1 * m_{t-1} + (1 - beta1) * g + * v_t <- max(beta2 * v_{t-1}, abs(g)) + * variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon) + * + * @param var Should be from a Variable(). + * @param m Should be from a Variable(). + * @param v Should be from a Variable(). + * @param beta1Power Must be a scalar. + * @param lr Scaling factor. Must be a scalar. + * @param beta1 Momentum factor. Must be a scalar. + * @param beta2 Momentum factor. Must be a scalar. + * @param epsilon Ridge term. Must be a scalar. + * @param grad The gradient. + * @param options carries optional attribute values + * @param data type for {@code ApplyAdaMax} output and operands + * @return a new instance of ApplyAdaMax + */ + public ApplyAdaMax applyAdaMax(Operand var, Operand m, Operand v, + Operand beta1Power, Operand lr, Operand beta1, Operand beta2, Operand epsilon, + Operand grad, ApplyAdaMax.Options... options) { + return ApplyAdaMax.create(scope, var, m, v, beta1Power, lr, beta1, beta2, epsilon, grad, options); + } + /** * Update '*var' according to the adadelta scheme. * accum = rho() * accum + (1 - rho()) * grad.square(); @@ -172,7 +210,6 @@ public AccumulatorTakeGradient accumulatorTakeGradient( * update_accum = rho() * update_accum + (1 - rho()) * update.square(); * var -= update; * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param accumUpdate Should be from a Variable(). @@ -195,7 +232,6 @@ public ApplyAdadelta applyAdadelta(Operand var, Operand< * accum += grad * grad * var -= lr * grad * (1 / sqrt(accum)) * - * @param data type for {@code out} output * @param var Should be from a Variable(). 
* @param accum Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -212,7 +248,6 @@ public ApplyAdagrad applyAdagrad(Operand var, Operand /** * Update '*var' according to the proximal adagrad scheme. * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param gradientAccumulator Should be from a Variable(). * @param gradientSquaredAccumulator Should be from a Variable(). @@ -232,6 +267,25 @@ public ApplyAdagradDa applyAdagradDa(Operand var, return ApplyAdagradDa.create(scope, var, gradientAccumulator, gradientSquaredAccumulator, grad, lr, l1, l2, globalStep, options); } + /** + * Update '*var' according to the adagrad scheme. + * accum += grad * grad + * var -= lr * grad * (1 / sqrt(accum)) + * + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param epsilon Constant factor. Must be a scalar. + * @param grad The gradient. + * @param options carries optional attribute values + * @param data type for {@code ApplyAdagradV2} output and operands + * @return a new instance of ApplyAdagradV2 + */ + public ApplyAdagradV2 applyAdagradV2(Operand var, Operand accum, + Operand lr, Operand epsilon, Operand grad, ApplyAdagradV2.Options... options) { + return ApplyAdagradV2.create(scope, var, accum, lr, epsilon, grad, options); + } + /** * Update '*var' according to the Adam algorithm. * $$\text{lr}t := \mathrm{lr} \cdot \frac{\sqrt{1 - \beta_2^t}}{1 - \beta_1^t}$$ @@ -239,7 +293,6 @@ public ApplyAdagradDa applyAdagradDa(Operand var, * $$v_t := \beta_2 \cdot v_{t-1} + (1 - \beta_2) \cdot g^2$$ * $$\text{var} := \begin{cases} \text{var} - (m_t \beta_1 + g \cdot (1 - \beta_1))\cdot\text{lr}_t/(\sqrt{v_t} + \epsilon), &\text{if use_nesterov}\\ \text{var} - m_t \cdot \text{lr}_t /(\sqrt{v_t} + \epsilon), &\text{otherwise} \end{cases}$$ * - * @param data type for {@code out} output * @param var Should be from a Variable(). 
* @param m Should be from a Variable(). * @param v Should be from a Variable(). @@ -266,7 +319,6 @@ public ApplyAdam applyAdam(Operand var, Operand m, Op * update <- (alpha + sign_decay * sign(g) *sign(m)) * g * variable <- variable - lr_t * update * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param m Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -301,7 +353,6 @@ public ApplyAddSign applyAddSign(Operand var, Operand * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) * var <- var - mom * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param mg Should be from a Variable(). * @param ms Should be from a Variable(). @@ -332,7 +383,6 @@ public ApplyCenteredRmsProp applyCenteredRmsProp(Operand * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param linear Should be from a Variable(). @@ -355,7 +405,6 @@ public ApplyFtrl applyFtrl(Operand var, Operand accum /** * Update '*var' by subtracting 'alpha' * 'delta' from it. * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. * @param delta The change. @@ -374,7 +423,6 @@ public ApplyGradientDescent applyGradientDescent(Operand *

accum = accum * momentum + grad * var -= lr * accum * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -395,7 +443,6 @@ public ApplyMomentum applyMomentum(Operand var, Operand< * update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g * variable <- variable - lr_t * update * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param m Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -419,7 +466,6 @@ public ApplyPowerSign applyPowerSign(Operand var, Operan * prox_v = var - lr * grad * (1 / sqrt(accum)) * var = sign(prox_v)/(1+lrl2) * max{|prox_v|-lrl1,0} * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -441,7 +487,6 @@ public ApplyProximalAdagrad applyProximalAdagrad(Operand * prox_v = var - alpha * delta * var = sign(prox_v)/(1+alphal2) * max{|prox_v|-alphal1,0} * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. * @param l1 L1 regularization. Must be a scalar. @@ -468,7 +513,6 @@ public ApplyProximalGradientDescent applyProximalGradientDe * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) * var <- var - mom * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param ms Should be from a Variable(). * @param mom Should be from a Variable(). @@ -510,7 +554,6 @@ public ApplyRmsProp applyRmsProp(Operand var, Operand * about broadcasting * here . * - * @param data type for {@code output} output * @param x 2-D or higher with shape {@code [..., r_x, c_x]}. * @param y 2-D or higher with shape {@code [..., r_y, c_y]}. * @param Tout If not spcified, Tout is the same type to input type. 
@@ -523,6 +566,16 @@ public BatchMatMul batchMatMul(Operand x, return BatchMatMul.create(scope, x, y, Tout, options); } + /** + * Computes the static batch size of a dataset sans partial batches. + * + * @param inputDataset The inputDataset value + * @return a new instance of ComputeBatchSize + */ + public ComputeBatchSize computeBatchSize(Operand inputDataset) { + return ComputeBatchSize.create(scope, inputDataset); + } + /** * A conditional accumulator for aggregating gradients. * The accumulator accepts gradients marked with local_step greater or @@ -543,6 +596,20 @@ public ConditionalAccumulator conditionalAccumulator(Class return ConditionalAccumulator.create(scope, dtype, shape, options); } + /** + * The DistributedSave operation + * + * @param dataset The dataset value + * @param directory The directory value + * @param address The address value + * @param options carries optional attribute values + * @return a new instance of DistributedSave + */ + public DistributedSave distributedSave(Operand dataset, + Operand directory, Operand address, DistributedSave.Options... options) { + return DistributedSave.create(scope, dataset, directory, address, options); + } + /** * Given a path to new and old vocabulary files, returns a remapping Tensor of * length {@code num_new_vocab}, where {@code remapping[i]} contains the row number in the old @@ -633,7 +700,6 @@ public NegTrain negTrain(Operand wIn, Operand wOut, Operand< * op exists to prevent subtle bugs from silently returning unimplemented * gradients in some corner cases. * - * @param data type for {@code output} output * @param input any tensor. * @param options carries optional attribute values * @param data type for {@code PreventGradient} output and operands @@ -644,6 +710,92 @@ public PreventGradient preventGradient(Operand input, return PreventGradient.create(scope, input, options); } + /** + * Applies a gradient to a given accumulator. 
+ * Does not add if local_step is lesser than the accumulator's global_step. + * + * @param handle The handle to a accumulator. + * @param localStep The local_step value at which the gradient was computed. + * @param gradient A tensor of the gradient to be accumulated. + * @return a new instance of ResourceAccumulatorApplyGradient + */ + public ResourceAccumulatorApplyGradient resourceAccumulatorApplyGradient( + Operand handle, Operand localStep, + Operand gradient) { + return ResourceAccumulatorApplyGradient.create(scope, handle, localStep, gradient); + } + + /** + * Returns the number of gradients aggregated in the given accumulators. + * + * @param handle The handle to an accumulator. + * @return a new instance of ResourceAccumulatorNumAccumulated + */ + public ResourceAccumulatorNumAccumulated resourceAccumulatorNumAccumulated( + Operand handle) { + return ResourceAccumulatorNumAccumulated.create(scope, handle); + } + + /** + * Updates the accumulator with a new value for global_step. + * Logs warning if the accumulator's value is already higher than + * new_global_step. + * + * @param handle The handle to an accumulator. + * @param newGlobalStep The new global_step value to set. + * @return a new instance of ResourceAccumulatorSetGlobalStep + */ + public ResourceAccumulatorSetGlobalStep resourceAccumulatorSetGlobalStep( + Operand handle, Operand newGlobalStep) { + return ResourceAccumulatorSetGlobalStep.create(scope, handle, newGlobalStep); + } + + /** + * Extracts the average gradient in the given ConditionalAccumulator. + * The op blocks until sufficient (i.e., more than num_required) + * gradients have been accumulated. If the accumulator has already + * aggregated more than num_required gradients, it returns the average of + * the accumulated gradients. Also automatically increments the recorded + * global_step in the accumulator by 1, and resets the aggregate to 0. + * + * @param handle The handle to an accumulator. 
+ * @param numRequired Number of gradients required before we return an aggregate. + * @param dtype The data type of accumulated gradients. Needs to correspond to the type + * of the accumulator. + * @param data type for {@code ResourceAccumulatorTakeGradient} output and operands + * @return a new instance of ResourceAccumulatorTakeGradient + */ + public ResourceAccumulatorTakeGradient resourceAccumulatorTakeGradient( + Operand handle, Operand numRequired, Class dtype) { + return ResourceAccumulatorTakeGradient.create(scope, handle, numRequired, dtype); + } + + /** + * Update '*var' according to the AdaMax algorithm. + * m_t <- beta1 * m_{t-1} + (1 - beta1) * g + * v_t <- max(beta2 * v_{t-1}, abs(g)) + * variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon) + * + * @param var Should be from a Variable(). + * @param m Should be from a Variable(). + * @param v Should be from a Variable(). + * @param beta1Power Must be a scalar. + * @param lr Scaling factor. Must be a scalar. + * @param beta1 Momentum factor. Must be a scalar. + * @param beta2 Momentum factor. Must be a scalar. + * @param epsilon Ridge term. Must be a scalar. + * @param grad The gradient. + * @param options carries optional attribute values + * @param data type for {@code ResourceApplyAdaMax} output and operands + * @return a new instance of ResourceApplyAdaMax + */ + public ResourceApplyAdaMax resourceApplyAdaMax(Operand var, + Operand m, Operand v, Operand beta1Power, Operand lr, + Operand beta1, Operand beta2, Operand epsilon, Operand grad, + ResourceApplyAdaMax.Options... options) { + return ResourceApplyAdaMax.create(scope, var, m, v, beta1Power, lr, beta1, beta2, epsilon, grad, options); + } + /** * Update '*var' according to the adadelta scheme. 
* accum = rho() * accum + (1 - rho()) * grad.square(); @@ -669,6 +821,26 @@ public ResourceApplyAdadelta resourceApplyAdadelta(Operand data type for {@code ResourceApplyAdagradV2} output and operands + * @return a new instance of ResourceApplyAdagrad + */ + public ResourceApplyAdagrad resourceApplyAdagrad(Operand var, + Operand accum, Operand lr, Operand epsilon, Operand grad, + ResourceApplyAdagrad.Options... options) { + return ResourceApplyAdagrad.create(scope, var, accum, lr, epsilon, grad, options); + } + /** * Update '*var' according to the proximal adagrad scheme. * @@ -995,6 +1167,28 @@ public ResourceApplyRmsProp resourceApplyRmsProp(Operand data type for {@code ResourceConditionalAccumulator} output and operands + * @return a new instance of ResourceConditionalAccumulator + */ + public ResourceConditionalAccumulator resourceConditionalAccumulator( + Class dtype, Shape shape, ResourceConditionalAccumulator.Options... options) { + return ResourceConditionalAccumulator.create(scope, dtype, shape, options); + } + /** * var: Should be from a Variable(). * @@ -1063,6 +1257,29 @@ public ResourceSparseApplyAdagradDa resourceSparseApplyAdagrad return ResourceSparseApplyAdagradDa.create(scope, var, gradientAccumulator, gradientSquaredAccumulator, grad, indices, lr, l1, l2, globalStep, options); } + /** + * Update relevant entries in '*var' and '*accum' according to the adagrad scheme. + * That is for rows we have grad for, we update var and accum as follows: + * accum += grad * grad + * var -= lr * grad * (1 / sqrt(accum)) + * + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Learning rate. Must be a scalar. + * @param epsilon Constant factor. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. 
+ * @param options carries optional attribute values + * @param data type for {@code ResourceSparseApplyAdagradV2} output and operands + * @return a new instance of ResourceSparseApplyAdagradV2 + */ + public ResourceSparseApplyAdagradV2 resourceSparseApplyAdagradV2( + Operand var, Operand accum, Operand lr, + Operand epsilon, Operand grad, Operand indices, + ResourceSparseApplyAdagradV2.Options... options) { + return ResourceSparseApplyAdagradV2.create(scope, var, accum, lr, epsilon, grad, indices, options); + } + /** * Update '*var' according to the centered RMSProp algorithm. * The centered RMSProp algorithm uses an estimate of the centered second moment @@ -1299,7 +1516,6 @@ public Restore restore(Operand prefix, Operand tensorNames, *

The {@code shape_and_slice} input has the same format as the * elements of the {@code shapes_and_slices} input of the {@code SaveSlices} op. * - * @param data type for {@code tensor} output * @param filePattern Must have a single element. The pattern of the files from * which we read the tensor. * @param tensorName Must have a single element. The name of the tensor to be @@ -1382,6 +1598,59 @@ public SdcaFprint sdcaFprint(Operand input) { return SdcaFprint.create(scope, input); } + /** + * Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for + * linear models with L1 + L2 regularization. As global optimization objective is + * strongly-convex, the optimizer optimizes the dual objective at each step. The + * optimizer applies each update one example at a time. Examples are sampled + * uniformly, and the optimizer is learning rate free and enjoys linear convergence + * rate. + *

Proximal Stochastic Dual Coordinate Ascent .
+ * Shai Shalev-Shwartz, Tong Zhang. 2012 + *

$$Loss Objective = \sum f_{i} (wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|$$ + *

Adding vs. Averaging in Distributed Primal-Dual Optimization .
+ * Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan, + * Peter Richtarik, Martin Takac. 2015 + *

Stochastic Dual Coordinate Ascent with Adaptive Probabilities .
+ * Dominik Csiba, Zheng Qu, Peter Richtarik. 2015 + * + * @param sparseExampleIndices a list of vectors which contain example indices. + * @param sparseFeatureIndices a list of vectors which contain feature indices. + * @param sparseFeatureValues a list of vectors which contains feature value + * associated with each feature group. + * @param denseFeatures a list of matrices which contains the dense feature values. + * @param exampleWeights a vector which contains the weight associated with each + * example. + * @param exampleLabels a vector which contains the label/target associated with each + * example. + * @param sparseIndices a list of vectors where each value is the indices which has + * corresponding weights in sparse_weights. This field maybe omitted for the + * dense approach. + * @param sparseWeights a list of vectors where each value is the weight associated with + * a sparse feature group. + * @param denseWeights a list of vectors where the values are the weights associated + * with a dense feature group. + * @param exampleStateData a list of vectors containing the example state data. + * @param lossType Type of the primal loss. Currently SdcaSolver supports logistic, + * squared and hinge losses. + * @param l1 Symmetric l1 regularization strength. + * @param l2 Symmetric l2 regularization strength. + * @param numLossPartitions Number of partitions of the global loss function. + * @param numInnerIterations Number of iterations per mini-batch. 
+ * @param options carries optional attribute values + * @return a new instance of SdcaOptimizer + */ + public SdcaOptimizer sdcaOptimizer(Iterable> sparseExampleIndices, + Iterable> sparseFeatureIndices, + Iterable> sparseFeatureValues, Iterable> denseFeatures, + Operand exampleWeights, Operand exampleLabels, + Iterable> sparseIndices, Iterable> sparseWeights, + Iterable> denseWeights, Operand exampleStateData, String lossType, + Float l1, Float l2, Long numLossPartitions, Long numInnerIterations, + SdcaOptimizer.Options... options) { + return SdcaOptimizer.create(scope, sparseExampleIndices, sparseFeatureIndices, sparseFeatureValues, denseFeatures, exampleWeights, exampleLabels, sparseIndices, sparseWeights, denseWeights, exampleStateData, lossType, l1, l2, numLossPartitions, numInnerIterations, options); + } + /** * Applies L1 regularization shrink step on the parameters. * @@ -1398,7 +1667,6 @@ public SdcaShrinkL1 sdcaShrinkL1(Iterable> weights, Float l1, /** * var: Should be from a Variable(). * - * @param data type for {@code out} output * @param var The var value * @param accum Should be from a Variable(). * @param accumUpdate : Should be from a Variable(). @@ -1417,10 +1685,31 @@ public SparseApplyAdadelta sparseApplyAdadelta(Operand v return SparseApplyAdadelta.create(scope, var, accum, accumUpdate, lr, rho, epsilon, grad, indices, options); } + /** + * Update relevant entries in '*var' and '*accum' according to the adagrad scheme. + * That is for rows we have grad for, we update var and accum as follows: + * $$accum += grad * grad$$ + * $$var -= lr * grad * (1 / sqrt(accum))$$ + * + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Learning rate. Must be a scalar. + * @param epsilon Constant factor. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. 
+ * @param options carries optional attribute values + * @param data type for {@code SparseApplyAdagradV2} output and operands + * @return a new instance of SparseApplyAdagrad + */ + public SparseApplyAdagrad sparseApplyAdagrad(Operand var, + Operand accum, Operand lr, Operand epsilon, Operand grad, + Operand indices, SparseApplyAdagrad.Options... options) { + return SparseApplyAdagrad.create(scope, var, accum, lr, epsilon, grad, indices, options); + } + /** * Update entries in '*var' and '*accum' according to the proximal adagrad scheme. * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param gradientAccumulator Should be from a Variable(). * @param gradientSquaredAccumulator Should be from a Variable(). @@ -1457,7 +1746,6 @@ public SparseApplyAdagradDa sparseApplyAdagradDa(Operand * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ * $$var <- var - mom$$ * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param mg Should be from a Variable(). * @param ms Should be from a Variable(). @@ -1490,7 +1778,6 @@ public SparseApplyCenteredRmsProp sparseApplyCenteredRmsPro * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param linear Should be from a Variable(). @@ -1519,7 +1806,6 @@ public SparseApplyFtrl sparseApplyFtrl(Operand var, Oper *

$$accum = accum * momentum + grad$$ * $$var -= lr * accum$$ * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Learning rate. Must be a scalar. @@ -1544,7 +1830,6 @@ public SparseApplyMomentum sparseApplyMomentum(Operand v * $$prox_v -= lr * grad * (1 / sqrt(accum))$$ * $$var = sign(prox_v)/(1+lrl2) * max{|prox_v|-lrl1,0}$$ * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Learning rate. Must be a scalar. @@ -1568,7 +1853,6 @@ public SparseApplyProximalAdagrad sparseApplyProximalAdagra * $$prox_v = var - alpha * grad$$ * $$var = sign(prox_v)/(1+alphal2) * max{|prox_v|-alphal1,0}$$ * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. * @param l1 L1 regularization. Must be a scalar. @@ -1596,7 +1880,6 @@ public SparseApplyProximalGradientDescent sparseApplyProxim * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ * $$var <- var - mom$$ * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param ms Should be from a Variable(). * @param mom Should be from a Variable(). @@ -1648,7 +1931,6 @@ public SymbolicGradient symbolicGradient(Iterable> input, * along each dimension, {@code train.TileGrad} takes in {@code multiples} and aggregates * each repeated tile of {@code input} into {@code output}. 
* - * @param data type for {@code output} output * @param input The input value * @param multiples The multiples value * @param data type for {@code TileGrad} output and operands diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/XlaOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/XlaOps.java index 1136fb98bf1..22a2ef5ae85 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/XlaOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/XlaOps.java @@ -21,15 +21,33 @@ import org.tensorflow.ConcreteFunction; import org.tensorflow.Operand; import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.xla.AssignVariableConcatND; +import org.tensorflow.op.xla.ConcatND; +import org.tensorflow.op.xla.ReadVariableSplitND; +import org.tensorflow.op.xla.SplitND; import org.tensorflow.op.xla.XlaHostCompute; import org.tensorflow.op.xla.XlaRecvFromHost; import org.tensorflow.op.xla.XlaSendToHost; +import org.tensorflow.op.xla.XlaSparseCoreAdagrad; +import org.tensorflow.op.xla.XlaSparseCoreAdagradMomentum; +import org.tensorflow.op.xla.XlaSparseCoreAdam; +import org.tensorflow.op.xla.XlaSparseCoreFtrl; +import org.tensorflow.op.xla.XlaSparseCoreSgd; +import org.tensorflow.op.xla.XlaSparseDenseMatmul; +import org.tensorflow.op.xla.XlaSparseDenseMatmulGradWithAdagradAndCsrInput; +import org.tensorflow.op.xla.XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput; +import org.tensorflow.op.xla.XlaSparseDenseMatmulGradWithAdamAndCsrInput; +import org.tensorflow.op.xla.XlaSparseDenseMatmulGradWithFtrlAndCsrInput; +import org.tensorflow.op.xla.XlaSparseDenseMatmulGradWithSgdAndCsrInput; +import org.tensorflow.op.xla.XlaSparseDenseMatmulWithCsrInput; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; /** * An API for building {@code xla} operations as {@link Op Op}s * - * @see {@link 
Ops} + * @see Ops */ public final class XlaOps { private final Scope scope; @@ -41,6 +59,187 @@ public final class XlaOps { this.ops = ops; } + /** + * Concats input tensor across all dimensions. + * An op which merges slices the input tensor based on the given num_splits + * attribute, strips paddings optionally, and writes the merged tensor without + * paddings to the resource variable. + *

This op may be generated via the TPU bridge. + *

For example, with {@code input} tensor: + *

+   *  [[0, 1],
+   *   [4, 5]]
+   *  [[2, 3],
+   *   [6, 7]]
+   *  [[8, 9],
+   *   [12, 13]]
+   *  [[10, 11],
+   *   [14, 15]]
+   *  
+ *

{@code num_splits}: + *

+   *  [2, 2]
+   *  
+ *

and {@code paddings}: + *

+   *  [1, 1]
+   *  
+ *

the expected {@code outputs} is: + *

+   *  [[0, 1, 2],
+   *   [4, 5, 6],
+   *   [8, 9, 10]]
+   *  
+ * + * @param resource Resource variable for concatenated input tensors across all dimensions. + * @param inputs Input tensor slices in row-major order to merge across all dimensions. All + * inputs must have the same shape. + * @param numConcats Number of ways to merge per dimension. + * @param options carries optional attribute values + * @return a new instance of AssignVariableConcatND + */ + public AssignVariableConcatND assignVariableConcatND(Operand resource, + Iterable> inputs, List numConcats, + AssignVariableConcatND.Options... options) { + return AssignVariableConcatND.create(scope, resource, inputs, numConcats, options); + } + + /** + * Concats input tensor across all dimensions. + * An op which merges slices the input tensor based on the given num_splits + * attribute, strips paddings optionally, and returns the merged tensor without + * paddings. + *

This op may be generated via the TPU bridge. + *

For example, with {@code input} tensor: + *

+   *  [[0, 1],
+   *   [4, 5]]
+   *  [[2, 3],
+   *   [6, 7]]
+   *  [[8, 9],
+   *   [12, 13]]
+   *  [[10, 11],
+   *   [14, 15]]
+   *  
+ *

{@code num_splits}: + *

+   *  [2, 2]
+   *  
+ *

and {@code paddings}: + *

+   *  [1, 1]
+   *  
+ *

the expected {@code outputs} is: + *

+   *  [[0, 1, 2],
+   *   [4, 5, 6],
+   *   [8, 9, 10]]
+   *  
+ * + * @param inputs Input tensor slices in row-major order to merge across all dimensions. All + * inputs must have the same shape. + * @param numConcats Number of ways to merge per dimension. + * @param options carries optional attribute values + * @param data type for {@code XlaConcatND} output and operands + * @return a new instance of ConcatND + */ + public ConcatND concatND(Iterable> inputs, List numConcats, + ConcatND.Options... options) { + return ConcatND.create(scope, inputs, numConcats, options); + } + + /** + * Splits resource variable input tensor across all dimensions. + * An op which splits the resource variable input tensor based on the given + * num_splits attribute, pads slices optionally, and returned the slices. Slices + * are returned in row-major order. + *

This op may be generated via the TPU bridge. + *

For example, with {@code input} tensor: + *

+   *  [[0, 1, 2],
+   *   [3, 4, 5],
+   *   [6, 7, 8]]
+   *  
+ *

{@code num_splits}: + *

+   *  [2, 2]
+   *  
+ *

and {@code paddings}: + *

+   *  [1, 1]
+   *  
+ *

the expected {@code outputs} is: + *

+   *  [[0, 1],
+   *   [3, 4]]
+   *  [[2, 0],
+   *   [5, 0]]
+   *  [[6, 7],
+   *   [0, 0]]
+   *  [[8, 0],
+   *   [0, 0]]
+   *  
+ * + * @param resource Resource variable of input tensor to split across all dimensions. + * @param T The value of the T attribute + * @param N The value of the N attribute + * @param numSplits Number of ways to split per dimension. Shape dimensions must be evenly + * divisible. + * @param options carries optional attribute values + * @param data type for {@code ReadVariableXlaSplitND} output and operands + * @return a new instance of ReadVariableSplitND + */ + public ReadVariableSplitND readVariableSplitND( + Operand resource, Class T, Long N, List numSplits, + ReadVariableSplitND.Options... options) { + return ReadVariableSplitND.create(scope, resource, T, N, numSplits, options); + } + + /** + * Splits input tensor across all dimensions. + * An op which slices the input tensor based on the given num_splits attribute, + * pads slices optionally, and returned the slices. Slices are returned in + * row-major order. + *

This op may be generated via the TPU bridge. + *

For example, with {@code input} tensor: + *

+   *  [[0, 1, 2],
+   *   [3, 4, 5],
+   *   [6, 7, 8]]
+   *  
+ *

{@code num_splits}: + *

+   *  [2, 2]
+   *  
+ *

and {@code paddings}: + *

+   *  [1, 1]
+   *  
+ *

the expected {@code outputs} is: + *

+   *  [[0, 1],
+   *   [3, 4]]
+   *  [[2, 0],
+   *   [5, 0]]
+   *  [[6, 7],
+   *   [0, 0]]
+   *  [[8, 0],
+   *   [0, 0]]
+   *  
+ * + * @param input Input tensor to split across all dimensions. + * @param N The value of the N attribute + * @param numSplits Number of ways to split per dimension. Shape dimensions must be evenly + * divisible. + * @param options carries optional attribute values + * @param data type for {@code XlaSplitND} output and operands + * @return a new instance of SplitND + */ + public SplitND splitND(Operand input, Long N, List numSplits, + SplitND.Options... options) { + return SplitND.create(scope, input, N, numSplits, options); + } + /** * A pseudo-op to represent host-side computation in an XLA program. * @@ -68,7 +267,6 @@ public XlaHostCompute xlaHostCompute(Iterable> inputs, * shape: shape for output. * key: A unique identifier for this region used to match up host transfers. * - * @param data type for {@code output} output * @param Toutput The value of the Toutput attribute * @param shape The value of the shape attribute * @param key The value of the key attribute @@ -94,6 +292,304 @@ public XlaSendToHost xlaSendToHost(Operand input, String key) { return XlaSendToHost.create(scope, input, key); } + /** + * The XlaSparseCoreAdagrad operation + * + * @param indices The indices value + * @param gradient The gradient value + * @param learningRate The learningRate value + * @param accumulator The accumulator value + * @param embeddingTable The embeddingTable value + * @param featureWidth The value of the featureWidth attribute + * @return a new instance of XlaSparseCoreAdagrad + */ + public XlaSparseCoreAdagrad xlaSparseCoreAdagrad(Operand indices, + Operand gradient, Operand learningRate, Operand accumulator, + Operand embeddingTable, Long featureWidth) { + return XlaSparseCoreAdagrad.create(scope, indices, gradient, learningRate, accumulator, embeddingTable, featureWidth); + } + + /** + * The XlaSparseCoreAdagradMomentum operation + * + * @param indices The indices value + * @param gradient The gradient value + * @param learningRate The learningRate value + * 
@param beta1 The beta1 value + * @param epsilon The epsilon value + * @param accumulator The accumulator value + * @param momentum The momentum value + * @param embeddingTable The embeddingTable value + * @param featureWidth The value of the featureWidth attribute + * @param useNesterov The value of the useNesterov attribute + * @param beta2 The value of the beta2 attribute + * @param exponent The value of the exponent attribute + * @return a new instance of XlaSparseCoreAdagradMomentum + */ + public XlaSparseCoreAdagradMomentum xlaSparseCoreAdagradMomentum(Operand indices, + Operand gradient, Operand learningRate, Operand beta1, + Operand epsilon, Operand accumulator, Operand momentum, + Operand embeddingTable, Long featureWidth, Boolean useNesterov, Float beta2, + Float exponent) { + return XlaSparseCoreAdagradMomentum.create(scope, indices, gradient, learningRate, beta1, epsilon, accumulator, momentum, embeddingTable, featureWidth, useNesterov, beta2, exponent); + } + + /** + * The XlaSparseCoreAdam operation + * + * @param embeddingTable The embeddingTable value + * @param indices The indices value + * @param gradient The gradient value + * @param learningRate The learningRate value + * @param momentum The momentum value + * @param velocity The velocity value + * @param beta1 The beta1 value + * @param beta2 The beta2 value + * @param epsilon The epsilon value + * @param featureWidth The value of the featureWidth attribute + * @param useSumInsideSqrt The value of the useSumInsideSqrt attribute + * @return a new instance of XlaSparseCoreAdam + */ + public XlaSparseCoreAdam xlaSparseCoreAdam(Operand embeddingTable, + Operand indices, Operand gradient, Operand learningRate, + Operand momentum, Operand velocity, Operand beta1, + Operand beta2, Operand epsilon, Long featureWidth, + Boolean useSumInsideSqrt) { + return XlaSparseCoreAdam.create(scope, embeddingTable, indices, gradient, learningRate, momentum, velocity, beta1, beta2, epsilon, featureWidth, 
useSumInsideSqrt); + } + + /** + * The XlaSparseCoreFtrl operation + * + * @param embeddingTable The embeddingTable value + * @param accumulator The accumulator value + * @param linear The linear value + * @param learningRate The learningRate value + * @param indices The indices value + * @param gradient The gradient value + * @param beta The beta value + * @param learningRatePower The learningRatePower value + * @param l2RegularizationStrength The l2RegularizationStrength value + * @param featureWidth The value of the featureWidth attribute + * @param multiplyLinearByLearningRate The value of the multiplyLinearByLearningRate attribute + * @param l1RegularizationStrength The value of the l1RegularizationStrength attribute + * @return a new instance of XlaSparseCoreFtrl + */ + public XlaSparseCoreFtrl xlaSparseCoreFtrl(Operand embeddingTable, + Operand accumulator, Operand linear, Operand learningRate, + Operand indices, Operand gradient, Operand beta, + Operand learningRatePower, Operand l2RegularizationStrength, + Long featureWidth, Boolean multiplyLinearByLearningRate, Float l1RegularizationStrength) { + return XlaSparseCoreFtrl.create(scope, embeddingTable, accumulator, linear, learningRate, indices, gradient, beta, learningRatePower, l2RegularizationStrength, featureWidth, multiplyLinearByLearningRate, l1RegularizationStrength); + } + + /** + * The XlaSparseCoreSgd operation + * + * @param indices The indices value + * @param gradient The gradient value + * @param learningRate The learningRate value + * @param embeddingTable The embeddingTable value + * @param featureWidth The value of the featureWidth attribute + * @return a new instance of XlaSparseCoreSgd + */ + public XlaSparseCoreSgd xlaSparseCoreSgd(Operand indices, Operand gradient, + Operand learningRate, Operand embeddingTable, Long featureWidth) { + return XlaSparseCoreSgd.create(scope, indices, gradient, learningRate, embeddingTable, featureWidth); + } + + /** + * The XlaSparseDenseMatmul operation + 
* + * @param rowIds The rowIds value + * @param colIds The colIds value + * @param values The values value + * @param offsets The offsets value + * @param embeddingTable The embeddingTable value + * @param maxIdsPerPartition The value of the maxIdsPerPartition attribute + * @param maxUniqueIdsPerPartition The value of the maxUniqueIdsPerPartition attribute + * @param inputSize The value of the inputSize attribute + * @return a new instance of XlaSparseDenseMatmul + */ + public XlaSparseDenseMatmul xlaSparseDenseMatmul(Operand rowIds, + Operand colIds, Operand values, Operand offsets, + Operand embeddingTable, Long maxIdsPerPartition, Long maxUniqueIdsPerPartition, + Long inputSize) { + return XlaSparseDenseMatmul.create(scope, rowIds, colIds, values, offsets, embeddingTable, maxIdsPerPartition, maxUniqueIdsPerPartition, inputSize); + } + + /** + * The XlaSparseDenseMatmulGradWithAdagradAndCsrInput operation + * + * @param rowPointers The rowPointers value + * @param sortedSampleIds The sortedSampleIds value + * @param sortedTokenIds The sortedTokenIds value + * @param sortedGains The sortedGains value + * @param activationGradients The activationGradients value + * @param learningRate The learningRate value + * @param embeddingTable The embeddingTable value + * @param accumulator The accumulator value + * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value + * @param tableName The value of the tableName attribute + * @param options carries optional attribute values + * @return a new instance of XlaSparseDenseMatmulGradWithAdagradAndCsrInput + */ + public XlaSparseDenseMatmulGradWithAdagradAndCsrInput xlaSparseDenseMatmulGradWithAdagradAndCsrInput( + Operand rowPointers, Operand sortedSampleIds, Operand sortedTokenIds, + Operand sortedGains, Operand activationGradients, + Operand learningRate, Operand embeddingTable, + Operand accumulator, Operand numMinibatchesPerPhysicalSparseCore, + String tableName, 
XlaSparseDenseMatmulGradWithAdagradAndCsrInput.Options... options) { + return XlaSparseDenseMatmulGradWithAdagradAndCsrInput.create(scope, rowPointers, sortedSampleIds, sortedTokenIds, sortedGains, activationGradients, learningRate, embeddingTable, accumulator, numMinibatchesPerPhysicalSparseCore, tableName, options); + } + + /** + * The XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput operation + * + * @param rowPointers The rowPointers value + * @param sortedSampleIds The sortedSampleIds value + * @param sortedTokenIds The sortedTokenIds value + * @param sortedGains The sortedGains value + * @param activationGradients The activationGradients value + * @param learningRate The learningRate value + * @param embeddingTable The embeddingTable value + * @param accumulator The accumulator value + * @param momenta The momenta value + * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value + * @param useNesterov The value of the useNesterov attribute + * @param exponent The value of the exponent attribute + * @param beta1 The value of the beta1 attribute + * @param beta2 The value of the beta2 attribute + * @param epsilon The value of the epsilon attribute + * @param tableName The value of the tableName attribute + * @param options carries optional attribute values + * @return a new instance of XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput + */ + public XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput xlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput( + Operand rowPointers, Operand sortedSampleIds, Operand sortedTokenIds, + Operand sortedGains, Operand activationGradients, + Operand learningRate, Operand embeddingTable, + Operand accumulator, Operand momenta, + Operand numMinibatchesPerPhysicalSparseCore, Boolean useNesterov, Float exponent, + Float beta1, Float beta2, Float epsilon, String tableName, + XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput.Options... 
options) { + return XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput.create(scope, rowPointers, sortedSampleIds, sortedTokenIds, sortedGains, activationGradients, learningRate, embeddingTable, accumulator, momenta, numMinibatchesPerPhysicalSparseCore, useNesterov, exponent, beta1, beta2, epsilon, tableName, options); + } + + /** + * The XlaSparseDenseMatmulGradWithAdamAndCsrInput operation + * + * @param rowPointers The rowPointers value + * @param sortedSampleIds The sortedSampleIds value + * @param sortedTokenIds The sortedTokenIds value + * @param sortedGains The sortedGains value + * @param activationGradients The activationGradients value + * @param learningRate The learningRate value + * @param embeddingTable The embeddingTable value + * @param momenta The momenta value + * @param velocity The velocity value + * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value + * @param useSumInsideSqrt The value of the useSumInsideSqrt attribute + * @param beta1 The value of the beta1 attribute + * @param beta2 The value of the beta2 attribute + * @param epsilon The value of the epsilon attribute + * @param tableName The value of the tableName attribute + * @param options carries optional attribute values + * @return a new instance of XlaSparseDenseMatmulGradWithAdamAndCsrInput + */ + public XlaSparseDenseMatmulGradWithAdamAndCsrInput xlaSparseDenseMatmulGradWithAdamAndCsrInput( + Operand rowPointers, Operand sortedSampleIds, Operand sortedTokenIds, + Operand sortedGains, Operand activationGradients, + Operand learningRate, Operand embeddingTable, Operand momenta, + Operand velocity, Operand numMinibatchesPerPhysicalSparseCore, + Boolean useSumInsideSqrt, Float beta1, Float beta2, Float epsilon, String tableName, + XlaSparseDenseMatmulGradWithAdamAndCsrInput.Options... 
options) { + return XlaSparseDenseMatmulGradWithAdamAndCsrInput.create(scope, rowPointers, sortedSampleIds, sortedTokenIds, sortedGains, activationGradients, learningRate, embeddingTable, momenta, velocity, numMinibatchesPerPhysicalSparseCore, useSumInsideSqrt, beta1, beta2, epsilon, tableName, options); + } + + /** + * The XlaSparseDenseMatmulGradWithFtrlAndCsrInput operation + * + * @param rowPointers The rowPointers value + * @param sortedSampleIds The sortedSampleIds value + * @param sortedTokenIds The sortedTokenIds value + * @param sortedGains The sortedGains value + * @param activationGradients The activationGradients value + * @param learningRate The learningRate value + * @param embeddingTable The embeddingTable value + * @param accumulator The accumulator value + * @param linear The linear value + * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value + * @param multiplyLinearByLearningRate The value of the multiplyLinearByLearningRate attribute + * @param beta The value of the beta attribute + * @param learningRatePower The value of the learningRatePower attribute + * @param l1RegularizationStrength The value of the l1RegularizationStrength attribute + * @param l2RegularizationStrength The value of the l2RegularizationStrength attribute + * @param tableName The value of the tableName attribute + * @param options carries optional attribute values + * @return a new instance of XlaSparseDenseMatmulGradWithFtrlAndCsrInput + */ + public XlaSparseDenseMatmulGradWithFtrlAndCsrInput xlaSparseDenseMatmulGradWithFtrlAndCsrInput( + Operand rowPointers, Operand sortedSampleIds, Operand sortedTokenIds, + Operand sortedGains, Operand activationGradients, + Operand learningRate, Operand embeddingTable, + Operand accumulator, Operand linear, + Operand numMinibatchesPerPhysicalSparseCore, Boolean multiplyLinearByLearningRate, + Float beta, Float learningRatePower, Float l1RegularizationStrength, + Float l2RegularizationStrength, String 
tableName, + XlaSparseDenseMatmulGradWithFtrlAndCsrInput.Options... options) { + return XlaSparseDenseMatmulGradWithFtrlAndCsrInput.create(scope, rowPointers, sortedSampleIds, sortedTokenIds, sortedGains, activationGradients, learningRate, embeddingTable, accumulator, linear, numMinibatchesPerPhysicalSparseCore, multiplyLinearByLearningRate, beta, learningRatePower, l1RegularizationStrength, l2RegularizationStrength, tableName, options); + } + + /** + * The XlaSparseDenseMatmulGradWithSgdAndCsrInput operation + * + * @param rowPointers The rowPointers value + * @param sortedSampleIds The sortedSampleIds value + * @param sortedTokenIds The sortedTokenIds value + * @param sortedGains The sortedGains value + * @param activationGradients The activationGradients value + * @param learningRate The learningRate value + * @param embeddingTable The embeddingTable value + * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value + * @param tableName The value of the tableName attribute + * @param options carries optional attribute values + * @return a new instance of XlaSparseDenseMatmulGradWithSgdAndCsrInput + */ + public XlaSparseDenseMatmulGradWithSgdAndCsrInput xlaSparseDenseMatmulGradWithSgdAndCsrInput( + Operand rowPointers, Operand sortedSampleIds, Operand sortedTokenIds, + Operand sortedGains, Operand activationGradients, + Operand learningRate, Operand embeddingTable, + Operand numMinibatchesPerPhysicalSparseCore, String tableName, + XlaSparseDenseMatmulGradWithSgdAndCsrInput.Options... 
options) { + return XlaSparseDenseMatmulGradWithSgdAndCsrInput.create(scope, rowPointers, sortedSampleIds, sortedTokenIds, sortedGains, activationGradients, learningRate, embeddingTable, numMinibatchesPerPhysicalSparseCore, tableName, options); + } + + /** + * The XlaSparseDenseMatmulWithCsrInput operation + * + * @param rowPointers The rowPointers value + * @param sortedSampleIds The sortedSampleIds value + * @param sortedTokenIds The sortedTokenIds value + * @param sortedGains The sortedGains value + * @param embeddingTable The embeddingTable value + * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value + * @param inputSize The value of the inputSize attribute + * @param quantizationConfigLow The value of the quantizationConfigLow attribute + * @param quantizationConfigHigh The value of the quantizationConfigHigh attribute + * @param quantizationConfigNumBuckets The value of the quantizationConfigNumBuckets attribute + * @param tableName The value of the tableName attribute + * @return a new instance of XlaSparseDenseMatmulWithCsrInput + */ + public XlaSparseDenseMatmulWithCsrInput xlaSparseDenseMatmulWithCsrInput( + Operand rowPointers, Operand sortedSampleIds, Operand sortedTokenIds, + Operand sortedGains, Operand embeddingTable, + Operand numMinibatchesPerPhysicalSparseCore, Long inputSize, + Float quantizationConfigLow, Float quantizationConfigHigh, Long quantizationConfigNumBuckets, + String tableName) { + return XlaSparseDenseMatmulWithCsrInput.create(scope, rowPointers, sortedSampleIds, sortedTokenIds, sortedGains, embeddingTable, numMinibatchesPerPhysicalSparseCore, inputSize, quantizationConfigLow, quantizationConfigHigh, quantizationConfigNumBuckets, tableName); + } + /** * Get the parent {@link Ops} object. 
*/ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseAnd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseAnd.java index 34789dce80c..7fea36a03b3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseAnd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseAnd.java @@ -52,8 +52,6 @@ * res = bitwise_ops.bitwise_and(lhs, rhs) * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE * - * - * @param data type for {@code z} output */ @OpMetadata( opType = BitwiseAnd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseOr.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseOr.java index afa384f6e38..1e57451698b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseOr.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseOr.java @@ -52,8 +52,6 @@ * res = bitwise_ops.bitwise_or(lhs, rhs) * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE * - * - * @param data type for {@code z} output */ @OpMetadata( opType = BitwiseOr.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseXor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseXor.java index dc26dc145aa..52953422482 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseXor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseXor.java @@ -52,8 +52,6 @@ * res = bitwise_ops.bitwise_xor(lhs, rhs) * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE * - * - * @param data type for {@code z} output */ @OpMetadata( opType = BitwiseXor.OP_NAME, diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/Invert.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/Invert.java index a2d9a985bae..8dcb5a72de7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/Invert.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/Invert.java @@ -73,8 +73,6 @@ * expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32) * tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32)) * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Invert.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/LeftShift.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/LeftShift.java index 5874dc12979..ccf41c473f8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/LeftShift.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/LeftShift.java @@ -63,8 +63,6 @@ * bitwise_ops.left_shift(lhs, rhs) * # <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2, 64, 101, 32], dtype=int8)> * - * - * @param data type for {@code z} output */ @OpMetadata( opType = LeftShift.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/RightShift.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/RightShift.java index 22c95c81136..6c1407b9d19 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/RightShift.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/RightShift.java @@ -65,8 +65,6 @@ * bitwise_ops.right_shift(lhs, rhs) * # <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2, 64, 101, 32], dtype=int8)> * - * - * @param data type for {@code z} output */ @OpMetadata( opType = RightShift.OP_NAME, diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/cluster/KMC2ChainInitialization.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/cluster/KMC2ChainInitialization.java index 9cef8e6f2fd..fb5a6c31581 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/cluster/KMC2ChainInitialization.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/cluster/KMC2ChainInitialization.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; @@ -43,6 +44,9 @@ opType = KMC2ChainInitialization.OP_NAME, inputsClass = KMC2ChainInitialization.Inputs.class ) +@Operator( + group = "cluster" +) public final class KMC2ChainInitialization extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/cluster/KmeansPlusPlusInitialization.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/cluster/KmeansPlusPlusInitialization.java index 0004fce5305..820fdabb7b9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/cluster/KmeansPlusPlusInitialization.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/cluster/KmeansPlusPlusInitialization.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; @@ -43,6 +44,9 @@ opType = KmeansPlusPlusInitialization.OP_NAME, inputsClass = KmeansPlusPlusInitialization.Inputs.class ) +@Operator( + group = "cluster" +) public 
final class KmeansPlusPlusInitialization extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveAllToAll.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveAllToAll.java index ab3d80e1064..9c513486b9b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveAllToAll.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveAllToAll.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -36,13 +37,14 @@ /** * Mutually exchanges multiple tensors of identical type and shape. 
- * - * @param data type for {@code data} output */ @OpMetadata( opType = CollectiveAllToAll.OP_NAME, inputsClass = CollectiveAllToAll.Inputs.class ) +@Operator( + group = "collective" +) public final class CollectiveAllToAll extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveAssignGroup.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveAssignGroup.java index 3827bb1a158..598986e3da3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveAssignGroup.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveAssignGroup.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; /** @@ -38,6 +39,9 @@ opType = CollectiveAssignGroup.OP_NAME, inputsClass = CollectiveAssignGroup.Inputs.class ) +@Operator( + group = "collective" +) public final class CollectiveAssignGroup extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveBcastRecv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveBcastRecv.java index 566b48c0c27..a66995e4d4e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveBcastRecv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveBcastRecv.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import 
org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -37,13 +38,14 @@ /** * Receives a tensor value broadcast from another device. - * - * @param data type for {@code data} output */ @OpMetadata( opType = CollectiveBcastRecv.OP_NAME, inputsClass = CollectiveBcastRecv.Inputs.class ) +@Operator( + group = "collective" +) public final class CollectiveBcastRecv extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveBcastSend.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveBcastSend.java index 4b093258948..df7a315413f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveBcastSend.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveBcastSend.java @@ -29,19 +29,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; /** * Broadcasts a tensor value to one or more other devices. 
- * - * @param data type for {@code data} output */ @OpMetadata( opType = CollectiveBcastSend.OP_NAME, inputsClass = CollectiveBcastSend.Inputs.class ) +@Operator( + group = "collective" +) public final class CollectiveBcastSend extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveGather.java index 2b6dc692673..57a2b134ff6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveGather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveGather.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -40,13 +41,14 @@ * {@code is_stateless} means each op does not need control dependencies to other * collective ops. In this case, keys that are unique at runtime * (e.g. {@code instance_key}) should be used to distinguish collective groups. 
- * - * @param data type for {@code data} output */ @OpMetadata( opType = CollectiveGather.OP_NAME, inputsClass = CollectiveGather.Inputs.class ) +@Operator( + group = "collective" +) public final class CollectiveGather extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveInitializeCommunicator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveInitializeCommunicator.java index 696b2eb5e23..e6ea4b8b79c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveInitializeCommunicator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveInitializeCommunicator.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -39,6 +40,9 @@ opType = CollectiveInitializeCommunicator.OP_NAME, inputsClass = CollectiveInitializeCommunicator.Inputs.class ) +@Operator( + group = "collective" +) public final class CollectiveInitializeCommunicator extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectivePermute.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectivePermute.java index 009f589f181..380a949a664 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectivePermute.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectivePermute.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import 
org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -39,13 +40,14 @@ *

For example, suppose there are 4 TPU instances: {@code [A, B, C, D]}. Passing * source_target_pairs={@code [[0,1],[1,2],[2,3],[3,0]]} gets the outputs: * {@code [D, A, B, C]}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = CollectivePermute.OP_NAME, inputsClass = CollectivePermute.Inputs.class ) +@Operator( + group = "collective" +) public final class CollectivePermute extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveReduce.java index 7f10a3ac4cd..8f6c26778e1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveReduce.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -36,13 +37,14 @@ /** * Mutually reduces multiple tensors of identical type and shape. 
- * - * @param data type for {@code data} output */ @OpMetadata( opType = CollectiveReduce.OP_NAME, inputsClass = CollectiveReduce.Inputs.class ) +@Operator( + group = "collective" +) public final class CollectiveReduce extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveReduceScatter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveReduceScatter.java index eaae924f2df..8b89dbaf183 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveReduceScatter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveReduceScatter.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -40,13 +41,14 @@ * {@code is_stateless} means each op does not need control dependencies to other * collective ops. In this case, keys that are unique at runtime * (e.g. {@code instance_key}) should be used to distinguish collective groups. 
- * - * @param data type for {@code data} output */ @OpMetadata( opType = CollectiveReduceScatter.OP_NAME, inputsClass = CollectiveReduceScatter.Inputs.class ) +@Operator( + group = "collective" +) public final class CollectiveReduceScatter extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousHashTable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousHashTable.java index e33aaca7845..0dcfd8003dc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousHashTable.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousHashTable.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -48,6 +49,7 @@ opType = AnonymousHashTable.OP_NAME, inputsClass = AnonymousHashTable.Inputs.class ) +@Operator public final class AnonymousHashTable extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousMutableDenseHashTable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousMutableDenseHashTable.java index d5902c86c9d..9e3baa34b1f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousMutableDenseHashTable.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousMutableDenseHashTable.java @@ -31,6 +31,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import 
org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -52,6 +53,7 @@ opType = AnonymousMutableDenseHashTable.OP_NAME, inputsClass = AnonymousMutableDenseHashTable.Inputs.class ) +@Operator public final class AnonymousMutableDenseHashTable extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousMutableHashTable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousMutableHashTable.java index a55af101afa..a010e6bc71c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousMutableHashTable.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousMutableHashTable.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -49,6 +50,7 @@ opType = AnonymousMutableHashTable.OP_NAME, inputsClass = AnonymousMutableHashTable.Inputs.class ) +@Operator public final class AnonymousMutableHashTable extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousMutableHashTableOfTensors.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousMutableHashTableOfTensors.java index 00c5eea12bf..263438f8099 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousMutableHashTableOfTensors.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousMutableHashTableOfTensors.java @@ -31,6 +31,7 @@ import 
org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -50,6 +51,7 @@ opType = AnonymousMutableHashTableOfTensors.OP_NAME, inputsClass = AnonymousMutableHashTableOfTensors.Inputs.class ) +@Operator public final class AnonymousMutableHashTableOfTensors extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ApproxTopK.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ApproxTopK.java index ad97098d481..48f4f94315b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ApproxTopK.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ApproxTopK.java @@ -38,8 +38,6 @@ * Returns min/max k values and their indices of the input operand in an approximate manner. * See https://arxiv.org/abs/2206.14286 for the algorithm details. * This op is only optimized on TPU currently. - * - * @param data type for {@code values} output */ @OpMetadata( opType = ApproxTopK.OP_NAME, @@ -291,17 +289,17 @@ public static class Inputs extends RawOpInputs> /** * When set to a positive value, it overrides the size determined by - * `input[reduction_dim]` for evaluating the recall. This option is useful when - * the given `input` is only a subset of the overall computation in SPMD or + * {@code input[reduction_dim]} for evaluating the recall. This option is useful when + * the given {@code input} is only a subset of the overall computation in SPMD or * distributed pipelines, where the true input size cannot be deferred by the - * `input` shape. + * {@code input} shape. */ public final long reductionInputSizeOverride; /** * When true, aggregates approximate results to top-k. 
When false, returns the * approximate results. The number of the approximate results is implementation - * defined and is greater equals to the specified `k`. + * defined and is greater equals to the specified {@code k}. */ public final boolean aggregateToTopk; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Assign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Assign.java index a8001c6103a..e49f3eafacc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Assign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Assign.java @@ -37,8 +37,6 @@ * Update 'ref' by assigning 'value' to it. * This operation outputs "ref" after the assignment is done. * This makes it easier to chain operations that need to use the reset value. - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = Assign.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignAdd.java index 2b6f78046ca..848231d569a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignAdd.java @@ -37,8 +37,6 @@ * Update 'ref' by adding 'value' to it. * This operation outputs "ref" after the update is done. * This makes it easier to chain operations that need to use the reset value. 
- * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = AssignAdd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignSub.java index 162fc069e92..cc96d634945 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignSub.java @@ -37,8 +37,6 @@ * Update 'ref' by subtracting 'value' from it. * This operation outputs "ref" after the update is done. * This makes it easier to chain operations that need to use the reset value. - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = AssignSub.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchFunction.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchFunction.java index 6399060fb08..577f213f47d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchFunction.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchFunction.java @@ -163,6 +163,12 @@ public static BatchFunction create(Scope scope, Iterable> inTensors, if (opts.lowPriorityMaxEnqueuedBatches != null) { opBuilder.setAttr("low_priority_max_enqueued_batches", opts.lowPriorityMaxEnqueuedBatches); } + if (opts.mixedPriorityPolicy != null) { + opBuilder.setAttr("mixed_priority_policy", opts.mixedPriorityPolicy); + } + if (opts.batchPaddingPolicy != null) { + opBuilder.setAttr("batch_padding_policy", opts.batchPaddingPolicy); + } if (opts.enableLargeBatchSplitting != null) { opBuilder.setAttr("enable_large_batch_splitting", opts.enableLargeBatchSplitting); } @@ -291,6 +297,26 @@ public static Options lowPriorityMaxEnqueuedBatches(Long lowPriorityMaxEnqueuedB return new 
Options().lowPriorityMaxEnqueuedBatches(lowPriorityMaxEnqueuedBatches); } + /** + * Sets the mixedPriorityPolicy option. + * + * @param mixedPriorityPolicy the mixedPriorityPolicy option + * @return this Options instance. + */ + public static Options mixedPriorityPolicy(String mixedPriorityPolicy) { + return new Options().mixedPriorityPolicy(mixedPriorityPolicy); + } + + /** + * Sets the batchPaddingPolicy option. + * + * @param batchPaddingPolicy the batchPaddingPolicy option + * @return this Options instance. + */ + public static Options batchPaddingPolicy(String batchPaddingPolicy) { + return new Options().batchPaddingPolicy(batchPaddingPolicy); + } + /** * Sets the enableLargeBatchSplitting option. * @@ -339,6 +365,10 @@ public static class Options { private Long lowPriorityMaxEnqueuedBatches; + private String mixedPriorityPolicy; + + private String batchPaddingPolicy; + private Boolean enableLargeBatchSplitting; private Options() { @@ -475,6 +505,28 @@ public Options lowPriorityMaxEnqueuedBatches(Long lowPriorityMaxEnqueuedBatches) return this; } + /** + * Sets the mixedPriorityPolicy option. + * + * @param mixedPriorityPolicy the mixedPriorityPolicy option + * @return this Options instance. + */ + public Options mixedPriorityPolicy(String mixedPriorityPolicy) { + this.mixedPriorityPolicy = mixedPriorityPolicy; + return this; + } + + /** + * Sets the batchPaddingPolicy option. + * + * @param batchPaddingPolicy the batchPaddingPolicy option + * @return this Options instance. + */ + public Options batchPaddingPolicy(String batchPaddingPolicy) { + this.batchPaddingPolicy = batchPaddingPolicy; + return this; + } + /** * Sets the enableLargeBatchSplitting option. 
* @@ -571,6 +623,16 @@ public static class Inputs extends RawOpInputs { */ public final long lowPriorityMaxEnqueuedBatches; + /** + * The mixedPriorityPolicy attribute + */ + public final String mixedPriorityPolicy; + + /** + * The batchPaddingPolicy attribute + */ + public final String batchPaddingPolicy; + /** * the types of tensors to be batched. */ @@ -588,12 +650,12 @@ public static class Inputs extends RawOpInputs { /** * input with a large size (i.e., larger than the largest value of - * `allowed_batch_sizes`) will be splitted into multiple batches with batch size. + * {@code allowed_batch_sizes}) will be splitted into multiple batches with batch size. */ public final boolean enableLargeBatchSplitting; public Inputs(GraphOperation op) { - super(new BatchFunction(op), op, Arrays.asList("num_batch_threads", "max_batch_size", "batch_timeout_micros", "max_enqueued_batches", "allowed_batch_sizes", "container", "shared_name", "batching_queue", "low_priority_max_batch_size", "low_priority_batch_timeout_micros", "low_priority_allowed_batch_sizes", "low_priority_max_enqueued_batches", "Tin", "Tcaptured", "Tout", "enable_large_batch_splitting")); + super(new BatchFunction(op), op, Arrays.asList("num_batch_threads", "max_batch_size", "batch_timeout_micros", "max_enqueued_batches", "allowed_batch_sizes", "container", "shared_name", "batching_queue", "low_priority_max_batch_size", "low_priority_batch_timeout_micros", "low_priority_allowed_batch_sizes", "low_priority_max_enqueued_batches", "mixed_priority_policy", "batch_padding_policy", "Tin", "Tcaptured", "Tout", "enable_large_batch_splitting")); int inputIndex = 0; int inTensorsLength = op.inputListLength("in_tensors"); inTensors = Arrays.asList((Operand[]) op.inputList(inputIndex, inTensorsLength)); @@ -613,6 +675,8 @@ public Inputs(GraphOperation op) { lowPriorityBatchTimeoutMicros = op.attributes().getAttrInt("low_priority_batch_timeout_micros"); lowPriorityAllowedBatchSizes = 
op.attributes().getAttrIntList("low_priority_allowed_batch_sizes"); lowPriorityMaxEnqueuedBatches = op.attributes().getAttrInt("low_priority_max_enqueued_batches"); + mixedPriorityPolicy = op.attributes().getAttrString("mixed_priority_policy"); + batchPaddingPolicy = op.attributes().getAttrString("batch_padding_policy"); Tin = op.attributes().getAttrTypeList("Tin"); Tcaptured = op.attributes().getAttrTypeList("Tcaptured"); Tout = op.attributes().getAttrTypeList("Tout"); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpace.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpace.java index 889bd521e0d..09fa1d49bcb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpace.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpace.java @@ -42,8 +42,6 @@ * this op outputs a copy of the input tensor where values from the {@code batch} * dimension are moved in spatial blocks to the {@code height} and {@code width} dimensions, * followed by cropping along the {@code height} and {@code width} dimensions. - * - * @param data type for {@code output} output */ @OpMetadata( opType = BatchToSpace.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpaceNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpaceNd.java index c7cf592d517..65a98188342 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpaceNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpaceNd.java @@ -42,8 +42,6 @@ * the input. The spatial dimensions of this intermediate result are then * optionally cropped according to {@code crops} to produce the output. This is the * reverse of SpaceToBatch. See below for a precise description. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = BatchToSpaceNd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Bitcast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Bitcast.java index c1bd2421b15..82a2a99d295 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Bitcast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Bitcast.java @@ -96,8 +96,6 @@ * endian orderings will give different results. A copy from input buffer to output * buffer is made on BE machines when types are of different sizes in order to get * the same casting results as on LE machines. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Bitcast.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastDynamicShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastDynamicShape.java index 96cfa009842..165e7e12b9a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastDynamicShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastDynamicShape.java @@ -37,8 +37,6 @@ * Return the shape of s0 op s1 with broadcast. * Given {@code s0} and {@code s1}, tensors that represent shapes, compute {@code r0}, the * broadcasted shape. {@code s0}, {@code s1} and {@code r0} are all integer vectors. 
- * - * @param data type for {@code r0} output */ @OpMetadata( opType = BroadcastDynamicShape.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastGradientArgs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastGradientArgs.java index 68283116a9e..f29d66c8de6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastGradientArgs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastGradientArgs.java @@ -29,19 +29,19 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * Return the reduction indices for computing gradients of s0 op s1 with broadcast. * This is typically used by gradient computations for a broadcasting operation. - * - * @param data type for {@code r0} output */ @OpMetadata( opType = BroadcastGradientArgs.OP_NAME, inputsClass = BroadcastGradientArgs.Inputs.class ) +@Operator public final class BroadcastGradientArgs extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastTo.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastTo.java index d9ada9ae323..f27247cd37a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastTo.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastTo.java @@ -72,8 +72,6 @@ * The newly-created tensor takes the full memory of the broadcasted * shape. (In a graph context, {@code broadcast_to} might be fused to * subsequent operation and then be optimized away, however.) 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = BroadcastTo.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReverse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CheckPinned.java similarity index 60% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReverse.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CheckPinned.java index b5045c2673d..2708bcad2bf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReverse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CheckPinned.java @@ -15,7 +15,7 @@ // This class has been generated, DO NOT EDIT! -package org.tensorflow.op.risc; +package org.tensorflow.op.core; import java.util.Arrays; import org.tensorflow.GraphOperation; @@ -29,50 +29,52 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; +import org.tensorflow.types.family.TType; /** - * The RiscReverse operation - * - * @param data type for {@code output} output + * Checks whether a tensor is located in host memory pinned for GPU. + * When run: + *

    + *
  • Reports an {@code InvalidArgument} error if {@code tensor} is not in pinned memory.
  • + *
  • Reports a {@code FailedPrecondition} error if not built with CUDA.
  • + *
*/ @OpMetadata( - opType = RiscReverse.OP_NAME, - inputsClass = RiscReverse.Inputs.class + opType = CheckPinned.OP_NAME, + inputsClass = CheckPinned.Inputs.class ) -public final class RiscReverse extends RawOp implements Operand { +@Operator +public final class CheckPinned extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "RiscReverse"; + public static final String OP_NAME = "CheckPinned"; private Output output; - public RiscReverse(Operation operation) { + public CheckPinned(Operation operation) { super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } /** - * Factory method to create a class wrapping a new RiscReverse operation. + * Factory method to create a class wrapping a new CheckPinned operation. * * @param scope current scope * @param tensor The tensor value - * @param axis The axis value - * @param data type for {@code RiscReverse} output and operands - * @return a new instance of RiscReverse + * @param data type for {@code CheckPinned} output and operands + * @return a new instance of CheckPinned */ @Endpoint( describeByClass = true ) - public static RiscReverse create(Scope scope, Operand tensor, - Operand axis) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscReverse"); + public static CheckPinned create(Scope scope, Operand tensor) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "CheckPinned"); opBuilder.addInput(tensor.asOutput()); - opBuilder.addInput(axis.asOutput()); - return new RiscReverse<>(opBuilder.build()); + return new CheckPinned<>(opBuilder.build()); } /** @@ -90,35 +92,23 @@ public Output asOutput() { } @OpInputsMetadata( - outputsClass = RiscReverse.class + outputsClass = CheckPinned.class ) - public static class Inputs extends RawOpInputs> { + public static class Inputs extends RawOpInputs> { /** * The tensor input */ public final Operand tensor; - /** - * The axis input - */ - public final 
Operand axis; - - /** - * The Tidx attribute - */ - public final DataType Tidx; - /** * The T attribute */ public final DataType T; public Inputs(GraphOperation op) { - super(new RiscReverse<>(op), op, Arrays.asList("Tidx", "T")); + super(new CheckPinned<>(op), op, Arrays.asList("T")); int inputIndex = 0; tensor = (Operand) op.input(inputIndex++); - axis = (Operand) op.input(inputIndex++); - Tidx = op.attributes().getAttrType("Tidx"); T = op.attributes().getAttrType("T"); } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ClipByValue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ClipByValue.java index 4477b0d4924..2ae7185a7e5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ClipByValue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ClipByValue.java @@ -39,8 +39,6 @@ * shape as {@code t} with its values clipped to {@code clip_value_min} and {@code clip_value_max}. * Any values less than {@code clip_value_min} are set to {@code clip_value_min}. Any values * greater than {@code clip_value_max} are set to {@code clip_value_max}. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = ClipByValue.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CompositeTensorVariantFromComponents.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CompositeTensorVariantFromComponents.java index 0c98059a436..bc5322574d7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CompositeTensorVariantFromComponents.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CompositeTensorVariantFromComponents.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +43,7 @@ opType = CompositeTensorVariantFromComponents.OP_NAME, inputsClass = CompositeTensorVariantFromComponents.Inputs.class ) +@Operator public final class CompositeTensorVariantFromComponents extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CompositeTensorVariantToComponents.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CompositeTensorVariantToComponents.java index 11d268dfdd2..e8288c59607 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CompositeTensorVariantToComponents.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CompositeTensorVariantToComponents.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import 
org.tensorflow.types.family.TType; @@ -45,6 +46,7 @@ opType = CompositeTensorVariantToComponents.OP_NAME, inputsClass = CompositeTensorVariantToComponents.Inputs.class ) +@Operator public final class CompositeTensorVariantToComponents extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine @@ -111,7 +113,7 @@ public static class Inputs extends RawOpInputs data type for {@code output} output */ @OpMetadata( opType = Concat.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ConcatOffset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ConcatOffset.java new file mode 100644 index 00000000000..9b9a8d813c3 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ConcatOffset.java @@ -0,0 +1,144 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.core; + +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.proto.DataType; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.family.TNumber; + +/** + * Computes offsets of concat inputs within its output. + * For example: + *
+ *
+ *
+ *

x = [2, 2, 7] + * y = [2, 3, 7] + * z = [2, 9, 7] + * offsets = concat_offset(1, [x, y, z]) + * [[a.item() for a in list(off.numpy())] for off in offsets] + * [[0, 0, 0], [0, 2, 0], [0, 5, 0]] + *

+ *
+ *
+ *

This is typically used by gradient computations for a concat operation. + */ +@OpMetadata( + opType = ConcatOffset.OP_NAME, + inputsClass = ConcatOffset.Inputs.class +) +@Operator +public final class ConcatOffset extends RawOp implements Iterable> { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "ConcatOffset"; + + private List> offset; + + @SuppressWarnings("unchecked") + public ConcatOffset(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + int offsetLength = operation.outputListLength("offset"); + offset = Arrays.asList((Output[]) operation.outputList(outputIdx, offsetLength)); + outputIdx += offsetLength; + } + + /** + * Factory method to create a class wrapping a new ConcatOffset operation. + * + * @param scope current scope + * @param concatDim The dimension along which to concatenate. + * @param shape The {@code N} int32 or int64 vectors representing shape of tensors being concatenated. + * @param data type for {@code ConcatOffset} output and operands + * @return a new instance of ConcatOffset + */ + @Endpoint( + describeByClass = true + ) + public static ConcatOffset create(Scope scope, Operand concatDim, + Iterable> shape) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "ConcatOffset"); + opBuilder.addInput(concatDim.asOutput()); + opBuilder.addInputList(Operands.asOutputs(shape)); + return new ConcatOffset<>(opBuilder.build()); + } + + /** + * Gets offset. + * The {@code N} vectors representing the starting offset + * of input tensors within the concatenated output with type matching {@code shape}. + * @return offset. 
+ */ + public List> offset() { + return offset; + } + + @Override + @SuppressWarnings({"rawtypes", "unchecked"}) + public Iterator> iterator() { + return (Iterator) offset.iterator(); + } + + @OpInputsMetadata( + outputsClass = ConcatOffset.class + ) + public static class Inputs extends RawOpInputs> { + /** + * The dimension along which to concatenate. + */ + public final Operand concatDim; + + /** + * The {@code N} int32 or int64 vectors representing shape of tensors being concatenated. + */ + public final Iterable> shape; + + /** + * The shapeType attribute + */ + public final DataType shapeType; + + public Inputs(GraphOperation op) { + super(new ConcatOffset<>(op), op, Arrays.asList("shape_type")); + int inputIndex = 0; + concatDim = (Operand) op.input(inputIndex++); + int shapeLength = op.inputListLength("shape"); + shape = Arrays.asList((Operand[]) op.inputList(inputIndex, shapeLength)); + inputIndex += shapeLength; + shapeType = op.attributes().getAttrType("shape_type"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Copy.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Copy.java index 234663012a6..a04de48877b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Copy.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Copy.java @@ -42,8 +42,6 @@ * deep-copying. See the documentation of Debug* ops for more details. *

Unlike the CopyHost Op, this op does not have HostMemory constraint on its * input or output. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Copy.OP_NAME, @@ -223,9 +221,9 @@ public static class Inputs extends RawOpInputs> { /** * A list of debug op spec (op, url, gated_grpc) for attached debug * ops. Each element of the list has the format - * ;;, wherein gated_grpc is boolean represented - * as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1", - * "DebugIdentity;file:///tmp/tfdbg_1;0". + * <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented + * as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1", + * "DebugIdentity;file:///tmp/tfdbg_1;0". */ public final String[] debugOpsSpec; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyHost.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyHost.java index 4a650c8bc0a..055c9d878bf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyHost.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyHost.java @@ -40,8 +40,6 @@ * gRPC gating status, the output will simply forward the input tensor without * deep-copying. See the documentation of Debug* ops for more details. *

Unlike the Copy Op, this op has HostMemory constraint on its input or output. - * - * @param data type for {@code output} output */ @OpMetadata( opType = CopyHost.OP_NAME, @@ -221,9 +219,9 @@ public static class Inputs extends RawOpInputs> { /** * A list of debug op spec (op, url, gated_grpc) for attached debug * ops. Each element of the list has the format - * ;;, wherein gated_grpc is boolean represented - * as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1", - * "DebugIdentity;file:///tmp/tfdbg_1;0". + * <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented + * as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1", + * "DebugIdentity;file:///tmp/tfdbg_1;0". */ public final String[] debugOpsSpec; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyToMesh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyToMesh.java index f83d6c6ad61..166d4613d54 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyToMesh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyToMesh.java @@ -35,8 +35,6 @@ /** * The CopyToMesh operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = CopyToMesh.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyToMeshGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyToMeshGrad.java index fa3467cd849..095d5b5d7ce 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyToMeshGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyToMeshGrad.java @@ -35,8 +35,6 @@ /** * The CopyToMeshGrad operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = CopyToMeshGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CountUpTo.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CountUpTo.java index 7a81a4419e6..0f404fa1419 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CountUpTo.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CountUpTo.java @@ -35,8 +35,6 @@ /** * Increments 'ref' until it reaches 'limit'. - * - * @param data type for {@code output} output */ @OpMetadata( opType = CountUpTo.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DecodeProto.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DecodeProto.java index ab5d32b01cc..be36191dd31 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DecodeProto.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DecodeProto.java @@ -298,13 +298,13 @@ public static class Inputs extends RawOpInputs { public final DataType[] outputTypes; /** - * Either the special value `local://` or a path to a file containing - * a serialized `FileDescriptorSet`. + * Either the special value {@code local://} or a path to a file containing + * a serialized {@code FileDescriptorSet}. */ public final String descriptorSource; /** - * Either `binary` or `text`. + * Either {@code binary} or {@code text}. */ public final String messageFormat; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeepCopy.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeepCopy.java index ca15dbb9a55..f0b9b3927a8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeepCopy.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeepCopy.java @@ -35,8 +35,6 @@ /** * Makes a copy of {@code x}. 
- * - * @param data type for {@code y} output */ @OpMetadata( opType = DeepCopy.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DestroyTemporaryVariable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DestroyTemporaryVariable.java index cc8f2bafb2f..876a1e46ee5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DestroyTemporaryVariable.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DestroyTemporaryVariable.java @@ -41,8 +41,6 @@ * This is typically achieved by chaining the ref through each assign op, or by * using control dependencies. *

Outputs the final value of the tensor pointed to by 'ref'. - * - * @param data type for {@code value} output */ @OpMetadata( opType = DestroyTemporaryVariable.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeviceIndex.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeviceIndex.java index 1e35dc9c5d7..fb05a1a7a09 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeviceIndex.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeviceIndex.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; /** @@ -43,6 +44,7 @@ opType = DeviceIndex.OP_NAME, inputsClass = DeviceIndex.Inputs.class ) +@Operator public final class DeviceIndex extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DummyMemoryCache.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DummyMemoryCache.java index b7ac9a7d91d..d4dcdcb0735 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DummyMemoryCache.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DummyMemoryCache.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** @@ -38,6 +39,7 @@ opType = DummyMemoryCache.OP_NAME, inputsClass = DummyMemoryCache.Inputs.class ) +@Operator public final class DummyMemoryCache extends RawOp implements Operand { /** * The name of this op, as known by 
TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicPartition.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicPartition.java index b851e0cccdf..d7d7bf7c328 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicPartition.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicPartition.java @@ -78,8 +78,6 @@ * * * - * - * @param data type for {@code outputs} output */ @OpMetadata( opType = DynamicPartition.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicStitch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicStitch.java index 258aabce0e0..d160ab8255c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicStitch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicStitch.java @@ -54,7 +54,7 @@ * must have {@code data[i].shape = indices[i].shape + constant}. In terms of this * {@code constant}, the output shape is *

- * merged.shape = [max(indices)] + constant
+ * merged.shape = [max(indices) + 1] + constant
  * 
*

Values are merged in order, so if an index appears in both {@code indices[m][i]} and * {@code indices[n][j]} for {@code (m,i) < (n,j)} the slice {@code data[n][j]} will appear in the @@ -90,8 +90,6 @@ *

* *
- * - * @param data type for {@code merged} output */ @OpMetadata( opType = DynamicStitch.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EditDistance.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EditDistance.java index 959f61975ea..228743f17cd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EditDistance.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EditDistance.java @@ -222,8 +222,7 @@ public static class Inputs extends RawOpInputs { /** * boolean (if true, edit distances are normalized by length of truth). - * - * The output is: + *

The output is: */ public final boolean normalize; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Empty.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Empty.java index 02c76780ba2..6f7d74d94e5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Empty.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Empty.java @@ -38,8 +38,6 @@ /** * Creates a tensor with the given shape. *

This operation creates a tensor of {@code shape} and {@code dtype}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Empty.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EnsureShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EnsureShape.java index 131285dc0e6..bbada3714ac 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EnsureShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EnsureShape.java @@ -38,8 +38,6 @@ * Ensures that the tensor's shape matches the expected shape. * Raises an error if the input tensor's shape does not match the specified shape. * Returns the input tensor otherwise. - * - * @param data type for {@code output} output */ @OpMetadata( opType = EnsureShape.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Enter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Enter.java index e434208bade..309e5700eb1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Enter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Enter.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -39,13 +40,12 @@ * {@code is_constant} is true, {@code output} is a constant in the child frame; otherwise * it may be changed in the child frame. At most {@code parallel_iterations} iterations * are run in parallel in the child frame. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = Enter.OP_NAME, inputsClass = Enter.Inputs.class ) +@Operator public final class Enter extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Exit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Exit.java index 9e73edcb1f4..8dea6a66fe6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Exit.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Exit.java @@ -29,19 +29,19 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; /** * Exits the current frame to its parent frame. * Exit makes its input {@code data} available to the parent frame. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Exit.OP_NAME, inputsClass = Exit.Inputs.class ) +@Operator public final class Exit extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExpandDims.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExpandDims.java index bf17427d228..0f0e030b71d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExpandDims.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExpandDims.java @@ -59,8 +59,6 @@ *

{@code -1-input.dims() <= dim <= input.dims()} *

This operation is related to {@code squeeze()}, which removes dimensions of * size 1. - * - * @param data type for {@code output} output */ @OpMetadata( opType = ExpandDims.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExtractVolumePatches.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExtractVolumePatches.java index cd19ad800ff..350c416e235 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExtractVolumePatches.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExtractVolumePatches.java @@ -36,8 +36,6 @@ /** * Extract {@code patches} from {@code input} and put them in the {@code "depth"} output dimension. 3D extension of {@code extract_image_patches}. - * - * @param data type for {@code patches} output */ @OpMetadata( opType = ExtractVolumePatches.OP_NAME, @@ -123,13 +121,13 @@ public static class Inputs extends RawOpInputs input; /** - * The size of the sliding window for each dimension of `input`. + * The size of the sliding window for each dimension of {@code input}. */ public final long[] ksizes; /** * 1-D of length 5. How far the centers of two consecutive patches are in - * `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`. + * {@code input}. Must be: {@code [1, stride_planes, stride_rows, stride_cols, 1]}. */ public final long[] strides; @@ -140,13 +138,11 @@ public static class Inputs extends RawOpInputsThe size-related attributes are specified as follows: + *

      * ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1]
      * strides = [1, stride_planes, strides_rows, strides_cols, 1]
-     * ```
+     * 
*/ public final String padding; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/FakeParam.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/FakeParam.java new file mode 100644 index 00000000000..79e63958dda --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/FakeParam.java @@ -0,0 +1,125 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.core; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.proto.DataType; +import org.tensorflow.types.family.TType; + +/** + * This op is used as a placeholder in If branch functions. It doesn't provide a + * valid output when run, so must either be removed (e.g. 
replaced with a + * function input) or guaranteed not to be used (e.g. if mirroring an + * intermediate output needed for the gradient computation of the other branch). + */ +@OpMetadata( + opType = FakeParam.OP_NAME, + inputsClass = FakeParam.Inputs.class +) +@Operator +public final class FakeParam extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "FakeParam"; + + private Output output; + + public FakeParam(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new FakeParam operation. + * + * @param scope current scope + * @param dtype The type of the output. + * @param shape
+   * The purported shape of the output. This is only used for shape inference;
+   * the output will not necessarily have this shape. Can be a partial shape.
+   * 
+ * @param data type for {@code FakeParam} output and operands + * @return a new instance of FakeParam + */ + @Endpoint( + describeByClass = true + ) + public static FakeParam create(Scope scope, Class dtype, Shape shape) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "FakeParam"); + opBuilder.setAttr("dtype", Operands.toDataType(dtype)); + opBuilder.setAttr("shape", shape); + return new FakeParam<>(opBuilder.build()); + } + + /** + * Gets output. + *
+   * \"Fake\" output value. This should not be consumed by another op.
+   * 
+ * @return output. + */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } + + @OpInputsMetadata( + outputsClass = FakeParam.class + ) + public static class Inputs extends RawOpInputs> { + /** + * The type of the output. + */ + public final DataType dtype; + + /** + *
+     * The purported shape of the output. This is only used for shape inference;
+     * the output will not necessarily have this shape. Can be a partial shape.
+     * 
+ */ + public final Shape shape; + + public Inputs(GraphOperation op) { + super(new FakeParam<>(op), op, Arrays.asList("dtype", "shape")); + int inputIndex = 0; + dtype = op.attributes().getAttrType("dtype"); + shape = op.attributes().getAttrShape("shape"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/FileSystemSetConfiguration.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/FileSystemSetConfiguration.java index b5ec9c52eb9..42984c322f9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/FileSystemSetConfiguration.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/FileSystemSetConfiguration.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; /** @@ -37,6 +38,7 @@ opType = FileSystemSetConfiguration.OP_NAME, inputsClass = FileSystemSetConfiguration.Inputs.class ) +@Operator public final class FileSystemSetConfiguration extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Fill.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Fill.java index 5ba5931795e..8634981f57c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Fill.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Fill.java @@ -53,8 +53,6 @@ *
  • Because {@code tf.fill} evaluates at graph runtime, it supports dynamic shapes * based on other runtime Tensors, unlike {@code tf.constant}.
  • * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Fill.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Gather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Gather.java index 1b1e3f888ee..43e09807b66 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Gather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Gather.java @@ -57,9 +57,10 @@ *

    Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, a 0 is stored in the * corresponding output value. + *

    Note that on TPU, if any dimension of {@code params} is of size 0 then the output will + * be the expected shape filled with zeros. On CPU and GPU an error will be + * returned. *

    See also {@code tf.batch_gather} and {@code tf.gather_nd}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Gather.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GatherNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GatherNd.java index b1a05118129..755bf4e7905 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GatherNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GatherNd.java @@ -57,9 +57,17 @@ *

      * indices.shape[:-1] + params.shape[indices.shape[-1]:]
      * 
    - *

    Note that on CPU, if an out of bound index is found, an error is returned. - * On GPU, if an out of bound index is found, a 0 is stored in the - * corresponding output value. + *

    If {@code indices} contains any out-of-bound indices, depending on + * {@code bad_indices_policy}, the op will either return an error or ignore the + * out-of-bound indices. {@code bad_indices_policy} can be one of the following values: + *

      + *
    1. "" or "DEFAULT": raises on CPU and ignore on GPU. This is because + * historically on CPU and GPU we handle errors in different ways, and for + * backward compatibility we keep the default behavior.
    2. + *
    3. "ERROR": raises error; GPU does not support this value.
    4. + *
    5. "IGNORE": ignore error and set the corresponding output to 0; + * supported on both CPU and GPU.
    6. + *
    *

    Some examples below. *

    Simple indexing into a matrix: *

    @@ -125,8 +133,6 @@
      *     output = [['b0', 'b1'], ['d0', 'c1']]
      * 
    *

    See also {@code tf.gather} and {@code tf.batch_gather}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = GatherNd.OP_NAME, @@ -153,6 +159,7 @@ public GatherNd(Operation operation) { * @param scope current scope * @param params The tensor from which to gather values. * @param indices Index tensor. + * @param options carries optional attribute values * @param data type for {@code GatherNd} output and operands * @return a new instance of GatherNd */ @@ -160,13 +167,30 @@ public GatherNd(Operation operation) { describeByClass = true ) public static GatherNd create(Scope scope, Operand params, - Operand indices) { + Operand indices, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "GatherNd"); opBuilder.addInput(params.asOutput()); opBuilder.addInput(indices.asOutput()); + if (options != null) { + for (Options opts : options) { + if (opts.badIndicesPolicy != null) { + opBuilder.setAttr("bad_indices_policy", opts.badIndicesPolicy); + } + } + } return new GatherNd<>(opBuilder.build()); } + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. + */ + public static Options badIndicesPolicy(String badIndicesPolicy) { + return new Options().badIndicesPolicy(badIndicesPolicy); + } + /** * Gets output. * Values from {@code params} gathered from indices given by {@code indices}, with @@ -182,6 +206,27 @@ public Output asOutput() { return output; } + /** + * Optional attributes for {@link org.tensorflow.op.core.GatherNd} + */ + public static class Options { + private String badIndicesPolicy; + + private Options() { + } + + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. 
+ */ + public Options badIndicesPolicy(String badIndicesPolicy) { + this.badIndicesPolicy = badIndicesPolicy; + return this; + } + } + @OpInputsMetadata( outputsClass = GatherNd.class ) @@ -206,13 +251,19 @@ public static class Inputs extends RawOpInputs> { */ public final DataType Tindices; + /** + * The badIndicesPolicy attribute + */ + public final String badIndicesPolicy; + public Inputs(GraphOperation op) { - super(new GatherNd<>(op), op, Arrays.asList("Tparams", "Tindices")); + super(new GatherNd<>(op), op, Arrays.asList("Tparams", "Tindices", "bad_indices_policy")); int inputIndex = 0; params = (Operand) op.input(inputIndex++); indices = (Operand) op.input(inputIndex++); Tparams = op.attributes().getAttrType("Tparams"); Tindices = op.attributes().getAttrType("Tindices"); + badIndicesPolicy = op.attributes().getAttrString("bad_indices_policy"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetElementAtIndex.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetElementAtIndex.java index 27851f15855..065404e1735 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetElementAtIndex.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetElementAtIndex.java @@ -33,6 +33,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -44,6 +45,7 @@ opType = GetElementAtIndex.OP_NAME, inputsClass = GetElementAtIndex.Inputs.class ) +@Operator public final class GetElementAtIndex extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetOptions.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetOptions.java index 43cf53336ce..f6c5340587d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetOptions.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetOptions.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -39,6 +40,7 @@ opType = GetOptions.OP_NAME, inputsClass = GetOptions.Inputs.class ) +@Operator public final class GetOptions extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetSessionTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetSessionTensor.java index a2445004e6d..0cccfb42045 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetSessionTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetSessionTensor.java @@ -37,8 +37,6 @@ /** * Get the value of the tensor specified by its handle. - * - * @param data type for {@code value} output */ @OpMetadata( opType = GetSessionTensor.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GuaranteeConst.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GuaranteeConst.java index 8839f77471f..c4235de8ff2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GuaranteeConst.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GuaranteeConst.java @@ -39,8 +39,6 @@ *

    Only accepts value typed tensors as inputs and rejects resource variable handles * as input. *

    Returns the input tensor without modification. - * - * @param data type for {@code output} output */ @OpMetadata( opType = GuaranteeConst.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HistogramFixedWidth.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HistogramFixedWidth.java index 0846ac056c0..782cfc69f05 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HistogramFixedWidth.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HistogramFixedWidth.java @@ -51,8 +51,6 @@ * variables.global_variables_initializer().run() * sess.run(hist) => [2, 1, 1, 0, 2] * - * - * @param data type for {@code out} output */ @OpMetadata( opType = HistogramFixedWidth.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCholesky.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HostConst.java similarity index 54% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCholesky.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HostConst.java index 735ee23319f..82f5ef8f295 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCholesky.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HostConst.java @@ -15,7 +15,7 @@ // This class has been generated, DO NOT EDIT! 
-package org.tensorflow.op.risc; +package org.tensorflow.op.core; import java.util.Arrays; import org.tensorflow.GraphOperation; @@ -23,53 +23,57 @@ import org.tensorflow.Operation; import org.tensorflow.OperationBuilder; import org.tensorflow.Output; +import org.tensorflow.Tensor; +import org.tensorflow.op.Operands; import org.tensorflow.op.RawOp; import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; +import org.tensorflow.types.family.TType; /** - * The RiscCholesky operation - * - * @param data type for {@code output} output + * Returns a constant tensor on the host. Only for writing C++ tests. */ @OpMetadata( - opType = RiscCholesky.OP_NAME, - inputsClass = RiscCholesky.Inputs.class + opType = HostConst.OP_NAME, + inputsClass = HostConst.Inputs.class ) -public final class RiscCholesky extends RawOp implements Operand { +@Operator +public final class HostConst extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "RiscCholesky"; + public static final String OP_NAME = "HostConst"; private Output output; - public RiscCholesky(Operation operation) { + public HostConst(Operation operation) { super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } /** - * Factory method to create a class wrapping a new RiscCholesky operation. + * Factory method to create a class wrapping a new HostConst operation. * * @param scope current scope - * @param input The input value - * @param data type for {@code RiscCholesky} output and operands - * @return a new instance of RiscCholesky + * @param value Attr {@code value} is the tensor to return. 
+ * @param dtype The value of the dtype attribute + * @param data type for {@code HostConst} output and operands + * @return a new instance of HostConst */ @Endpoint( describeByClass = true ) - public static RiscCholesky create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscCholesky"); - opBuilder.addInput(input.asOutput()); - return new RiscCholesky<>(opBuilder.build()); + public static HostConst create(Scope scope, Tensor value, Class dtype) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "HostConst"); + opBuilder.setAttr("value", value); + opBuilder.setAttr("dtype", Operands.toDataType(dtype)); + return new HostConst<>(opBuilder.build()); } /** @@ -87,24 +91,24 @@ public Output asOutput() { } @OpInputsMetadata( - outputsClass = RiscCholesky.class + outputsClass = HostConst.class ) - public static class Inputs extends RawOpInputs> { + public static class Inputs extends RawOpInputs> { /** - * The input input + * Attr {@code value} is the tensor to return. 
*/ - public final Operand input; + public final Tensor value; /** - * The T attribute + * The dtype attribute */ - public final DataType T; + public final DataType dtype; public Inputs(GraphOperation op) { - super(new RiscCholesky<>(op), op, Arrays.asList("T")); + super(new HostConst<>(op), op, Arrays.asList("value", "dtype")); int inputIndex = 0; - input = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); + value = op.attributes().getAttrTensor("value"); + dtype = op.attributes().getAttrType("dtype"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Identity.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Identity.java index 12c84344373..d0729ab93da 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Identity.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Identity.java @@ -35,8 +35,6 @@ /** * Return a tensor with the same shape and contents as the input tensor or value. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Identity.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ImmutableConst.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ImmutableConst.java index 47cbe749ee9..12d647268ba 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ImmutableConst.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ImmutableConst.java @@ -38,8 +38,6 @@ /** * Returns immutable tensor from memory region. * The current implementation memmaps the tensor from a file. 
- * - * @param data type for {@code tensor} output */ @OpMetadata( opType = ImmutableConst.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InitializeTableFromTextFile.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InitializeTableFromTextFile.java index 1da5f008223..d8a78e24266 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InitializeTableFromTextFile.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InitializeTableFromTextFile.java @@ -192,13 +192,13 @@ public static class Inputs extends RawOpInputs { public final Operand filename; /** - * Column index in a line to get the table `key` values from. + * Column index in a line to get the table {@code key} values from. */ public final long keyIndex; /** * Column index that represents information of a line to get the table - * `value` values from. + * {@code value} values from. */ public final long valueIndex; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceAdd.java index c42388fc55c..78f37851589 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceAdd.java @@ -39,8 +39,6 @@ *

      * Computes y = x; y[i, :] += v; return y.
      * 
    - * - * @param data type for {@code y} output */ @OpMetadata( opType = InplaceAdd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceSub.java index a39bf6d741b..31d0287aab2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceSub.java @@ -40,8 +40,6 @@ * * Computes y = x; y[i, :] -= v; return y. * - * - * @param data type for {@code y} output */ @OpMetadata( opType = InplaceSub.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceUpdate.java index 8aecb6edf8c..d34e0f15011 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceUpdate.java @@ -39,8 +39,6 @@ * Computes {@code x[i, :] = v; return x}. *

    Originally this function is mutative however for compilation we make this * operation create / operate on a copy of {@code x}. - * - * @param data type for {@code y} output */ @OpMetadata( opType = InplaceUpdate.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LinSpace.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LinSpace.java index 3ae58b9158c..317eb054e29 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LinSpace.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LinSpace.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; @@ -41,13 +42,12 @@ *

      * tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
      * 
    - * - * @param data type for {@code output} output */ @OpMetadata( opType = LinSpace.OP_NAME, inputsClass = LinSpace.Inputs.class ) +@Operator public final class LinSpace extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableExport.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableExport.java index 7406671423c..7546b26f8f4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableExport.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableExport.java @@ -36,10 +36,6 @@ /** * Outputs all keys and values in the table. - * - * @param data type for {@code keys} output - * - * @param data type for {@code values} output */ @OpMetadata( opType = LookupTableExport.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableFind.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableFind.java index b097f2ee81d..1155c94662f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableFind.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableFind.java @@ -39,8 +39,6 @@ * The output {@code values} is of the type of the table values. *

    The scalar {@code default_value} is the value output for keys not present in the * table. It must also be of the same type as the table values. - * - * @param data type for {@code values} output */ @OpMetadata( opType = LookupTableFind.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableRemove.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableRemove.java index e731a66f88d..9d52aae0ff8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableRemove.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableRemove.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -40,6 +41,7 @@ opType = LookupTableRemove.OP_NAME, inputsClass = LookupTableRemove.Inputs.class ) +@Operator public final class LookupTableRemove extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LowerBound.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LowerBound.java index 97069fccf75..2a4b761a8fd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LowerBound.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LowerBound.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -50,13 +51,12 @@ *

    result = LowerBound(sorted_sequence, values) *

    result == [[1, 2, 2], * [0, 1, 5]] - * - * @param data type for {@code output} output */ @OpMetadata( opType = LowerBound.OP_NAME, inputsClass = LowerBound.Inputs.class ) +@Operator public final class LowerBound extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapDefun.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapDefun.java index c8f4bbef925..5c91ffea662 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapDefun.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapDefun.java @@ -34,6 +34,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -53,6 +54,7 @@ opType = MapDefun.OP_NAME, inputsClass = MapDefun.Inputs.class ) +@Operator public final class MapDefun extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapStage.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapStage.java index 8242cfaa5c1..19d9c0e99f9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapStage.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapStage.java @@ -220,7 +220,7 @@ public static class Inputs extends RawOpInputs { public final Iterable> values; /** - * Maximum number of elements in the Staging Area. If > 0, inserts + * Maximum number of elements in the Staging Area. If > 0, inserts * on the container will block when the capacity is reached. 
*/ public final long capacity; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Max.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Max.java index fb03ee5c942..04c4f1481d3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Max.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Max.java @@ -39,8 +39,6 @@ * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Max.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Merge.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Merge.java index 7e4c77434b9..f5a189c9c58 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Merge.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Merge.java @@ -41,8 +41,6 @@ * It is usually combined with {@code Switch} to implement branching. *

    {@code Merge} forwards the first tensor to become available to {@code output}, and sets * {@code value_index} to its index in {@code inputs}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Merge.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Min.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Min.java index f3db8fedac0..89ac31b5854 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Min.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Min.java @@ -39,8 +39,6 @@ * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Min.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPad.java index 172a9a91c32..751bec8fd66 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPad.java @@ -57,8 +57,6 @@ * [5, 4, 4, 5, 6, 6, 5] * [5, 4, 4, 5, 6, 6, 5]] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = MirrorPad.OP_NAME, @@ -146,11 +144,11 @@ public static class Inputs extends RawOpInputs> { public final DataType Tpaddings; /** - * Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions + * Either {@code REFLECT} or {@code SYMMETRIC}. In reflect mode the padded regions * do not include the borders, while in symmetric mode the padded regions - * do include the borders. 
For example, if `input` is `[1, 2, 3]` and `paddings` - * is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and - * it is `[1, 2, 3, 3, 2]` in symmetric mode. + * do include the borders. For example, if {@code input} is {@code [1, 2, 3]} and {@code paddings} + * is {@code [0, 2]}, then the output is {@code [1, 2, 3, 2, 1]} in reflect mode, and + * it is {@code [1, 2, 3, 3, 2]} in symmetric mode. */ public final String mode; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPadGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPadGrad.java index a879cf4c9f0..d1286e4bd89 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPadGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPadGrad.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -49,13 +50,12 @@ * pad(t, paddings) ==> [[ 1, 5] * [11, 28]] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = MirrorPadGrad.OP_NAME, inputsClass = MirrorPadGrad.Inputs.class ) +@Operator public final class MirrorPadGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -133,7 +133,7 @@ public static class Inputs extends RawOpInputs public final DataType Tpaddings; /** - * The mode used in the `MirrorPad` op. + * The mode used in the {@code MirrorPad} op. 
*/ public final String mode; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclAllReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclAllReduce.java index b92c4b3e9e6..5e8f5709b65 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclAllReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclAllReduce.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; @@ -45,8 +46,6 @@ * num_devices: The number of devices participating in this reduction. * shared_name: Identifier that shared between ops of the same reduction. * - * @param data type for {@code data} output - * * @deprecated use {@link org.tensorflow.op.distribute.NcclAllReduce} instead */ @OpMetadata( @@ -54,6 +53,7 @@ inputsClass = NcclAllReduce.Inputs.class ) @Deprecated +@Operator public final class NcclAllReduce extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclBroadcast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclBroadcast.java index 4c5fad98b84..5e6c2a583ef 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclBroadcast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclBroadcast.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; @@ 
-42,8 +43,6 @@ * output: The same as input. * shape: The shape of the input tensor. * - * @param data type for {@code output} output - * * @deprecated use {@link org.tensorflow.op.distribute.NcclBroadcast} instead */ @OpMetadata( @@ -51,6 +50,7 @@ inputsClass = NcclBroadcast.Inputs.class ) @Deprecated +@Operator public final class NcclBroadcast extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclReduce.java index 61ef2825e7b..cd3dea3af6f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclReduce.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; @@ -42,8 +43,6 @@ * data: the value of the reduction across all {@code num_devices} devices. * reduction: the reduction operation to perform. 
* - * @param data type for {@code data} output - * * @deprecated use {@link org.tensorflow.op.distribute.NcclReduce} instead */ @OpMetadata( @@ -51,6 +50,7 @@ inputsClass = NcclReduce.Inputs.class ) @Deprecated +@Operator public final class NcclReduce extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NextIteration.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NextIteration.java index 33e50ce1b5d..1f0f73c672f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NextIteration.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NextIteration.java @@ -35,8 +35,6 @@ /** * Makes its input available to the next iteration. - * - * @param data type for {@code output} output */ @OpMetadata( opType = NextIteration.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OneHot.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OneHot.java index 09f55f7eaff..8ed3c25bd8e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OneHot.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OneHot.java @@ -111,8 +111,6 @@ * [0.0, 0.0, 0.0] // one_hot(-1) * ] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = OneHot.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OnesLike.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OnesLike.java index b69df0d0952..51178e062f4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OnesLike.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OnesLike.java @@ -35,8 +35,6 @@ /** * Returns a tensor of ones with the same shape and type as x. 
- * - * @param data type for {@code y} output */ @OpMetadata( opType = OnesLike.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapStage.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapStage.java index 7ce2df9f31b..cd5c7dfdec3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapStage.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapStage.java @@ -221,7 +221,7 @@ public static class Inputs extends RawOpInputs { public final Iterable> values; /** - * Maximum number of elements in the Staging Area. If > 0, inserts + * Maximum number of elements in the Staging Area. If > 0, inserts * on the container will block when the capacity is reached. */ public final long capacity; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Pad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Pad.java index d80e87f0f2d..60ddbcf6817 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Pad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Pad.java @@ -56,8 +56,6 @@ * [0, 0, 2, 2, 0, 0] * [0, 0, 0, 0, 0, 0]] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Pad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelConcat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelConcat.java index c5cbde1618c..b12c3b896aa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelConcat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelConcat.java @@ -50,8 +50,6 @@ * that the input shapes be known during graph construction. 
Parallel concat * will copy pieces of the input into the output as they become available, in * some situations this can provide a performance benefit. - * - * @param data type for {@code output} output */ @OpMetadata( opType = ParallelConcat.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelDynamicStitch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelDynamicStitch.java index a23c3d135a8..c9fd16880ca 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelDynamicStitch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelDynamicStitch.java @@ -89,8 +89,6 @@ *

    * *
    - * - * @param data type for {@code merged} output */ @OpMetadata( opType = ParallelDynamicStitch.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Placeholder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Placeholder.java index 634500dcfc0..f4c450973da 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Placeholder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Placeholder.java @@ -40,8 +40,6 @@ * N.B. This operation will fail with an error if it is executed. It is * intended as a way to represent a value that will always be fed, and to * provide attrs that enable the fed value to be checked at runtime. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Placeholder.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/PlaceholderWithDefault.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/PlaceholderWithDefault.java index 9604ea0a92a..202d4cc476c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/PlaceholderWithDefault.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/PlaceholderWithDefault.java @@ -36,8 +36,6 @@ /** * A placeholder op that passes through {@code input} when its output is not fed. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = PlaceholderWithDefault.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Prod.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Prod.java index 71c7f986eb6..3f1c696a0bc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Prod.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Prod.java @@ -40,8 +40,6 @@ * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Prod.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/QuantizedReshape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/QuantizedReshape.java index 6e92b83bf89..84816c6893f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/QuantizedReshape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/QuantizedReshape.java @@ -37,8 +37,6 @@ /** * Reshapes a quantized tensor as per the Reshape op. - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedReshape.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RandomIndexShuffle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RandomIndexShuffle.java index 5ac291f867d..76538abf9cb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RandomIndexShuffle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RandomIndexShuffle.java @@ -39,8 +39,6 @@ *

    If multiple inputs are vectors (matrix in case of seed) then the size of the * first dimension must match. *

    The outputs are deterministic. - * - * @param data type for {@code output} output */ @OpMetadata( opType = RandomIndexShuffle.OP_NAME, @@ -166,7 +164,7 @@ public static class Inputs extends RawOpInputs - * - * @param data type for {@code output} output */ @OpMetadata( opType = Range.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReadVariableOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReadVariableOp.java index 236991942ee..f57c2781c3f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReadVariableOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReadVariableOp.java @@ -41,8 +41,6 @@ * writes on which this operation depends directly or indirectly, and to not be * influenced by any of the writes which depend directly or indirectly on this * operation. - * - * @param data type for {@code value} output */ @OpMetadata( opType = ReadVariableOp.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Recv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Recv.java index 9ce12da0383..5b3caab37b8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Recv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Recv.java @@ -30,18 +30,18 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; /** * Receives the named tensor from send_device on recv_device. 
- * - * @param data type for {@code tensor} output */ @OpMetadata( opType = Recv.OP_NAME, inputsClass = Recv.Inputs.class ) +@Operator public final class Recv extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMax.java index 529841fd5fa..dca6c6a5ffc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMax.java @@ -39,8 +39,6 @@ * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. - * - * @param data type for {@code output} output */ @OpMetadata( opType = ReduceMax.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMin.java index f349357096b..a7e544cfaab 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMin.java @@ -39,8 +39,6 @@ * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = ReduceMin.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceProd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceProd.java index 49008ad1a36..3dc53ad9c58 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceProd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceProd.java @@ -40,8 +40,6 @@ * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. - * - * @param data type for {@code output} output */ @OpMetadata( opType = ReduceProd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceSum.java index 05851e60764..bbe161f9210 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceSum.java @@ -40,8 +40,6 @@ * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = ReduceSum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefEnter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefEnter.java index 74108c84f6a..218092a2563 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefEnter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefEnter.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -38,13 +39,12 @@ * {@code is_constant} is true, {@code output} is a constant in the child frame; otherwise * it may be changed in the child frame. At most {@code parallel_iterations} iterations * are run in parallel in the child frame. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = RefEnter.OP_NAME, inputsClass = RefEnter.Inputs.class ) +@Operator public final class RefEnter extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefExit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefExit.java index 8af577f4f19..9a840da2c3d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefExit.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefExit.java @@ -29,19 +29,19 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; /** * Exits the current frame to its parent frame. * Exit makes its input {@code data} available to the parent frame. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = RefExit.OP_NAME, inputsClass = RefExit.Inputs.class ) +@Operator public final class RefExit extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefIdentity.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefIdentity.java index 5e699612efb..c3bb004b548 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefIdentity.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefIdentity.java @@ -29,18 +29,18 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; /** * Return the same ref tensor as the input ref tensor. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = RefIdentity.OP_NAME, inputsClass = RefIdentity.Inputs.class ) +@Operator public final class RefIdentity extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefMerge.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefMerge.java index d7b4026f5c5..4baf6cc6260 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefMerge.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefMerge.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -40,13 +41,12 @@ * It is usually combined with {@code Switch} to implement branching. *

    {@code Merge} forwards the first tensor for become available to {@code output}, and sets * {@code value_index} to its index in {@code inputs}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = RefMerge.OP_NAME, inputsClass = RefMerge.Inputs.class ) +@Operator public final class RefMerge extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefNextIteration.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefNextIteration.java index 5c7f1d2c4b7..ef647c70cd6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefNextIteration.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefNextIteration.java @@ -35,8 +35,6 @@ /** * Makes its input available to the next iteration. - * - * @param data type for {@code output} output */ @OpMetadata( opType = RefNextIteration.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSelect.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSelect.java index 02c6ddc8e2f..d7ffa33956e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSelect.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSelect.java @@ -37,8 +37,6 @@ /** * Forwards the {@code index}th element of {@code inputs} to {@code output}. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = RefSelect.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSwitch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSwitch.java index 04a2d4811ab..2e97b2bbcad 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSwitch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSwitch.java @@ -39,8 +39,6 @@ * If {@code pred} is true, the {@code data} input is forwarded to {@code output_true}. Otherwise, * the data goes to {@code output_false}. *

    See also {@code Switch} and {@code Merge}. - * - * @param data type for {@code output_false} output */ @OpMetadata( opType = RefSwitch.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Relayout.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Relayout.java index 959987e6200..503d3cfe42a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Relayout.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Relayout.java @@ -35,8 +35,6 @@ /** * The Relayout operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = Relayout.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RelayoutLike.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RelayoutLike.java index 7fd8a91fb8b..499cb8d6c72 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RelayoutLike.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RelayoutLike.java @@ -35,8 +35,6 @@ /** * The RelayoutLike operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = RelayoutLike.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reshape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reshape.java index 4b1ce466a7d..54c0aba057e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reshape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reshape.java @@ -90,8 +90,6 @@ * # shape `[]` reshapes to a scalar * reshape(t, []) ==> 7 * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Reshape.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceCountUpTo.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceCountUpTo.java index f8e5cf5abef..0ca0faa179e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceCountUpTo.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceCountUpTo.java @@ -37,8 +37,6 @@ /** * Increments variable pointed to by 'resource' until it reaches 'limit'. - * - * @param data type for {@code output} output */ @OpMetadata( opType = ResourceCountUpTo.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGather.java index 5dff2d95dc2..c458bacea4c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGather.java @@ -49,8 +49,6 @@ * # Higher rank indices * output[i, ..., j, :, ... 
:] = params[indices[i, ..., j], :, ..., :] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = ResourceGather.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGatherNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGatherNd.java index 1a86a282ab9..f9c6b72b544 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGatherNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGatherNd.java @@ -37,8 +37,6 @@ /** * The ResourceGatherNd operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = ResourceGatherNd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdAdd.java index 4c1d9d3820c..ee6c1cf7d61 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdAdd.java @@ -103,6 +103,9 @@ public static ResourceScatterNdAdd create(Scope scope, Operand if (opts.useLocking != null) { opBuilder.setAttr("use_locking", opts.useLocking); } + if (opts.badIndicesPolicy != null) { + opBuilder.setAttr("bad_indices_policy", opts.badIndicesPolicy); + } } } return new ResourceScatterNdAdd(opBuilder.build()); @@ -120,12 +123,24 @@ public static Options useLocking(Boolean useLocking) { return new Options().useLocking(useLocking); } + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. 
+ */ + public static Options badIndicesPolicy(String badIndicesPolicy) { + return new Options().badIndicesPolicy(badIndicesPolicy); + } + /** * Optional attributes for {@link org.tensorflow.op.core.ResourceScatterNdAdd} */ public static class Options { private Boolean useLocking; + private String badIndicesPolicy; + private Options() { } @@ -141,6 +156,17 @@ public Options useLocking(Boolean useLocking) { this.useLocking = useLocking; return this; } + + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. + */ + public Options badIndicesPolicy(String badIndicesPolicy) { + this.badIndicesPolicy = badIndicesPolicy; + return this; + } } @OpInputsMetadata( @@ -181,8 +207,13 @@ public static class Inputs extends RawOpInputs { */ public final boolean useLocking; + /** + * The badIndicesPolicy attribute + */ + public final String badIndicesPolicy; + public Inputs(GraphOperation op) { - super(new ResourceScatterNdAdd(op), op, Arrays.asList("T", "Tindices", "use_locking")); + super(new ResourceScatterNdAdd(op), op, Arrays.asList("T", "Tindices", "use_locking", "bad_indices_policy")); int inputIndex = 0; ref = (Operand) op.input(inputIndex++); indices = (Operand) op.input(inputIndex++); @@ -190,6 +221,7 @@ public Inputs(GraphOperation op) { T = op.attributes().getAttrType("T"); Tindices = op.attributes().getAttrType("Tindices"); useLocking = op.attributes().getAttrBool("use_locking"); + badIndicesPolicy = op.attributes().getAttrString("bad_indices_policy"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdMax.java index 193d4c7dfda..379843a67c7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdMax.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdMax.java @@ -77,6 +77,9 @@ public static ResourceScatterNdMax create(Scope scope, Operand if (opts.useLocking != null) { opBuilder.setAttr("use_locking", opts.useLocking); } + if (opts.badIndicesPolicy != null) { + opBuilder.setAttr("bad_indices_policy", opts.badIndicesPolicy); + } } } return new ResourceScatterNdMax(opBuilder.build()); @@ -94,12 +97,24 @@ public static Options useLocking(Boolean useLocking) { return new Options().useLocking(useLocking); } + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. + */ + public static Options badIndicesPolicy(String badIndicesPolicy) { + return new Options().badIndicesPolicy(badIndicesPolicy); + } + /** * Optional attributes for {@link org.tensorflow.op.core.ResourceScatterNdMax} */ public static class Options { private Boolean useLocking; + private String badIndicesPolicy; + private Options() { } @@ -115,6 +130,17 @@ public Options useLocking(Boolean useLocking) { this.useLocking = useLocking; return this; } + + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. 
+ */ + public Options badIndicesPolicy(String badIndicesPolicy) { + this.badIndicesPolicy = badIndicesPolicy; + return this; + } } @OpInputsMetadata( @@ -155,8 +181,13 @@ public static class Inputs extends RawOpInputs { */ public final boolean useLocking; + /** + * The badIndicesPolicy attribute + */ + public final String badIndicesPolicy; + public Inputs(GraphOperation op) { - super(new ResourceScatterNdMax(op), op, Arrays.asList("T", "Tindices", "use_locking")); + super(new ResourceScatterNdMax(op), op, Arrays.asList("T", "Tindices", "use_locking", "bad_indices_policy")); int inputIndex = 0; ref = (Operand) op.input(inputIndex++); indices = (Operand) op.input(inputIndex++); @@ -164,6 +195,7 @@ public Inputs(GraphOperation op) { T = op.attributes().getAttrType("T"); Tindices = op.attributes().getAttrType("Tindices"); useLocking = op.attributes().getAttrBool("use_locking"); + badIndicesPolicy = op.attributes().getAttrString("bad_indices_policy"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdMin.java index 9a1023916fd..ba46417abba 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdMin.java @@ -77,6 +77,9 @@ public static ResourceScatterNdMin create(Scope scope, Operand if (opts.useLocking != null) { opBuilder.setAttr("use_locking", opts.useLocking); } + if (opts.badIndicesPolicy != null) { + opBuilder.setAttr("bad_indices_policy", opts.badIndicesPolicy); + } } } return new ResourceScatterNdMin(opBuilder.build()); @@ -94,12 +97,24 @@ public static Options useLocking(Boolean useLocking) { return new Options().useLocking(useLocking); } + /** + * Sets the badIndicesPolicy option. 
+ * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. + */ + public static Options badIndicesPolicy(String badIndicesPolicy) { + return new Options().badIndicesPolicy(badIndicesPolicy); + } + /** * Optional attributes for {@link org.tensorflow.op.core.ResourceScatterNdMin} */ public static class Options { private Boolean useLocking; + private String badIndicesPolicy; + private Options() { } @@ -115,6 +130,17 @@ public Options useLocking(Boolean useLocking) { this.useLocking = useLocking; return this; } + + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. + */ + public Options badIndicesPolicy(String badIndicesPolicy) { + this.badIndicesPolicy = badIndicesPolicy; + return this; + } } @OpInputsMetadata( @@ -155,8 +181,13 @@ public static class Inputs extends RawOpInputs { */ public final boolean useLocking; + /** + * The badIndicesPolicy attribute + */ + public final String badIndicesPolicy; + public Inputs(GraphOperation op) { - super(new ResourceScatterNdMin(op), op, Arrays.asList("T", "Tindices", "use_locking")); + super(new ResourceScatterNdMin(op), op, Arrays.asList("T", "Tindices", "use_locking", "bad_indices_policy")); int inputIndex = 0; ref = (Operand) op.input(inputIndex++); indices = (Operand) op.input(inputIndex++); @@ -164,6 +195,7 @@ public Inputs(GraphOperation op) { T = op.attributes().getAttrType("T"); Tindices = op.attributes().getAttrType("Tindices"); useLocking = op.attributes().getAttrBool("use_locking"); + badIndicesPolicy = op.attributes().getAttrString("bad_indices_policy"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdSub.java index 4c321416231..f39e42e742b 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdSub.java @@ -103,6 +103,9 @@ public static ResourceScatterNdSub create(Scope scope, Operand if (opts.useLocking != null) { opBuilder.setAttr("use_locking", opts.useLocking); } + if (opts.badIndicesPolicy != null) { + opBuilder.setAttr("bad_indices_policy", opts.badIndicesPolicy); + } } } return new ResourceScatterNdSub(opBuilder.build()); @@ -120,12 +123,24 @@ public static Options useLocking(Boolean useLocking) { return new Options().useLocking(useLocking); } + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. + */ + public static Options badIndicesPolicy(String badIndicesPolicy) { + return new Options().badIndicesPolicy(badIndicesPolicy); + } + /** * Optional attributes for {@link org.tensorflow.op.core.ResourceScatterNdSub} */ public static class Options { private Boolean useLocking; + private String badIndicesPolicy; + private Options() { } @@ -141,6 +156,17 @@ public Options useLocking(Boolean useLocking) { this.useLocking = useLocking; return this; } + + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. 
+ */ + public Options badIndicesPolicy(String badIndicesPolicy) { + this.badIndicesPolicy = badIndicesPolicy; + return this; + } } @OpInputsMetadata( @@ -181,8 +207,13 @@ public static class Inputs extends RawOpInputs { */ public final boolean useLocking; + /** + * The badIndicesPolicy attribute + */ + public final String badIndicesPolicy; + public Inputs(GraphOperation op) { - super(new ResourceScatterNdSub(op), op, Arrays.asList("T", "Tindices", "use_locking")); + super(new ResourceScatterNdSub(op), op, Arrays.asList("T", "Tindices", "use_locking", "bad_indices_policy")); int inputIndex = 0; ref = (Operand) op.input(inputIndex++); indices = (Operand) op.input(inputIndex++); @@ -190,6 +221,7 @@ public Inputs(GraphOperation op) { T = op.attributes().getAttrType("T"); Tindices = op.attributes().getAttrType("Tindices"); useLocking = op.attributes().getAttrBool("use_locking"); + badIndicesPolicy = op.attributes().getAttrString("bad_indices_policy"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdUpdate.java index 1a21fa30916..588d923c05a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdUpdate.java @@ -104,6 +104,9 @@ public static ResourceScatterNdUpdate create(Scope scope, Operand { */ public final boolean useLocking; + /** + * The badIndicesPolicy attribute + */ + public final String badIndicesPolicy; + public Inputs(GraphOperation op) { - super(new ResourceScatterNdUpdate(op), op, Arrays.asList("T", "Tindices", "use_locking")); + super(new ResourceScatterNdUpdate(op), op, Arrays.asList("T", "Tindices", "use_locking", "bad_indices_policy")); int inputIndex = 0; ref = (Operand) op.input(inputIndex++); indices = (Operand) op.input(inputIndex++); @@ 
-191,6 +222,7 @@ public Inputs(GraphOperation op) { T = op.attributes().getAttrType("T"); Tindices = op.attributes().getAttrType("Tindices"); useLocking = op.attributes().getAttrBool("use_locking"); + badIndicesPolicy = op.attributes().getAttrString("bad_indices_policy"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reverse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reverse.java index 65a6ac9ab0c..711b7148209 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reverse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reverse.java @@ -76,8 +76,6 @@ * [16, 17, 18, 19], * [12, 13, 14, 15]]]] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Reverse.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReverseSequence.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReverseSequence.java index b7eb3fb25a2..e18f16874f0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReverseSequence.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReverseSequence.java @@ -84,8 +84,6 @@ * output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...] * output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...] 
* - * - * @param data type for {@code output} output */ @OpMetadata( opType = ReverseSequence.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Roll.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Roll.java index a2f04750d53..e190730b970 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Roll.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Roll.java @@ -54,8 +54,6 @@ * # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] * roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Roll.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterAdd.java index bc66b56b3d1..9f0bc6a526f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterAdd.java @@ -55,8 +55,6 @@ *

    * *
    - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = ScatterAdd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterDiv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterDiv.java index 083f4de2a81..902d11400e5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterDiv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterDiv.java @@ -52,8 +52,6 @@ *

    Duplicate entries are handled correctly: if multiple {@code indices} reference * the same location, their contributions divide. *

    Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}. - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = ScatterDiv.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMax.java index 162556fb11c..9b761e52419 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMax.java @@ -54,8 +54,6 @@ *

    * *
    - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = ScatterMax.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMin.java index 4264f92bc7e..7f725ad19d0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMin.java @@ -54,8 +54,6 @@ *
    * *
    - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = ScatterMin.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMul.java index 7fb20e9d36e..ae8bbca9670 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMul.java @@ -52,8 +52,6 @@ *

    Duplicate entries are handled correctly: if multiple {@code indices} reference * the same location, their contributions multiply. *

    Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}. - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = ScatterMul.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNd.java index 34487ebf9d7..ad6bcd00a16 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNd.java @@ -107,10 +107,16 @@ * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], * [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]] * - *

    Note that on CPU, if an out of bound index is found, an error is returned. - * On GPU, if an out of bound index is found, the index is ignored. - * - * @param data type for {@code output} output + *

    If {@code indices} contains any out-of-bound indices, depending on + * {@code bad_indices_policy}, the op will either return an error or ignore the + * out-of-bound indices. {@code bad_indices_policy} can be one of the following values: + *

      + *
    1. "" or "DEFAULT": raises on CPU and ignore on GPU. This is because + * historically on CPU and GPU we handle errors in different ways, and for + * backward compatibility we keep the default behavior.
    2. + *
    3. "ERROR": raises error; GPU does not support this value.
    4. + *
    5. "IGNORE": ignore the bad indices; supported on both CPU and GPU.
    6. + *
    */ @OpMetadata( opType = ScatterNd.OP_NAME, @@ -138,6 +144,7 @@ public ScatterNd(Operation operation) { * @param indices Tensor of indices. * @param updates Values to scatter into the output tensor. * @param shape 1-D. The shape of the output tensor. + * @param options carries optional attribute values * @param data type for {@code ScatterNd} output and operands * @param data type for {@code ScatterNd} output and operands * @return a new instance of ScatterNd @@ -146,14 +153,31 @@ public ScatterNd(Operation operation) { describeByClass = true ) public static ScatterNd create(Scope scope, - Operand indices, Operand updates, Operand shape) { + Operand indices, Operand updates, Operand shape, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "ScatterNd"); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); opBuilder.addInput(shape.asOutput()); + if (options != null) { + for (Options opts : options) { + if (opts.badIndicesPolicy != null) { + opBuilder.setAttr("bad_indices_policy", opts.badIndicesPolicy); + } + } + } return new ScatterNd<>(opBuilder.build()); } + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. + */ + public static Options badIndicesPolicy(String badIndicesPolicy) { + return new Options().badIndicesPolicy(badIndicesPolicy); + } + /** * Gets output. * A new tensor with the given shape and updates applied according @@ -169,6 +193,27 @@ public Output asOutput() { return output; } + /** + * Optional attributes for {@link org.tensorflow.op.core.ScatterNd} + */ + public static class Options { + private String badIndicesPolicy; + + private Options() { + } + + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. 
+ */ + public Options badIndicesPolicy(String badIndicesPolicy) { + this.badIndicesPolicy = badIndicesPolicy; + return this; + } + } + @OpInputsMetadata( outputsClass = ScatterNd.class ) @@ -198,14 +243,20 @@ public static class Inputs extends RawOpInpu */ public final DataType Tindices; + /** + * The badIndicesPolicy attribute + */ + public final String badIndicesPolicy; + public Inputs(GraphOperation op) { - super(new ScatterNd<>(op), op, Arrays.asList("T", "Tindices")); + super(new ScatterNd<>(op), op, Arrays.asList("T", "Tindices", "bad_indices_policy")); int inputIndex = 0; indices = (Operand) op.input(inputIndex++); updates = (Operand) op.input(inputIndex++); shape = (Operand) op.input(inputIndex++); T = op.attributes().getAttrType("T"); Tindices = op.attributes().getAttrType("Tindices"); + badIndicesPolicy = op.attributes().getAttrString("bad_indices_policy"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdAdd.java index aef9eed4a32..257dce25682 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdAdd.java @@ -62,8 +62,6 @@ * *

    See {@code tf.scatter_nd} for more details about how to make updates to * slices. - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = ScatterNdAdd.OP_NAME, @@ -111,6 +109,9 @@ public static ScatterNdAdd create(Scope scope, Operand r if (opts.useLocking != null) { opBuilder.setAttr("use_locking", opts.useLocking); } + if (opts.badIndicesPolicy != null) { + opBuilder.setAttr("bad_indices_policy", opts.badIndicesPolicy); + } } } return new ScatterNdAdd<>(opBuilder.build()); @@ -128,6 +129,16 @@ public static Options useLocking(Boolean useLocking) { return new Options().useLocking(useLocking); } + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. + */ + public static Options badIndicesPolicy(String badIndicesPolicy) { + return new Options().badIndicesPolicy(badIndicesPolicy); + } + /** * Gets outputRef. * Same as ref. Returned as a convenience for operations that want @@ -149,6 +160,8 @@ public Output asOutput() { public static class Options { private Boolean useLocking; + private String badIndicesPolicy; + private Options() { } @@ -164,6 +177,17 @@ public Options useLocking(Boolean useLocking) { this.useLocking = useLocking; return this; } + + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. 
+ */ + public Options badIndicesPolicy(String badIndicesPolicy) { + this.badIndicesPolicy = badIndicesPolicy; + return this; + } } @OpInputsMetadata( @@ -204,8 +228,13 @@ public static class Inputs extends RawOpInputs> */ public final boolean useLocking; + /** + * The badIndicesPolicy attribute + */ + public final String badIndicesPolicy; + public Inputs(GraphOperation op) { - super(new ScatterNdAdd<>(op), op, Arrays.asList("T", "Tindices", "use_locking")); + super(new ScatterNdAdd<>(op), op, Arrays.asList("T", "Tindices", "use_locking", "bad_indices_policy")); int inputIndex = 0; ref = (Operand) op.input(inputIndex++); indices = (Operand) op.input(inputIndex++); @@ -213,6 +242,7 @@ public Inputs(GraphOperation op) { T = op.attributes().getAttrType("T"); Tindices = op.attributes().getAttrType("Tindices"); useLocking = op.attributes().getAttrBool("use_locking"); + badIndicesPolicy = op.attributes().getAttrString("bad_indices_policy"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMax.java index df88fe448ac..a7ebdf162d6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMax.java @@ -29,19 +29,19 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; /** * Computes element-wise maximum. 
- * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = ScatterNdMax.OP_NAME, inputsClass = ScatterNdMax.Inputs.class ) +@Operator public final class ScatterNdMax extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -83,6 +83,9 @@ public static ScatterNdMax create(Scope scope, Operand r if (opts.useLocking != null) { opBuilder.setAttr("use_locking", opts.useLocking); } + if (opts.badIndicesPolicy != null) { + opBuilder.setAttr("bad_indices_policy", opts.badIndicesPolicy); + } } } return new ScatterNdMax<>(opBuilder.build()); @@ -100,6 +103,16 @@ public static Options useLocking(Boolean useLocking) { return new Options().useLocking(useLocking); } + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. + */ + public static Options badIndicesPolicy(String badIndicesPolicy) { + return new Options().badIndicesPolicy(badIndicesPolicy); + } + /** * Gets outputRef. * Same as ref. Returned as a convenience for operations that want @@ -121,6 +134,8 @@ public Output asOutput() { public static class Options { private Boolean useLocking; + private String badIndicesPolicy; + private Options() { } @@ -136,6 +151,17 @@ public Options useLocking(Boolean useLocking) { this.useLocking = useLocking; return this; } + + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. 
+ */ + public Options badIndicesPolicy(String badIndicesPolicy) { + this.badIndicesPolicy = badIndicesPolicy; + return this; + } } @OpInputsMetadata( @@ -176,8 +202,13 @@ public static class Inputs extends RawOpInputs> */ public final boolean useLocking; + /** + * The badIndicesPolicy attribute + */ + public final String badIndicesPolicy; + public Inputs(GraphOperation op) { - super(new ScatterNdMax<>(op), op, Arrays.asList("T", "Tindices", "use_locking")); + super(new ScatterNdMax<>(op), op, Arrays.asList("T", "Tindices", "use_locking", "bad_indices_policy")); int inputIndex = 0; ref = (Operand) op.input(inputIndex++); indices = (Operand) op.input(inputIndex++); @@ -185,6 +216,7 @@ public Inputs(GraphOperation op) { T = op.attributes().getAttrType("T"); Tindices = op.attributes().getAttrType("Tindices"); useLocking = op.attributes().getAttrBool("use_locking"); + badIndicesPolicy = op.attributes().getAttrString("bad_indices_policy"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMin.java index 4bf938febaf..3ade02671ed 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMin.java @@ -29,19 +29,19 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; /** * Computes element-wise minimum. 
- * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = ScatterNdMin.OP_NAME, inputsClass = ScatterNdMin.Inputs.class ) +@Operator public final class ScatterNdMin extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -83,6 +83,9 @@ public static ScatterNdMin create(Scope scope, Operand r if (opts.useLocking != null) { opBuilder.setAttr("use_locking", opts.useLocking); } + if (opts.badIndicesPolicy != null) { + opBuilder.setAttr("bad_indices_policy", opts.badIndicesPolicy); + } } } return new ScatterNdMin<>(opBuilder.build()); @@ -100,6 +103,16 @@ public static Options useLocking(Boolean useLocking) { return new Options().useLocking(useLocking); } + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. + */ + public static Options badIndicesPolicy(String badIndicesPolicy) { + return new Options().badIndicesPolicy(badIndicesPolicy); + } + /** * Gets outputRef. * Same as ref. Returned as a convenience for operations that want @@ -121,6 +134,8 @@ public Output asOutput() { public static class Options { private Boolean useLocking; + private String badIndicesPolicy; + private Options() { } @@ -136,6 +151,17 @@ public Options useLocking(Boolean useLocking) { this.useLocking = useLocking; return this; } + + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. 
+ */ + public Options badIndicesPolicy(String badIndicesPolicy) { + this.badIndicesPolicy = badIndicesPolicy; + return this; + } } @OpInputsMetadata( @@ -176,8 +202,13 @@ public static class Inputs extends RawOpInputs> */ public final boolean useLocking; + /** + * The badIndicesPolicy attribute + */ + public final String badIndicesPolicy; + public Inputs(GraphOperation op) { - super(new ScatterNdMin<>(op), op, Arrays.asList("T", "Tindices", "use_locking")); + super(new ScatterNdMin<>(op), op, Arrays.asList("T", "Tindices", "use_locking", "bad_indices_policy")); int inputIndex = 0; ref = (Operand) op.input(inputIndex++); indices = (Operand) op.input(inputIndex++); @@ -185,6 +216,7 @@ public Inputs(GraphOperation op) { T = op.attributes().getAttrType("T"); Tindices = op.attributes().getAttrType("Tindices"); useLocking = op.attributes().getAttrBool("use_locking"); + badIndicesPolicy = op.attributes().getAttrString("bad_indices_policy"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdNonAliasingAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdNonAliasingAdd.java index 4d29ef748d8..c152dadc35e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdNonAliasingAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdNonAliasingAdd.java @@ -63,8 +63,6 @@ * [1, 13, 3, 14, 14, 6, 7, 20] * *

    See {@code tf.scatter_nd} for more details about how to make updates to slices. - * - * @param data type for {@code output} output */ @OpMetadata( opType = ScatterNdNonAliasingAdd.OP_NAME, @@ -94,6 +92,7 @@ public ScatterNdNonAliasingAdd(Operation operation) { * A tensor of indices into {@code input}. * @param updates A Tensor. Must have the same type as ref. A tensor of updated values * to add to {@code input}. + * @param options carries optional attribute values * @param data type for {@code ScatterNdNonAliasingAdd} output and operands * @return a new instance of ScatterNdNonAliasingAdd */ @@ -101,14 +100,31 @@ public ScatterNdNonAliasingAdd(Operation operation) { describeByClass = true ) public static ScatterNdNonAliasingAdd create(Scope scope, Operand input, - Operand indices, Operand updates) { + Operand indices, Operand updates, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "ScatterNdNonAliasingAdd"); opBuilder.addInput(input.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); + if (options != null) { + for (Options opts : options) { + if (opts.badIndicesPolicy != null) { + opBuilder.setAttr("bad_indices_policy", opts.badIndicesPolicy); + } + } + } return new ScatterNdNonAliasingAdd<>(opBuilder.build()); } + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. + */ + public static Options badIndicesPolicy(String badIndicesPolicy) { + return new Options().badIndicesPolicy(badIndicesPolicy); + } + /** * Gets output. * A {@code Tensor} with the same shape as {@code input}, containing values of {@code input} @@ -124,6 +140,27 @@ public Output asOutput() { return output; } + /** + * Optional attributes for {@link org.tensorflow.op.core.ScatterNdNonAliasingAdd} + */ + public static class Options { + private String badIndicesPolicy; + + private Options() { + } + + /** + * Sets the badIndicesPolicy option. 
+ * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. + */ + public Options badIndicesPolicy(String badIndicesPolicy) { + this.badIndicesPolicy = badIndicesPolicy; + return this; + } + } + @OpInputsMetadata( outputsClass = ScatterNdNonAliasingAdd.class ) @@ -155,14 +192,20 @@ public static class Inputs extends RawOpInputs(op), op, Arrays.asList("T", "Tindices")); + super(new ScatterNdNonAliasingAdd<>(op), op, Arrays.asList("T", "Tindices", "bad_indices_policy")); int inputIndex = 0; input = (Operand) op.input(inputIndex++); indices = (Operand) op.input(inputIndex++); updates = (Operand) op.input(inputIndex++); T = op.attributes().getAttrType("T"); Tindices = op.attributes().getAttrType("Tindices"); + badIndicesPolicy = op.attributes().getAttrString("bad_indices_policy"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdSub.java index b2018d27511..21654611e88 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdSub.java @@ -63,8 +63,6 @@ * *

    See {@code tf.scatter_nd} for more details about how to make updates to * slices. - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = ScatterNdSub.OP_NAME, @@ -112,6 +110,9 @@ public static ScatterNdSub create(Scope scope, Operand r if (opts.useLocking != null) { opBuilder.setAttr("use_locking", opts.useLocking); } + if (opts.badIndicesPolicy != null) { + opBuilder.setAttr("bad_indices_policy", opts.badIndicesPolicy); + } } } return new ScatterNdSub<>(opBuilder.build()); @@ -129,6 +130,16 @@ public static Options useLocking(Boolean useLocking) { return new Options().useLocking(useLocking); } + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. + */ + public static Options badIndicesPolicy(String badIndicesPolicy) { + return new Options().badIndicesPolicy(badIndicesPolicy); + } + /** * Gets outputRef. * Same as ref. Returned as a convenience for operations that want @@ -150,6 +161,8 @@ public Output asOutput() { public static class Options { private Boolean useLocking; + private String badIndicesPolicy; + private Options() { } @@ -165,6 +178,17 @@ public Options useLocking(Boolean useLocking) { this.useLocking = useLocking; return this; } + + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. 
+ */ + public Options badIndicesPolicy(String badIndicesPolicy) { + this.badIndicesPolicy = badIndicesPolicy; + return this; + } } @OpInputsMetadata( @@ -205,8 +229,13 @@ public static class Inputs extends RawOpInputs> */ public final boolean useLocking; + /** + * The badIndicesPolicy attribute + */ + public final String badIndicesPolicy; + public Inputs(GraphOperation op) { - super(new ScatterNdSub<>(op), op, Arrays.asList("T", "Tindices", "use_locking")); + super(new ScatterNdSub<>(op), op, Arrays.asList("T", "Tindices", "use_locking", "bad_indices_policy")); int inputIndex = 0; ref = (Operand) op.input(inputIndex++); indices = (Operand) op.input(inputIndex++); @@ -214,6 +243,7 @@ public Inputs(GraphOperation op) { T = op.attributes().getAttrType("T"); Tindices = op.attributes().getAttrType("Tindices"); useLocking = op.attributes().getAttrBool("use_locking"); + badIndicesPolicy = op.attributes().getAttrString("bad_indices_policy"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdUpdate.java index 56427f20fac..5bf1e30fe35 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdUpdate.java @@ -62,8 +62,6 @@ *

    See {@code tf.scatter_nd} for more details about how to make updates to * slices. *

    See also {@code tf.scatter_update} and {@code tf.batch_scatter_update}. - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = ScatterNdUpdate.OP_NAME, @@ -111,6 +109,9 @@ public static ScatterNdUpdate create(Scope scope, Operand(opBuilder.build()); @@ -128,6 +129,16 @@ public static Options useLocking(Boolean useLocking) { return new Options().useLocking(useLocking); } + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. + */ + public static Options badIndicesPolicy(String badIndicesPolicy) { + return new Options().badIndicesPolicy(badIndicesPolicy); + } + /** * Gets outputRef. * Same as ref. Returned as a convenience for operations that want to @@ -149,6 +160,8 @@ public Output asOutput() { public static class Options { private Boolean useLocking; + private String badIndicesPolicy; + private Options() { } @@ -164,6 +177,17 @@ public Options useLocking(Boolean useLocking) { this.useLocking = useLocking; return this; } + + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. 
+ */ + public Options badIndicesPolicy(String badIndicesPolicy) { + this.badIndicesPolicy = badIndicesPolicy; + return this; + } } @OpInputsMetadata( @@ -204,8 +228,13 @@ public static class Inputs extends RawOpInputs(op), op, Arrays.asList("T", "Tindices", "use_locking")); + super(new ScatterNdUpdate<>(op), op, Arrays.asList("T", "Tindices", "use_locking", "bad_indices_policy")); int inputIndex = 0; ref = (Operand) op.input(inputIndex++); indices = (Operand) op.input(inputIndex++); @@ -213,6 +242,7 @@ public Inputs(GraphOperation op) { T = op.attributes().getAttrType("T"); Tindices = op.attributes().getAttrType("Tindices"); useLocking = op.attributes().getAttrBool("use_locking"); + badIndicesPolicy = op.attributes().getAttrString("bad_indices_policy"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterSub.java index 06d274ff356..4686a81470f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterSub.java @@ -54,8 +54,6 @@ *

    * *
    - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = ScatterSub.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterUpdate.java index 711cbf7485f..60e22039589 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterUpdate.java @@ -57,8 +57,6 @@ * * *

    See also {@code tf.batch_scatter_update} and {@code tf.scatter_nd_update}. - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = ScatterUpdate.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Select.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Select.java index 71caff86d14..c88ea468f39 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Select.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Select.java @@ -36,8 +36,6 @@ /** * The SelectV2 operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = Select.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Send.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Send.java index cc4effd6fe1..e722ace450a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Send.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Send.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +39,7 @@ opType = Send.OP_NAME, inputsClass = Send.Inputs.class ) +@Operator public final class Send extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SetDiff1d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SetDiff1d.java index 61af8e762a2..562b2088b93 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SetDiff1d.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SetDiff1d.java @@ -54,10 +54,6 @@ * out ==> [2, 4, 6] * idx ==> [1, 3, 5] * - * - * @param data type for {@code out} output - * - * @param data type for {@code idx} output */ @OpMetadata( opType = SetDiff1d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Shape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Shape.java index 4f9f9115847..2f7592fbc03 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Shape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Shape.java @@ -44,8 +44,6 @@ * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] * shape(t) ==> [2, 2, 3] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Shape.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ShapeN.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ShapeN.java index b56a39452d5..b53a00a1a82 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ShapeN.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ShapeN.java @@ -41,8 +41,6 @@ /** * Returns shape of tensors. * This operation returns N 1-D integer tensors representing shape of {@code input[i]s}. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = ShapeN.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Size.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Size.java index 1ad02bc0f9b..2be90850900 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Size.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Size.java @@ -45,8 +45,6 @@ * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] * size(t) ==> 12 * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Size.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Slice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Slice.java index b53cae539a0..37a168fb6f7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Slice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Slice.java @@ -41,8 +41,6 @@ * 'begin'. *

    Requirements: * 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) - * - * @param data type for {@code output} output */ @OpMetadata( opType = Slice.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Snapshot.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Snapshot.java index d8b1ed563d9..bafca31221f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Snapshot.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Snapshot.java @@ -35,8 +35,6 @@ /** * Returns a copy of the input tensor. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Snapshot.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SpaceToBatchNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SpaceToBatchNd.java index d56e6ef8709..2a366e46641 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SpaceToBatchNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SpaceToBatchNd.java @@ -132,8 +132,6 @@ * *

    Among others, this operation is useful for reducing atrous convolution into * regular convolution. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SpaceToBatchNd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Split.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Split.java index f6a01ed1950..dc4fad88677 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Split.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Split.java @@ -38,8 +38,6 @@ /** * Splits a tensor into {@code num_split} tensors along one dimension. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Split.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SplitV.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SplitV.java index 8d1beb3fc5b..cc0525e9645 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SplitV.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SplitV.java @@ -39,8 +39,6 @@ /** * Splits a tensor into {@code num_split} tensors along one dimension. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = SplitV.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Squeeze.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Squeeze.java index c904d7e7cda..52155b47d43 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Squeeze.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Squeeze.java @@ -50,8 +50,6 @@ * # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] * shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Squeeze.OP_NAME, @@ -194,7 +192,7 @@ public static class Inputs extends RawOpInputs> { /** * If specified, only squeezes the dimensions listed. The dimension * index starts at 0. It is an error to squeeze a dimension that is not 1. Must - * be in the range `[-rank(input), rank(input))`. + * be in the range {@code [-rank(input), rank(input))}. */ public final long[] axis; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stack.java index 81b4adc7f5e..976a86955b3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stack.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stack.java @@ -51,8 +51,6 @@ * pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]] * *

    This is the opposite of {@code unpack}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Stack.OP_NAME, @@ -162,7 +160,7 @@ public static class Inputs extends RawOpInputs> { /** * Dimension along which to pack. Negative values wrap around, so the - * valid range is `[-(R+1), R+1)`. + * valid range is {@code [-(R+1), R+1)}. */ public final long axis; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StackClose.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StackClose.java new file mode 100644 index 00000000000..810fb716a34 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StackClose.java @@ -0,0 +1,83 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.core; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.family.TType; + +/** + * Delete the stack from its resource container. + */ +@OpMetadata( + opType = StackClose.OP_NAME, + inputsClass = StackClose.Inputs.class +) +@Operator +public final class StackClose extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "StackCloseV2"; + + public StackClose(Operation operation) { + super(operation, OP_NAME); + } + + /** + * Factory method to create a class wrapping a new StackCloseV2 operation. + * + * @param scope current scope + * @param handle The handle to a stack. + * @return a new instance of StackClose + */ + @Endpoint( + describeByClass = true + ) + public static StackClose create(Scope scope, Operand handle) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "StackClose"); + opBuilder.addInput(handle.asOutput()); + return new StackClose(opBuilder.build()); + } + + @OpInputsMetadata( + outputsClass = StackClose.class + ) + public static class Inputs extends RawOpInputs { + /** + * The handle to a stack. 
+ */ + public final Operand handle; + + public Inputs(GraphOperation op) { + super(new StackClose(op), op, Arrays.asList()); + int inputIndex = 0; + handle = (Operand) op.input(inputIndex++); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StackCreate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StackCreate.java new file mode 100644 index 00000000000..173c63d4c1e --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StackCreate.java @@ -0,0 +1,167 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.core; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.proto.DataType; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.family.TType; + +/** + * A stack that produces elements in first-in last-out order. + */ +@OpMetadata( + opType = StackCreate.OP_NAME, + inputsClass = StackCreate.Inputs.class +) +@Operator +public final class StackCreate extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "StackV2"; + + private Output handle; + + @SuppressWarnings("unchecked") + public StackCreate(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + handle = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new StackV2 operation. + * + * @param scope current scope + * @param maxSize The maximum size of the stack if non-negative. If negative, the stack + * size is unlimited. + * @param elemType The type of the elements on the stack. + * @param options carries optional attribute values + * @param data type for {@code StackV2} output and operands + * @return a new instance of StackCreate + */ + @Endpoint( + describeByClass = true + ) + public static StackCreate create(Scope scope, Operand maxSize, + Class elemType, Options... 
options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "StackCreate"); + opBuilder.addInput(maxSize.asOutput()); + opBuilder.setAttr("elem_type", Operands.toDataType(elemType)); + if (options != null) { + for (Options opts : options) { + if (opts.stackName != null) { + opBuilder.setAttr("stack_name", opts.stackName); + } + } + } + return new StackCreate(opBuilder.build()); + } + + /** + * Sets the stackName option. + * + * @param stackName Overrides the name used for the temporary stack resource. Default + * value is the name of the 'Stack' op (which is guaranteed unique). + * @return this Options instance. + */ + public static Options stackName(String stackName) { + return new Options().stackName(stackName); + } + + /** + * Gets handle. + * The handle to the stack. + * @return handle. + */ + public Output handle() { + return handle; + } + + @Override + @SuppressWarnings("unchecked") + public Output asOutput() { + return (Output) handle; + } + + /** + * Optional attributes for {@link org.tensorflow.op.core.StackCreate} + */ + public static class Options { + private String stackName; + + private Options() { + } + + /** + * Sets the stackName option. + * + * @param stackName Overrides the name used for the temporary stack resource. Default + * value is the name of the 'Stack' op (which is guaranteed unique). + * @return this Options instance. + */ + public Options stackName(String stackName) { + this.stackName = stackName; + return this; + } + } + + @OpInputsMetadata( + outputsClass = StackCreate.class + ) + public static class Inputs extends RawOpInputs { + /** + * The maximum size of the stack if non-negative. If negative, the stack + * size is unlimited. + */ + public final Operand maxSize; + + /** + * The type of the elements on the stack. + */ + public final DataType elemType; + + /** + * Overrides the name used for the temporary stack resource. Default + * value is the name of the 'Stack' op (which is guaranteed unique). 
+ */ + public final String stackName; + + public Inputs(GraphOperation op) { + super(new StackCreate(op), op, Arrays.asList("elem_type", "stack_name")); + int inputIndex = 0; + maxSize = (Operand) op.input(inputIndex++); + elemType = op.attributes().getAttrType("elem_type"); + stackName = op.attributes().getAttrString("stack_name"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StackPop.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StackPop.java new file mode 100644 index 00000000000..502cfcc8c06 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StackPop.java @@ -0,0 +1,114 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.core; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.proto.DataType; +import org.tensorflow.types.family.TType; + +/** + * Pop the element at the top of the stack. + */ +@OpMetadata( + opType = StackPop.OP_NAME, + inputsClass = StackPop.Inputs.class +) +@Operator +public final class StackPop extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "StackPopV2"; + + private Output elem; + + public StackPop(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + elem = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new StackPopV2 operation. + * + * @param scope current scope + * @param handle The handle to a stack. + * @param elemType The type of the elem that is popped. + * @param data type for {@code StackPopV2} output and operands + * @return a new instance of StackPop + */ + @Endpoint( + describeByClass = true + ) + public static StackPop create(Scope scope, Operand handle, + Class elemType) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "StackPop"); + opBuilder.addInput(handle.asOutput()); + opBuilder.setAttr("elem_type", Operands.toDataType(elemType)); + return new StackPop<>(opBuilder.build()); + } + + /** + * Gets elem. + * The tensor that is popped from the top of the stack. + * @return elem. 
+ */ + public Output elem() { + return elem; + } + + @Override + public Output asOutput() { + return elem; + } + + @OpInputsMetadata( + outputsClass = StackPop.class + ) + public static class Inputs extends RawOpInputs> { + /** + * The handle to a stack. + */ + public final Operand handle; + + /** + * The type of the elem that is popped. + */ + public final DataType elemType; + + public Inputs(GraphOperation op) { + super(new StackPop<>(op), op, Arrays.asList("elem_type")); + int inputIndex = 0; + handle = (Operand) op.input(inputIndex++); + elemType = op.attributes().getAttrType("elem_type"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StackPush.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StackPush.java new file mode 100644 index 00000000000..f9f05ff1912 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StackPush.java @@ -0,0 +1,164 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.core; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.proto.DataType; +import org.tensorflow.types.family.TType; + +/** + * Push an element onto the stack. + */ +@OpMetadata( + opType = StackPush.OP_NAME, + inputsClass = StackPush.Inputs.class +) +@Operator +public final class StackPush extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "StackPushV2"; + + private Output output; + + public StackPush(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new StackPushV2 operation. + * + * @param scope current scope + * @param handle The handle to a stack. + * @param elem The tensor to be pushed onto the stack. + * @param options carries optional attribute values + * @param data type for {@code StackPushV2} output and operands + * @return a new instance of StackPush + */ + @Endpoint( + describeByClass = true + ) + public static StackPush create(Scope scope, Operand handle, + Operand elem, Options... 
options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "StackPush"); + opBuilder.addInput(handle.asOutput()); + opBuilder.addInput(elem.asOutput()); + if (options != null) { + for (Options opts : options) { + if (opts.swapMemory != null) { + opBuilder.setAttr("swap_memory", opts.swapMemory); + } + } + } + return new StackPush<>(opBuilder.build()); + } + + /** + * Sets the swapMemory option. + * + * @param swapMemory Swap {@code elem} to CPU. Default to false. + * @return this Options instance. + */ + public static Options swapMemory(Boolean swapMemory) { + return new Options().swapMemory(swapMemory); + } + + /** + * Gets output. + * The same tensor as the input 'elem'. + * @return output. + */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } + + /** + * Optional attributes for {@link org.tensorflow.op.core.StackPush} + */ + public static class Options { + private Boolean swapMemory; + + private Options() { + } + + /** + * Sets the swapMemory option. + * + * @param swapMemory Swap {@code elem} to CPU. Default to false. + * @return this Options instance. + */ + public Options swapMemory(Boolean swapMemory) { + this.swapMemory = swapMemory; + return this; + } + } + + @OpInputsMetadata( + outputsClass = StackPush.class + ) + public static class Inputs extends RawOpInputs> { + /** + * The handle to a stack. + */ + public final Operand handle; + + /** + * The tensor to be pushed onto the stack. + */ + public final Operand elem; + + /** + * The T attribute + */ + public final DataType T; + + /** + * Swap {@code elem} to CPU. Default to false. 
+ */ + public final boolean swapMemory; + + public Inputs(GraphOperation op) { + super(new StackPush<>(op), op, Arrays.asList("T", "swap_memory")); + int inputIndex = 0; + handle = (Operand) op.input(inputIndex++); + elem = (Operand) op.input(inputIndex++); + T = op.attributes().getAttrType("T"); + swapMemory = op.attributes().getAttrBool("swap_memory"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stage.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stage.java index 3b68db75d98..11adc169e0d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stage.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stage.java @@ -203,14 +203,14 @@ public static class Inputs extends RawOpInputs { public final Iterable> values; /** - * Maximum number of elements in the Staging Area. If > 0, inserts + * Maximum number of elements in the Staging Area. If > 0, inserts * on the container will block when the capacity is reached. */ public final long capacity; /** * The maximum number of bytes allowed for Tensors in the Staging Area. - * If > 0, inserts will block until sufficient space is available. + * If > 0, inserts will block until sufficient space is available. 
*/ public final long memoryLimit; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatelessCase.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatelessCase.java index d314d761266..915d6908936 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatelessCase.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatelessCase.java @@ -34,6 +34,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -65,6 +66,7 @@ opType = StatelessCase.OP_NAME, inputsClass = StatelessCase.Inputs.class ) +@Operator public final class StatelessCase extends RawOp implements Case { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StochasticCastToInt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StochasticCastToInt.java index a4d7a203402..a06a2c8017d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StochasticCastToInt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StochasticCastToInt.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -39,13 +40,12 @@ * Stochastically cast a given tensor from floats to ints. 
* The values are cast with a deterministic pseudo-random tensor from a uniform distribution generated from user given key, counter, algorithm. Values will saturate if out of the specified integer type range, and will become zero if inputs are NaN. *

    The outputs are a deterministic function of {@code input}, {@code key}, {@code counter}, {@code alg}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StochasticCastToInt.OP_NAME, inputsClass = StochasticCastToInt.Inputs.class ) +@Operator public final class StochasticCastToInt extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StopGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StopGradient.java index c2086cb3e92..fb486c42253 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StopGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StopGradient.java @@ -85,8 +85,6 @@ *

  • Adversarial training, where no backprop should happen through the adversarial * example generation process.
  • * - * - * @param data type for {@code output} output */ @OpMetadata( opType = StopGradient.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSlice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSlice.java index fd8a07ebe47..ec55dae1c24 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSlice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSlice.java @@ -133,8 +133,6 @@ *

    Requirements: * {@code 0 != strides[i] for i in [0, m)} * {@code ellipsis_mask must be a power of two (only one ellipsis)} - * - * @param data type for {@code output} output */ @OpMetadata( opType = StridedSlice.OP_NAME, @@ -422,40 +420,40 @@ public static class Inputs extends RawOpInpu /** * a bitmask where a bit i being 1 means to ignore the begin * value and instead use the largest interval possible. At runtime - * begin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or - * `[-1, n-1]` if `stride[i] < 0` + * begin[i] will be replaced with {@code [0, n-1)} if {@code stride[i] > 0} or + * {@code [-1, n-1]} if {@code stride[i] < 0} */ public final long beginMask; /** - * analogous to `begin_mask` + * analogous to {@code begin_mask} */ public final long endMask; /** - * a bitmask where bit `i` being 1 means the `i`th + * a bitmask where bit {@code i} being 1 means the {@code i}th * position is actually an ellipsis. One bit at most can be 1. - * If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)` - * is provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis + * If {@code ellipsis_mask == 0}, then an implicit ellipsis mask of {@code 1 << (m+1)} + * is provided. This means that {@code foo[3:5] == foo[3:5, ...]}. An ellipsis * implicitly creates as many range specifications as necessary to fully * specify the sliced range for every dimension. For example for a 4-dimensional - * tensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`. + * tensor {@code foo} the slice {@code foo[2, ..., 5:8]} implies {@code foo[2, :, :, 5:8]}. */ public final long ellipsisMask; /** - * a bitmask where bit `i` being 1 means the `i`th + * a bitmask where bit {@code i} being 1 means the {@code i}th * specification creates a new shape 1 dimension. For example - * `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor. + * {@code foo[:4, tf.newaxis, :2]} would produce a shape {@code (4, 1, 2)} tensor. 
*/ public final long newAxisMask; /** - * a bitmask where bit `i` implies that the `i`th + * a bitmask where bit {@code i} implies that the {@code i}th * specification should shrink the dimensionality. begin and end * must imply a slice of size 1 in the dimension. For example in - * python one might do `foo[:, 3, :]` which would result in - * `shrink_axis_mask` being 2. + * python one might do {@code foo[:, 3, :]} which would result in + * {@code shrink_axis_mask} being 2. */ public final long shrinkAxisMask; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceAssign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceAssign.java index b2ab8d606e2..2911a675905 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceAssign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceAssign.java @@ -41,8 +41,6 @@ * {@code begin}, {@code end}, {@code strides}, etc. work exactly as in {@code StridedSlice}. *

    NOTE this op currently does not support broadcasting and so {@code value}'s * shape must be exactly the shape produced by the slice of {@code ref}. - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = StridedSliceAssign.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceGrad.java index 2a234c9ab7a..fcd7518dd87 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceGrad.java @@ -43,8 +43,6 @@ *

    Arguments are the same as StridedSliceGrad with the exception that * {@code dy} is the input gradient to be propagated and {@code shape} is the * shape of {@code StridedSlice}'s {@code input}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StridedSliceGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Sum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Sum.java index 15957ea2189..abcdb1ee9ef 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Sum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Sum.java @@ -40,8 +40,6 @@ * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Sum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SwitchCond.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SwitchCond.java index c6a8f810467..c6842c9ab87 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SwitchCond.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SwitchCond.java @@ -39,8 +39,6 @@ * If {@code pred} is true, the {@code data} input is forwarded to {@code output_true}. Otherwise, * the data goes to {@code output_false}. *

    See also {@code RefSwitch} and {@code Merge}. - * - * @param data type for {@code output_false} output */ @OpMetadata( opType = SwitchCond.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SyncDevice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SyncDevice.java index 357e65e6b2b..a85fd9312c5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SyncDevice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SyncDevice.java @@ -27,6 +27,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; /** * Synchronizes the device this op is run on. @@ -37,6 +38,7 @@ opType = SyncDevice.OP_NAME, inputsClass = SyncDevice.Inputs.class ) +@Operator public final class SyncDevice extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TemporaryVariable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TemporaryVariable.java index 3e8c8a70ec8..d66021bb728 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TemporaryVariable.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TemporaryVariable.java @@ -48,8 +48,6 @@ * var = state_ops.assign(var, [[4.0, 5.0]]) * var = state_ops.assign_add(var, [[6.0, 7.0]]) * final = state_ops._destroy_temporary_variable(var, var_name=var_name) - * - * @param data type for {@code ref} output */ @OpMetadata( opType = TemporaryVariable.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayConcat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayConcat.java index b3dbc08ef3e..75ba48a0102 
100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayConcat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayConcat.java @@ -48,8 +48,6 @@ * (n0 + n1 + ... + n(T-1) x d0 x d1 x ...) * *

    All elements must have the same shape (excepting the first dimension). - * - * @param data type for {@code value} output */ @OpMetadata( opType = TensorArrayConcat.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGather.java index 0f7fd351089..60d8b437b00 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGather.java @@ -40,8 +40,6 @@ /** * Gather specific elements from the TensorArray into output {@code value}. * All elements selected by {@code indices} must have the same shape. - * - * @param data type for {@code value} output */ @OpMetadata( opType = TensorArrayGather.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayPack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayPack.java index 6e52e6ef906..d1cf5c89e65 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayPack.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayPack.java @@ -39,8 +39,6 @@ /** * The TensorArrayPack operation - * - * @param data type for {@code value} output */ @OpMetadata( opType = TensorArrayPack.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayRead.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayRead.java index 6765205c463..f5a0aa073a7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayRead.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayRead.java @@ -38,8 +38,6 @@ /** * Read an element from the TensorArray into output {@code 
value}. - * - * @param data type for {@code value} output */ @OpMetadata( opType = TensorArrayRead.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListConcat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListConcat.java index 664783a09c5..70ef65f9314 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListConcat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListConcat.java @@ -48,8 +48,6 @@ * is not already set. * tensor: The concated result. * lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient. - * - * @param data type for {@code tensor} output */ @OpMetadata( opType = TensorListConcat.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListElementShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListElementShape.java index d955a6a636d..6190f9c1c01 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListElementShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListElementShape.java @@ -39,8 +39,6 @@ * The shape of the elements of the given list, as a tensor. 
* input_handle: the list * element_shape: the shape of elements of the list - * - * @param data type for {@code element_shape} output */ @OpMetadata( opType = TensorListElementShape.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGather.java index 27a627b4759..ac725c72b97 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGather.java @@ -42,8 +42,6 @@ *

    input_handle: The input tensor list. * indices: The indices used to index into the list. * values: The tensor. - * - * @param data type for {@code values} output */ @OpMetadata( opType = TensorListGather.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGetItem.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGetItem.java index 1ea76d2101e..244704b5754 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGetItem.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGetItem.java @@ -40,8 +40,6 @@ * input_handle: the list * index: the position in the list from which an element will be retrieved * item: the element at that position - * - * @param data type for {@code item} output */ @OpMetadata( opType = TensorListGetItem.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPopBack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPopBack.java index ee7a5cde1c9..af805e71f9b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPopBack.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPopBack.java @@ -42,8 +42,6 @@ * tensor: the withdrawn last element of the list * element_dtype: the type of elements in the list * element_shape: the shape of the output tensor - * - * @param data type for {@code tensor} output */ @OpMetadata( opType = TensorListPopBack.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListStack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListStack.java index fec4f942658..2d058b8e00d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListStack.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListStack.java @@ -41,8 +41,6 @@ *

    input_handle: the input list * tensor: the gathered result * num_elements: optional. If not -1, the number of elements in the list. - * - * @param data type for {@code tensor} output */ @OpMetadata( opType = TensorListStack.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapLookup.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapLookup.java index dccdc1ee996..a3e8b54e888 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapLookup.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapLookup.java @@ -39,8 +39,6 @@ * input_handle: the input map * key: the key to be looked up * value: the value found from the given key - * - * @param data type for {@code value} output */ @OpMetadata( opType = TensorMapLookup.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapStackKeys.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapStackKeys.java index b2a217c98e6..8942b2f9f8b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapStackKeys.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapStackKeys.java @@ -38,8 +38,6 @@ * Returns a Tensor stack of all keys in a tensor map. 
* input_handle: the input map * keys: the returned Tensor of all keys in the map - * - * @param data type for {@code keys} output */ @OpMetadata( opType = TensorMapStackKeys.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdAdd.java index a72a1defde1..77d1dd111d5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdAdd.java @@ -94,10 +94,16 @@ * * * - *

    Note: on CPU, if an out of bound index is found, an error is returned. - * On GPU, if an out of bound index is found, the index is ignored. - * - * @param data type for {@code output} output + *

    If {@code indices} contains any out-of-bound indices, depending on + * {@code bad_indices_policy}, the op will either return an error or ignore the + * out-of-bound indices. {@code bad_indices_policy} can be one of the following values: + *

      + *
    1. "" or "DEFAULT": raises on CPU and ignore on GPU. This is because + * historically on CPU and GPU we handle errors in different ways, and for + * backward compatibility we keep the default behavior.
    2. + *
    3. "ERROR": raises error; GPU does not support this value.
    4. + *
    5. "IGNORE": ignore the bad indices; supported on both CPU and GPU.
    6. + *
    */ @OpMetadata( opType = TensorScatterNdAdd.OP_NAME, @@ -125,6 +131,7 @@ public TensorScatterNdAdd(Operation operation) { * @param tensor Tensor to copy/update. * @param indices Index tensor. * @param updates Updates to scatter into output. + * @param options carries optional attribute values * @param data type for {@code TensorScatterAdd} output and operands * @return a new instance of TensorScatterNdAdd */ @@ -132,14 +139,31 @@ public TensorScatterNdAdd(Operation operation) { describeByClass = true ) public static TensorScatterNdAdd create(Scope scope, Operand tensor, - Operand indices, Operand updates) { + Operand indices, Operand updates, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "TensorScatterNdAdd"); opBuilder.addInput(tensor.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); + if (options != null) { + for (Options opts : options) { + if (opts.badIndicesPolicy != null) { + opBuilder.setAttr("bad_indices_policy", opts.badIndicesPolicy); + } + } + } return new TensorScatterNdAdd<>(opBuilder.build()); } + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. + */ + public static Options badIndicesPolicy(String badIndicesPolicy) { + return new Options().badIndicesPolicy(badIndicesPolicy); + } + /** * Gets output. * A new tensor copied from tensor and updates added according to the indices. @@ -154,6 +178,27 @@ public Output asOutput() { return output; } + /** + * Optional attributes for {@link org.tensorflow.op.core.TensorScatterNdAdd} + */ + public static class Options { + private String badIndicesPolicy; + + private Options() { + } + + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. 
+ */ + public Options badIndicesPolicy(String badIndicesPolicy) { + this.badIndicesPolicy = badIndicesPolicy; + return this; + } + } + @OpInputsMetadata( outputsClass = TensorScatterNdAdd.class ) @@ -183,14 +228,20 @@ public static class Inputs extends RawOpInputs(op), op, Arrays.asList("T", "Tindices")); + super(new TensorScatterNdAdd<>(op), op, Arrays.asList("T", "Tindices", "bad_indices_policy")); int inputIndex = 0; tensor = (Operand) op.input(inputIndex++); indices = (Operand) op.input(inputIndex++); updates = (Operand) op.input(inputIndex++); T = op.attributes().getAttrType("T"); Tindices = op.attributes().getAttrType("Tindices"); + badIndicesPolicy = op.attributes().getAttrString("bad_indices_policy"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMax.java index ceddda24a20..cbf9b2dd471 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMax.java @@ -50,8 +50,6 @@ * * *

    Refer to {@code tf.tensor_scatter_nd_update} for more details. - * - * @param data type for {@code output} output */ @OpMetadata( opType = TensorScatterNdMax.OP_NAME, @@ -79,6 +77,7 @@ public TensorScatterNdMax(Operation operation) { * @param tensor Tensor to update. * @param indices Index tensor. * @param updates Updates to scatter into output. + * @param options carries optional attribute values * @param data type for {@code TensorScatterMax} output and operands * @return a new instance of TensorScatterNdMax */ @@ -86,14 +85,31 @@ public TensorScatterNdMax(Operation operation) { describeByClass = true ) public static TensorScatterNdMax create(Scope scope, Operand tensor, - Operand indices, Operand updates) { + Operand indices, Operand updates, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "TensorScatterNdMax"); opBuilder.addInput(tensor.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); + if (options != null) { + for (Options opts : options) { + if (opts.badIndicesPolicy != null) { + opBuilder.setAttr("bad_indices_policy", opts.badIndicesPolicy); + } + } + } return new TensorScatterNdMax<>(opBuilder.build()); } + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. + */ + public static Options badIndicesPolicy(String badIndicesPolicy) { + return new Options().badIndicesPolicy(badIndicesPolicy); + } + /** * Gets output. * A new tensor copied from tensor whose values are element-wise maximum between tensor and updates according to the indices. @@ -108,6 +124,27 @@ public Output asOutput() { return output; } + /** + * Optional attributes for {@link org.tensorflow.op.core.TensorScatterNdMax} + */ + public static class Options { + private String badIndicesPolicy; + + private Options() { + } + + /** + * Sets the badIndicesPolicy option. 
+ * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. + */ + public Options badIndicesPolicy(String badIndicesPolicy) { + this.badIndicesPolicy = badIndicesPolicy; + return this; + } + } + @OpInputsMetadata( outputsClass = TensorScatterNdMax.class ) @@ -137,14 +174,20 @@ public static class Inputs extends RawOpInputs(op), op, Arrays.asList("T", "Tindices")); + super(new TensorScatterNdMax<>(op), op, Arrays.asList("T", "Tindices", "bad_indices_policy")); int inputIndex = 0; tensor = (Operand) op.input(inputIndex++); indices = (Operand) op.input(inputIndex++); updates = (Operand) op.input(inputIndex++); T = op.attributes().getAttrType("T"); Tindices = op.attributes().getAttrType("Tindices"); + badIndicesPolicy = op.attributes().getAttrString("bad_indices_policy"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMin.java index b6da07b4c31..7db99c551d1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMin.java @@ -36,8 +36,6 @@ /** * The TensorScatterMin operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = TensorScatterNdMin.OP_NAME, @@ -65,6 +63,7 @@ public TensorScatterNdMin(Operation operation) { * @param tensor Tensor to update. * @param indices Index tensor. * @param updates Updates to scatter into output. 
+ * @param options carries optional attribute values * @param data type for {@code TensorScatterMin} output and operands * @return a new instance of TensorScatterNdMin */ @@ -72,14 +71,31 @@ public TensorScatterNdMin(Operation operation) { describeByClass = true ) public static TensorScatterNdMin create(Scope scope, Operand tensor, - Operand indices, Operand updates) { + Operand indices, Operand updates, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "TensorScatterNdMin"); opBuilder.addInput(tensor.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); + if (options != null) { + for (Options opts : options) { + if (opts.badIndicesPolicy != null) { + opBuilder.setAttr("bad_indices_policy", opts.badIndicesPolicy); + } + } + } return new TensorScatterNdMin<>(opBuilder.build()); } + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. + */ + public static Options badIndicesPolicy(String badIndicesPolicy) { + return new Options().badIndicesPolicy(badIndicesPolicy); + } + /** * Gets output. * A new tensor copied from tensor whose values are element-wise minimum between tensor and updates according to the indices. @@ -94,6 +110,27 @@ public Output asOutput() { return output; } + /** + * Optional attributes for {@link org.tensorflow.op.core.TensorScatterNdMin} + */ + public static class Options { + private String badIndicesPolicy; + + private Options() { + } + + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. 
+ */ + public Options badIndicesPolicy(String badIndicesPolicy) { + this.badIndicesPolicy = badIndicesPolicy; + return this; + } + } + @OpInputsMetadata( outputsClass = TensorScatterNdMin.class ) @@ -123,14 +160,20 @@ public static class Inputs extends RawOpInputs(op), op, Arrays.asList("T", "Tindices")); + super(new TensorScatterNdMin<>(op), op, Arrays.asList("T", "Tindices", "bad_indices_policy")); int inputIndex = 0; tensor = (Operand) op.input(inputIndex++); indices = (Operand) op.input(inputIndex++); updates = (Operand) op.input(inputIndex++); T = op.attributes().getAttrType("T"); Tindices = op.attributes().getAttrType("Tindices"); + badIndicesPolicy = op.attributes().getAttrString("bad_indices_policy"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdSub.java index 3623707e77e..095e0428962 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdSub.java @@ -91,8 +91,6 @@ * *

    Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, the index is ignored. - * - * @param data type for {@code output} output */ @OpMetadata( opType = TensorScatterNdSub.OP_NAME, @@ -120,6 +118,7 @@ public TensorScatterNdSub(Operation operation) { * @param tensor Tensor to copy/update. * @param indices Index tensor. * @param updates Updates to scatter into output. + * @param options carries optional attribute values * @param data type for {@code TensorScatterSub} output and operands * @return a new instance of TensorScatterNdSub */ @@ -127,14 +126,31 @@ public TensorScatterNdSub(Operation operation) { describeByClass = true ) public static TensorScatterNdSub create(Scope scope, Operand tensor, - Operand indices, Operand updates) { + Operand indices, Operand updates, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "TensorScatterNdSub"); opBuilder.addInput(tensor.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); + if (options != null) { + for (Options opts : options) { + if (opts.badIndicesPolicy != null) { + opBuilder.setAttr("bad_indices_policy", opts.badIndicesPolicy); + } + } + } return new TensorScatterNdSub<>(opBuilder.build()); } + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. + */ + public static Options badIndicesPolicy(String badIndicesPolicy) { + return new Options().badIndicesPolicy(badIndicesPolicy); + } + /** * Gets output. * A new tensor copied from tensor and updates subtracted according to the indices. @@ -149,6 +165,27 @@ public Output asOutput() { return output; } + /** + * Optional attributes for {@link org.tensorflow.op.core.TensorScatterNdSub} + */ + public static class Options { + private String badIndicesPolicy; + + private Options() { + } + + /** + * Sets the badIndicesPolicy option. 
+ * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. + */ + public Options badIndicesPolicy(String badIndicesPolicy) { + this.badIndicesPolicy = badIndicesPolicy; + return this; + } + } + @OpInputsMetadata( outputsClass = TensorScatterNdSub.class ) @@ -178,14 +215,20 @@ public static class Inputs extends RawOpInputs(op), op, Arrays.asList("T", "Tindices")); + super(new TensorScatterNdSub<>(op), op, Arrays.asList("T", "Tindices", "bad_indices_policy")); int inputIndex = 0; tensor = (Operand) op.input(inputIndex++); indices = (Operand) op.input(inputIndex++); updates = (Operand) op.input(inputIndex++); T = op.attributes().getAttrType("T"); Tindices = op.attributes().getAttrType("Tindices"); + badIndicesPolicy = op.attributes().getAttrString("bad_indices_policy"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdUpdate.java index 3c53fca7eab..96323c0db29 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdUpdate.java @@ -42,7 +42,6 @@ * scattered onto an existing tensor (as opposed to a zero-tensor). If the memory * for the existing tensor cannot be re-used, a copy is made and updated. *

    If {@code indices} contains duplicates, then we pick the last update for the index. - *

    If an out of bound index is found on CPU, an error is returned. *

    WARNING: There are some GPU specific semantics for this operation. *

      *
    • If an out of bound index is found, the index is ignored.
    • @@ -64,9 +63,17 @@ *
        * indices.shape[:-1] + tensor.shape[indices.shape[-1]:]
        * 
      + *

      If {@code indices} contains any out-of-bound indices, depending on + * {@code bad_indices_policy}, the op will either return an error or ignore the + * out-of-bound indices. {@code bad_indices_policy} can be one of the following values: + *

        + *
      1. "" or "DEFAULT": raises on CPU and ignore on GPU. This is because + * historically on CPU and GPU we handle errors in different ways, and for + * backward compatibility we keep the default behavior.
      2. + *
      3. "ERROR": raises error; GPU does not support this value.
      4. + *
      5. "IGNORE": ignore the bad indices; supported on both CPU and GPU.
      6. + *
      *

      For usage examples see the python tf.tensor_scatter_nd_update {@link org.tensorflow.op.Ops#tensorScatterNdUpdate} function - * - * @param data type for {@code output} output */ @OpMetadata( opType = TensorScatterNdUpdate.OP_NAME, @@ -94,6 +101,7 @@ public TensorScatterNdUpdate(Operation operation) { * @param tensor Tensor to copy/update. * @param indices Index tensor. * @param updates Updates to scatter into output. + * @param options carries optional attribute values * @param data type for {@code TensorScatterUpdate} output and operands * @return a new instance of TensorScatterNdUpdate */ @@ -101,14 +109,31 @@ public TensorScatterNdUpdate(Operation operation) { describeByClass = true ) public static TensorScatterNdUpdate create(Scope scope, Operand tensor, - Operand indices, Operand updates) { + Operand indices, Operand updates, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "TensorScatterNdUpdate"); opBuilder.addInput(tensor.asOutput()); opBuilder.addInput(indices.asOutput()); opBuilder.addInput(updates.asOutput()); + if (options != null) { + for (Options opts : options) { + if (opts.badIndicesPolicy != null) { + opBuilder.setAttr("bad_indices_policy", opts.badIndicesPolicy); + } + } + } return new TensorScatterNdUpdate<>(opBuilder.build()); } + /** + * Sets the badIndicesPolicy option. + * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. + */ + public static Options badIndicesPolicy(String badIndicesPolicy) { + return new Options().badIndicesPolicy(badIndicesPolicy); + } + /** * Gets output. * A new tensor with the given shape and updates applied according @@ -124,6 +149,27 @@ public Output asOutput() { return output; } + /** + * Optional attributes for {@link org.tensorflow.op.core.TensorScatterNdUpdate} + */ + public static class Options { + private String badIndicesPolicy; + + private Options() { + } + + /** + * Sets the badIndicesPolicy option. 
+ * + * @param badIndicesPolicy the badIndicesPolicy option + * @return this Options instance. + */ + public Options badIndicesPolicy(String badIndicesPolicy) { + this.badIndicesPolicy = badIndicesPolicy; + return this; + } + } + @OpInputsMetadata( outputsClass = TensorScatterNdUpdate.class ) @@ -153,14 +199,20 @@ public static class Inputs extends RawOpInputs(op), op, Arrays.asList("T", "Tindices")); + super(new TensorScatterNdUpdate<>(op), op, Arrays.asList("T", "Tindices", "bad_indices_policy")); int inputIndex = 0; tensor = (Operand) op.input(inputIndex++); indices = (Operand) op.input(inputIndex++); updates = (Operand) op.input(inputIndex++); T = op.attributes().getAttrType("T"); Tindices = op.attributes().getAttrType("Tindices"); + badIndicesPolicy = op.attributes().getAttrString("bad_indices_policy"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorStridedSliceUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorStridedSliceUpdate.java index 23b2d386a05..de80c141d72 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorStridedSliceUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorStridedSliceUpdate.java @@ -41,8 +41,6 @@ * {@code strides} etc. work exactly as in {@code StridedSlice}. *

      NOTE this op currently does not support broadcasting and so {@code value}'s shape * must be exactly the shape produced by the slice of {@code input}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = TensorStridedSliceUpdate.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Tile.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Tile.java index fa25cd34464..7339fdbb3de 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Tile.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Tile.java @@ -67,8 +67,6 @@ * * * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Tile.OP_NAME, @@ -93,7 +91,7 @@ public Tile(Operation operation) { * Factory method to create a class wrapping a new Tile operation. * * @param scope current scope - * @param input 1-D or higher. + * @param input Can be of any rank. * @param multiples 1-D. Length must be the same as the number of dimensions in {@code input} * @param data type for {@code Tile} output and operands * @return a new instance of Tile @@ -128,7 +126,7 @@ public Output asOutput() { ) public static class Inputs extends RawOpInputs> { /** - * 1-D or higher. + * Can be of any rank. */ public final Operand input; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unbatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unbatch.java index a49747c48ca..fa4c04f3c27 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unbatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unbatch.java @@ -53,8 +53,6 @@ * shared_name: Instances of Unbatch with the same container and shared_name are * assumed to possibly belong to the same batch. If left empty, the op name will * be used as the shared name. 
- * - * @param data type for {@code unbatched_tensor} output */ @OpMetadata( opType = Unbatch.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnbatchGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnbatchGrad.java index 912e08c3a6b..25418f3986f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnbatchGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnbatchGrad.java @@ -49,8 +49,6 @@ * shared_name: Instances of UnbatchGrad with the same container and shared_name * are assumed to possibly belong to the same batch. If left empty, the op name * will be used as the shared name. - * - * @param data type for {@code batched_grad} output */ @OpMetadata( opType = UnbatchGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniformQuantizedClipByValue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniformQuantizedClipByValue.java index f96475d3bf8..f1a4eb739d1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniformQuantizedClipByValue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniformQuantizedClipByValue.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -39,13 +40,12 @@ * Given quantized {@code operand} which was quantized using {@code scales} and {@code zero_points}, performs clip by value using {@code min} and {@code max} values. * If quantization_axis is -1 (per-tensor quantized), the entire operand is clipped using scalar min, max. 
* Otherwise (per-channel quantized), the clipping is also done per-channel. - * - * @param data type for {@code output} output */ @OpMetadata( opType = UniformQuantizedClipByValue.OP_NAME, inputsClass = UniformQuantizedClipByValue.Inputs.class ) +@Operator public final class UniformQuantizedClipByValue extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unique.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unique.java index c4324a9f324..4d17cf9f141 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unique.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unique.java @@ -74,10 +74,6 @@ * [2, 0]] * idx ==> [0, 1, 1] * - * - * @param data type for {@code y} output - * - * @param data type for {@code idx} output */ @OpMetadata( opType = Unique.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniqueWithCounts.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniqueWithCounts.java index 80a1804887f..8046082f95b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniqueWithCounts.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniqueWithCounts.java @@ -78,10 +78,6 @@ * idx ==> [0, 1, 1] * count ==> [1, 2] * - * - * @param data type for {@code y} output - * - * @param data type for {@code idx} output */ @OpMetadata( opType = UniqueWithCounts.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnravelIndex.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnravelIndex.java index 5393635bc69..ec7c8f8c6e9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnravelIndex.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnravelIndex.java @@ -52,8 +52,6 @@ *

      {@literal @}compatibility(numpy)
      * Equivalent to np.unravel_index *
      {@literal @}end_compatibility - * - * @param data type for {@code output} output */ @OpMetadata( opType = UnravelIndex.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unstack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unstack.java index 3d04ec1ebc7..64c8de23911 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unstack.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unstack.java @@ -46,8 +46,6 @@ * and each tensor in {@code output} will have shape {@code (A, C, D)}. * Etc. *

      This is the opposite of {@code pack}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Unstack.OP_NAME, @@ -163,7 +161,7 @@ public static class Inputs extends RawOpInputs> { /** * Dimension along which to unpack. Negative values wrap around, so the - * valid range is `[-R, R)`. + * valid range is {@code [-R, R)}. */ public final long axis; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UpperBound.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UpperBound.java index b50ba0d9399..78e45391c8a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UpperBound.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UpperBound.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -50,13 +51,12 @@ *

      result = UpperBound(sorted_sequence, values) *

      result == [[1, 2, 4], * [0, 2, 5]] - * - * @param data type for {@code output} output */ @OpMetadata( opType = UpperBound.OP_NAME, inputsClass = UpperBound.Inputs.class ) +@Operator public final class UpperBound extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Variable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Variable.java index a0febf9c223..d8b09bfddde 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Variable.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Variable.java @@ -40,8 +40,6 @@ * Outputs a ref to the tensor state so it may be read or modified. * TODO(zhifengc/mrry): Adds a pointer to a more detail document * about sharing states in tensorflow. - * - * @param data type for {@code ref} output */ @OpMetadata( opType = Variable.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VariableShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VariableShape.java index 3f94b9efbd6..abfd8d7c504 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VariableShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VariableShape.java @@ -44,8 +44,6 @@ * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] * shape(t) ==> [2, 2, 3] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = VariableShape.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ZerosLike.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ZerosLike.java index 792a37d112c..497cf5128b8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ZerosLike.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ZerosLike.java @@ -35,8 +35,6 @@ /** * Returns a tensor of zeros with the same shape and type as x. - * - * @param data type for {@code y} output */ @OpMetadata( opType = ZerosLike.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousMemoryCache.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousMemoryCache.java index ec5f54534a0..7554d2de110 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousMemoryCache.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousMemoryCache.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** @@ -37,6 +38,9 @@ opType = AnonymousMemoryCache.OP_NAME, inputsClass = AnonymousMemoryCache.Inputs.class ) +@Operator( + group = "data" +) public final class AnonymousMemoryCache extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AssertPrevDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AssertPrevDataset.java index abd5286f98b..bab20bf1f31 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AssertPrevDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AssertPrevDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TString; import 
org.tensorflow.types.family.TType; @@ -50,6 +51,9 @@ opType = AssertPrevDataset.OP_NAME, inputsClass = AssertPrevDataset.Inputs.class ) +@Operator( + group = "data" +) public final class AssertPrevDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CompressElement.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CompressElement.java index 996fd0328f1..b070f5995d4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CompressElement.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CompressElement.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -40,6 +41,9 @@ opType = CompressElement.OP_NAME, inputsClass = CompressElement.Inputs.class ) +@Operator( + group = "data" +) public final class CompressElement extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetFingerprint.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetFingerprint.java new file mode 100644 index 00000000000..573670b6b26 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetFingerprint.java @@ -0,0 +1,107 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.data; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.family.TType; + +/** + * Returns the fingerprint of {@code input_dataset}. + * Returns the fingerprint of {@code input_dataset}. + */ +@OpMetadata( + opType = DatasetFingerprint.OP_NAME, + inputsClass = DatasetFingerprint.Inputs.class +) +@Operator( + group = "data" +) +public final class DatasetFingerprint extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "DatasetFingerprint"; + + private Output fingerprint; + + @SuppressWarnings("unchecked") + public DatasetFingerprint(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + fingerprint = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new DatasetFingerprint operation. 
+ * + * @param scope current scope + * @param inputDataset A variant tensor representing the dataset to return fingerprint for. + * @return a new instance of DatasetFingerprint + */ + @Endpoint( + describeByClass = true + ) + public static DatasetFingerprint create(Scope scope, Operand inputDataset) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "DatasetFingerprint"); + opBuilder.addInput(inputDataset.asOutput()); + return new DatasetFingerprint(opBuilder.build()); + } + + /** + * Gets fingerprint. + * The fingerprint of {@code input_dataset} in {@code uint64} + * @return fingerprint. + */ + public Output fingerprint() { + return fingerprint; + } + + @Override + @SuppressWarnings("unchecked") + public Output asOutput() { + return (Output) fingerprint; + } + + @OpInputsMetadata( + outputsClass = DatasetFingerprint.class + ) + public static class Inputs extends RawOpInputs { + /** + * A variant tensor representing the dataset to return fingerprint for. + */ + public final Operand inputDataset; + + public Inputs(GraphOperation op) { + super(new DatasetFingerprint(op), op, Arrays.asList()); + int inputIndex = 0; + inputDataset = (Operand) op.input(inputIndex++); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteMemoryCache.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteMemoryCache.java index 67486cf98bf..a701e8ab58b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteMemoryCache.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteMemoryCache.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** @@ -37,6 +38,9 @@ opType = DeleteMemoryCache.OP_NAME, inputsClass = 
DeleteMemoryCache.Inputs.class ) +@Operator( + group = "data" +) public final class DeleteMemoryCache extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteMultiDeviceIterator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteMultiDeviceIterator.java index 47d761f78d3..8f8b0b35d33 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteMultiDeviceIterator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteMultiDeviceIterator.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** @@ -38,6 +39,9 @@ opType = DeleteMultiDeviceIterator.OP_NAME, inputsClass = DeleteMultiDeviceIterator.Inputs.class ) +@Operator( + group = "data" +) public final class DeleteMultiDeviceIterator extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DummyIterationCounter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DummyIterationCounter.java index af321f086d8..be7fc1c6ee8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DummyIterationCounter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DummyIterationCounter.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** @@ -38,6 +39,9 @@ opType = DummyIterationCounter.OP_NAME, inputsClass = 
DummyIterationCounter.Inputs.class ) +@Operator( + group = "data" +) public final class DummyIterationCounter extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/GlobalShuffleDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/GlobalShuffleDataset.java new file mode 100644 index 00000000000..19ec4cd2e96 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/GlobalShuffleDataset.java @@ -0,0 +1,230 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.data; + +import java.util.Arrays; +import java.util.List; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.proto.DataType; +import org.tensorflow.types.TInt64; +import org.tensorflow.types.family.TType; + +/** + * The GlobalShuffleDataset operation + */ +@OpMetadata( + opType = GlobalShuffleDataset.OP_NAME, + inputsClass = GlobalShuffleDataset.Inputs.class +) +public final class GlobalShuffleDataset extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "GlobalShuffleDataset"; + + private Output handle; + + @SuppressWarnings("unchecked") + public GlobalShuffleDataset(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + handle = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new GlobalShuffleDataset operation. 
+ * + * @param scope current scope + * @param inputDataset The inputDataset value + * @param seed The seed value + * @param seed2 The seed2 value + * @param seedGenerator The seedGenerator value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of GlobalShuffleDataset + */ + @Endpoint( + describeByClass = true + ) + public static GlobalShuffleDataset create(Scope scope, Operand inputDataset, + Operand seed, Operand seed2, Operand seedGenerator, + List> outputTypes, List outputShapes, Options... options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "GlobalShuffleDataset"); + opBuilder.addInput(inputDataset.asOutput()); + opBuilder.addInput(seed.asOutput()); + opBuilder.addInput(seed2.asOutput()); + opBuilder.addInput(seedGenerator.asOutput()); + opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); + Shape[] outputShapesArray = new Shape[outputShapes.size()]; + for (int i = 0 ; i < outputShapesArray.length ; i++) { + outputShapesArray[i] = outputShapes.get(i); + } + opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.reshuffleEachIteration != null) { + opBuilder.setAttr("reshuffle_each_iteration", opts.reshuffleEachIteration); + } + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } + } + } + return new GlobalShuffleDataset(opBuilder.build()); + } + + /** + * Sets the reshuffleEachIteration option. + * + * @param reshuffleEachIteration the reshuffleEachIteration option + * @return this Options instance. + */ + public static Options reshuffleEachIteration(Boolean reshuffleEachIteration) { + return new Options().reshuffleEachIteration(reshuffleEachIteration); + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + + /** + * Gets handle. + * + * @return handle. + */ + public Output handle() { + return handle; + } + + @Override + @SuppressWarnings("unchecked") + public Output asOutput() { + return (Output) handle; + } + + /** + * Optional attributes for {@link org.tensorflow.op.data.GlobalShuffleDataset} + */ + public static class Options { + private Boolean reshuffleEachIteration; + + private String metadata; + + private Options() { + } + + /** + * Sets the reshuffleEachIteration option. + * + * @param reshuffleEachIteration the reshuffleEachIteration option + * @return this Options instance. + */ + public Options reshuffleEachIteration(Boolean reshuffleEachIteration) { + this.reshuffleEachIteration = reshuffleEachIteration; + return this; + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + + @OpInputsMetadata( + outputsClass = GlobalShuffleDataset.class + ) + public static class Inputs extends RawOpInputs { + /** + * The inputDataset input + */ + public final Operand inputDataset; + + /** + * The seed input + */ + public final Operand seed; + + /** + * The seed2 input + */ + public final Operand seed2; + + /** + * The seedGenerator input + */ + public final Operand seedGenerator; + + /** + * The reshuffleEachIteration attribute + */ + public final boolean reshuffleEachIteration; + + /** + * The outputTypes attribute + */ + public final DataType[] outputTypes; + + /** + * The outputShapes attribute + */ + public final Shape[] outputShapes; + + /** + * The metadata attribute + */ + public final String metadata; + + public Inputs(GraphOperation op) { + super(new GlobalShuffleDataset(op), op, Arrays.asList("reshuffle_each_iteration", "output_types", "output_shapes", "metadata")); + int inputIndex = 0; + 
inputDataset = (Operand) op.input(inputIndex++); + seed = (Operand) op.input(inputIndex++); + seed2 = (Operand) op.input(inputIndex++); + seedGenerator = (Operand) op.input(inputIndex++); + reshuffleEachIteration = op.attributes().getAttrBool("reshuffle_each_iteration"); + outputTypes = op.attributes().getAttrTypeList("output_types"); + outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IndexFlatMapDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IndexFlatMapDataset.java new file mode 100644 index 00000000000..b5d3f116ad5 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IndexFlatMapDataset.java @@ -0,0 +1,224 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.data; + +import java.util.Arrays; +import java.util.List; +import org.tensorflow.ConcreteFunction; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.proto.DataType; +import org.tensorflow.types.TInt64; +import org.tensorflow.types.family.TType; + +/** + * The IndexFlatMapDataset operation + */ +@OpMetadata( + opType = IndexFlatMapDataset.OP_NAME, + inputsClass = IndexFlatMapDataset.Inputs.class +) +@Operator( + group = "data" +) +public final class IndexFlatMapDataset extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "IndexFlatMapDataset"; + + private Output handle; + + @SuppressWarnings("unchecked") + public IndexFlatMapDataset(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + handle = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new IndexFlatMapDataset operation. 
+ * + * @param scope current scope + * @param inputDataset The inputDataset value + * @param mapFuncOtherArgs The mapFuncOtherArgs value + * @param indexMapFuncOtherArgs The indexMapFuncOtherArgs value + * @param outputCardinality The outputCardinality value + * @param mapFunc The value of the mapFunc attribute + * @param indexMapFunc The value of the indexMapFunc attribute + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of IndexFlatMapDataset + */ + @Endpoint( + describeByClass = true + ) + public static IndexFlatMapDataset create(Scope scope, Operand inputDataset, + Iterable> mapFuncOtherArgs, Iterable> indexMapFuncOtherArgs, + Operand outputCardinality, ConcreteFunction mapFunc, ConcreteFunction indexMapFunc, + List> outputTypes, List outputShapes, Options... options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "IndexFlatMapDataset"); + opBuilder.addInput(inputDataset.asOutput()); + opBuilder.addInputList(Operands.asOutputs(mapFuncOtherArgs)); + opBuilder.addInputList(Operands.asOutputs(indexMapFuncOtherArgs)); + opBuilder.addInput(outputCardinality.asOutput()); + opBuilder.setAttr("map_func", mapFunc); + opBuilder.setAttr("index_map_func", indexMapFunc); + opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); + Shape[] outputShapesArray = new Shape[outputShapes.size()]; + for (int i = 0 ; i < outputShapesArray.length ; i++) { + outputShapesArray[i] = outputShapes.get(i); + } + opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } + } + } + return new IndexFlatMapDataset(opBuilder.build()); + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + + /** + * Gets handle. + * + * @return handle. + */ + public Output handle() { + return handle; + } + + @Override + @SuppressWarnings("unchecked") + public Output asOutput() { + return (Output) handle; + } + + /** + * Optional attributes for {@link org.tensorflow.op.data.IndexFlatMapDataset} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + + @OpInputsMetadata( + outputsClass = IndexFlatMapDataset.class + ) + public static class Inputs extends RawOpInputs { + /** + * The inputDataset input + */ + public final Operand inputDataset; + + /** + * The mapFuncOtherArgs input + */ + public final Iterable> mapFuncOtherArgs; + + /** + * The indexMapFuncOtherArgs input + */ + public final Iterable> indexMapFuncOtherArgs; + + /** + * The outputCardinality input + */ + public final Operand outputCardinality; + + /** + * The TmapFuncArgs attribute + */ + public final DataType[] TmapFuncArgs; + + /** + * The TindexMapFuncArgs attribute + */ + public final DataType[] TindexMapFuncArgs; + + /** + * The outputTypes attribute + */ + public final DataType[] outputTypes; + + /** + * The outputShapes attribute + */ + public final Shape[] outputShapes; + + /** + * The metadata attribute + */ + public final String metadata; + + public Inputs(GraphOperation op) { + super(new IndexFlatMapDataset(op), op, Arrays.asList("Tmap_func_args", "Tindex_map_func_args", "output_types", "output_shapes", "metadata")); + int inputIndex = 0; + inputDataset = (Operand) op.input(inputIndex++); + int mapFuncOtherArgsLength = op.inputListLength("map_func_other_args"); + mapFuncOtherArgs = Arrays.asList((Operand[]) op.inputList(inputIndex, 
mapFuncOtherArgsLength)); + inputIndex += mapFuncOtherArgsLength; + int indexMapFuncOtherArgsLength = op.inputListLength("index_map_func_other_args"); + indexMapFuncOtherArgs = Arrays.asList((Operand[]) op.inputList(inputIndex, indexMapFuncOtherArgsLength)); + inputIndex += indexMapFuncOtherArgsLength; + outputCardinality = (Operand) op.input(inputIndex++); + TmapFuncArgs = op.attributes().getAttrTypeList("Tmap_func_args"); + TindexMapFuncArgs = op.attributes().getAttrTypeList("Tindex_map_func_args"); + outputTypes = op.attributes().getAttrTypeList("output_types"); + outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorFromStringHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorFromStringHandle.java index 1d11ba1e78f..e8504283d32 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorFromStringHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorFromStringHandle.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -43,6 +44,9 @@ opType = IteratorFromStringHandle.OP_NAME, inputsClass = IteratorFromStringHandle.Inputs.class ) +@Operator( + group = "data" +) public final class IteratorFromStringHandle extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetDevice.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetDevice.java index 8e9e0992a17..a30963f9e6d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetDevice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetDevice.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -39,6 +40,9 @@ opType = IteratorGetDevice.OP_NAME, inputsClass = IteratorGetDevice.Inputs.class ) +@Operator( + group = "data" +) public final class IteratorGetDevice extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetModelProto.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetModelProto.java new file mode 100644 index 00000000000..1ad0de4c183 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetModelProto.java @@ -0,0 +1,102 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.data; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.types.TString; +import org.tensorflow.types.family.TType; + +/** + * Returns the serialized model proto of an iterator resource. + * Returns the serialized model proto of an iterator resource. + */ +@OpMetadata( + opType = IteratorGetModelProto.OP_NAME, + inputsClass = IteratorGetModelProto.Inputs.class +) +public final class IteratorGetModelProto extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "IteratorGetModelProto"; + + private Output modelProto; + + public IteratorGetModelProto(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + modelProto = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new IteratorGetModelProto operation. + * + * @param scope current scope + * @param iterator An resource from an dataset iterator. + * @return a new instance of IteratorGetModelProto + */ + @Endpoint( + describeByClass = true + ) + public static IteratorGetModelProto create(Scope scope, Operand iterator) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "IteratorGetModelProto"); + opBuilder.addInput(iterator.asOutput()); + return new IteratorGetModelProto(opBuilder.build()); + } + + /** + * Gets modelProto. + * A serialized model proto. + * @return modelProto. 
+ */ + public Output modelProto() { + return modelProto; + } + + @Override + public Output asOutput() { + return modelProto; + } + + @OpInputsMetadata( + outputsClass = IteratorGetModelProto.class + ) + public static class Inputs extends RawOpInputs { + /** + * An resource from an dataset iterator. + */ + public final Operand iterator; + + public Inputs(GraphOperation op) { + super(new IteratorGetModelProto(op), op, Arrays.asList()); + int inputIndex = 0; + iterator = (Operand) op.input(inputIndex++); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LeakyReluGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LeakyReluGrad.java index e7a66350f0c..131903f2fc1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LeakyReluGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LeakyReluGrad.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * Computes rectified linear gradients for a LeakyRelu operation. 
- * - * @param data type for {@code backprops} output */ @OpMetadata( opType = LeakyReluGrad.OP_NAME, inputsClass = LeakyReluGrad.Inputs.class ) +@Operator( + group = "data" +) public final class LeakyReluGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ListDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ListDataset.java index 45b9d31c0b1..76db7fe0eac 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ListDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ListDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +43,9 @@ opType = ListDataset.OP_NAME, inputsClass = ListDataset.Inputs.class ) +@Operator( + group = "data" +) public final class ListDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ListSnapshotChunksDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ListSnapshotChunksDataset.java new file mode 100644 index 00000000000..0fe1bbb447b --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ListSnapshotChunksDataset.java @@ -0,0 +1,132 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.data; + +import java.util.Arrays; +import java.util.List; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.proto.DataType; +import org.tensorflow.types.TString; +import org.tensorflow.types.family.TType; + +/** + * The ListSnapshotChunksDataset operation + */ +@OpMetadata( + opType = ListSnapshotChunksDataset.OP_NAME, + inputsClass = ListSnapshotChunksDataset.Inputs.class +) +@Operator( + group = "data" +) +public final class ListSnapshotChunksDataset extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "ListSnapshotChunksDataset"; + + private Output handle; + + @SuppressWarnings("unchecked") + public ListSnapshotChunksDataset(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + handle = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new 
ListSnapshotChunksDataset operation. + * + * @param scope current scope + * @param snapshotPath The snapshotPath value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of ListSnapshotChunksDataset + */ + @Endpoint( + describeByClass = true + ) + public static ListSnapshotChunksDataset create(Scope scope, Operand snapshotPath, + List> outputTypes, List outputShapes) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "ListSnapshotChunksDataset"); + opBuilder.addInput(snapshotPath.asOutput()); + opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); + Shape[] outputShapesArray = new Shape[outputShapes.size()]; + for (int i = 0 ; i < outputShapesArray.length ; i++) { + outputShapesArray[i] = outputShapes.get(i); + } + opBuilder.setAttr("output_shapes", outputShapesArray); + return new ListSnapshotChunksDataset(opBuilder.build()); + } + + /** + * Gets handle. + * + * @return handle. 
+ */ + public Output handle() { + return handle; + } + + @Override + @SuppressWarnings("unchecked") + public Output asOutput() { + return (Output) handle; + } + + @OpInputsMetadata( + outputsClass = ListSnapshotChunksDataset.class + ) + public static class Inputs extends RawOpInputs { + /** + * The snapshotPath input + */ + public final Operand snapshotPath; + + /** + * The outputTypes attribute + */ + public final DataType[] outputTypes; + + /** + * The outputShapes attribute + */ + public final Shape[] outputShapes; + + public Inputs(GraphOperation op) { + super(new ListSnapshotChunksDataset(op), op, Arrays.asList("output_types", "output_shapes")); + int inputIndex = 0; + snapshotPath = (Operand) op.input(inputIndex++); + outputTypes = op.attributes().getAttrTypeList("output_types"); + outputShapes = op.attributes().getAttrShapeList("output_shapes"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MapDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MapDataset.java index 6e8ca298f38..4b6e7355a51 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MapDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MapDataset.java @@ -98,6 +98,9 @@ public static MapDataset create(Scope scope, Operand inputDatas if (opts.preserveCardinality != null) { opBuilder.setAttr("preserve_cardinality", opts.preserveCardinality); } + if (opts.forceSynchronous != null) { + opBuilder.setAttr("force_synchronous", opts.forceSynchronous); + } if (opts.metadata != null) { opBuilder.setAttr("metadata", opts.metadata); } @@ -126,6 +129,16 @@ public static Options preserveCardinality(Boolean preserveCardinality) { return new Options().preserveCardinality(preserveCardinality); } + /** + * Sets the forceSynchronous option. + * + * @param forceSynchronous the forceSynchronous option + * @return this Options instance. 
+ */ + public static Options forceSynchronous(Boolean forceSynchronous) { + return new Options().forceSynchronous(forceSynchronous); + } + /** * Sets the metadata option. * @@ -159,6 +172,8 @@ public static class Options { private Boolean preserveCardinality; + private Boolean forceSynchronous; + private String metadata; private Options() { @@ -186,6 +201,17 @@ public Options preserveCardinality(Boolean preserveCardinality) { return this; } + /** + * Sets the forceSynchronous option. + * + * @param forceSynchronous the forceSynchronous option + * @return this Options instance. + */ + public Options forceSynchronous(Boolean forceSynchronous) { + this.forceSynchronous = forceSynchronous; + return this; + } + /** * Sets the metadata option. * @@ -237,13 +263,18 @@ public static class Inputs extends RawOpInputs { */ public final boolean preserveCardinality; + /** + * The forceSynchronous attribute + */ + public final boolean forceSynchronous; + /** * The metadata attribute */ public final String metadata; public Inputs(GraphOperation op) { - super(new MapDataset(op), op, Arrays.asList("Targuments", "output_types", "output_shapes", "use_inter_op_parallelism", "preserve_cardinality", "metadata")); + super(new MapDataset(op), op, Arrays.asList("Targuments", "output_types", "output_shapes", "use_inter_op_parallelism", "preserve_cardinality", "force_synchronous", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); int otherArgumentsLength = op.inputListLength("other_arguments"); @@ -254,6 +285,7 @@ public Inputs(GraphOperation op) { outputShapes = op.attributes().getAttrShapeList("output_shapes"); useInterOpParallelism = op.attributes().getAttrBool("use_inter_op_parallelism"); preserveCardinality = op.attributes().getAttrBool("preserve_cardinality"); + forceSynchronous = op.attributes().getAttrBool("force_synchronous"); metadata = op.attributes().getAttrString("metadata"); } } diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIterator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIterator.java index 7fe8aa712a0..467acc47e4d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIterator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIterator.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +43,9 @@ opType = MultiDeviceIterator.OP_NAME, inputsClass = MultiDeviceIterator.Inputs.class ) +@Operator( + group = "data" +) public final class MultiDeviceIterator extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorFromStringHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorFromStringHandle.java index b9963f2cbfe..20cfc4f1010 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorFromStringHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorFromStringHandle.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -43,6 +44,9 @@ opType = MultiDeviceIteratorFromStringHandle.OP_NAME, inputsClass = MultiDeviceIteratorFromStringHandle.Inputs.class ) 
+@Operator( + group = "data" +) public final class MultiDeviceIteratorFromStringHandle extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorGetNextFromShard.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorGetNextFromShard.java index 57410d3f0f1..642935599c7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorGetNextFromShard.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorGetNextFromShard.java @@ -33,6 +33,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; @@ -45,6 +46,9 @@ opType = MultiDeviceIteratorGetNextFromShard.OP_NAME, inputsClass = MultiDeviceIteratorGetNextFromShard.Inputs.class ) +@Operator( + group = "data" +) public final class MultiDeviceIteratorGetNextFromShard extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorInit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorInit.java index f1866ce63da..8cbc7b35693 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorInit.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorInit.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import 
org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -39,6 +40,9 @@ opType = MultiDeviceIteratorInit.OP_NAME, inputsClass = MultiDeviceIteratorInit.Inputs.class ) +@Operator( + group = "data" +) public final class MultiDeviceIteratorInit extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorToStringHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorToStringHandle.java index 5eebe6521f7..cc856b4ecc9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorToStringHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorToStringHandle.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -39,6 +40,9 @@ opType = MultiDeviceIteratorToStringHandle.OP_NAME, inputsClass = MultiDeviceIteratorToStringHandle.Inputs.class ) +@Operator( + group = "data" +) public final class MultiDeviceIteratorToStringHandle extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionsDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionsDataset.java index 596ed69844b..529b1b93eaa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionsDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionsDataset.java @@ -153,7 +153,7 @@ public static class Inputs 
extends RawOpInputs { public final Operand inputDataset; /** - * A `tf.string` scalar `tf.Tensor` of serialized `tf.data.Options` protocol buffer. + * A {@code tf.string} scalar {@code tf.Tensor} of serialized {@code tf.data.Options} protocol buffer. */ public final String serializedOptions; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelFilterDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelFilterDataset.java index 9d8b307f2ff..f87d2a27269 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelFilterDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelFilterDataset.java @@ -33,6 +33,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -52,6 +53,9 @@ opType = ParallelFilterDataset.OP_NAME, inputsClass = ParallelFilterDataset.Inputs.class ) +@Operator( + group = "data" +) public final class ParallelFilterDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -215,8 +219,8 @@ public static class Inputs extends RawOpInputs { * A string indicating the op-level determinism to use. Deterministic controls * whether the interleave is allowed to return elements out of order if the next * element to be returned isn't available, but a later element is. Options are - * "true", "false", and "default". "default" indicates that determinism should be - * decided by the `experimental_deterministic` parameter of `tf.data.Options`. + * "true", "false", and "default". 
"default" indicates that determinism should be + * decided by the {@code experimental_deterministic} parameter of {@code tf.data.Options}. */ public final String deterministic; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelInterleaveDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelInterleaveDataset.java index 17ae267ff18..d46fd839fc4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelInterleaveDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelInterleaveDataset.java @@ -265,13 +265,13 @@ public static class Inputs extends RawOpInputs { * A string indicating the op-level determinism to use. Deterministic controls * whether the interleave is allowed to return elements out of order if the next * element to be returned isn't available, but a later element is. Options are - * "true", "false", and "default". "default" indicates that determinism should be - * decided by the `experimental_deterministic` parameter of `tf.data.Options`. + * "true", "false", and "default". "default" indicates that determinism should be + * decided by the {@code experimental_deterministic} parameter of {@code tf.data.Options}. */ public final String deterministic; /** - * Types of the elements of `other_arguments`. + * Types of the elements of {@code other_arguments}. 
*/ public final DataType[] Targuments; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelMapDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelMapDataset.java index 68e97058b5c..6b783929411 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelMapDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelMapDataset.java @@ -107,6 +107,9 @@ public static ParallelMapDataset create(Scope scope, Operand in if (opts.preserveCardinality != null) { opBuilder.setAttr("preserve_cardinality", opts.preserveCardinality); } + if (opts.useUnboundedThreadpool != null) { + opBuilder.setAttr("use_unbounded_threadpool", opts.useUnboundedThreadpool); + } if (opts.metadata != null) { opBuilder.setAttr("metadata", opts.metadata); } @@ -145,6 +148,16 @@ public static Options preserveCardinality(Boolean preserveCardinality) { return new Options().preserveCardinality(preserveCardinality); } + /** + * Sets the useUnboundedThreadpool option. + * + * @param useUnboundedThreadpool the useUnboundedThreadpool option + * @return this Options instance. + */ + public static Options useUnboundedThreadpool(Boolean useUnboundedThreadpool) { + return new Options().useUnboundedThreadpool(useUnboundedThreadpool); + } + /** * Sets the metadata option. * @@ -180,6 +193,8 @@ public static class Options { private Boolean preserveCardinality; + private Boolean useUnboundedThreadpool; + private String metadata; private Options() { @@ -218,6 +233,17 @@ public Options preserveCardinality(Boolean preserveCardinality) { return this; } + /** + * Sets the useUnboundedThreadpool option. + * + * @param useUnboundedThreadpool the useUnboundedThreadpool option + * @return this Options instance. 
+ */ + public Options useUnboundedThreadpool(Boolean useUnboundedThreadpool) { + this.useUnboundedThreadpool = useUnboundedThreadpool; + return this; + } + /** * Sets the metadata option. * @@ -280,13 +306,18 @@ public static class Inputs extends RawOpInputs { */ public final boolean preserveCardinality; + /** + * The useUnboundedThreadpool attribute + */ + public final boolean useUnboundedThreadpool; + /** * The metadata attribute */ public final String metadata; public Inputs(GraphOperation op) { - super(new ParallelMapDataset(op), op, Arrays.asList("Targuments", "output_types", "output_shapes", "use_inter_op_parallelism", "deterministic", "preserve_cardinality", "metadata")); + super(new ParallelMapDataset(op), op, Arrays.asList("Targuments", "output_types", "output_shapes", "use_inter_op_parallelism", "deterministic", "preserve_cardinality", "use_unbounded_threadpool", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); int otherArgumentsLength = op.inputListLength("other_arguments"); @@ -299,6 +330,7 @@ public Inputs(GraphOperation op) { useInterOpParallelism = op.attributes().getAttrBool("use_inter_op_parallelism"); deterministic = op.attributes().getAttrString("deterministic"); preserveCardinality = op.attributes().getAttrBool("preserve_cardinality"); + useUnboundedThreadpool = op.attributes().getAttrBool("use_unbounded_threadpool"); metadata = op.attributes().getAttrString("metadata"); } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParseExampleDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParseExampleDataset.java index 64662ec8ce3..c50ec51e906 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParseExampleDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParseExampleDataset.java @@ -268,7 +268,7 @@ public static class Inputs extends RawOpInputs { /** * A list of string 
keys in the examples features. - * The results for these keys will be returned as `SparseTensor` objects. + * The results for these keys will be returned as {@code SparseTensor} objects. */ public final String[] sparseKeys; @@ -279,23 +279,23 @@ public static class Inputs extends RawOpInputs { public final String[] denseKeys; /** - * A list of `DTypes` of the same length as `sparse_keys`. - * Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), - * and `tf.string` (`BytesList`) are supported. + * A list of {@code DTypes} of the same length as {@code sparse_keys}. + * Only {@code tf.float32} ({@code FloatList}), {@code tf.int64} ({@code Int64List}), + * and {@code tf.string} ({@code BytesList}) are supported. */ public final DataType[] sparseTypes; /** - * A list of DTypes of the same length as `dense_keys`. - * Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), - * and `tf.string` (`BytesList`) are supported. + * A list of DTypes of the same length as {@code dense_keys}. + * Only {@code tf.float32} ({@code FloatList}), {@code tf.int64} ({@code Int64List}), + * and {@code tf.string} ({@code BytesList}) are supported. */ public final DataType[] Tdense; /** - * List of tuples with the same length as `dense_keys`. - * The shape of the data for each dense feature referenced by `dense_keys`. - * Required for any input tensors identified by `dense_keys`. Must be + * List of tuples with the same length as {@code dense_keys}. + * The shape of the data for each dense feature referenced by {@code dense_keys}. + * Required for any input tensors identified by {@code dense_keys}. Must be * either fully defined, or may contain an unknown first dimension. * An unknown first dimension means the feature is treated as having * a variable number of blocks, and the output shape along this dimension @@ -319,8 +319,8 @@ public static class Inputs extends RawOpInputs { * A string indicating the op-level determinism to use. 
Deterministic controls * whether the dataset is allowed to return elements out of order if the next * element to be returned isn't available, but a later element is. Options are - * "true", "false", and "default". "default" indicates that determinism should be - * decided by the `experimental_deterministic` parameter of `tf.data.Options`. + * "true", "false", and "default". "default" indicates that determinism should be + * decided by the {@code experimental_deterministic} parameter of {@code tf.data.Options}. */ public final String deterministic; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RewriteDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RewriteDataset.java index 9e0c1449324..d67c7d6e808 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RewriteDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RewriteDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -43,6 +44,9 @@ opType = RewriteDataset.OP_NAME, inputsClass = RewriteDataset.Inputs.class ) +@Operator( + group = "data" +) public final class RewriteDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotChunkDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotChunkDataset.java index 71c3718f840..df6172f9075 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotChunkDataset.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotChunkDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -43,6 +44,9 @@ opType = SnapshotChunkDataset.OP_NAME, inputsClass = SnapshotChunkDataset.Inputs.class ) +@Operator( + group = "data" +) public final class SnapshotChunkDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotDatasetReader.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotDatasetReader.java index 45b14fb5995..8a20a594bcf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotDatasetReader.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotDatasetReader.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -44,6 +45,9 @@ opType = SnapshotDatasetReader.OP_NAME, inputsClass = SnapshotDatasetReader.Inputs.class ) +@Operator( + group = "data" +) public final class SnapshotDatasetReader extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotNestedDatasetReader.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotNestedDatasetReader.java 
index 7c288397398..3d60a2cc237 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotNestedDatasetReader.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotNestedDatasetReader.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +43,9 @@ opType = SnapshotNestedDatasetReader.OP_NAME, inputsClass = SnapshotNestedDatasetReader.Inputs.class ) +@Operator( + group = "data" +) public final class SnapshotNestedDatasetReader extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/StatsAggregatorHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/StatsAggregatorHandle.java index 33d218181e5..080585d34a6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/StatsAggregatorHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/StatsAggregatorHandle.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** @@ -38,6 +39,9 @@ opType = StatsAggregatorHandle.OP_NAME, inputsClass = StatsAggregatorHandle.Inputs.class ) +@Operator( + group = "data" +) public final class StatsAggregatorHandle extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/StatsAggregatorSetSummaryWriter.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/StatsAggregatorSetSummaryWriter.java index c3310008cdd..c610409f62a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/StatsAggregatorSetSummaryWriter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/StatsAggregatorSetSummaryWriter.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** @@ -37,6 +38,9 @@ opType = StatsAggregatorSetSummaryWriter.OP_NAME, inputsClass = StatsAggregatorSetSummaryWriter.Inputs.class ) +@Operator( + group = "data" +) public final class StatsAggregatorSetSummaryWriter extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ThreadPoolHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ThreadPoolHandle.java index 6ae9cfdc470..8e110f97a30 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ThreadPoolHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ThreadPoolHandle.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** @@ -38,6 +39,9 @@ opType = ThreadPoolHandle.OP_NAME, inputsClass = ThreadPoolHandle.Inputs.class ) +@Operator( + group = "data" +) public final class ThreadPoolHandle extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UncompressElement.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UncompressElement.java index 22754dce45d..9c871ae7b08 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UncompressElement.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UncompressElement.java @@ -33,6 +33,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -43,6 +44,9 @@ opType = UncompressElement.OP_NAME, inputsClass = UncompressElement.Inputs.class ) +@Operator( + group = "data" +) public final class UncompressElement extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WeightedFlatMapDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WeightedFlatMapDataset.java new file mode 100644 index 00000000000..2f97c1e168c --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WeightedFlatMapDataset.java @@ -0,0 +1,186 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.data; + +import java.util.Arrays; +import java.util.List; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.proto.DataType; +import org.tensorflow.types.TFloat64; +import org.tensorflow.types.family.TType; + +/** + * The WeightedFlatMapDataset operation + */ +@OpMetadata( + opType = WeightedFlatMapDataset.OP_NAME, + inputsClass = WeightedFlatMapDataset.Inputs.class +) +public final class WeightedFlatMapDataset extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "WeightedFlatMapDataset"; + + private Output handle; + + @SuppressWarnings("unchecked") + public WeightedFlatMapDataset(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + handle = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new WeightedFlatMapDataset operation. 
+ * + * @param scope current scope + * @param inputDatasets The inputDatasets value + * @param weights The weights value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of WeightedFlatMapDataset + */ + @Endpoint( + describeByClass = true + ) + public static WeightedFlatMapDataset create(Scope scope, + Iterable> inputDatasets, Iterable> weights, + List> outputTypes, List outputShapes, Options... options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "WeightedFlatMapDataset"); + opBuilder.addInputList(Operands.asOutputs(inputDatasets)); + opBuilder.addInputList(Operands.asOutputs(weights)); + opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); + Shape[] outputShapesArray = new Shape[outputShapes.size()]; + for (int i = 0 ; i < outputShapesArray.length ; i++) { + outputShapesArray[i] = outputShapes.get(i); + } + opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } + } + } + return new WeightedFlatMapDataset(opBuilder.build()); + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + + /** + * Gets handle. + * + * @return handle. + */ + public Output handle() { + return handle; + } + + @Override + @SuppressWarnings("unchecked") + public Output asOutput() { + return (Output) handle; + } + + /** + * Optional attributes for {@link org.tensorflow.op.data.WeightedFlatMapDataset} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. 
+ * + * @param metadata the metadata option + * @return this Options instance. + */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + + @OpInputsMetadata( + outputsClass = WeightedFlatMapDataset.class + ) + public static class Inputs extends RawOpInputs { + /** + * The inputDatasets input + */ + public final Iterable> inputDatasets; + + /** + * The weights input + */ + public final Iterable> weights; + + /** + * The outputTypes attribute + */ + public final DataType[] outputTypes; + + /** + * The outputShapes attribute + */ + public final Shape[] outputShapes; + + /** + * The metadata attribute + */ + public final String metadata; + + public Inputs(GraphOperation op) { + super(new WeightedFlatMapDataset(op), op, Arrays.asList("output_types", "output_shapes", "metadata")); + int inputIndex = 0; + int inputDatasetsLength = op.inputListLength("input_datasets"); + inputDatasets = Arrays.asList((Operand[]) op.inputList(inputIndex, inputDatasetsLength)); + inputIndex += inputDatasetsLength; + int weightsLength = op.inputListLength("weights"); + weights = Arrays.asList((Operand[]) op.inputList(inputIndex, weightsLength)); + inputIndex += weightsLength; + outputTypes = op.attributes().getAttrTypeList("output_types"); + outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WindowOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WindowOp.java index a1cfcaf853e..74097c53e64 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WindowOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WindowOp.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import 
org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +43,9 @@ opType = WindowOp.OP_NAME, inputsClass = WindowOp.Inputs.class ) +@Operator( + group = "data" +) public final class WindowOp extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AssertNextDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AssertNextDataset.java index f6f252f3ad5..92f21d06c07 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AssertNextDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AssertNextDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -43,6 +44,9 @@ opType = AssertNextDataset.OP_NAME, inputsClass = AssertNextDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class AssertNextDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AutoShardDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AutoShardDataset.java index 9e8615898f6..8c22e6fa9e1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AutoShardDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AutoShardDataset.java @@ -32,6 +32,7 @@ import 
org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -49,6 +50,9 @@ opType = AutoShardDataset.OP_NAME, inputsClass = AutoShardDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class AutoShardDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/BytesProducedStatsDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/BytesProducedStatsDataset.java index 92f1428e199..d98279464ed 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/BytesProducedStatsDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/BytesProducedStatsDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -43,6 +44,9 @@ opType = BytesProducedStatsDataset.OP_NAME, inputsClass = BytesProducedStatsDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class BytesProducedStatsDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/CSVDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/CSVDataset.java index ad981a6f0c9..43e1d9d58a4 100644 
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/CSVDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/CSVDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TBool; import org.tensorflow.types.TInt64; @@ -45,6 +46,9 @@ opType = CSVDataset.OP_NAME, inputsClass = CSVDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class CSVDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ChooseFastestDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ChooseFastestDataset.java index 640209ea50f..cab5575a928 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ChooseFastestDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ChooseFastestDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +43,9 @@ opType = ChooseFastestDataset.OP_NAME, inputsClass = ChooseFastestDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class ChooseFastestDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DatasetCardinality.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DatasetCardinality.java index 49f100d73a6..d73785b9a91 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DatasetCardinality.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DatasetCardinality.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -40,6 +41,9 @@ opType = DatasetCardinality.OP_NAME, inputsClass = DatasetCardinality.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class DatasetCardinality extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DatasetToTFRecord.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DatasetToTFRecord.java index 27e393a66a1..35889dec71c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DatasetToTFRecord.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DatasetToTFRecord.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -38,6 +39,9 @@ opType = DatasetToTFRecord.OP_NAME, inputsClass = DatasetToTFRecord.Inputs.class ) +@Operator( + group = "data.experimental" +) 
public final class DatasetToTFRecord extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DenseToSparseBatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DenseToSparseBatchDataset.java index 59992f2522c..414bb684323 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DenseToSparseBatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DenseToSparseBatchDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -43,6 +44,9 @@ opType = DenseToSparseBatchDataset.OP_NAME, inputsClass = DenseToSparseBatchDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class DenseToSparseBatchDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DirectedInterleaveDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DirectedInterleaveDataset.java index 04a72437d9b..6d4464a7746 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DirectedInterleaveDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DirectedInterleaveDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import 
org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +43,9 @@ opType = DirectedInterleaveDataset.OP_NAME, inputsClass = DirectedInterleaveDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class DirectedInterleaveDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/GroupByReducerDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/GroupByReducerDataset.java index e56e2dbc49e..db37b2fc4e8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/GroupByReducerDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/GroupByReducerDataset.java @@ -33,6 +33,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -44,6 +45,9 @@ opType = GroupByReducerDataset.OP_NAME, inputsClass = GroupByReducerDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class GroupByReducerDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/GroupByWindowDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/GroupByWindowDataset.java index 33db47df4f6..a24b41f050d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/GroupByWindowDataset.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/GroupByWindowDataset.java @@ -33,6 +33,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -44,6 +45,9 @@ opType = GroupByWindowDataset.OP_NAME, inputsClass = GroupByWindowDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class GroupByWindowDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/IgnoreErrorsDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/IgnoreErrorsDataset.java index def6476c057..26f1eabead2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/IgnoreErrorsDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/IgnoreErrorsDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +43,9 @@ opType = IgnoreErrorsDataset.OP_NAME, inputsClass = IgnoreErrorsDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class IgnoreErrorsDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/IteratorGetDevice.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/IteratorGetDevice.java index 6f6d6f70c30..b7dde3d078e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/IteratorGetDevice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/IteratorGetDevice.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -39,6 +40,9 @@ opType = IteratorGetDevice.OP_NAME, inputsClass = IteratorGetDevice.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class IteratorGetDevice extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/LatencyStatsDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/LatencyStatsDataset.java index bbb6ef677c9..582fde7e038 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/LatencyStatsDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/LatencyStatsDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -43,6 +44,9 @@ opType = LatencyStatsDataset.OP_NAME, inputsClass = LatencyStatsDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class LatencyStatsDataset extends RawOp implements 
Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/LmdbDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/LmdbDataset.java index 2d29444b0cc..5bbb82db008 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/LmdbDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/LmdbDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -43,6 +44,9 @@ opType = LmdbDataset.OP_NAME, inputsClass = LmdbDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class LmdbDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MapAndBatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MapAndBatchDataset.java index ced18243fe8..18ac4e051f9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MapAndBatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MapAndBatchDataset.java @@ -33,6 +33,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TBool; import org.tensorflow.types.TInt64; @@ -49,6 +50,9 @@ opType = 
MapAndBatchDataset.OP_NAME, inputsClass = MapAndBatchDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class MapAndBatchDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MapDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MapDataset.java index 8ce3160e9b1..7c8cfafc8f4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MapDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MapDataset.java @@ -33,6 +33,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -43,6 +44,9 @@ opType = MapDataset.OP_NAME, inputsClass = MapDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class MapDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -94,6 +98,9 @@ public static MapDataset create(Scope scope, Operand inputDatas if (opts.preserveCardinality != null) { opBuilder.setAttr("preserve_cardinality", opts.preserveCardinality); } + if (opts.forceSynchronous != null) { + opBuilder.setAttr("force_synchronous", opts.forceSynchronous); + } } } return new MapDataset(opBuilder.build()); @@ -119,6 +126,16 @@ public static Options preserveCardinality(Boolean preserveCardinality) { return new Options().preserveCardinality(preserveCardinality); } + /** + * Sets the forceSynchronous option. + * + * @param forceSynchronous the forceSynchronous option + * @return this Options instance. 
+ */ + public static Options forceSynchronous(Boolean forceSynchronous) { + return new Options().forceSynchronous(forceSynchronous); + } + /** * Gets handle. * @@ -142,6 +159,8 @@ public static class Options { private Boolean preserveCardinality; + private Boolean forceSynchronous; + private Options() { } @@ -166,6 +185,17 @@ public Options preserveCardinality(Boolean preserveCardinality) { this.preserveCardinality = preserveCardinality; return this; } + + /** + * Sets the forceSynchronous option. + * + * @param forceSynchronous the forceSynchronous option + * @return this Options instance. + */ + public Options forceSynchronous(Boolean forceSynchronous) { + this.forceSynchronous = forceSynchronous; + return this; + } } @OpInputsMetadata( @@ -207,8 +237,13 @@ public static class Inputs extends RawOpInputs { */ public final boolean preserveCardinality; + /** + * The forceSynchronous attribute + */ + public final boolean forceSynchronous; + public Inputs(GraphOperation op) { - super(new MapDataset(op), op, Arrays.asList("Targuments", "output_types", "output_shapes", "use_inter_op_parallelism", "preserve_cardinality")); + super(new MapDataset(op), op, Arrays.asList("Targuments", "output_types", "output_shapes", "use_inter_op_parallelism", "preserve_cardinality", "force_synchronous")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); int otherArgumentsLength = op.inputListLength("other_arguments"); @@ -219,6 +254,7 @@ public Inputs(GraphOperation op) { outputShapes = op.attributes().getAttrShapeList("output_shapes"); useInterOpParallelism = op.attributes().getAttrBool("use_inter_op_parallelism"); preserveCardinality = op.attributes().getAttrBool("preserve_cardinality"); + forceSynchronous = op.attributes().getAttrBool("force_synchronous"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MatchingFilesDataset.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MatchingFilesDataset.java index ccdce76b4d2..37486a2a506 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MatchingFilesDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MatchingFilesDataset.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -39,6 +40,9 @@ opType = MatchingFilesDataset.OP_NAME, inputsClass = MatchingFilesDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class MatchingFilesDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MaxIntraOpParallelismDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MaxIntraOpParallelismDataset.java index 87b17b10b50..e8d73813cbd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MaxIntraOpParallelismDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MaxIntraOpParallelismDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -43,6 +44,9 @@ opType = MaxIntraOpParallelismDataset.OP_NAME, inputsClass = MaxIntraOpParallelismDataset.Inputs.class ) +@Operator( + group = 
"data.experimental" +) public final class MaxIntraOpParallelismDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/NonSerializableDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/NonSerializableDataset.java index 37a41cce38f..fcfeda256ec 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/NonSerializableDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/NonSerializableDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +43,9 @@ opType = NonSerializableDataset.OP_NAME, inputsClass = NonSerializableDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class NonSerializableDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ParallelInterleaveDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ParallelInterleaveDataset.java index fc7905dd5e9..df03bf4d26a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ParallelInterleaveDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ParallelInterleaveDataset.java @@ -33,6 +33,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import 
org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TBool; import org.tensorflow.types.TInt64; @@ -51,6 +52,9 @@ opType = ParallelInterleaveDataset.OP_NAME, inputsClass = ParallelInterleaveDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class ParallelInterleaveDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ParseExampleDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ParseExampleDataset.java index 5bbc30a8fe2..28c138c0032 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ParseExampleDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ParseExampleDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -43,6 +44,9 @@ opType = ParseExampleDataset.OP_NAME, inputsClass = ParseExampleDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class ParseExampleDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -198,7 +202,7 @@ public static class Inputs extends RawOpInputs { /** * A list of string keys in the examples features. - * The results for these keys will be returned as `SparseTensor` objects. + * The results for these keys will be returned as {@code SparseTensor} objects. 
*/ public final String[] sparseKeys; @@ -209,23 +213,23 @@ public static class Inputs extends RawOpInputs { public final String[] denseKeys; /** - * A list of `DTypes` of the same length as `sparse_keys`. - * Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), - * and `tf.string` (`BytesList`) are supported. + * A list of {@code DTypes} of the same length as {@code sparse_keys}. + * Only {@code tf.float32} ({@code FloatList}), {@code tf.int64} ({@code Int64List}), + * and {@code tf.string} ({@code BytesList}) are supported. */ public final DataType[] sparseTypes; /** - * A list of DTypes of the same length as `dense_keys`. - * Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), - * and `tf.string` (`BytesList`) are supported. + * A list of DTypes of the same length as {@code dense_keys}. + * Only {@code tf.float32} ({@code FloatList}), {@code tf.int64} ({@code Int64List}), + * and {@code tf.string} ({@code BytesList}) are supported. */ public final DataType[] Tdense; /** - * List of tuples with the same length as `dense_keys`. - * The shape of the data for each dense feature referenced by `dense_keys`. - * Required for any input tensors identified by `dense_keys`. Must be + * List of tuples with the same length as {@code dense_keys}. + * The shape of the data for each dense feature referenced by {@code dense_keys}. + * Required for any input tensors identified by {@code dense_keys}. Must be * either fully defined, or may contain an unknown first dimension. 
* An unknown first dimension means the feature is treated as having * a variable number of blocks, and the output shape along this dimension diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/PrivateThreadPoolDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/PrivateThreadPoolDataset.java index 3b6f70a5f6d..f555de177d6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/PrivateThreadPoolDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/PrivateThreadPoolDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -43,6 +44,9 @@ opType = PrivateThreadPoolDataset.OP_NAME, inputsClass = PrivateThreadPoolDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class PrivateThreadPoolDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/RandomDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/RandomDataset.java index 0e0b1581f7f..ca62de342d6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/RandomDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/RandomDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import 
org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -43,6 +44,9 @@ opType = RandomDataset.OP_NAME, inputsClass = RandomDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class RandomDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/RebatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/RebatchDataset.java index d837c94a029..f83fa7d5bc6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/RebatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/RebatchDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -45,6 +46,9 @@ opType = RebatchDataset.OP_NAME, inputsClass = RebatchDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class RebatchDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ScanDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ScanDataset.java index 77e67422992..782889a68ba 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ScanDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ScanDataset.java @@ -33,6 +33,7 @@ import org.tensorflow.op.annotation.Endpoint; 
import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -43,6 +44,9 @@ opType = ScanDataset.OP_NAME, inputsClass = ScanDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class ScanDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SetStatsAggregatorDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SetStatsAggregatorDataset.java index b661b04b49b..1d083fa51b2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SetStatsAggregatorDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SetStatsAggregatorDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -43,6 +44,9 @@ opType = SetStatsAggregatorDataset.OP_NAME, inputsClass = SetStatsAggregatorDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class SetStatsAggregatorDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SleepDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SleepDataset.java index 4888318d074..8b494e8d2bd 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SleepDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SleepDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -43,6 +44,9 @@ opType = SleepDataset.OP_NAME, inputsClass = SleepDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class SleepDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SlidingWindowDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SlidingWindowDataset.java index 926a05dd1e6..71063d28d11 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SlidingWindowDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SlidingWindowDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -43,6 +44,9 @@ opType = SlidingWindowDataset.OP_NAME, inputsClass = SlidingWindowDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class SlidingWindowDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SqlDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SqlDataset.java index 0fd9d24ac41..906ad5aeed0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SqlDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SqlDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -43,6 +44,9 @@ opType = SqlDataset.OP_NAME, inputsClass = SqlDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class SqlDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorHandle.java index 3e7e84f0ad3..23dea9a06da 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorHandle.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** @@ -38,6 +39,9 @@ opType = StatsAggregatorHandle.OP_NAME, inputsClass = StatsAggregatorHandle.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class 
StatsAggregatorHandle extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorSummary.java index 66340c83c40..ee33f0944f5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorSummary.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -39,6 +40,9 @@ opType = StatsAggregatorSummary.OP_NAME, inputsClass = StatsAggregatorSummary.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class StatsAggregatorSummary extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/TakeWhileDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/TakeWhileDataset.java index db4b8d19649..2d3097055fd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/TakeWhileDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/TakeWhileDataset.java @@ -33,6 +33,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import 
org.tensorflow.types.family.TType; @@ -49,6 +50,9 @@ opType = TakeWhileDataset.OP_NAME, inputsClass = TakeWhileDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class TakeWhileDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ThreadPoolDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ThreadPoolDataset.java index 6e9999b4216..5ceeb9f1745 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ThreadPoolDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ThreadPoolDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +43,9 @@ opType = ThreadPoolDataset.OP_NAME, inputsClass = ThreadPoolDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class ThreadPoolDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ThreadPoolHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ThreadPoolHandle.java index 767c26aabfd..edcfe5cd3b8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ThreadPoolHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ThreadPoolHandle.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import 
org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** @@ -38,6 +39,9 @@ opType = ThreadPoolHandle.OP_NAME, inputsClass = ThreadPoolHandle.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class ThreadPoolHandle extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UnbatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UnbatchDataset.java index e0d19f90c9d..6f207dabb59 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UnbatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UnbatchDataset.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +43,9 @@ opType = UnbatchDataset.OP_NAME, inputsClass = UnbatchDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class UnbatchDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UniqueDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UniqueDataset.java index ad60b1fade5..9ac956cc829 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UniqueDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UniqueDataset.java @@ -32,6 +32,7 @@ import 
org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +43,9 @@ opType = UniqueDataset.OP_NAME, inputsClass = UniqueDataset.Inputs.class ) +@Operator( + group = "data.experimental" +) public final class UniqueDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/CheckNumerics.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/CheckNumerics.java index 2067d99bf77..86215fa9a9c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/CheckNumerics.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/CheckNumerics.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; @@ -38,13 +39,14 @@ * that are not a number (NaN) or infinity (Inf). Otherwise, returns the input * tensor. Unlike CheckNumerics (V1), CheckNumericsV2 distinguishes -Inf and +Inf * in the errors it throws. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = CheckNumerics.OP_NAME, inputsClass = CheckNumerics.Inputs.class ) +@Operator( + group = "debugging" +) public final class CheckNumerics extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientIdentity.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientIdentity.java index 37f2fec7d91..776a971ef27 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientIdentity.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientIdentity.java @@ -37,8 +37,6 @@ * This op is hidden from public in Python. It is used by TensorFlow Debugger to * register gradient tensors for gradient debugging. * This op operates on non-reference-type tensors. - * - * @param data type for {@code output} output */ @OpMetadata( opType = DebugGradientIdentity.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientRefIdentity.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientRefIdentity.java index 5071299a66a..76a9e9029ca 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientRefIdentity.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientRefIdentity.java @@ -37,8 +37,6 @@ * This op is hidden from public in Python. It is used by TensorFlow Debugger to * register gradient tensors for gradient debugging. * This op operates on reference-type tensors. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = DebugGradientRefIdentity.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugIdentity.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugIdentity.java index 89854abdc1d..10edd71d4b1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugIdentity.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugIdentity.java @@ -36,8 +36,6 @@ /** * Provides an identity mapping of the non-Ref type input tensor for debugging. * Provides an identity mapping of the non-Ref type input tensor for debugging. - * - * @param data type for {@code output} output */ @OpMetadata( opType = DebugIdentity.OP_NAME, @@ -364,17 +362,17 @@ public static class Inputs extends RawOpInputs /** * List of URLs to debug targets, e.g., - * file:///foo/tfdbg_dump, grpc:://localhost:11011 + * file:///foo/tfdbg_dump, grpc:://localhost:11011 */ public final String[] debugUrls; /** * Whether this op will be gated. If any of the debug_urls of this - * debug node is of the grpc:// scheme, when the value of this attribute is set - * to True, the data will not actually be sent via the grpc stream unless this - * debug op has been enabled at the debug_url. If all of the debug_urls of this - * debug node are of the grpc:// scheme and the debug op is enabled at none of - * them, the output will be an empty Tensor. + * debug node is of the grpc:// scheme, when the value of this attribute is set + * to True, the data will not actually be sent via the grpc stream unless this + * debug op has been enabled at the debug_url. If all of the debug_urls of this + * debug node are of the grpc:// scheme and the debug op is enabled at none of + * them, the output will be an empty Tensor. 
*/ public final boolean gatedGrpc; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNanCount.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNanCount.java index 4bc52436f35..2d0e2f5fe94 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNanCount.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNanCount.java @@ -269,17 +269,17 @@ public static class Inputs extends RawOpInputs { /** * List of URLs to debug targets, e.g., - * file:///foo/tfdbg_dump, grpc:://localhost:11011. + * file:///foo/tfdbg_dump, grpc:://localhost:11011. */ public final String[] debugUrls; /** - * Whether this op will be gated. If any of the debug_urls of this - * debug node is of the grpc:// scheme, when the value of this attribute is set - * to True, the data will not actually be sent via the grpc stream unless this - * debug op has been enabled at the debug_url. If all of the debug_urls of this - * debug node are of the grpc:// scheme and the debug op is enabled at none of - * them, the output will be an empty Tensor. + * Whether this op will be gated. If any of the debug_urls of this + * debug node is of the grpc:// scheme, when the value of this attribute is set + * to True, the data will not actually be sent via the grpc stream unless this + * debug op has been enabled at the debug_url. If all of the debug_urls of this + * debug node are of the grpc:// scheme and the debug op is enabled at none of + * them, the output will be an empty Tensor. 
*/ public final boolean gatedGrpc; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNumericsSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNumericsSummary.java index 416438128d2..4ff0f11c7bc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNumericsSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNumericsSummary.java @@ -40,8 +40,6 @@ * Computes a numeric summary of the input tensor. The shape of the output * depends on the tensor_debug_mode attribute. * This op is used internally by TensorFlow Debugger (tfdbg) v2. - * - * @param data type for {@code output} output */ @OpMetadata( opType = DebugNumericsSummary.OP_NAME, @@ -291,59 +289,53 @@ public static class Inputs extends RawOpInputs> { /** * Tensor debug mode: the mode in which the input tensor is summarized - * by the op. See the TensorDebugMode enum in - * tensorflow/core/protobuf/debug_event.proto for details. - * - * Supported values: - * 2 (CURT_HEALTH): Output a float32/64 tensor of shape [2]. The 1st - * element is the tensor_id, if provided, and -1 otherwise. The 2nd - * element is a bit which is set to 1 if the input tensor has an - * infinity or nan value, or zero otherwise. - * - * 3 (CONCISE_HEALTH): Output a float32/64 tensor of shape [5]. The 1st - * element is the tensor_id, if provided, and -1 otherwise. The - * remaining four slots are the total number of elements, -infs, - * +infs, and nans in the input tensor respectively. - * - * 4 (FULL_HEALTH): Output a float32/64 tensor of shape [11]. The 1st - * element is the tensor_id, if provided, and -1 otherwise. The 2nd - * element is the device_id, if provided, and -1 otherwise. The 3rd - * element holds the datatype value of the input tensor as according - * to the enumerated type in tensorflow/core/framework/types.proto. 
- * The remaining elements hold the total number of elements, -infs, - * +infs, nans, negative finite numbers, zeros, and positive finite - * numbers in the input tensor respectively. - * - * 5 (SHAPE): Output a float32/64 tensor of shape [10]. The 1st - * element is the tensor_id, if provided, and -1 otherwise. The 2nd - * element holds the datatype value of the input tensor as according - * to the enumerated type in tensorflow/core/framework/types.proto. - * The 3rd element holds the rank of the tensor. The 4th element holds - * the number of elements within the tensor. Finally the remaining 6 - * elements hold the shape of the tensor. If the rank of the tensor - * is lower than 6, the shape is right padded with zeros. If the rank - * is greater than 6, the head of the shape is truncated. - * - * 6 (FULL_NUMERICS): Output a float32/64 tensor of shape [22]. The 1st - * element is the tensor_id, if provided, and -1 otherwise. The 2nd - * element is the device_id, if provided, and -1 otherwise. The 3rd - * element holds the datatype value of the input tensor as according - * to the enumerated type in tensorflow/core/framework/types.proto. - * The 4th element holds the rank of the tensor. The 5th to 11th - * elements hold the shape of the tensor. If the rank of the tensor - * is lower than 6, the shape is right padded with zeros. If the rank - * is greater than 6, the head of the shape is truncated. The 12th to - * 18th elements hold the number of elements, -infs, +infs, nans, - * denormal floats, negative finite numbers, zeros, and positive - * finite numbers in the input tensor respectively. The final four - * elements hold the min value, max value, mean, and variance of the - * input tensor. - * - * 8 (REDUCE_INF_NAN_THREE_SLOTS): Output a float32/64 tensor of shape - * [3]. The 1st element is -inf if any elements of the input tensor - * is -inf, or zero otherwise. The 2nd element is +inf if any elements - * of the input tensor is +inf, or zero otherwise. 
The 3rd element is - * nan if any element of the input tensor is nan, or zero otherwise. + * by the op. See the TensorDebugMode enum in + * tensorflow/core/protobuf/debug_event.proto for details. + *

      Supported values: + * 2 (CURT_HEALTH): Output a float32/64 tensor of shape [2]. The 1st + * element is the tensor_id, if provided, and -1 otherwise. The 2nd + * element is a bit which is set to 1 if the input tensor has an + * infinity or nan value, or zero otherwise. + *

      3 (CONCISE_HEALTH): Output a float32/64 tensor of shape [5]. The 1st + * element is the tensor_id, if provided, and -1 otherwise. The + * remaining four slots are the total number of elements, -infs, + * +infs, and nans in the input tensor respectively. + *

      4 (FULL_HEALTH): Output a float32/64 tensor of shape [11]. The 1st + * element is the tensor_id, if provided, and -1 otherwise. The 2nd + * element is the device_id, if provided, and -1 otherwise. The 3rd + * element holds the datatype value of the input tensor as according + * to the enumerated type in tensorflow/core/framework/types.proto. + * The remaining elements hold the total number of elements, -infs, + * +infs, nans, negative finite numbers, zeros, and positive finite + * numbers in the input tensor respectively. + *

      5 (SHAPE): Output a float32/64 tensor of shape [10]. The 1st + * element is the tensor_id, if provided, and -1 otherwise. The 2nd + * element holds the datatype value of the input tensor as according + * to the enumerated type in tensorflow/core/framework/types.proto. + * The 3rd element holds the rank of the tensor. The 4th element holds + * the number of elements within the tensor. Finally the remaining 6 + * elements hold the shape of the tensor. If the rank of the tensor + * is lower than 6, the shape is right padded with zeros. If the rank + * is greater than 6, the head of the shape is truncated. + *

      6 (FULL_NUMERICS): Output a float32/64 tensor of shape [22]. The 1st + * element is the tensor_id, if provided, and -1 otherwise. The 2nd + * element is the device_id, if provided, and -1 otherwise. The 3rd + * element holds the datatype value of the input tensor as according + * to the enumerated type in tensorflow/core/framework/types.proto. + * The 4th element holds the rank of the tensor. The 5th to 11th + * elements hold the shape of the tensor. If the rank of the tensor + * is lower than 6, the shape is right padded with zeros. If the rank + * is greater than 6, the head of the shape is truncated. The 12th to + * 18th elements hold the number of elements, -infs, +infs, nans, + * denormal floats, negative finite numbers, zeros, and positive + * finite numbers in the input tensor respectively. The final four + * elements hold the min value, max value, mean, and variance of the + * input tensor. + *

      8 (REDUCE_INF_NAN_THREE_SLOTS): Output a float32/64 tensor of shape + * [3]. The 1st element is -inf if any elements of the input tensor + * is -inf, or zero otherwise. The 2nd element is +inf if any elements + * of the input tensor is +inf, or zero otherwise. The 3rd element is + * nan if any element of the input tensor is nan, or zero otherwise. */ public final long tensorDebugMode; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclAllReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclAllReduce.java index 6f804d8de1d..7cc17dd9d36 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclAllReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclAllReduce.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; @@ -44,13 +45,14 @@ * reduction: the reduction operation to perform. * num_devices: The number of devices participating in this reduction. * shared_name: Identifier that shared between ops of the same reduction. 
- * - * @param data type for {@code data} output */ @OpMetadata( opType = NcclAllReduce.OP_NAME, inputsClass = NcclAllReduce.Inputs.class ) +@Operator( + group = "distribute" +) public final class NcclAllReduce extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclBroadcast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclBroadcast.java index fe88425d478..41a2050e44f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclBroadcast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclBroadcast.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; @@ -41,13 +42,14 @@ *

      input: The input to the broadcast. * output: The same as input. * shape: The shape of the input tensor. - * - * @param data type for {@code output} output */ @OpMetadata( opType = NcclBroadcast.OP_NAME, inputsClass = NcclBroadcast.Inputs.class ) +@Operator( + group = "distribute" +) public final class NcclBroadcast extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclReduce.java index 76d50c1ce3e..8fcf62bf4cc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclReduce.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; @@ -41,13 +42,14 @@ *

      input: The input to the reduction. * data: the value of the reduction across all {@code num_devices} devices. * reduction: the reduction operation to perform. - * - * @param data type for {@code data} output */ @OpMetadata( opType = NcclReduce.OP_NAME, inputsClass = NcclReduce.Inputs.class ) +@Operator( + group = "distribute" +) public final class NcclReduce extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/AsString.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/AsString.java index 77ba7493eb7..1f9b0285c6c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/AsString.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/AsString.java @@ -272,7 +272,7 @@ public static class Inputs extends RawOpInputs { /** * The post-decimal precision to use for floating point numbers. - * Only used if precision > -1. + * Only used if precision > -1. */ public final long precision; @@ -290,12 +290,12 @@ public static class Inputs extends RawOpInputs { /** * Pad pre-decimal numbers to this width. * Applies to both floating point and integer numbers. - * Only used if width > -1. + * Only used if width > -1. */ public final long width; /** - * The value to pad if width > -1. If empty, pads with spaces. + * The value to pad if width > -1. If empty, pads with spaces. * Another typical value is '0'. String cannot be longer than 1 character. 
*/ public final String fill; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Cast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Cast.java index 806ad99e2ea..af516490d88 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Cast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Cast.java @@ -36,8 +36,6 @@ /** * Cast x of type SrcT to y of DstT. - * - * @param data type for {@code y} output */ @OpMetadata( opType = Cast.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Complex.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Complex.java index 6b0a717157c..0da2678549f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Complex.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Complex.java @@ -48,8 +48,6 @@ * # tensor `imag` is [4.75, 5.75] * tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] * - * - * @param data type for {@code out} output */ @OpMetadata( opType = Complex.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/ToBool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/ToBool.java index 48f9f4be62f..0db30e9ca62 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/ToBool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/ToBool.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TBool; import org.tensorflow.types.family.TType; @@ -55,6 +56,9 @@ opType = ToBool.OP_NAME, inputsClass = 
ToBool.Inputs.class ) +@Operator( + group = "dtypes" +) public final class ToBool extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesAggregateStats.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesAggregateStats.java deleted file mode 100644 index 4fa32ac6dd7..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesAggregateStats.java +++ /dev/null @@ -1,145 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; -import org.tensorflow.types.TInt32; - -/** - * Aggregates the summary of accumulated stats for the batch. 
- * The summary stats contains gradients and hessians accumulated for each node, feature dimension id and bucket. - */ -@OpMetadata( - opType = BoostedTreesAggregateStats.OP_NAME, - inputsClass = BoostedTreesAggregateStats.Inputs.class -) -public final class BoostedTreesAggregateStats extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesAggregateStats"; - - private Output statsSummary; - - public BoostedTreesAggregateStats(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - statsSummary = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new BoostedTreesAggregateStats operation. - * - * @param scope current scope - * @param nodeIds int32; Rank 1 Tensor containing node ids for each example, shape [batch_size]. - * @param gradients float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example. - * @param hessians float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example. - * @param feature int32; Rank 2 feature Tensors (shape=[batch_size, feature_dimension]). - * @param maxSplits int; the maximum number of splits possible in the whole tree. - * @param numBuckets int; equals to the maximum possible value of bucketized feature. 
- * @return a new instance of BoostedTreesAggregateStats - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesAggregateStats create(Scope scope, Operand nodeIds, - Operand gradients, Operand hessians, Operand feature, - Long maxSplits, Long numBuckets) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesAggregateStats"); - opBuilder.addInput(nodeIds.asOutput()); - opBuilder.addInput(gradients.asOutput()); - opBuilder.addInput(hessians.asOutput()); - opBuilder.addInput(feature.asOutput()); - opBuilder.setAttr("max_splits", maxSplits); - opBuilder.setAttr("num_buckets", numBuckets); - return new BoostedTreesAggregateStats(opBuilder.build()); - } - - /** - * Gets statsSummary. - * output Rank 4 Tensor (shape=[splits, feature_dimension, buckets, logits_dimension + hessian_dimension]) - * containing accumulated stats for each node, feature dimension and bucket. - * @return statsSummary. - */ - public Output statsSummary() { - return statsSummary; - } - - @Override - public Output asOutput() { - return statsSummary; - } - - @OpInputsMetadata( - outputsClass = BoostedTreesAggregateStats.class - ) - public static class Inputs extends RawOpInputs { - /** - * int32; Rank 1 Tensor containing node ids for each example, shape [batch_size]. - */ - public final Operand nodeIds; - - /** - * float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example. - */ - public final Operand gradients; - - /** - * float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example. - */ - public final Operand hessians; - - /** - * int32; Rank 2 feature Tensors (shape=[batch_size, feature_dimension]). - */ - public final Operand feature; - - /** - * int; the maximum number of splits possible in the whole tree. - */ - public final long maxSplits; - - /** - * int; equals to the maximum possible value of bucketized feature. 
- */ - public final long numBuckets; - - public Inputs(GraphOperation op) { - super(new BoostedTreesAggregateStats(op), op, Arrays.asList("max_splits", "num_buckets")); - int inputIndex = 0; - nodeIds = (Operand) op.input(inputIndex++); - gradients = (Operand) op.input(inputIndex++); - hessians = (Operand) op.input(inputIndex++); - feature = (Operand) op.input(inputIndex++); - maxSplits = op.attributes().getAttrInt("max_splits"); - numBuckets = op.attributes().getAttrInt("num_buckets"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesBucketize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesBucketize.java deleted file mode 100644 index ee2aecbace0..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesBucketize.java +++ /dev/null @@ -1,125 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.Operands; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; -import org.tensorflow.types.TInt32; - -/** - * Bucketize each feature based on bucket boundaries. - * An op that returns a list of float tensors, where each tensor represents the - * bucketized values for a single feature. - */ -@OpMetadata( - opType = BoostedTreesBucketize.OP_NAME, - inputsClass = BoostedTreesBucketize.Inputs.class -) -public final class BoostedTreesBucketize extends RawOp implements Iterable> { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesBucketize"; - - private List> buckets; - - @SuppressWarnings("unchecked") - public BoostedTreesBucketize(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - int bucketsLength = operation.outputListLength("buckets"); - buckets = Arrays.asList((Output[]) operation.outputList(outputIdx, bucketsLength)); - outputIdx += bucketsLength; - } - - /** - * Factory method to create a class wrapping a new BoostedTreesBucketize operation. - * - * @param scope current scope - * @param floatValues float; List of Rank 1 Tensor each containing float values for a single feature. - * @param bucketBoundaries float; List of Rank 1 Tensors each containing the bucket boundaries for a single - * feature. 
- * @return a new instance of BoostedTreesBucketize - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesBucketize create(Scope scope, Iterable> floatValues, - Iterable> bucketBoundaries) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesBucketize"); - opBuilder.addInputList(Operands.asOutputs(floatValues)); - opBuilder.addInputList(Operands.asOutputs(bucketBoundaries)); - return new BoostedTreesBucketize(opBuilder.build()); - } - - /** - * Gets buckets. - * int; List of Rank 1 Tensors each containing the bucketized values for a single feature. - * @return buckets. - */ - public List> buckets() { - return buckets; - } - - @Override - @SuppressWarnings({"rawtypes", "unchecked"}) - public Iterator> iterator() { - return (Iterator) buckets.iterator(); - } - - @OpInputsMetadata( - outputsClass = BoostedTreesBucketize.class - ) - public static class Inputs extends RawOpInputs { - /** - * float; List of Rank 1 Tensor each containing float values for a single feature. - */ - public final Iterable> floatValues; - - /** - * float; List of Rank 1 Tensors each containing the bucket boundaries for a single - * feature. 
- */ - public final Iterable> bucketBoundaries; - - public Inputs(GraphOperation op) { - super(new BoostedTreesBucketize(op), op, Arrays.asList()); - int inputIndex = 0; - int floatValuesLength = op.inputListLength("float_values"); - floatValues = Arrays.asList((Operand[]) op.inputList(inputIndex, floatValuesLength)); - inputIndex += floatValuesLength; - int bucketBoundariesLength = op.inputListLength("bucket_boundaries"); - bucketBoundaries = Arrays.asList((Operand[]) op.inputList(inputIndex, bucketBoundariesLength)); - inputIndex += bucketBoundariesLength; - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestFeatureSplit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestFeatureSplit.java deleted file mode 100644 index 7d988e1935e..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestFeatureSplit.java +++ /dev/null @@ -1,272 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; -import org.tensorflow.types.TInt32; -import org.tensorflow.types.TString; - -/** - * Calculates gains for each feature and returns the best possible split information for the feature. - * The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature. - *

      It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return {@code node_ids_list} for each feature, containing the list of nodes that this feature can be used to split. - *

      In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features). - *

      The output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature. - */ -@OpMetadata( - opType = BoostedTreesCalculateBestFeatureSplit.OP_NAME, - inputsClass = BoostedTreesCalculateBestFeatureSplit.Inputs.class -) -public final class BoostedTreesCalculateBestFeatureSplit extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesCalculateBestFeatureSplit"; - - private Output nodeIds; - - private Output gains; - - private Output featureDimensions; - - private Output thresholds; - - private Output leftNodeContribs; - - private Output rightNodeContribs; - - private Output splitWithDefaultDirections; - - public BoostedTreesCalculateBestFeatureSplit(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - nodeIds = operation.output(outputIdx++); - gains = operation.output(outputIdx++); - featureDimensions = operation.output(outputIdx++); - thresholds = operation.output(outputIdx++); - leftNodeContribs = operation.output(outputIdx++); - rightNodeContribs = operation.output(outputIdx++); - splitWithDefaultDirections = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new BoostedTreesCalculateBestFeatureSplit operation. - * - * @param scope current scope - * @param nodeIdRange A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within {@code stats_summary_list}. The nodes are iterated between the two nodes specified by the tensor, as like {@code for node_id in range(node_id_range[0], node_id_range[1])} (Note that the last index node_id_range[1] is exclusive). - * @param statsSummary A Rank 4 tensor (#shape=[max_splits, feature_dims, bucket, stats_dims]) for accumulated stats summary (gradient/hessian) per node, per dimension, per buckets for each feature. 
- * The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used. - * @param l1 l1 regularization factor on leaf weights, per instance based. - * @param l2 l2 regularization factor on leaf weights, per instance based. - * @param treeComplexity adjustment to the gain, per leaf based. - * @param minNodeWeight minimum avg of hessians in a node before required for the node to be considered for splitting. - * @param logitsDimension The dimension of logit, i.e., number of classes. - * @param options carries optional attribute values - * @return a new instance of BoostedTreesCalculateBestFeatureSplit - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesCalculateBestFeatureSplit create(Scope scope, - Operand nodeIdRange, Operand statsSummary, Operand l1, - Operand l2, Operand treeComplexity, Operand minNodeWeight, - Long logitsDimension, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesCalculateBestFeatureSplit"); - opBuilder.addInput(nodeIdRange.asOutput()); - opBuilder.addInput(statsSummary.asOutput()); - opBuilder.addInput(l1.asOutput()); - opBuilder.addInput(l2.asOutput()); - opBuilder.addInput(treeComplexity.asOutput()); - opBuilder.addInput(minNodeWeight.asOutput()); - opBuilder.setAttr("logits_dimension", logitsDimension); - if (options != null) { - for (Options opts : options) { - if (opts.splitType != null) { - opBuilder.setAttr("split_type", opts.splitType); - } - } - } - return new BoostedTreesCalculateBestFeatureSplit(opBuilder.build()); - } - - /** - * Sets the splitType option. - * - * @param splitType A string indicating if this Op should perform inequality split or equality split. - * @return this Options instance. - */ - public static Options splitType(String splitType) { - return new Options().splitType(splitType); - } - - /** - * Gets nodeIds. 
- * A Rank 1 tensors indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has different size as each feature provides different possible nodes. See above for details like shapes and sizes. - * @return nodeIds. - */ - public Output nodeIds() { - return nodeIds; - } - - /** - * Gets gains. - * A Rank 1 tensors indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes. - * @return gains. - */ - public Output gains() { - return gains; - } - - /** - * Gets featureDimensions. - * A Rank 1 tensors indicating the best feature dimension for each feature to split for certain nodes if the feature is multi-dimension. See above for details like shapes and sizes. - * @return featureDimensions. - */ - public Output featureDimensions() { - return featureDimensions; - } - - /** - * Gets thresholds. - * A Rank 1 tensors indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes. - * @return thresholds. - */ - public Output thresholds() { - return thresholds; - } - - /** - * Gets leftNodeContribs. - * A Rank 2 tensors indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes. - * @return leftNodeContribs. - */ - public Output leftNodeContribs() { - return leftNodeContribs; - } - - /** - * Gets rightNodeContribs. - * A Rank 2 tensors, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node. - * @return rightNodeContribs. 
- */ - public Output rightNodeContribs() { - return rightNodeContribs; - } - - /** - * Gets splitWithDefaultDirections. - * A Rank 1 tensors indicating the which direction to go if data is missing. See above for details like shapes and sizes. - * Inequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2. - * @return splitWithDefaultDirections. - */ - public Output splitWithDefaultDirections() { - return splitWithDefaultDirections; - } - - /** - * Optional attributes for {@link org.tensorflow.op.estimator.BoostedTreesCalculateBestFeatureSplit} - */ - public static class Options { - private String splitType; - - private Options() { - } - - /** - * Sets the splitType option. - * - * @param splitType A string indicating if this Op should perform inequality split or equality split. - * @return this Options instance. - */ - public Options splitType(String splitType) { - this.splitType = splitType; - return this; - } - } - - @OpInputsMetadata( - outputsClass = BoostedTreesCalculateBestFeatureSplit.class - ) - public static class Inputs extends RawOpInputs { - /** - * A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within {@code stats_summary_list}. The nodes are iterated between the two nodes specified by the tensor, as like {@code for node_id in range(node_id_range[0], node_id_range[1])} (Note that the last index node_id_range[1] is exclusive). - */ - public final Operand nodeIdRange; - - /** - * A Rank 4 tensor (#shape=[max_splits, feature_dims, bucket, stats_dims]) for accumulated stats summary (gradient/hessian) per node, per dimension, per buckets for each feature. - * The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used. - */ - public final Operand statsSummary; - - /** - * l1 regularization factor on leaf weights, per instance based. 
- */ - public final Operand l1; - - /** - * l2 regularization factor on leaf weights, per instance based. - */ - public final Operand l2; - - /** - * adjustment to the gain, per leaf based. - */ - public final Operand treeComplexity; - - /** - * minimum avg of hessians in a node before required for the node to be considered for splitting. - */ - public final Operand minNodeWeight; - - /** - * The dimension of logit, i.e., number of classes. - */ - public final long logitsDimension; - - /** - * A string indicating if this Op should perform inequality split or equality split. - */ - public final String splitType; - - public Inputs(GraphOperation op) { - super(new BoostedTreesCalculateBestFeatureSplit(op), op, Arrays.asList("logits_dimension", "split_type")); - int inputIndex = 0; - nodeIdRange = (Operand) op.input(inputIndex++); - statsSummary = (Operand) op.input(inputIndex++); - l1 = (Operand) op.input(inputIndex++); - l2 = (Operand) op.input(inputIndex++); - treeComplexity = (Operand) op.input(inputIndex++); - minNodeWeight = (Operand) op.input(inputIndex++); - logitsDimension = op.attributes().getAttrInt("logits_dimension"); - splitType = op.attributes().getAttrString("split_type"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestFeatureSplitV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestFeatureSplitV2.java deleted file mode 100644 index 52a1c17c65a..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestFeatureSplitV2.java +++ /dev/null @@ -1,259 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.Operands; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; -import org.tensorflow.types.TInt32; -import org.tensorflow.types.TString; - -/** - * Calculates gains for each feature and returns the best possible split information for each node. However, if no split is found, then no split information is returned for that node. - * The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature. - *

      It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return {@code node_ids_list} for each feature, containing the list of nodes that this feature can be used to split. - *

      In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features). - *

      The output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature. - */ -@OpMetadata( - opType = BoostedTreesCalculateBestFeatureSplitV2.OP_NAME, - inputsClass = BoostedTreesCalculateBestFeatureSplitV2.Inputs.class -) -public final class BoostedTreesCalculateBestFeatureSplitV2 extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesCalculateBestFeatureSplitV2"; - - private Output nodeIds; - - private Output gains; - - private Output featureIds; - - private Output featureDimensions; - - private Output thresholds; - - private Output leftNodeContribs; - - private Output rightNodeContribs; - - private Output splitWithDefaultDirections; - - public BoostedTreesCalculateBestFeatureSplitV2(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - nodeIds = operation.output(outputIdx++); - gains = operation.output(outputIdx++); - featureIds = operation.output(outputIdx++); - featureDimensions = operation.output(outputIdx++); - thresholds = operation.output(outputIdx++); - leftNodeContribs = operation.output(outputIdx++); - rightNodeContribs = operation.output(outputIdx++); - splitWithDefaultDirections = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new BoostedTreesCalculateBestFeatureSplitV2 operation. - * - * @param scope current scope - * @param nodeIdRange A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within {@code stats_summary_list}. The nodes are iterated between the two nodes specified by the tensor, as like {@code for node_id in range(node_id_range[0], node_id_range[1])} (Note that the last index node_id_range[1] is exclusive). 
- * @param statsSummariesList A list of Rank 4 tensor (#shape=[max_splits, feature_dims, bucket, stats_dims]) for accumulated stats summary (gradient/hessian) per node, per dimension, per buckets for each feature. - * The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used. - * @param splitTypes A Rank 1 tensor indicating if this Op should perform inequality split or equality split per feature. - * @param candidateFeatureIds Rank 1 tensor with ids for each feature. This is the real id of the feature. - * @param l1 l1 regularization factor on leaf weights, per instance based. - * @param l2 l2 regularization factor on leaf weights, per instance based. - * @param treeComplexity adjustment to the gain, per leaf based. - * @param minNodeWeight minimum avg of hessians in a node before required for the node to be considered for splitting. - * @param logitsDimension The dimension of logit, i.e., number of classes. 
- * @return a new instance of BoostedTreesCalculateBestFeatureSplitV2 - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesCalculateBestFeatureSplitV2 create(Scope scope, - Operand nodeIdRange, Iterable> statsSummariesList, - Operand splitTypes, Operand candidateFeatureIds, Operand l1, - Operand l2, Operand treeComplexity, Operand minNodeWeight, - Long logitsDimension) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesCalculateBestFeatureSplitV2"); - opBuilder.addInput(nodeIdRange.asOutput()); - opBuilder.addInputList(Operands.asOutputs(statsSummariesList)); - opBuilder.addInput(splitTypes.asOutput()); - opBuilder.addInput(candidateFeatureIds.asOutput()); - opBuilder.addInput(l1.asOutput()); - opBuilder.addInput(l2.asOutput()); - opBuilder.addInput(treeComplexity.asOutput()); - opBuilder.addInput(minNodeWeight.asOutput()); - opBuilder.setAttr("logits_dimension", logitsDimension); - return new BoostedTreesCalculateBestFeatureSplitV2(opBuilder.build()); - } - - /** - * Gets nodeIds. - * A Rank 1 tensors indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has different size as each feature provides different possible nodes. See above for details like shapes and sizes. - * @return nodeIds. - */ - public Output nodeIds() { - return nodeIds; - } - - /** - * Gets gains. - * A Rank 1 tensor indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes. - * @return gains. - */ - public Output gains() { - return gains; - } - - /** - * Gets featureIds. - * A Rank 1 tensors indicating the best feature id for each node. See above for details like shapes and sizes. - * @return featureIds. - */ - public Output featureIds() { - return featureIds; - } - - /** - * Gets featureDimensions. - * A Rank 1 tensors indicating the best feature dimension for each feature to split for certain nodes if the feature is multi-dimension. 
See above for details like shapes and sizes. - * @return featureDimensions. - */ - public Output featureDimensions() { - return featureDimensions; - } - - /** - * Gets thresholds. - * A Rank 1 tensors indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes. - * @return thresholds. - */ - public Output thresholds() { - return thresholds; - } - - /** - * Gets leftNodeContribs. - * A Rank 2 tensors indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes. - * @return leftNodeContribs. - */ - public Output leftNodeContribs() { - return leftNodeContribs; - } - - /** - * Gets rightNodeContribs. - * A Rank 2 tensors, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node. - * @return rightNodeContribs. - */ - public Output rightNodeContribs() { - return rightNodeContribs; - } - - /** - * Gets splitWithDefaultDirections. - * A Rank 1 tensors indicating the which direction to go if data is missing. See above for details like shapes and sizes. - * Inequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2. - * @return splitWithDefaultDirections. - */ - public Output splitWithDefaultDirections() { - return splitWithDefaultDirections; - } - - @OpInputsMetadata( - outputsClass = BoostedTreesCalculateBestFeatureSplitV2.class - ) - public static class Inputs extends RawOpInputs { - /** - * A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within {@code stats_summary_list}. 
The nodes are iterated between the two nodes specified by the tensor, as like {@code for node_id in range(node_id_range[0], node_id_range[1])} (Note that the last index node_id_range[1] is exclusive). - */ - public final Operand nodeIdRange; - - /** - * A list of Rank 4 tensor (#shape=[max_splits, feature_dims, bucket, stats_dims]) for accumulated stats summary (gradient/hessian) per node, per dimension, per buckets for each feature. - * The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used. - */ - public final Iterable> statsSummariesList; - - /** - * A Rank 1 tensor indicating if this Op should perform inequality split or equality split per feature. - */ - public final Operand splitTypes; - - /** - * Rank 1 tensor with ids for each feature. This is the real id of the feature. - */ - public final Operand candidateFeatureIds; - - /** - * l1 regularization factor on leaf weights, per instance based. - */ - public final Operand l1; - - /** - * l2 regularization factor on leaf weights, per instance based. - */ - public final Operand l2; - - /** - * adjustment to the gain, per leaf based. - */ - public final Operand treeComplexity; - - /** - * minimum avg of hessians in a node before required for the node to be considered for splitting. - */ - public final Operand minNodeWeight; - - /** - * The dimension of logit, i.e., number of classes. 
- */ - public final long logitsDimension; - - public Inputs(GraphOperation op) { - super(new BoostedTreesCalculateBestFeatureSplitV2(op), op, Arrays.asList("logits_dimension")); - int inputIndex = 0; - nodeIdRange = (Operand) op.input(inputIndex++); - int statsSummariesListLength = op.inputListLength("stats_summaries_list"); - statsSummariesList = Arrays.asList((Operand[]) op.inputList(inputIndex, statsSummariesListLength)); - inputIndex += statsSummariesListLength; - splitTypes = (Operand) op.input(inputIndex++); - candidateFeatureIds = (Operand) op.input(inputIndex++); - l1 = (Operand) op.input(inputIndex++); - l2 = (Operand) op.input(inputIndex++); - treeComplexity = (Operand) op.input(inputIndex++); - minNodeWeight = (Operand) op.input(inputIndex++); - logitsDimension = op.attributes().getAttrInt("logits_dimension"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestGainsPerFeature.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestGainsPerFeature.java deleted file mode 100644 index 8b91e200217..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestGainsPerFeature.java +++ /dev/null @@ -1,215 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import java.util.List; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.Operands; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; -import org.tensorflow.types.TInt32; - -/** - * Calculates gains for each feature and returns the best possible split information for the feature. - * The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature. - *

      It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return {@code node_ids_list} for each feature, containing the list of nodes that this feature can be used to split. - *

      In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features). - *

      The length of output lists are all of the same length, {@code num_features}. - * The output shapes are compatible in a way that the first dimension of all tensors of all lists are the same and equal to the number of possible split nodes for each feature. - */ -@OpMetadata( - opType = BoostedTreesCalculateBestGainsPerFeature.OP_NAME, - inputsClass = BoostedTreesCalculateBestGainsPerFeature.Inputs.class -) -public final class BoostedTreesCalculateBestGainsPerFeature extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesCalculateBestGainsPerFeature"; - - private List> nodeIdsList; - - private List> gainsList; - - private List> thresholdsList; - - private List> leftNodeContribsList; - - private List> rightNodeContribsList; - - @SuppressWarnings("unchecked") - public BoostedTreesCalculateBestGainsPerFeature(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - int nodeIdsListLength = operation.outputListLength("node_ids_list"); - nodeIdsList = Arrays.asList((Output[]) operation.outputList(outputIdx, nodeIdsListLength)); - outputIdx += nodeIdsListLength; - int gainsListLength = operation.outputListLength("gains_list"); - gainsList = Arrays.asList((Output[]) operation.outputList(outputIdx, gainsListLength)); - outputIdx += gainsListLength; - int thresholdsListLength = operation.outputListLength("thresholds_list"); - thresholdsList = Arrays.asList((Output[]) operation.outputList(outputIdx, thresholdsListLength)); - outputIdx += thresholdsListLength; - int leftNodeContribsListLength = operation.outputListLength("left_node_contribs_list"); - leftNodeContribsList = Arrays.asList((Output[]) operation.outputList(outputIdx, leftNodeContribsListLength)); - outputIdx += leftNodeContribsListLength; - int rightNodeContribsListLength = operation.outputListLength("right_node_contribs_list"); - rightNodeContribsList = Arrays.asList((Output[]) operation.outputList(outputIdx, 
rightNodeContribsListLength)); - outputIdx += rightNodeContribsListLength; - } - - /** - * Factory method to create a class wrapping a new BoostedTreesCalculateBestGainsPerFeature operation. - * - * @param scope current scope - * @param nodeIdRange A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within {@code stats_summary_list}. The nodes are iterated between the two nodes specified by the tensor, as like {@code for node_id in range(node_id_range[0], node_id_range[1])} (Note that the last index node_id_range[1] is exclusive). - * @param statsSummaryList A list of Rank 3 tensor (#shape=[max_splits, bucket, 2]) for accumulated stats summary (gradient/hessian) per node per buckets for each feature. The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used. - * @param l1 l1 regularization factor on leaf weights, per instance based. - * @param l2 l2 regularization factor on leaf weights, per instance based. - * @param treeComplexity adjustment to the gain, per leaf based. - * @param minNodeWeight minimum avg of hessians in a node before required for the node to be considered for splitting. - * @param maxSplits the number of nodes that can be split in the whole tree. Used as a dimension of output tensors. 
- * @return a new instance of BoostedTreesCalculateBestGainsPerFeature - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesCalculateBestGainsPerFeature create(Scope scope, - Operand nodeIdRange, Iterable> statsSummaryList, - Operand l1, Operand l2, Operand treeComplexity, - Operand minNodeWeight, Long maxSplits) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesCalculateBestGainsPerFeature"); - opBuilder.addInput(nodeIdRange.asOutput()); - opBuilder.addInputList(Operands.asOutputs(statsSummaryList)); - opBuilder.addInput(l1.asOutput()); - opBuilder.addInput(l2.asOutput()); - opBuilder.addInput(treeComplexity.asOutput()); - opBuilder.addInput(minNodeWeight.asOutput()); - opBuilder.setAttr("max_splits", maxSplits); - return new BoostedTreesCalculateBestGainsPerFeature(opBuilder.build()); - } - - /** - * Gets nodeIdsList. - * An output list of Rank 1 tensors indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has different size as each feature provides different possible nodes. See above for details like shapes and sizes. - * @return nodeIdsList. - */ - public List> nodeIdsList() { - return nodeIdsList; - } - - /** - * Gets gainsList. - * An output list of Rank 1 tensors indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes. - * @return gainsList. - */ - public List> gainsList() { - return gainsList; - } - - /** - * Gets thresholdsList. - * An output list of Rank 1 tensors indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes. - * @return thresholdsList. - */ - public List> thresholdsList() { - return thresholdsList; - } - - /** - * Gets leftNodeContribsList. 
- * A list of Rank 2 tensors indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes. - * @return leftNodeContribsList. - */ - public List> leftNodeContribsList() { - return leftNodeContribsList; - } - - /** - * Gets rightNodeContribsList. - * A list of Rank 2 tensors, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node. - * @return rightNodeContribsList. - */ - public List> rightNodeContribsList() { - return rightNodeContribsList; - } - - @OpInputsMetadata( - outputsClass = BoostedTreesCalculateBestGainsPerFeature.class - ) - public static class Inputs extends RawOpInputs { - /** - * A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within {@code stats_summary_list}. The nodes are iterated between the two nodes specified by the tensor, as like {@code for node_id in range(node_id_range[0], node_id_range[1])} (Note that the last index node_id_range[1] is exclusive). - */ - public final Operand nodeIdRange; - - /** - * A list of Rank 3 tensor (#shape=[max_splits, bucket, 2]) for accumulated stats summary (gradient/hessian) per node per buckets for each feature. The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used. - */ - public final Iterable> statsSummaryList; - - /** - * l1 regularization factor on leaf weights, per instance based. - */ - public final Operand l1; - - /** - * l2 regularization factor on leaf weights, per instance based. 
- */ - public final Operand l2; - - /** - * adjustment to the gain, per leaf based. - */ - public final Operand treeComplexity; - - /** - * minimum avg of hessians in a node before required for the node to be considered for splitting. - */ - public final Operand minNodeWeight; - - /** - * the number of nodes that can be split in the whole tree. Used as a dimension of output tensors. - */ - public final long maxSplits; - - public Inputs(GraphOperation op) { - super(new BoostedTreesCalculateBestGainsPerFeature(op), op, Arrays.asList("max_splits")); - int inputIndex = 0; - nodeIdRange = (Operand) op.input(inputIndex++); - int statsSummaryListLength = op.inputListLength("stats_summary_list"); - statsSummaryList = Arrays.asList((Operand[]) op.inputList(inputIndex, statsSummaryListLength)); - inputIndex += statsSummaryListLength; - l1 = (Operand) op.input(inputIndex++); - l2 = (Operand) op.input(inputIndex++); - treeComplexity = (Operand) op.input(inputIndex++); - minNodeWeight = (Operand) op.input(inputIndex++); - maxSplits = op.attributes().getAttrInt("max_splits"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCenterBias.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCenterBias.java deleted file mode 100644 index 9c4073ab5ee..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCenterBias.java +++ /dev/null @@ -1,136 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TBool; -import org.tensorflow.types.TFloat32; -import org.tensorflow.types.family.TType; - -/** - * Calculates the prior from the training data (the bias) and fills in the first node with the logits' prior. Returns a boolean indicating whether to continue centering. - */ -@OpMetadata( - opType = BoostedTreesCenterBias.OP_NAME, - inputsClass = BoostedTreesCenterBias.Inputs.class -) -public final class BoostedTreesCenterBias extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesCenterBias"; - - private Output continueCentering; - - public BoostedTreesCenterBias(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - continueCentering = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new BoostedTreesCenterBias operation. 
- * - * @param scope current scope - * @param treeEnsembleHandle Handle to the tree ensemble. - * @param meanGradients A tensor with shape=[logits_dimension] with mean of gradients for a first node. - * @param meanHessians A tensor with shape=[logits_dimension] mean of hessians for a first node. - * @param l1 l1 regularization factor on leaf weights, per instance based. - * @param l2 l2 regularization factor on leaf weights, per instance based. - * @return a new instance of BoostedTreesCenterBias - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesCenterBias create(Scope scope, - Operand treeEnsembleHandle, Operand meanGradients, - Operand meanHessians, Operand l1, Operand l2) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesCenterBias"); - opBuilder.addInput(treeEnsembleHandle.asOutput()); - opBuilder.addInput(meanGradients.asOutput()); - opBuilder.addInput(meanHessians.asOutput()); - opBuilder.addInput(l1.asOutput()); - opBuilder.addInput(l2.asOutput()); - return new BoostedTreesCenterBias(opBuilder.build()); - } - - /** - * Gets continueCentering. - * Bool, whether to continue bias centering. - * @return continueCentering. - */ - public Output continueCentering() { - return continueCentering; - } - - @Override - public Output asOutput() { - return continueCentering; - } - - @OpInputsMetadata( - outputsClass = BoostedTreesCenterBias.class - ) - public static class Inputs extends RawOpInputs { - /** - * Handle to the tree ensemble. - */ - public final Operand treeEnsembleHandle; - - /** - * A tensor with shape=[logits_dimension] with mean of gradients for a first node. - */ - public final Operand meanGradients; - - /** - * A tensor with shape=[logits_dimension] mean of hessians for a first node. - */ - public final Operand meanHessians; - - /** - * l1 regularization factor on leaf weights, per instance based. - */ - public final Operand l1; - - /** - * l2 regularization factor on leaf weights, per instance based. 
- */ - public final Operand l2; - - public Inputs(GraphOperation op) { - super(new BoostedTreesCenterBias(op), op, Arrays.asList()); - int inputIndex = 0; - treeEnsembleHandle = (Operand) op.input(inputIndex++); - meanGradients = (Operand) op.input(inputIndex++); - meanHessians = (Operand) op.input(inputIndex++); - l1 = (Operand) op.input(inputIndex++); - l2 = (Operand) op.input(inputIndex++); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCreateEnsemble.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCreateEnsemble.java deleted file mode 100644 index 63bfe3ad920..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCreateEnsemble.java +++ /dev/null @@ -1,101 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TInt64; -import org.tensorflow.types.TString; -import org.tensorflow.types.family.TType; - -/** - * Creates a tree ensemble model and returns a handle to it. - */ -@OpMetadata( - opType = BoostedTreesCreateEnsemble.OP_NAME, - inputsClass = BoostedTreesCreateEnsemble.Inputs.class -) -public final class BoostedTreesCreateEnsemble extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesCreateEnsemble"; - - public BoostedTreesCreateEnsemble(Operation operation) { - super(operation, OP_NAME); - } - - /** - * Factory method to create a class wrapping a new BoostedTreesCreateEnsemble operation. - * - * @param scope current scope - * @param treeEnsembleHandle Handle to the tree ensemble resource to be created. - * @param stampToken Token to use as the initial value of the resource stamp. - * @param treeEnsembleSerialized Serialized proto of the tree ensemble. 
- * @return a new instance of BoostedTreesCreateEnsemble - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesCreateEnsemble create(Scope scope, - Operand treeEnsembleHandle, Operand stampToken, - Operand treeEnsembleSerialized) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesCreateEnsemble"); - opBuilder.addInput(treeEnsembleHandle.asOutput()); - opBuilder.addInput(stampToken.asOutput()); - opBuilder.addInput(treeEnsembleSerialized.asOutput()); - return new BoostedTreesCreateEnsemble(opBuilder.build()); - } - - @OpInputsMetadata( - outputsClass = BoostedTreesCreateEnsemble.class - ) - public static class Inputs extends RawOpInputs { - /** - * Handle to the tree ensemble resource to be created. - */ - public final Operand treeEnsembleHandle; - - /** - * Token to use as the initial value of the resource stamp. - */ - public final Operand stampToken; - - /** - * Serialized proto of the tree ensemble. - */ - public final Operand treeEnsembleSerialized; - - public Inputs(GraphOperation op) { - super(new BoostedTreesCreateEnsemble(op), op, Arrays.asList()); - int inputIndex = 0; - treeEnsembleHandle = (Operand) op.input(inputIndex++); - stampToken = (Operand) op.input(inputIndex++); - treeEnsembleSerialized = (Operand) op.input(inputIndex++); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCreateQuantileStreamResource.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCreateQuantileStreamResource.java deleted file mode 100644 index 06f3c56e7cb..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCreateQuantileStreamResource.java +++ /dev/null @@ -1,146 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; -import org.tensorflow.types.TInt64; -import org.tensorflow.types.family.TType; - -/** - * Create the Resource for Quantile Streams. - */ -@OpMetadata( - opType = BoostedTreesCreateQuantileStreamResource.OP_NAME, - inputsClass = BoostedTreesCreateQuantileStreamResource.Inputs.class -) -public final class BoostedTreesCreateQuantileStreamResource extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesCreateQuantileStreamResource"; - - public BoostedTreesCreateQuantileStreamResource(Operation operation) { - super(operation, OP_NAME); - } - - /** - * Factory method to create a class wrapping a new BoostedTreesCreateQuantileStreamResource operation. - * - * @param scope current scope - * @param quantileStreamResourceHandle resource; Handle to quantile stream resource. - * @param epsilon float; The required approximation error of the stream resource. 
- * @param numStreams int; The number of streams managed by the resource that shares the same epsilon. - * @param options carries optional attribute values - * @return a new instance of BoostedTreesCreateQuantileStreamResource - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesCreateQuantileStreamResource create(Scope scope, - Operand quantileStreamResourceHandle, Operand epsilon, - Operand numStreams, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesCreateQuantileStreamResource"); - opBuilder.addInput(quantileStreamResourceHandle.asOutput()); - opBuilder.addInput(epsilon.asOutput()); - opBuilder.addInput(numStreams.asOutput()); - if (options != null) { - for (Options opts : options) { - if (opts.maxElements != null) { - opBuilder.setAttr("max_elements", opts.maxElements); - } - } - } - return new BoostedTreesCreateQuantileStreamResource(opBuilder.build()); - } - - /** - * Sets the maxElements option. - * - * @param maxElements int; The maximum number of data points that can be fed to the stream. - * @return this Options instance. - */ - public static Options maxElements(Long maxElements) { - return new Options().maxElements(maxElements); - } - - /** - * Optional attributes for {@link org.tensorflow.op.estimator.BoostedTreesCreateQuantileStreamResource} - */ - public static class Options { - private Long maxElements; - - private Options() { - } - - /** - * Sets the maxElements option. - * - * @param maxElements int; The maximum number of data points that can be fed to the stream. - * @return this Options instance. - */ - public Options maxElements(Long maxElements) { - this.maxElements = maxElements; - return this; - } - } - - @OpInputsMetadata( - outputsClass = BoostedTreesCreateQuantileStreamResource.class - ) - public static class Inputs extends RawOpInputs { - /** - * resource; Handle to quantile stream resource. 
- */ - public final Operand quantileStreamResourceHandle; - - /** - * float; The required approximation error of the stream resource. - */ - public final Operand epsilon; - - /** - * int; The number of streams managed by the resource that shares the same epsilon. - */ - public final Operand numStreams; - - /** - * int; The maximum number of data points that can be fed to the stream. - */ - public final long maxElements; - - public Inputs(GraphOperation op) { - super(new BoostedTreesCreateQuantileStreamResource(op), op, Arrays.asList("max_elements")); - int inputIndex = 0; - quantileStreamResourceHandle = (Operand) op.input(inputIndex++); - epsilon = (Operand) op.input(inputIndex++); - numStreams = (Operand) op.input(inputIndex++); - maxElements = op.attributes().getAttrInt("max_elements"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesDeserializeEnsemble.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesDeserializeEnsemble.java deleted file mode 100644 index 371500c3064..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesDeserializeEnsemble.java +++ /dev/null @@ -1,102 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TInt64; -import org.tensorflow.types.TString; -import org.tensorflow.types.family.TType; - -/** - * Deserializes a serialized tree ensemble config and replaces current tree - * ensemble. - */ -@OpMetadata( - opType = BoostedTreesDeserializeEnsemble.OP_NAME, - inputsClass = BoostedTreesDeserializeEnsemble.Inputs.class -) -public final class BoostedTreesDeserializeEnsemble extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesDeserializeEnsemble"; - - public BoostedTreesDeserializeEnsemble(Operation operation) { - super(operation, OP_NAME); - } - - /** - * Factory method to create a class wrapping a new BoostedTreesDeserializeEnsemble operation. - * - * @param scope current scope - * @param treeEnsembleHandle Handle to the tree ensemble. - * @param stampToken Token to use as the new value of the resource stamp. - * @param treeEnsembleSerialized Serialized proto of the ensemble. 
- * @return a new instance of BoostedTreesDeserializeEnsemble - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesDeserializeEnsemble create(Scope scope, - Operand treeEnsembleHandle, Operand stampToken, - Operand treeEnsembleSerialized) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesDeserializeEnsemble"); - opBuilder.addInput(treeEnsembleHandle.asOutput()); - opBuilder.addInput(stampToken.asOutput()); - opBuilder.addInput(treeEnsembleSerialized.asOutput()); - return new BoostedTreesDeserializeEnsemble(opBuilder.build()); - } - - @OpInputsMetadata( - outputsClass = BoostedTreesDeserializeEnsemble.class - ) - public static class Inputs extends RawOpInputs { - /** - * Handle to the tree ensemble. - */ - public final Operand treeEnsembleHandle; - - /** - * Token to use as the new value of the resource stamp. - */ - public final Operand stampToken; - - /** - * Serialized proto of the ensemble. - */ - public final Operand treeEnsembleSerialized; - - public Inputs(GraphOperation op) { - super(new BoostedTreesDeserializeEnsemble(op), op, Arrays.asList()); - int inputIndex = 0; - treeEnsembleHandle = (Operand) op.input(inputIndex++); - stampToken = (Operand) op.input(inputIndex++); - treeEnsembleSerialized = (Operand) op.input(inputIndex++); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesEnsembleResourceHandleOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesEnsembleResourceHandleOp.java deleted file mode 100644 index 6afffd3a1ec..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesEnsembleResourceHandleOp.java +++ /dev/null @@ -1,171 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.family.TType; - -/** - * Creates a handle to a BoostedTreesEnsembleResource - */ -@OpMetadata( - opType = BoostedTreesEnsembleResourceHandleOp.OP_NAME, - inputsClass = BoostedTreesEnsembleResourceHandleOp.Inputs.class -) -public final class BoostedTreesEnsembleResourceHandleOp extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesEnsembleResourceHandleOp"; - - private Output resource; - - @SuppressWarnings("unchecked") - public BoostedTreesEnsembleResourceHandleOp(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - resource = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new BoostedTreesEnsembleResourceHandleOp operation. 
- * - * @param scope current scope - * @param options carries optional attribute values - * @return a new instance of BoostedTreesEnsembleResourceHandleOp - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesEnsembleResourceHandleOp create(Scope scope, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesEnsembleResourceHandleOp"); - if (options != null) { - for (Options opts : options) { - if (opts.container != null) { - opBuilder.setAttr("container", opts.container); - } - if (opts.sharedName != null) { - opBuilder.setAttr("shared_name", opts.sharedName); - } - } - } - return new BoostedTreesEnsembleResourceHandleOp(opBuilder.build()); - } - - /** - * Sets the container option. - * - * @param container the container option - * @return this Options instance. - */ - public static Options container(String container) { - return new Options().container(container); - } - - /** - * Sets the sharedName option. - * - * @param sharedName the sharedName option - * @return this Options instance. - */ - public static Options sharedName(String sharedName) { - return new Options().sharedName(sharedName); - } - - /** - * Gets resource. - * - * @return resource. - */ - public Output resource() { - return resource; - } - - @Override - @SuppressWarnings("unchecked") - public Output asOutput() { - return (Output) resource; - } - - /** - * Optional attributes for {@link org.tensorflow.op.estimator.BoostedTreesEnsembleResourceHandleOp} - */ - public static class Options { - private String container; - - private String sharedName; - - private Options() { - } - - /** - * Sets the container option. - * - * @param container the container option - * @return this Options instance. - */ - public Options container(String container) { - this.container = container; - return this; - } - - /** - * Sets the sharedName option. - * - * @param sharedName the sharedName option - * @return this Options instance. 
- */ - public Options sharedName(String sharedName) { - this.sharedName = sharedName; - return this; - } - } - - @OpInputsMetadata( - outputsClass = BoostedTreesEnsembleResourceHandleOp.class - ) - public static class Inputs extends RawOpInputs { - /** - * The container attribute - */ - public final String container; - - /** - * The sharedName attribute - */ - public final String sharedName; - - public Inputs(GraphOperation op) { - super(new BoostedTreesEnsembleResourceHandleOp(op), op, Arrays.asList("container", "shared_name")); - int inputIndex = 0; - container = op.attributes().getAttrString("container"); - sharedName = op.attributes().getAttrString("shared_name"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesExampleDebugOutputs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesExampleDebugOutputs.java deleted file mode 100644 index 06ee048d9f5..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesExampleDebugOutputs.java +++ /dev/null @@ -1,130 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.Operands; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TInt32; -import org.tensorflow.types.TString; -import org.tensorflow.types.family.TType; - -/** - * Debugging/model interpretability outputs for each example. - * It traverses all the trees and computes debug metrics for individual examples, - * such as getting split feature ids and logits after each split along the decision - * path used to compute directional feature contributions. - */ -@OpMetadata( - opType = BoostedTreesExampleDebugOutputs.OP_NAME, - inputsClass = BoostedTreesExampleDebugOutputs.Inputs.class -) -public final class BoostedTreesExampleDebugOutputs extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesExampleDebugOutputs"; - - private Output examplesDebugOutputsSerialized; - - public BoostedTreesExampleDebugOutputs(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - examplesDebugOutputsSerialized = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new BoostedTreesExampleDebugOutputs operation. - * - * @param scope current scope - * @param treeEnsembleHandle The treeEnsembleHandle value - * @param bucketizedFeatures A list of rank 1 Tensors containing bucket id for each - * feature. - * @param logitsDimension scalar, dimension of the logits, to be used for constructing the protos in - * examples_debug_outputs_serialized. 
- * @return a new instance of BoostedTreesExampleDebugOutputs - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesExampleDebugOutputs create(Scope scope, - Operand treeEnsembleHandle, Iterable> bucketizedFeatures, - Long logitsDimension) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesExampleDebugOutputs"); - opBuilder.addInput(treeEnsembleHandle.asOutput()); - opBuilder.addInputList(Operands.asOutputs(bucketizedFeatures)); - opBuilder.setAttr("logits_dimension", logitsDimension); - return new BoostedTreesExampleDebugOutputs(opBuilder.build()); - } - - /** - * Gets examplesDebugOutputsSerialized. - * Output rank 1 Tensor containing a proto serialized as a string for each example. - * @return examplesDebugOutputsSerialized. - */ - public Output examplesDebugOutputsSerialized() { - return examplesDebugOutputsSerialized; - } - - @Override - public Output asOutput() { - return examplesDebugOutputsSerialized; - } - - @OpInputsMetadata( - outputsClass = BoostedTreesExampleDebugOutputs.class - ) - public static class Inputs extends RawOpInputs { - /** - * The treeEnsembleHandle input - */ - public final Operand treeEnsembleHandle; - - /** - * A list of rank 1 Tensors containing bucket id for each - * feature. - */ - public final Iterable> bucketizedFeatures; - - /** - * scalar, dimension of the logits, to be used for constructing the protos in - * examples_debug_outputs_serialized. 
- */ - public final long logitsDimension; - - public Inputs(GraphOperation op) { - super(new BoostedTreesExampleDebugOutputs(op), op, Arrays.asList("logits_dimension")); - int inputIndex = 0; - treeEnsembleHandle = (Operand) op.input(inputIndex++); - int bucketizedFeaturesLength = op.inputListLength("bucketized_features"); - bucketizedFeatures = Arrays.asList((Operand[]) op.inputList(inputIndex, bucketizedFeaturesLength)); - inputIndex += bucketizedFeaturesLength; - logitsDimension = op.attributes().getAttrInt("logits_dimension"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesFlushQuantileSummaries.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesFlushQuantileSummaries.java deleted file mode 100644 index 3a523c29430..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesFlushQuantileSummaries.java +++ /dev/null @@ -1,113 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; -import org.tensorflow.types.family.TType; - -/** - * Flush the quantile summaries from each quantile stream resource. - * An op that outputs a list of quantile summaries of a quantile stream resource. - * Each summary Tensor is rank 2, containing summaries (value, weight, min_rank, - * max_rank) for a single feature. - */ -@OpMetadata( - opType = BoostedTreesFlushQuantileSummaries.OP_NAME, - inputsClass = BoostedTreesFlushQuantileSummaries.Inputs.class -) -public final class BoostedTreesFlushQuantileSummaries extends RawOp implements Iterable> { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesFlushQuantileSummaries"; - - private List> summaries; - - @SuppressWarnings("unchecked") - public BoostedTreesFlushQuantileSummaries(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - int summariesLength = operation.outputListLength("summaries"); - summaries = Arrays.asList((Output[]) operation.outputList(outputIdx, summariesLength)); - outputIdx += summariesLength; - } - - /** - * Factory method to create a class wrapping a new BoostedTreesFlushQuantileSummaries operation. - * - * @param scope current scope - * @param quantileStreamResourceHandle resource handle referring to a QuantileStreamResource. 
- * @param numFeatures The value of the numFeatures attribute - * @return a new instance of BoostedTreesFlushQuantileSummaries - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesFlushQuantileSummaries create(Scope scope, - Operand quantileStreamResourceHandle, Long numFeatures) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesFlushQuantileSummaries"); - opBuilder.addInput(quantileStreamResourceHandle.asOutput()); - opBuilder.setAttr("num_features", numFeatures); - return new BoostedTreesFlushQuantileSummaries(opBuilder.build()); - } - - /** - * Gets summaries. - * - * @return summaries. - */ - public List> summaries() { - return summaries; - } - - @Override - @SuppressWarnings({"rawtypes", "unchecked"}) - public Iterator> iterator() { - return (Iterator) summaries.iterator(); - } - - @OpInputsMetadata( - outputsClass = BoostedTreesFlushQuantileSummaries.class - ) - public static class Inputs extends RawOpInputs { - /** - * resource handle referring to a QuantileStreamResource. - */ - public final Operand quantileStreamResourceHandle; - - public Inputs(GraphOperation op) { - super(new BoostedTreesFlushQuantileSummaries(op), op, Arrays.asList()); - int inputIndex = 0; - quantileStreamResourceHandle = (Operand) op.input(inputIndex++); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesGetEnsembleStates.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesGetEnsembleStates.java deleted file mode 100644 index 09d0bdeedd1..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesGetEnsembleStates.java +++ /dev/null @@ -1,147 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TInt32; -import org.tensorflow.types.TInt64; -import org.tensorflow.types.family.TType; - -/** - * Retrieves the tree ensemble resource stamp token, number of trees and growing statistics. 
- */ -@OpMetadata( - opType = BoostedTreesGetEnsembleStates.OP_NAME, - inputsClass = BoostedTreesGetEnsembleStates.Inputs.class -) -public final class BoostedTreesGetEnsembleStates extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesGetEnsembleStates"; - - private Output stampToken; - - private Output numTrees; - - private Output numFinalizedTrees; - - private Output numAttemptedLayers; - - private Output lastLayerNodesRange; - - public BoostedTreesGetEnsembleStates(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - stampToken = operation.output(outputIdx++); - numTrees = operation.output(outputIdx++); - numFinalizedTrees = operation.output(outputIdx++); - numAttemptedLayers = operation.output(outputIdx++); - lastLayerNodesRange = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new BoostedTreesGetEnsembleStates operation. - * - * @param scope current scope - * @param treeEnsembleHandle Handle to the tree ensemble. - * @return a new instance of BoostedTreesGetEnsembleStates - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesGetEnsembleStates create(Scope scope, - Operand treeEnsembleHandle) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesGetEnsembleStates"); - opBuilder.addInput(treeEnsembleHandle.asOutput()); - return new BoostedTreesGetEnsembleStates(opBuilder.build()); - } - - /** - * Gets stampToken. - * Stamp token of the tree ensemble resource. - * @return stampToken. - */ - public Output stampToken() { - return stampToken; - } - - /** - * Gets numTrees. - * The number of trees in the tree ensemble resource. - * @return numTrees. - */ - public Output numTrees() { - return numTrees; - } - - /** - * Gets numFinalizedTrees. - * The number of trees that were finished successfully. - * @return numFinalizedTrees. 
- */ - public Output numFinalizedTrees() { - return numFinalizedTrees; - } - - /** - * Gets numAttemptedLayers. - * The number of layers we attempted to build (but not necessarily succeeded). - * @return numAttemptedLayers. - */ - public Output numAttemptedLayers() { - return numAttemptedLayers; - } - - /** - * Gets lastLayerNodesRange. - * Rank size 2 tensor that contains start and end ids of the nodes in the latest - * layer. - * @return lastLayerNodesRange. - */ - public Output lastLayerNodesRange() { - return lastLayerNodesRange; - } - - @OpInputsMetadata( - outputsClass = BoostedTreesGetEnsembleStates.class - ) - public static class Inputs extends RawOpInputs { - /** - * Handle to the tree ensemble. - */ - public final Operand treeEnsembleHandle; - - public Inputs(GraphOperation op) { - super(new BoostedTreesGetEnsembleStates(op), op, Arrays.asList()); - int inputIndex = 0; - treeEnsembleHandle = (Operand) op.input(inputIndex++); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesMakeQuantileSummaries.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesMakeQuantileSummaries.java deleted file mode 100644 index 27c220672c3..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesMakeQuantileSummaries.java +++ /dev/null @@ -1,130 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.Operands; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; - -/** - * Makes the summary of quantiles for the batch. - * An op that takes a list of tensors (one tensor per feature) and outputs the - * quantile summaries for each tensor. - */ -@OpMetadata( - opType = BoostedTreesMakeQuantileSummaries.OP_NAME, - inputsClass = BoostedTreesMakeQuantileSummaries.Inputs.class -) -public final class BoostedTreesMakeQuantileSummaries extends RawOp implements Iterable> { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesMakeQuantileSummaries"; - - private List> summaries; - - @SuppressWarnings("unchecked") - public BoostedTreesMakeQuantileSummaries(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - int summariesLength = operation.outputListLength("summaries"); - summaries = Arrays.asList((Output[]) operation.outputList(outputIdx, summariesLength)); - outputIdx += summariesLength; - } - - /** - * Factory method to create a class wrapping a new BoostedTreesMakeQuantileSummaries operation. 
- * - * @param scope current scope - * @param floatValues float; List of Rank 1 Tensors each containing values for a single feature. - * @param exampleWeights float; Rank 1 Tensor with weights per instance. - * @param epsilon float; The required maximum approximation error. - * @return a new instance of BoostedTreesMakeQuantileSummaries - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesMakeQuantileSummaries create(Scope scope, - Iterable> floatValues, Operand exampleWeights, - Operand epsilon) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesMakeQuantileSummaries"); - opBuilder.addInputList(Operands.asOutputs(floatValues)); - opBuilder.addInput(exampleWeights.asOutput()); - opBuilder.addInput(epsilon.asOutput()); - return new BoostedTreesMakeQuantileSummaries(opBuilder.build()); - } - - /** - * Gets summaries. - * float; List of Rank 2 Tensors each containing the quantile summary - * (value, weight, min_rank, max_rank) of a single feature. - * @return summaries. - */ - public List> summaries() { - return summaries; - } - - @Override - @SuppressWarnings({"rawtypes", "unchecked"}) - public Iterator> iterator() { - return (Iterator) summaries.iterator(); - } - - @OpInputsMetadata( - outputsClass = BoostedTreesMakeQuantileSummaries.class - ) - public static class Inputs extends RawOpInputs { - /** - * float; List of Rank 1 Tensors each containing values for a single feature. - */ - public final Iterable> floatValues; - - /** - * float; Rank 1 Tensor with weights per instance. - */ - public final Operand exampleWeights; - - /** - * float; The required maximum approximation error. 
- */ - public final Operand epsilon; - - public Inputs(GraphOperation op) { - super(new BoostedTreesMakeQuantileSummaries(op), op, Arrays.asList()); - int inputIndex = 0; - int floatValuesLength = op.inputListLength("float_values"); - floatValues = Arrays.asList((Operand[]) op.inputList(inputIndex, floatValuesLength)); - inputIndex += floatValuesLength; - exampleWeights = (Operand) op.input(inputIndex++); - epsilon = (Operand) op.input(inputIndex++); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesMakeStatsSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesMakeStatsSummary.java deleted file mode 100644 index 4516dfdc8b1..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesMakeStatsSummary.java +++ /dev/null @@ -1,147 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.Operands; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; -import org.tensorflow.types.TInt32; - -/** - * Makes the summary of accumulated stats for the batch. - * The summary stats contains gradients and hessians accumulated into the corresponding node and bucket for each example. - */ -@OpMetadata( - opType = BoostedTreesMakeStatsSummary.OP_NAME, - inputsClass = BoostedTreesMakeStatsSummary.Inputs.class -) -public final class BoostedTreesMakeStatsSummary extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesMakeStatsSummary"; - - private Output statsSummary; - - public BoostedTreesMakeStatsSummary(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - statsSummary = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new BoostedTreesMakeStatsSummary operation. - * - * @param scope current scope - * @param nodeIds int32 Rank 1 Tensor containing node ids, which each example falls into for the requested layer. - * @param gradients float32; Rank 2 Tensor (shape=[#examples, 1]) for gradients. - * @param hessians float32; Rank 2 Tensor (shape=[#examples, 1]) for hessians. - * @param bucketizedFeaturesList int32 list of Rank 1 Tensors, each containing the bucketized feature (for each feature column). - * @param maxSplits int; the maximum number of splits possible in the whole tree. 
- * @param numBuckets int; equals to the maximum possible value of bucketized feature. - * @return a new instance of BoostedTreesMakeStatsSummary - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesMakeStatsSummary create(Scope scope, Operand nodeIds, - Operand gradients, Operand hessians, - Iterable> bucketizedFeaturesList, Long maxSplits, Long numBuckets) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesMakeStatsSummary"); - opBuilder.addInput(nodeIds.asOutput()); - opBuilder.addInput(gradients.asOutput()); - opBuilder.addInput(hessians.asOutput()); - opBuilder.addInputList(Operands.asOutputs(bucketizedFeaturesList)); - opBuilder.setAttr("max_splits", maxSplits); - opBuilder.setAttr("num_buckets", numBuckets); - return new BoostedTreesMakeStatsSummary(opBuilder.build()); - } - - /** - * Gets statsSummary. - * output Rank 4 Tensor (shape=[#features, #splits, #buckets, 2]) containing accumulated stats put into the corresponding node and bucket. The first index of 4th dimension refers to gradients, and the second to hessians. - * @return statsSummary. - */ - public Output statsSummary() { - return statsSummary; - } - - @Override - public Output asOutput() { - return statsSummary; - } - - @OpInputsMetadata( - outputsClass = BoostedTreesMakeStatsSummary.class - ) - public static class Inputs extends RawOpInputs { - /** - * int32 Rank 1 Tensor containing node ids, which each example falls into for the requested layer. - */ - public final Operand nodeIds; - - /** - * float32; Rank 2 Tensor (shape=[#examples, 1]) for gradients. - */ - public final Operand gradients; - - /** - * float32; Rank 2 Tensor (shape=[#examples, 1]) for hessians. - */ - public final Operand hessians; - - /** - * int32 list of Rank 1 Tensors, each containing the bucketized feature (for each feature column). - */ - public final Iterable> bucketizedFeaturesList; - - /** - * int; the maximum number of splits possible in the whole tree. 
- */ - public final long maxSplits; - - /** - * int; equals to the maximum possible value of bucketized feature. - */ - public final long numBuckets; - - public Inputs(GraphOperation op) { - super(new BoostedTreesMakeStatsSummary(op), op, Arrays.asList("max_splits", "num_buckets")); - int inputIndex = 0; - nodeIds = (Operand) op.input(inputIndex++); - gradients = (Operand) op.input(inputIndex++); - hessians = (Operand) op.input(inputIndex++); - int bucketizedFeaturesListLength = op.inputListLength("bucketized_features_list"); - bucketizedFeaturesList = Arrays.asList((Operand[]) op.inputList(inputIndex, bucketizedFeaturesListLength)); - inputIndex += bucketizedFeaturesListLength; - maxSplits = op.attributes().getAttrInt("max_splits"); - numBuckets = op.attributes().getAttrInt("num_buckets"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesPredict.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesPredict.java deleted file mode 100644 index a39b3ad733b..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesPredict.java +++ /dev/null @@ -1,128 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.Operands; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; -import org.tensorflow.types.TInt32; -import org.tensorflow.types.family.TType; - -/** - * Runs multiple additive regression ensemble predictors on input instances and - * computes the logits. It is designed to be used during prediction. - * It traverses all the trees and calculates the final score for each instance. - */ -@OpMetadata( - opType = BoostedTreesPredict.OP_NAME, - inputsClass = BoostedTreesPredict.Inputs.class -) -public final class BoostedTreesPredict extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesPredict"; - - private Output logits; - - public BoostedTreesPredict(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - logits = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new BoostedTreesPredict operation. - * - * @param scope current scope - * @param treeEnsembleHandle The treeEnsembleHandle value - * @param bucketizedFeatures A list of rank 1 Tensors containing bucket id for each - * feature. - * @param logitsDimension scalar, dimension of the logits, to be used for partial logits - * shape. 
- * @return a new instance of BoostedTreesPredict - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesPredict create(Scope scope, Operand treeEnsembleHandle, - Iterable> bucketizedFeatures, Long logitsDimension) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesPredict"); - opBuilder.addInput(treeEnsembleHandle.asOutput()); - opBuilder.addInputList(Operands.asOutputs(bucketizedFeatures)); - opBuilder.setAttr("logits_dimension", logitsDimension); - return new BoostedTreesPredict(opBuilder.build()); - } - - /** - * Gets logits. - * Output rank 2 Tensor containing logits for each example. - * @return logits. - */ - public Output logits() { - return logits; - } - - @Override - public Output asOutput() { - return logits; - } - - @OpInputsMetadata( - outputsClass = BoostedTreesPredict.class - ) - public static class Inputs extends RawOpInputs { - /** - * The treeEnsembleHandle input - */ - public final Operand treeEnsembleHandle; - - /** - * A list of rank 1 Tensors containing bucket id for each - * feature. - */ - public final Iterable> bucketizedFeatures; - - /** - * scalar, dimension of the logits, to be used for partial logits - * shape. 
- */ - public final long logitsDimension; - - public Inputs(GraphOperation op) { - super(new BoostedTreesPredict(op), op, Arrays.asList("logits_dimension")); - int inputIndex = 0; - treeEnsembleHandle = (Operand) op.input(inputIndex++); - int bucketizedFeaturesLength = op.inputListLength("bucketized_features"); - bucketizedFeatures = Arrays.asList((Operand[]) op.inputList(inputIndex, bucketizedFeaturesLength)); - inputIndex += bucketizedFeaturesLength; - logitsDimension = op.attributes().getAttrInt("logits_dimension"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceAddSummaries.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceAddSummaries.java deleted file mode 100644 index f122432eef1..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceAddSummaries.java +++ /dev/null @@ -1,98 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.op.Operands; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; -import org.tensorflow.types.family.TType; - -/** - * Add the quantile summaries to each quantile stream resource. - * An op that adds a list of quantile summaries to a quantile stream resource. Each - * summary Tensor is rank 2, containing summaries (value, weight, min_rank, max_rank) - * for a single feature. - */ -@OpMetadata( - opType = BoostedTreesQuantileStreamResourceAddSummaries.OP_NAME, - inputsClass = BoostedTreesQuantileStreamResourceAddSummaries.Inputs.class -) -public final class BoostedTreesQuantileStreamResourceAddSummaries extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesQuantileStreamResourceAddSummaries"; - - public BoostedTreesQuantileStreamResourceAddSummaries(Operation operation) { - super(operation, OP_NAME); - } - - /** - * Factory method to create a class wrapping a new BoostedTreesQuantileStreamResourceAddSummaries operation. - * - * @param scope current scope - * @param quantileStreamResourceHandle resource handle referring to a QuantileStreamResource. - * @param summaries string; List of Rank 2 Tensor each containing the summaries for a single feature. 
- * @return a new instance of BoostedTreesQuantileStreamResourceAddSummaries - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesQuantileStreamResourceAddSummaries create(Scope scope, - Operand quantileStreamResourceHandle, - Iterable> summaries) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesQuantileStreamResourceAddSummaries"); - opBuilder.addInput(quantileStreamResourceHandle.asOutput()); - opBuilder.addInputList(Operands.asOutputs(summaries)); - return new BoostedTreesQuantileStreamResourceAddSummaries(opBuilder.build()); - } - - @OpInputsMetadata( - outputsClass = BoostedTreesQuantileStreamResourceAddSummaries.class - ) - public static class Inputs extends RawOpInputs { - /** - * resource handle referring to a QuantileStreamResource. - */ - public final Operand quantileStreamResourceHandle; - - /** - * string; List of Rank 2 Tensor each containing the summaries for a single feature. - */ - public final Iterable> summaries; - - public Inputs(GraphOperation op) { - super(new BoostedTreesQuantileStreamResourceAddSummaries(op), op, Arrays.asList()); - int inputIndex = 0; - quantileStreamResourceHandle = (Operand) op.input(inputIndex++); - int summariesLength = op.inputListLength("summaries"); - summaries = Arrays.asList((Operand[]) op.inputList(inputIndex, summariesLength)); - inputIndex += summariesLength; - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceDeserialize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceDeserialize.java deleted file mode 100644 index 691d81c3951..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceDeserialize.java +++ /dev/null @@ -1,96 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.op.Operands; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; -import org.tensorflow.types.family.TType; - -/** - * Deserialize bucket boundaries and ready flag into current QuantileAccumulator. - * An op that deserializes bucket boundaries and are boundaries ready flag into current QuantileAccumulator. 
- */ -@OpMetadata( - opType = BoostedTreesQuantileStreamResourceDeserialize.OP_NAME, - inputsClass = BoostedTreesQuantileStreamResourceDeserialize.Inputs.class -) -public final class BoostedTreesQuantileStreamResourceDeserialize extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesQuantileStreamResourceDeserialize"; - - public BoostedTreesQuantileStreamResourceDeserialize(Operation operation) { - super(operation, OP_NAME); - } - - /** - * Factory method to create a class wrapping a new BoostedTreesQuantileStreamResourceDeserialize operation. - * - * @param scope current scope - * @param quantileStreamResourceHandle resource handle referring to a QuantileStreamResource. - * @param bucketBoundaries float; List of Rank 1 Tensors each containing the bucket boundaries for a feature. - * @return a new instance of BoostedTreesQuantileStreamResourceDeserialize - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesQuantileStreamResourceDeserialize create(Scope scope, - Operand quantileStreamResourceHandle, - Iterable> bucketBoundaries) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesQuantileStreamResourceDeserialize"); - opBuilder.addInput(quantileStreamResourceHandle.asOutput()); - opBuilder.addInputList(Operands.asOutputs(bucketBoundaries)); - return new BoostedTreesQuantileStreamResourceDeserialize(opBuilder.build()); - } - - @OpInputsMetadata( - outputsClass = BoostedTreesQuantileStreamResourceDeserialize.class - ) - public static class Inputs extends RawOpInputs { - /** - * resource handle referring to a QuantileStreamResource. - */ - public final Operand quantileStreamResourceHandle; - - /** - * float; List of Rank 1 Tensors each containing the bucket boundaries for a feature. 
- */ - public final Iterable> bucketBoundaries; - - public Inputs(GraphOperation op) { - super(new BoostedTreesQuantileStreamResourceDeserialize(op), op, Arrays.asList()); - int inputIndex = 0; - quantileStreamResourceHandle = (Operand) op.input(inputIndex++); - int bucketBoundariesLength = op.inputListLength("bucket_boundaries"); - bucketBoundaries = Arrays.asList((Operand[]) op.inputList(inputIndex, bucketBoundariesLength)); - inputIndex += bucketBoundariesLength; - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceFlush.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceFlush.java deleted file mode 100644 index ab971af5a27..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceFlush.java +++ /dev/null @@ -1,153 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TInt64; -import org.tensorflow.types.family.TType; - -/** - * Flush the summaries for a quantile stream resource. - * An op that flushes the summaries for a quantile stream resource. - */ -@OpMetadata( - opType = BoostedTreesQuantileStreamResourceFlush.OP_NAME, - inputsClass = BoostedTreesQuantileStreamResourceFlush.Inputs.class -) -public final class BoostedTreesQuantileStreamResourceFlush extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesQuantileStreamResourceFlush"; - - public BoostedTreesQuantileStreamResourceFlush(Operation operation) { - super(operation, OP_NAME); - } - - /** - * Factory method to create a class wrapping a new BoostedTreesQuantileStreamResourceFlush operation. - * - * @param scope current scope - * @param quantileStreamResourceHandle resource handle referring to a QuantileStreamResource. - * @param numBuckets int; approximate number of buckets unless using generate_quantiles. - * @param options carries optional attribute values - * @return a new instance of BoostedTreesQuantileStreamResourceFlush - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesQuantileStreamResourceFlush create(Scope scope, - Operand quantileStreamResourceHandle, Operand numBuckets, - Options... 
options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesQuantileStreamResourceFlush"); - opBuilder.addInput(quantileStreamResourceHandle.asOutput()); - opBuilder.addInput(numBuckets.asOutput()); - if (options != null) { - for (Options opts : options) { - if (opts.generateQuantiles != null) { - opBuilder.setAttr("generate_quantiles", opts.generateQuantiles); - } - } - } - return new BoostedTreesQuantileStreamResourceFlush(opBuilder.build()); - } - - /** - * Sets the generateQuantiles option. - * - * @param generateQuantiles bool; If True, the output will be the num_quantiles for each stream where the ith - * entry is the ith quantile of the input with an approximation error of epsilon. - * Duplicate values may be present. - * If False, the output will be the points in the histogram that we got which roughly - * translates to 1/epsilon boundaries and without any duplicates. - * Default to False. - * @return this Options instance. - */ - public static Options generateQuantiles(Boolean generateQuantiles) { - return new Options().generateQuantiles(generateQuantiles); - } - - /** - * Optional attributes for {@link org.tensorflow.op.estimator.BoostedTreesQuantileStreamResourceFlush} - */ - public static class Options { - private Boolean generateQuantiles; - - private Options() { - } - - /** - * Sets the generateQuantiles option. - * - * @param generateQuantiles bool; If True, the output will be the num_quantiles for each stream where the ith - * entry is the ith quantile of the input with an approximation error of epsilon. - * Duplicate values may be present. - * If False, the output will be the points in the histogram that we got which roughly - * translates to 1/epsilon boundaries and without any duplicates. - * Default to False. - * @return this Options instance. 
- */ - public Options generateQuantiles(Boolean generateQuantiles) { - this.generateQuantiles = generateQuantiles; - return this; - } - } - - @OpInputsMetadata( - outputsClass = BoostedTreesQuantileStreamResourceFlush.class - ) - public static class Inputs extends RawOpInputs { - /** - * resource handle referring to a QuantileStreamResource. - */ - public final Operand quantileStreamResourceHandle; - - /** - * int; approximate number of buckets unless using generate_quantiles. - */ - public final Operand numBuckets; - - /** - * bool; If True, the output will be the num_quantiles for each stream where the ith - * entry is the ith quantile of the input with an approximation error of epsilon. - * Duplicate values may be present. - * If False, the output will be the points in the histogram that we got which roughly - * translates to 1/epsilon boundaries and without any duplicates. - * Default to False. - */ - public final boolean generateQuantiles; - - public Inputs(GraphOperation op) { - super(new BoostedTreesQuantileStreamResourceFlush(op), op, Arrays.asList("generate_quantiles")); - int inputIndex = 0; - quantileStreamResourceHandle = (Operand) op.input(inputIndex++); - numBuckets = (Operand) op.input(inputIndex++); - generateQuantiles = op.attributes().getAttrBool("generate_quantiles"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceGetBucketBoundaries.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceGetBucketBoundaries.java deleted file mode 100644 index 1a472902880..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceGetBucketBoundaries.java +++ /dev/null @@ -1,112 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; -import org.tensorflow.types.family.TType; - -/** - * Generate the bucket boundaries for each feature based on accumulated summaries. - * An op that returns a list of float tensors for a quantile stream resource. Each - * tensor is Rank 1 containing bucket boundaries for a single feature. 
- */ -@OpMetadata( - opType = BoostedTreesQuantileStreamResourceGetBucketBoundaries.OP_NAME, - inputsClass = BoostedTreesQuantileStreamResourceGetBucketBoundaries.Inputs.class -) -public final class BoostedTreesQuantileStreamResourceGetBucketBoundaries extends RawOp implements Iterable> { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesQuantileStreamResourceGetBucketBoundaries"; - - private List> bucketBoundaries; - - @SuppressWarnings("unchecked") - public BoostedTreesQuantileStreamResourceGetBucketBoundaries(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - int bucketBoundariesLength = operation.outputListLength("bucket_boundaries"); - bucketBoundaries = Arrays.asList((Output[]) operation.outputList(outputIdx, bucketBoundariesLength)); - outputIdx += bucketBoundariesLength; - } - - /** - * Factory method to create a class wrapping a new BoostedTreesQuantileStreamResourceGetBucketBoundaries operation. - * - * @param scope current scope - * @param quantileStreamResourceHandle resource handle referring to a QuantileStreamResource. - * @param numFeatures inferred int; number of features to get bucket boundaries for. - * @return a new instance of BoostedTreesQuantileStreamResourceGetBucketBoundaries - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesQuantileStreamResourceGetBucketBoundaries create(Scope scope, - Operand quantileStreamResourceHandle, Long numFeatures) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesQuantileStreamResourceGetBucketBoundaries"); - opBuilder.addInput(quantileStreamResourceHandle.asOutput()); - opBuilder.setAttr("num_features", numFeatures); - return new BoostedTreesQuantileStreamResourceGetBucketBoundaries(opBuilder.build()); - } - - /** - * Gets bucketBoundaries. - * float; List of Rank 1 Tensors each containing the bucket boundaries for a feature. - * @return bucketBoundaries. 
- */ - public List> bucketBoundaries() { - return bucketBoundaries; - } - - @Override - @SuppressWarnings({"rawtypes", "unchecked"}) - public Iterator> iterator() { - return (Iterator) bucketBoundaries.iterator(); - } - - @OpInputsMetadata( - outputsClass = BoostedTreesQuantileStreamResourceGetBucketBoundaries.class - ) - public static class Inputs extends RawOpInputs { - /** - * resource handle referring to a QuantileStreamResource. - */ - public final Operand quantileStreamResourceHandle; - - public Inputs(GraphOperation op) { - super(new BoostedTreesQuantileStreamResourceGetBucketBoundaries(op), op, Arrays.asList()); - int inputIndex = 0; - quantileStreamResourceHandle = (Operand) op.input(inputIndex++); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceHandleOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceHandleOp.java deleted file mode 100644 index bc58dbfcd15..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceHandleOp.java +++ /dev/null @@ -1,171 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.family.TType; - -/** - * Creates a handle to a BoostedTreesQuantileStreamResource. - */ -@OpMetadata( - opType = BoostedTreesQuantileStreamResourceHandleOp.OP_NAME, - inputsClass = BoostedTreesQuantileStreamResourceHandleOp.Inputs.class -) -public final class BoostedTreesQuantileStreamResourceHandleOp extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesQuantileStreamResourceHandleOp"; - - private Output resource; - - @SuppressWarnings("unchecked") - public BoostedTreesQuantileStreamResourceHandleOp(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - resource = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new BoostedTreesQuantileStreamResourceHandleOp operation. - * - * @param scope current scope - * @param options carries optional attribute values - * @return a new instance of BoostedTreesQuantileStreamResourceHandleOp - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesQuantileStreamResourceHandleOp create(Scope scope, Options... 
options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesQuantileStreamResourceHandleOp"); - if (options != null) { - for (Options opts : options) { - if (opts.container != null) { - opBuilder.setAttr("container", opts.container); - } - if (opts.sharedName != null) { - opBuilder.setAttr("shared_name", opts.sharedName); - } - } - } - return new BoostedTreesQuantileStreamResourceHandleOp(opBuilder.build()); - } - - /** - * Sets the container option. - * - * @param container the container option - * @return this Options instance. - */ - public static Options container(String container) { - return new Options().container(container); - } - - /** - * Sets the sharedName option. - * - * @param sharedName the sharedName option - * @return this Options instance. - */ - public static Options sharedName(String sharedName) { - return new Options().sharedName(sharedName); - } - - /** - * Gets resource. - * - * @return resource. - */ - public Output resource() { - return resource; - } - - @Override - @SuppressWarnings("unchecked") - public Output asOutput() { - return (Output) resource; - } - - /** - * Optional attributes for {@link org.tensorflow.op.estimator.BoostedTreesQuantileStreamResourceHandleOp} - */ - public static class Options { - private String container; - - private String sharedName; - - private Options() { - } - - /** - * Sets the container option. - * - * @param container the container option - * @return this Options instance. - */ - public Options container(String container) { - this.container = container; - return this; - } - - /** - * Sets the sharedName option. - * - * @param sharedName the sharedName option - * @return this Options instance. 
- */ - public Options sharedName(String sharedName) { - this.sharedName = sharedName; - return this; - } - } - - @OpInputsMetadata( - outputsClass = BoostedTreesQuantileStreamResourceHandleOp.class - ) - public static class Inputs extends RawOpInputs { - /** - * The container attribute - */ - public final String container; - - /** - * The sharedName attribute - */ - public final String sharedName; - - public Inputs(GraphOperation op) { - super(new BoostedTreesQuantileStreamResourceHandleOp(op), op, Arrays.asList("container", "shared_name")); - int inputIndex = 0; - container = op.attributes().getAttrString("container"); - sharedName = op.attributes().getAttrString("shared_name"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSerializeEnsemble.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSerializeEnsemble.java deleted file mode 100644 index 65ed6774a92..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSerializeEnsemble.java +++ /dev/null @@ -1,110 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TInt64; -import org.tensorflow.types.TString; -import org.tensorflow.types.family.TType; - -/** - * Serializes the tree ensemble to a proto. - */ -@OpMetadata( - opType = BoostedTreesSerializeEnsemble.OP_NAME, - inputsClass = BoostedTreesSerializeEnsemble.Inputs.class -) -public final class BoostedTreesSerializeEnsemble extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesSerializeEnsemble"; - - private Output stampToken; - - private Output treeEnsembleSerialized; - - public BoostedTreesSerializeEnsemble(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - stampToken = operation.output(outputIdx++); - treeEnsembleSerialized = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new BoostedTreesSerializeEnsemble operation. - * - * @param scope current scope - * @param treeEnsembleHandle Handle to the tree ensemble. - * @return a new instance of BoostedTreesSerializeEnsemble - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesSerializeEnsemble create(Scope scope, - Operand treeEnsembleHandle) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesSerializeEnsemble"); - opBuilder.addInput(treeEnsembleHandle.asOutput()); - return new BoostedTreesSerializeEnsemble(opBuilder.build()); - } - - /** - * Gets stampToken. - * Stamp token of the tree ensemble resource. 
- * @return stampToken. - */ - public Output stampToken() { - return stampToken; - } - - /** - * Gets treeEnsembleSerialized. - * Serialized proto of the ensemble. - * @return treeEnsembleSerialized. - */ - public Output treeEnsembleSerialized() { - return treeEnsembleSerialized; - } - - @OpInputsMetadata( - outputsClass = BoostedTreesSerializeEnsemble.class - ) - public static class Inputs extends RawOpInputs { - /** - * Handle to the tree ensemble. - */ - public final Operand treeEnsembleHandle; - - public Inputs(GraphOperation op) { - super(new BoostedTreesSerializeEnsemble(op), op, Arrays.asList()); - int inputIndex = 0; - treeEnsembleHandle = (Operand) op.input(inputIndex++); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSparseAggregateStats.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSparseAggregateStats.java deleted file mode 100644 index 18f56f131e4..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSparseAggregateStats.java +++ /dev/null @@ -1,199 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; -import org.tensorflow.types.TInt32; - -/** - * Aggregates the summary of accumulated stats for the batch. - * The summary stats contains gradients and hessians accumulated for each node, bucket and dimension id. - */ -@OpMetadata( - opType = BoostedTreesSparseAggregateStats.OP_NAME, - inputsClass = BoostedTreesSparseAggregateStats.Inputs.class -) -public final class BoostedTreesSparseAggregateStats extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesSparseAggregateStats"; - - private Output statsSummaryIndices; - - private Output statsSummaryValues; - - private Output statsSummaryShape; - - public BoostedTreesSparseAggregateStats(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - statsSummaryIndices = operation.output(outputIdx++); - statsSummaryValues = operation.output(outputIdx++); - statsSummaryShape = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new BoostedTreesSparseAggregateStats operation. - * - * @param scope current scope - * @param nodeIds int32; Rank 1 Tensor containing node ids for each example, shape [batch_size]. - * @param gradients float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example. - * @param hessians float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example. 
- * @param featureIndices int32; Rank 2 indices of feature sparse Tensors (shape=[number of sparse entries, 2]). - * Number of sparse entries across all instances from the batch. The first value is - * the index of the instance, the second is dimension of the feature. The second axis - * can only have 2 values, i.e., the input dense version of Tensor can only be matrix. - * @param featureValues int32; Rank 1 values of feature sparse Tensors (shape=[number of sparse entries]). - * Number of sparse entries across all instances from the batch. The first value is - * the index of the instance, the second is dimension of the feature. - * @param featureShape int32; Rank 1 dense shape of feature sparse Tensors (shape=[2]). - * The first axis can only have 2 values, [batch_size, feature_dimension]. - * @param maxSplits int; the maximum number of splits possible in the whole tree. - * @param numBuckets int; equals to the maximum possible value of bucketized feature + 1. - * @return a new instance of BoostedTreesSparseAggregateStats - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesSparseAggregateStats create(Scope scope, Operand nodeIds, - Operand gradients, Operand hessians, Operand featureIndices, - Operand featureValues, Operand featureShape, Long maxSplits, - Long numBuckets) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesSparseAggregateStats"); - opBuilder.addInput(nodeIds.asOutput()); - opBuilder.addInput(gradients.asOutput()); - opBuilder.addInput(hessians.asOutput()); - opBuilder.addInput(featureIndices.asOutput()); - opBuilder.addInput(featureValues.asOutput()); - opBuilder.addInput(featureShape.asOutput()); - opBuilder.setAttr("max_splits", maxSplits); - opBuilder.setAttr("num_buckets", numBuckets); - return new BoostedTreesSparseAggregateStats(opBuilder.build()); - } - - /** - * Gets statsSummaryIndices. 
- * int32; Rank 2 indices of summary sparse Tensors (shape=[number of non zero statistics, 4]) - * The second axis can only be 4 including node id, feature dimension, bucket id, and statistics_dimension. - * statistics_dimension = logits_dimension + hessian_dimension. - * @return statsSummaryIndices. - */ - public Output statsSummaryIndices() { - return statsSummaryIndices; - } - - /** - * Gets statsSummaryValues. - * output Rank 1 Tensor (shape=[number of non zero statistics]) - * @return statsSummaryValues. - */ - public Output statsSummaryValues() { - return statsSummaryValues; - } - - /** - * Gets statsSummaryShape. - * output Rank 1 Tensor (shape=[4]) - * The tensor has following 4 values: [max_splits, feature_dimension, num_buckets, statistics_dimension], - * where statistics_dimension = gradient_dimension + hessian_dimension. gradient_dimension - * is the same as label_dimension, i.e., the output space. hessian_dimension can be the same - * as logits dimension when diagonal hessian is used, or label_dimension^2 when full - * hessian is used. - * @return statsSummaryShape. - */ - public Output statsSummaryShape() { - return statsSummaryShape; - } - - @OpInputsMetadata( - outputsClass = BoostedTreesSparseAggregateStats.class - ) - public static class Inputs extends RawOpInputs { - /** - * int32; Rank 1 Tensor containing node ids for each example, shape [batch_size]. - */ - public final Operand nodeIds; - - /** - * float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example. - */ - public final Operand gradients; - - /** - * float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example. - */ - public final Operand hessians; - - /** - * int32; Rank 2 indices of feature sparse Tensors (shape=[number of sparse entries, 2]). - * Number of sparse entries across all instances from the batch. The first value is - * the index of the instance, the second is dimension of the feature. 
The second axis - * can only have 2 values, i.e., the input dense version of Tensor can only be matrix. - */ - public final Operand featureIndices; - - /** - * int32; Rank 1 values of feature sparse Tensors (shape=[number of sparse entries]). - * Number of sparse entries across all instances from the batch. The first value is - * the index of the instance, the second is dimension of the feature. - */ - public final Operand featureValues; - - /** - * int32; Rank 1 dense shape of feature sparse Tensors (shape=[2]). - * The first axis can only have 2 values, [batch_size, feature_dimension]. - */ - public final Operand featureShape; - - /** - * int; the maximum number of splits possible in the whole tree. - */ - public final long maxSplits; - - /** - * int; equals to the maximum possible value of bucketized feature + 1. - */ - public final long numBuckets; - - public Inputs(GraphOperation op) { - super(new BoostedTreesSparseAggregateStats(op), op, Arrays.asList("max_splits", "num_buckets")); - int inputIndex = 0; - nodeIds = (Operand) op.input(inputIndex++); - gradients = (Operand) op.input(inputIndex++); - hessians = (Operand) op.input(inputIndex++); - featureIndices = (Operand) op.input(inputIndex++); - featureValues = (Operand) op.input(inputIndex++); - featureShape = (Operand) op.input(inputIndex++); - maxSplits = op.attributes().getAttrInt("max_splits"); - numBuckets = op.attributes().getAttrInt("num_buckets"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSparseCalculateBestFeatureSplit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSparseCalculateBestFeatureSplit.java deleted file mode 100644 index 3803c2aa456..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSparseCalculateBestFeatureSplit.java +++ /dev/null @@ -1,290 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. 
All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; -import org.tensorflow.types.TInt32; -import org.tensorflow.types.TString; - -/** - * Calculates gains for each feature and returns the best possible split information for the feature. - * The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature. - *

      It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return {@code node_ids_list} for each feature, containing the list of nodes that this feature can be used to split. - *

      In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features). - *

      The output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature. - */ -@OpMetadata( - opType = BoostedTreesSparseCalculateBestFeatureSplit.OP_NAME, - inputsClass = BoostedTreesSparseCalculateBestFeatureSplit.Inputs.class -) -public final class BoostedTreesSparseCalculateBestFeatureSplit extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesSparseCalculateBestFeatureSplit"; - - private Output nodeIds; - - private Output gains; - - private Output featureDimensions; - - private Output thresholds; - - private Output leftNodeContribs; - - private Output rightNodeContribs; - - private Output splitWithDefaultDirections; - - public BoostedTreesSparseCalculateBestFeatureSplit(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - nodeIds = operation.output(outputIdx++); - gains = operation.output(outputIdx++); - featureDimensions = operation.output(outputIdx++); - thresholds = operation.output(outputIdx++); - leftNodeContribs = operation.output(outputIdx++); - rightNodeContribs = operation.output(outputIdx++); - splitWithDefaultDirections = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new BoostedTreesSparseCalculateBestFeatureSplit operation. - * - * @param scope current scope - * @param nodeIdRange A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within {@code stats_summary_list}. The nodes are iterated between the two nodes specified by the tensor, as like {@code for node_id in range(node_id_range[0], node_id_range[1])} (Note that the last index node_id_range[1] is exclusive). - * @param statsSummaryIndices A Rank 2 int64 tensor of dense shape [N, 4] (N specifies the number of non-zero values) for accumulated stats summary (gradient/hessian) per node per bucket for each feature. 
The second dimension contains node id, feature dimension, bucket id, and stats dim. - * stats dim is the sum of logits dimension and hessian dimension, hessian dimension can either be logits dimension if diagonal hessian is used, or logits dimension^2 if full hessian is used. - * @param statsSummaryValues A Rank 1 float tensor of dense shape [N] (N specifies the number of non-zero values), which supplies the values for each element in summary_indices. - * @param statsSummaryShape A Rank 1 float tensor of dense shape [4], which specifies the dense shape of the sparse tensor, which is [num tree nodes, feature dimensions, num buckets, stats dim]. - * @param l1 l1 regularization factor on leaf weights, per instance based. - * @param l2 l2 regularization factor on leaf weights, per instance based. - * @param treeComplexity adjustment to the gain, per leaf based. - * @param minNodeWeight minimum avg of hessians in a node before required for the node to be considered for splitting. - * @param logitsDimension The dimension of logit, i.e., number of classes. - * @param options carries optional attribute values - * @return a new instance of BoostedTreesSparseCalculateBestFeatureSplit - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesSparseCalculateBestFeatureSplit create(Scope scope, - Operand nodeIdRange, Operand statsSummaryIndices, - Operand statsSummaryValues, Operand statsSummaryShape, Operand l1, - Operand l2, Operand treeComplexity, Operand minNodeWeight, - Long logitsDimension, Options... 
options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesSparseCalculateBestFeatureSplit"); - opBuilder.addInput(nodeIdRange.asOutput()); - opBuilder.addInput(statsSummaryIndices.asOutput()); - opBuilder.addInput(statsSummaryValues.asOutput()); - opBuilder.addInput(statsSummaryShape.asOutput()); - opBuilder.addInput(l1.asOutput()); - opBuilder.addInput(l2.asOutput()); - opBuilder.addInput(treeComplexity.asOutput()); - opBuilder.addInput(minNodeWeight.asOutput()); - opBuilder.setAttr("logits_dimension", logitsDimension); - if (options != null) { - for (Options opts : options) { - if (opts.splitType != null) { - opBuilder.setAttr("split_type", opts.splitType); - } - } - } - return new BoostedTreesSparseCalculateBestFeatureSplit(opBuilder.build()); - } - - /** - * Sets the splitType option. - * - * @param splitType A string indicating if this Op should perform inequality split or equality split. - * @return this Options instance. - */ - public static Options splitType(String splitType) { - return new Options().splitType(splitType); - } - - /** - * Gets nodeIds. - * A Rank 1 tensor indicating possible node ids that can be split. - * @return nodeIds. - */ - public Output nodeIds() { - return nodeIds; - } - - /** - * Gets gains. - * A Rank 1 tensor indicating the best gains to split each node. - * @return gains. - */ - public Output gains() { - return gains; - } - - /** - * Gets featureDimensions. - * A Rank 1 tensor indicating the best feature dimension for each feature to split for each node. - * @return featureDimensions. - */ - public Output featureDimensions() { - return featureDimensions; - } - - /** - * Gets thresholds. - * A Rank 1 tensor indicating the bucket id to compare with (as a threshold) for split in each node. - * @return thresholds. - */ - public Output thresholds() { - return thresholds; - } - - /** - * Gets leftNodeContribs. 
- * A Rank 2 tensor indicating the contribution of the left nodes when branching from parent nodes to the left direction by the given threshold for each feature. - * This value will be used to make the left node value by adding to the parent node value. Second dimension size is logits dimension. - * @return leftNodeContribs. - */ - public Output leftNodeContribs() { - return leftNodeContribs; - } - - /** - * Gets rightNodeContribs. - * A Rank 2 tensor, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node. - * @return rightNodeContribs. - */ - public Output rightNodeContribs() { - return rightNodeContribs; - } - - /** - * Gets splitWithDefaultDirections. - * A Rank 1 tensor indicating which direction to go if data is missing. - * Inequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2. - * @return splitWithDefaultDirections. - */ - public Output splitWithDefaultDirections() { - return splitWithDefaultDirections; - } - - /** - * Optional attributes for {@link org.tensorflow.op.estimator.BoostedTreesSparseCalculateBestFeatureSplit} - */ - public static class Options { - private String splitType; - - private Options() { - } - - /** - * Sets the splitType option. - * - * @param splitType A string indicating if this Op should perform inequality split or equality split. - * @return this Options instance. - */ - public Options splitType(String splitType) { - this.splitType = splitType; - return this; - } - } - - @OpInputsMetadata( - outputsClass = BoostedTreesSparseCalculateBestFeatureSplit.class - ) - public static class Inputs extends RawOpInputs { - /** - * A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within {@code stats_summary_list}. 
The nodes are iterated between the two nodes specified by the tensor, as like {@code for node_id in range(node_id_range[0], node_id_range[1])} (Note that the last index node_id_range[1] is exclusive). - */ - public final Operand nodeIdRange; - - /** - * A Rank 2 int64 tensor of dense shape [N, 4] (N specifies the number of non-zero values) for accumulated stats summary (gradient/hessian) per node per bucket for each feature. The second dimension contains node id, feature dimension, bucket id, and stats dim. - * stats dim is the sum of logits dimension and hessian dimension, hessian dimension can either be logits dimension if diagonal hessian is used, or logits dimension^2 if full hessian is used. - */ - public final Operand statsSummaryIndices; - - /** - * A Rank 1 float tensor of dense shape [N] (N specifies the number of non-zero values), which supplies the values for each element in summary_indices. - */ - public final Operand statsSummaryValues; - - /** - * A Rank 1 float tensor of dense shape [4], which specifies the dense shape of the sparse tensor, which is [num tree nodes, feature dimensions, num buckets, stats dim]. - */ - public final Operand statsSummaryShape; - - /** - * l1 regularization factor on leaf weights, per instance based. - */ - public final Operand l1; - - /** - * l2 regularization factor on leaf weights, per instance based. - */ - public final Operand l2; - - /** - * adjustment to the gain, per leaf based. - */ - public final Operand treeComplexity; - - /** - * minimum avg of hessians in a node before required for the node to be considered for splitting. - */ - public final Operand minNodeWeight; - - /** - * The dimension of logit, i.e., number of classes. - */ - public final long logitsDimension; - - /** - * A string indicating if this Op should perform inequality split or equality split. 
- */ - public final String splitType; - - public Inputs(GraphOperation op) { - super(new BoostedTreesSparseCalculateBestFeatureSplit(op), op, Arrays.asList("logits_dimension", "split_type")); - int inputIndex = 0; - nodeIdRange = (Operand) op.input(inputIndex++); - statsSummaryIndices = (Operand) op.input(inputIndex++); - statsSummaryValues = (Operand) op.input(inputIndex++); - statsSummaryShape = (Operand) op.input(inputIndex++); - l1 = (Operand) op.input(inputIndex++); - l2 = (Operand) op.input(inputIndex++); - treeComplexity = (Operand) op.input(inputIndex++); - minNodeWeight = (Operand) op.input(inputIndex++); - logitsDimension = op.attributes().getAttrInt("logits_dimension"); - splitType = op.attributes().getAttrString("split_type"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesTrainingPredict.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesTrainingPredict.java deleted file mode 100644 index 8fb083ec5e5..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesTrainingPredict.java +++ /dev/null @@ -1,171 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.Operands; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; -import org.tensorflow.types.TInt32; -import org.tensorflow.types.family.TType; - -/** - * Runs multiple additive regression ensemble predictors on input instances and - * computes the update to cached logits. It is designed to be used during training. - * It traverses the trees starting from cached tree id and cached node id and - * calculates the updates to be pushed to the cache. - */ -@OpMetadata( - opType = BoostedTreesTrainingPredict.OP_NAME, - inputsClass = BoostedTreesTrainingPredict.Inputs.class -) -public final class BoostedTreesTrainingPredict extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesTrainingPredict"; - - private Output partialLogits; - - private Output treeIds; - - private Output nodeIds; - - public BoostedTreesTrainingPredict(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - partialLogits = operation.output(outputIdx++); - treeIds = operation.output(outputIdx++); - nodeIds = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new BoostedTreesTrainingPredict operation. - * - * @param scope current scope - * @param treeEnsembleHandle The treeEnsembleHandle value - * @param cachedTreeIds Rank 1 Tensor containing cached tree ids which is the starting - * tree of prediction. 
- * @param cachedNodeIds Rank 1 Tensor containing cached node id which is the starting - * node of prediction. - * @param bucketizedFeatures A list of rank 1 Tensors containing bucket id for each - * feature. - * @param logitsDimension scalar, dimension of the logits, to be used for partial logits - * shape. - * @return a new instance of BoostedTreesTrainingPredict - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesTrainingPredict create(Scope scope, - Operand treeEnsembleHandle, Operand cachedTreeIds, - Operand cachedNodeIds, Iterable> bucketizedFeatures, - Long logitsDimension) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesTrainingPredict"); - opBuilder.addInput(treeEnsembleHandle.asOutput()); - opBuilder.addInput(cachedTreeIds.asOutput()); - opBuilder.addInput(cachedNodeIds.asOutput()); - opBuilder.addInputList(Operands.asOutputs(bucketizedFeatures)); - opBuilder.setAttr("logits_dimension", logitsDimension); - return new BoostedTreesTrainingPredict(opBuilder.build()); - } - - /** - * Gets partialLogits. - * Rank 2 Tensor containing logits update (with respect to cached - * values stored) for each example. - * @return partialLogits. - */ - public Output partialLogits() { - return partialLogits; - } - - /** - * Gets treeIds. - * Rank 1 Tensor containing new tree ids for each example. - * @return treeIds. - */ - public Output treeIds() { - return treeIds; - } - - /** - * Gets nodeIds. - * Rank 1 Tensor containing new node ids in the new tree_ids. - * @return nodeIds. - */ - public Output nodeIds() { - return nodeIds; - } - - @OpInputsMetadata( - outputsClass = BoostedTreesTrainingPredict.class - ) - public static class Inputs extends RawOpInputs { - /** - * The treeEnsembleHandle input - */ - public final Operand treeEnsembleHandle; - - /** - * Rank 1 Tensor containing cached tree ids which is the starting - * tree of prediction. 
- */ - public final Operand cachedTreeIds; - - /** - * Rank 1 Tensor containing cached node id which is the starting - * node of prediction. - */ - public final Operand cachedNodeIds; - - /** - * A list of rank 1 Tensors containing bucket id for each - * feature. - */ - public final Iterable> bucketizedFeatures; - - /** - * scalar, dimension of the logits, to be used for partial logits - * shape. - */ - public final long logitsDimension; - - public Inputs(GraphOperation op) { - super(new BoostedTreesTrainingPredict(op), op, Arrays.asList("logits_dimension")); - int inputIndex = 0; - treeEnsembleHandle = (Operand) op.input(inputIndex++); - cachedTreeIds = (Operand) op.input(inputIndex++); - cachedNodeIds = (Operand) op.input(inputIndex++); - int bucketizedFeaturesLength = op.inputListLength("bucketized_features"); - bucketizedFeatures = Arrays.asList((Operand[]) op.inputList(inputIndex, bucketizedFeaturesLength)); - inputIndex += bucketizedFeaturesLength; - logitsDimension = op.attributes().getAttrInt("logits_dimension"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesUpdateEnsemble.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesUpdateEnsemble.java deleted file mode 100644 index df6017e180b..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesUpdateEnsemble.java +++ /dev/null @@ -1,188 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.op.Operands; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; -import org.tensorflow.types.TInt32; -import org.tensorflow.types.family.TType; - -/** - * Updates the tree ensemble by either adding a layer to the last tree being grown - * or by starting a new tree. - */ -@OpMetadata( - opType = BoostedTreesUpdateEnsemble.OP_NAME, - inputsClass = BoostedTreesUpdateEnsemble.Inputs.class -) -public final class BoostedTreesUpdateEnsemble extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesUpdateEnsemble"; - - public BoostedTreesUpdateEnsemble(Operation operation) { - super(operation, OP_NAME); - } - - /** - * Factory method to create a class wrapping a new BoostedTreesUpdateEnsemble operation. - * - * @param scope current scope - * @param treeEnsembleHandle Handle to the ensemble variable. - * @param featureIds Rank 1 tensor with ids for each feature. This is the real id of - * the feature that will be used in the split. - * @param nodeIds List of rank 1 tensors representing the nodes for which this feature - * has a split. - * @param gains List of rank 1 tensors representing the gains for each of the feature's - * split. 
- * @param thresholds List of rank 1 tensors representing the thesholds for each of the - * feature's split. - * @param leftNodeContribs List of rank 2 tensors with left leaf contribs for each of - * the feature's splits. Will be added to the previous node values to constitute - * the values of the left nodes. - * @param rightNodeContribs List of rank 2 tensors with right leaf contribs for each - * of the feature's splits. Will be added to the previous node values to constitute - * the values of the right nodes. - * @param maxDepth Max depth of the tree to build. - * @param learningRate shrinkage const for each new tree. - * @param pruningMode 0-No pruning, 1-Pre-pruning, 2-Post-pruning. - * @return a new instance of BoostedTreesUpdateEnsemble - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesUpdateEnsemble create(Scope scope, - Operand treeEnsembleHandle, Operand featureIds, - Iterable> nodeIds, Iterable> gains, - Iterable> thresholds, Iterable> leftNodeContribs, - Iterable> rightNodeContribs, Operand maxDepth, - Operand learningRate, Long pruningMode) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesUpdateEnsemble"); - opBuilder.addInput(treeEnsembleHandle.asOutput()); - opBuilder.addInput(featureIds.asOutput()); - opBuilder.addInputList(Operands.asOutputs(nodeIds)); - opBuilder.addInputList(Operands.asOutputs(gains)); - opBuilder.addInputList(Operands.asOutputs(thresholds)); - opBuilder.addInputList(Operands.asOutputs(leftNodeContribs)); - opBuilder.addInputList(Operands.asOutputs(rightNodeContribs)); - opBuilder.addInput(maxDepth.asOutput()); - opBuilder.addInput(learningRate.asOutput()); - opBuilder.setAttr("pruning_mode", pruningMode); - return new BoostedTreesUpdateEnsemble(opBuilder.build()); - } - - @OpInputsMetadata( - outputsClass = BoostedTreesUpdateEnsemble.class - ) - public static class Inputs extends RawOpInputs { - /** - * Handle to the ensemble variable. 
- */ - public final Operand treeEnsembleHandle; - - /** - * Rank 1 tensor with ids for each feature. This is the real id of - * the feature that will be used in the split. - */ - public final Operand featureIds; - - /** - * List of rank 1 tensors representing the nodes for which this feature - * has a split. - */ - public final Iterable> nodeIds; - - /** - * List of rank 1 tensors representing the gains for each of the feature's - * split. - */ - public final Iterable> gains; - - /** - * List of rank 1 tensors representing the thesholds for each of the - * feature's split. - */ - public final Iterable> thresholds; - - /** - * List of rank 2 tensors with left leaf contribs for each of - * the feature's splits. Will be added to the previous node values to constitute - * the values of the left nodes. - */ - public final Iterable> leftNodeContribs; - - /** - * List of rank 2 tensors with right leaf contribs for each - * of the feature's splits. Will be added to the previous node values to constitute - * the values of the right nodes. - */ - public final Iterable> rightNodeContribs; - - /** - * Max depth of the tree to build. - */ - public final Operand maxDepth; - - /** - * shrinkage const for each new tree. - */ - public final Operand learningRate; - - /** - * 0-No pruning, 1-Pre-pruning, 2-Post-pruning. 
- */ - public final long pruningMode; - - public Inputs(GraphOperation op) { - super(new BoostedTreesUpdateEnsemble(op), op, Arrays.asList("pruning_mode")); - int inputIndex = 0; - treeEnsembleHandle = (Operand) op.input(inputIndex++); - featureIds = (Operand) op.input(inputIndex++); - int nodeIdsLength = op.inputListLength("node_ids"); - nodeIds = Arrays.asList((Operand[]) op.inputList(inputIndex, nodeIdsLength)); - inputIndex += nodeIdsLength; - int gainsLength = op.inputListLength("gains"); - gains = Arrays.asList((Operand[]) op.inputList(inputIndex, gainsLength)); - inputIndex += gainsLength; - int thresholdsLength = op.inputListLength("thresholds"); - thresholds = Arrays.asList((Operand[]) op.inputList(inputIndex, thresholdsLength)); - inputIndex += thresholdsLength; - int leftNodeContribsLength = op.inputListLength("left_node_contribs"); - leftNodeContribs = Arrays.asList((Operand[]) op.inputList(inputIndex, leftNodeContribsLength)); - inputIndex += leftNodeContribsLength; - int rightNodeContribsLength = op.inputListLength("right_node_contribs"); - rightNodeContribs = Arrays.asList((Operand[]) op.inputList(inputIndex, rightNodeContribsLength)); - inputIndex += rightNodeContribsLength; - maxDepth = (Operand) op.input(inputIndex++); - learningRate = (Operand) op.input(inputIndex++); - pruningMode = op.attributes().getAttrInt("pruning_mode"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesUpdateEnsembleV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesUpdateEnsembleV2.java deleted file mode 100644 index c1c35fd932c..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesUpdateEnsembleV2.java +++ /dev/null @@ -1,287 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.op.Operands; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; -import org.tensorflow.types.TInt32; -import org.tensorflow.types.TString; -import org.tensorflow.types.family.TType; - -/** - * Updates the tree ensemble by adding a layer to the last tree being grown - * or by starting a new tree. - */ -@OpMetadata( - opType = BoostedTreesUpdateEnsembleV2.OP_NAME, - inputsClass = BoostedTreesUpdateEnsembleV2.Inputs.class -) -public final class BoostedTreesUpdateEnsembleV2 extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "BoostedTreesUpdateEnsembleV2"; - - public BoostedTreesUpdateEnsembleV2(Operation operation) { - super(operation, OP_NAME); - } - - /** - * Factory method to create a class wrapping a new BoostedTreesUpdateEnsembleV2 operation. 
- * - * @param scope current scope - * @param treeEnsembleHandle Handle to the ensemble variable. - * @param featureIds Rank 1 tensor with ids for each feature. This is the real id of - * the feature that will be used in the split. - * @param dimensionIds List of rank 1 tensors representing the dimension in each feature. - * @param nodeIds List of rank 1 tensors representing the nodes for which this feature - * has a split. - * @param gains List of rank 1 tensors representing the gains for each of the feature's - * split. - * @param thresholds List of rank 1 tensors representing the thesholds for each of the - * feature's split. - * @param leftNodeContribs List of rank 2 tensors with left leaf contribs for each of - * the feature's splits. Will be added to the previous node values to constitute - * the values of the left nodes. - * @param rightNodeContribs List of rank 2 tensors with right leaf contribs for each - * of the feature's splits. Will be added to the previous node values to constitute - * the values of the right nodes. - * @param splitTypes List of rank 1 tensors representing the split type for each feature. - * @param maxDepth Max depth of the tree to build. - * @param learningRate shrinkage const for each new tree. - * @param pruningMode 0-No pruning, 1-Pre-pruning, 2-Post-pruning. - * @param options carries optional attribute values - * @return a new instance of BoostedTreesUpdateEnsembleV2 - */ - @Endpoint( - describeByClass = true - ) - public static BoostedTreesUpdateEnsembleV2 create(Scope scope, - Operand treeEnsembleHandle, Iterable> featureIds, - Iterable> dimensionIds, Iterable> nodeIds, - Iterable> gains, Iterable> thresholds, - Iterable> leftNodeContribs, Iterable> rightNodeContribs, - Iterable> splitTypes, Operand maxDepth, - Operand learningRate, Operand pruningMode, Options... 
options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BoostedTreesUpdateEnsembleV2"); - opBuilder.addInput(treeEnsembleHandle.asOutput()); - opBuilder.addInputList(Operands.asOutputs(featureIds)); - opBuilder.addInputList(Operands.asOutputs(dimensionIds)); - opBuilder.addInputList(Operands.asOutputs(nodeIds)); - opBuilder.addInputList(Operands.asOutputs(gains)); - opBuilder.addInputList(Operands.asOutputs(thresholds)); - opBuilder.addInputList(Operands.asOutputs(leftNodeContribs)); - opBuilder.addInputList(Operands.asOutputs(rightNodeContribs)); - opBuilder.addInputList(Operands.asOutputs(splitTypes)); - opBuilder.addInput(maxDepth.asOutput()); - opBuilder.addInput(learningRate.asOutput()); - opBuilder.addInput(pruningMode.asOutput()); - if (options != null) { - for (Options opts : options) { - if (opts.logitsDimension != null) { - opBuilder.setAttr("logits_dimension", opts.logitsDimension); - } - if (opts.numGroups != null) { - opBuilder.setAttr("num_groups", opts.numGroups); - } - } - } - return new BoostedTreesUpdateEnsembleV2(opBuilder.build()); - } - - /** - * Sets the logitsDimension option. - * - * @param logitsDimension scalar, dimension of the logits - * @return this Options instance. - */ - public static Options logitsDimension(Long logitsDimension) { - return new Options().logitsDimension(logitsDimension); - } - - /** - * Sets the numGroups option. - * - * @param numGroups Number of groups of split information to process, where a group contains feature - * ids that are processed together in BoostedTreesCalculateBestFeatureSplitOpV2. - * INFERRED. - * @return this Options instance. 
- */ - public static Options numGroups(Long numGroups) { - return new Options().numGroups(numGroups); - } - - /** - * Optional attributes for {@link org.tensorflow.op.estimator.BoostedTreesUpdateEnsembleV2} - */ - public static class Options { - private Long logitsDimension; - - private Long numGroups; - - private Options() { - } - - /** - * Sets the logitsDimension option. - * - * @param logitsDimension scalar, dimension of the logits - * @return this Options instance. - */ - public Options logitsDimension(Long logitsDimension) { - this.logitsDimension = logitsDimension; - return this; - } - - /** - * Sets the numGroups option. - * - * @param numGroups Number of groups of split information to process, where a group contains feature - * ids that are processed together in BoostedTreesCalculateBestFeatureSplitOpV2. - * INFERRED. - * @return this Options instance. - */ - public Options numGroups(Long numGroups) { - this.numGroups = numGroups; - return this; - } - } - - @OpInputsMetadata( - outputsClass = BoostedTreesUpdateEnsembleV2.class - ) - public static class Inputs extends RawOpInputs { - /** - * Handle to the ensemble variable. - */ - public final Operand treeEnsembleHandle; - - /** - * Rank 1 tensor with ids for each feature. This is the real id of - * the feature that will be used in the split. - */ - public final Iterable> featureIds; - - /** - * List of rank 1 tensors representing the dimension in each feature. - */ - public final Iterable> dimensionIds; - - /** - * List of rank 1 tensors representing the nodes for which this feature - * has a split. - */ - public final Iterable> nodeIds; - - /** - * List of rank 1 tensors representing the gains for each of the feature's - * split. - */ - public final Iterable> gains; - - /** - * List of rank 1 tensors representing the thesholds for each of the - * feature's split. - */ - public final Iterable> thresholds; - - /** - * List of rank 2 tensors with left leaf contribs for each of - * the feature's splits. 
Will be added to the previous node values to constitute - * the values of the left nodes. - */ - public final Iterable> leftNodeContribs; - - /** - * List of rank 2 tensors with right leaf contribs for each - * of the feature's splits. Will be added to the previous node values to constitute - * the values of the right nodes. - */ - public final Iterable> rightNodeContribs; - - /** - * List of rank 1 tensors representing the split type for each feature. - */ - public final Iterable> splitTypes; - - /** - * Max depth of the tree to build. - */ - public final Operand maxDepth; - - /** - * shrinkage const for each new tree. - */ - public final Operand learningRate; - - /** - * 0-No pruning, 1-Pre-pruning, 2-Post-pruning. - */ - public final Operand pruningMode; - - /** - * scalar, dimension of the logits - */ - public final long logitsDimension; - - public Inputs(GraphOperation op) { - super(new BoostedTreesUpdateEnsembleV2(op), op, Arrays.asList("logits_dimension")); - int inputIndex = 0; - treeEnsembleHandle = (Operand) op.input(inputIndex++); - int featureIdsLength = op.inputListLength("feature_ids"); - featureIds = Arrays.asList((Operand[]) op.inputList(inputIndex, featureIdsLength)); - inputIndex += featureIdsLength; - int dimensionIdsLength = op.inputListLength("dimension_ids"); - dimensionIds = Arrays.asList((Operand[]) op.inputList(inputIndex, dimensionIdsLength)); - inputIndex += dimensionIdsLength; - int nodeIdsLength = op.inputListLength("node_ids"); - nodeIds = Arrays.asList((Operand[]) op.inputList(inputIndex, nodeIdsLength)); - inputIndex += nodeIdsLength; - int gainsLength = op.inputListLength("gains"); - gains = Arrays.asList((Operand[]) op.inputList(inputIndex, gainsLength)); - inputIndex += gainsLength; - int thresholdsLength = op.inputListLength("thresholds"); - thresholds = Arrays.asList((Operand[]) op.inputList(inputIndex, thresholdsLength)); - inputIndex += thresholdsLength; - int leftNodeContribsLength = op.inputListLength("left_node_contribs"); 
- leftNodeContribs = Arrays.asList((Operand[]) op.inputList(inputIndex, leftNodeContribsLength)); - inputIndex += leftNodeContribsLength; - int rightNodeContribsLength = op.inputListLength("right_node_contribs"); - rightNodeContribs = Arrays.asList((Operand[]) op.inputList(inputIndex, rightNodeContribsLength)); - inputIndex += rightNodeContribsLength; - int splitTypesLength = op.inputListLength("split_types"); - splitTypes = Arrays.asList((Operand[]) op.inputList(inputIndex, splitTypesLength)); - inputIndex += splitTypesLength; - maxDepth = (Operand) op.input(inputIndex++); - learningRate = (Operand) op.input(inputIndex++); - pruningMode = (Operand) op.input(inputIndex++); - logitsDimension = op.attributes().getAttrInt("logits_dimension"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/IsBoostedTreesEnsembleInitialized.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/IsBoostedTreesEnsembleInitialized.java deleted file mode 100644 index 23ed27e107a..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/IsBoostedTreesEnsembleInitialized.java +++ /dev/null @@ -1,102 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TBool; -import org.tensorflow.types.family.TType; - -/** - * Checks whether a tree ensemble has been initialized. - */ -@OpMetadata( - opType = IsBoostedTreesEnsembleInitialized.OP_NAME, - inputsClass = IsBoostedTreesEnsembleInitialized.Inputs.class -) -public final class IsBoostedTreesEnsembleInitialized extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "IsBoostedTreesEnsembleInitialized"; - - private Output isInitialized; - - public IsBoostedTreesEnsembleInitialized(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - isInitialized = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new IsBoostedTreesEnsembleInitialized operation. - * - * @param scope current scope - * @param treeEnsembleHandle Handle to the tree ensemble resource. - * @return a new instance of IsBoostedTreesEnsembleInitialized - */ - @Endpoint( - describeByClass = true - ) - public static IsBoostedTreesEnsembleInitialized create(Scope scope, - Operand treeEnsembleHandle) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "IsBoostedTreesEnsembleInitialized"); - opBuilder.addInput(treeEnsembleHandle.asOutput()); - return new IsBoostedTreesEnsembleInitialized(opBuilder.build()); - } - - /** - * Gets isInitialized. - * output boolean on whether it is initialized or not. - * @return isInitialized. 
- */ - public Output isInitialized() { - return isInitialized; - } - - @Override - public Output asOutput() { - return isInitialized; - } - - @OpInputsMetadata( - outputsClass = IsBoostedTreesEnsembleInitialized.class - ) - public static class Inputs extends RawOpInputs { - /** - * Handle to the tree ensemble resource. - */ - public final Operand treeEnsembleHandle; - - public Inputs(GraphOperation op) { - super(new IsBoostedTreesEnsembleInitialized(op), op, Arrays.asList()); - int inputIndex = 0; - treeEnsembleHandle = (Operand) op.input(inputIndex++); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/IsBoostedTreesQuantileStreamResourceInitialized.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/IsBoostedTreesQuantileStreamResourceInitialized.java deleted file mode 100644 index 6145156ddbe..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/IsBoostedTreesQuantileStreamResourceInitialized.java +++ /dev/null @@ -1,103 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.estimator; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TBool; -import org.tensorflow.types.family.TType; - -/** - * Checks whether a quantile stream has been initialized. - * An Op that checks if quantile stream resource is initialized. - */ -@OpMetadata( - opType = IsBoostedTreesQuantileStreamResourceInitialized.OP_NAME, - inputsClass = IsBoostedTreesQuantileStreamResourceInitialized.Inputs.class -) -public final class IsBoostedTreesQuantileStreamResourceInitialized extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "IsBoostedTreesQuantileStreamResourceInitialized"; - - private Output isInitialized; - - public IsBoostedTreesQuantileStreamResourceInitialized(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - isInitialized = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new IsBoostedTreesQuantileStreamResourceInitialized operation. - * - * @param scope current scope - * @param quantileStreamResourceHandle resource; The reference to quantile stream resource handle. 
- * @return a new instance of IsBoostedTreesQuantileStreamResourceInitialized - */ - @Endpoint( - describeByClass = true - ) - public static IsBoostedTreesQuantileStreamResourceInitialized create(Scope scope, - Operand quantileStreamResourceHandle) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "IsBoostedTreesQuantileStreamResourceInitialized"); - opBuilder.addInput(quantileStreamResourceHandle.asOutput()); - return new IsBoostedTreesQuantileStreamResourceInitialized(opBuilder.build()); - } - - /** - * Gets isInitialized. - * bool; True if the resource is initialized, False otherwise. - * @return isInitialized. - */ - public Output isInitialized() { - return isInitialized; - } - - @Override - public Output asOutput() { - return isInitialized; - } - - @OpInputsMetadata( - outputsClass = IsBoostedTreesQuantileStreamResourceInitialized.class - ) - public static class Inputs extends RawOpInputs { - /** - * resource; The reference to quantile stream resource handle. - */ - public final Operand quantileStreamResourceHandle; - - public Inputs(GraphOperation op) { - super(new IsBoostedTreesQuantileStreamResourceInitialized(op), op, Arrays.asList()); - int inputIndex = 0; - quantileStreamResourceHandle = (Operand) op.input(inputIndex++); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustContrast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustContrast.java index 0a6a141c036..123c74afd50 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustContrast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustContrast.java @@ -43,8 +43,6 @@ *

      For each channel, the Op first computes the mean of the image pixels in the * channel and then adjusts each component of each pixel to * {@code (x - mean) * contrast_factor + mean}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = AdjustContrast.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustHue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustHue.java index 45fe50175c4..b0001085638 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustHue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustHue.java @@ -41,8 +41,6 @@ *

      The input image is considered in the RGB colorspace. Conceptually, the RGB * colors are first mapped into HSV. A delta is then applied all the hue values, * and then remapped back to RGB colorspace. - * - * @param data type for {@code output} output */ @OpMetadata( opType = AdjustHue.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustSaturation.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustSaturation.java index a7fea42d8fb..5f0c063dc1d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustSaturation.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustSaturation.java @@ -41,8 +41,6 @@ *

      The input image is considered in the RGB colorspace. Conceptually, the RGB * colors are first mapped into HSV. A scale is then applied all the saturation * values, and then remapped back to RGB colorspace. - * - * @param data type for {@code output} output */ @OpMetadata( opType = AdjustSaturation.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CombinedNonMaxSuppression.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CombinedNonMaxSuppression.java index 3f0811e69cc..d3dff6d2a2a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CombinedNonMaxSuppression.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CombinedNonMaxSuppression.java @@ -278,10 +278,10 @@ public static class Inputs extends RawOpInputs { /** * If false, the output nmsed boxes, scores and classes - * are padded/clipped to `max_total_size`. If true, the + * are padded/clipped to {@code max_total_size}. If true, the * output nmsed boxes, scores and classes are padded to be of length - * `max_size_per_class`*`num_classes`, unless it exceeds `max_total_size` in - * which case it is clipped to `max_total_size`. Defaults to false. + * {@code max_size_per_class}*{@code num_classes}, unless it exceeds {@code max_total_size} in + * which case it is clipped to {@code max_total_size}. Defaults to false. 
*/ public final boolean padPerClass; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResize.java index 371015feb31..68289e7cdb3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResize.java @@ -239,7 +239,7 @@ public static class Inputs extends RawOpInputs { /** * A string specifying the sampling method for resizing. It can be either - * `"bilinear"` or `"nearest"` and default to `"bilinear"`. Currently two sampling + * {@code "bilinear"} or {@code "nearest"} and default to {@code "bilinear"}. Currently two sampling * methods are supported: Bilinear and Nearest Neighbor. */ public final String method; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResizeGradImage.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResizeGradImage.java index 59e98a3252d..e639b0f2cb7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResizeGradImage.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResizeGradImage.java @@ -38,8 +38,6 @@ /** * Computes the gradient of the crop_and_resize op wrt the input image tensor. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = CropAndResizeGradImage.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeAndCropJpeg.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeAndCropJpeg.java index dbc4a9e33b3..9cf114193b3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeAndCropJpeg.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeAndCropJpeg.java @@ -332,9 +332,9 @@ public static class Inputs extends RawOpInputs { /** * string specifying a hint about the algorithm used for - * decompression. Defaults to "" which maps to a system-specific - * default. Currently valid values are ["INTEGER_FAST", - * "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal + * decompression. Defaults to "" which maps to a system-specific + * default. Currently valid values are ["INTEGER_FAST", + * "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal * jpeg library changes to a version that does not have that specific * option.) */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeImage.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeImage.java index ae91e89973a..a5c7ee7845e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeImage.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeImage.java @@ -53,8 +53,6 @@ * unoccupied areas (in the first frame) with zeros (black). For frames after the * first frame that does not occupy the entire canvas, it uses the previous * frame to fill the unoccupied areas. 
- * - * @param data type for {@code image} output */ @OpMetadata( opType = DecodeImage.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeJpeg.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeJpeg.java index a6c1f60fc24..4deb6ce61e7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeJpeg.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeJpeg.java @@ -323,9 +323,9 @@ public static class Inputs extends RawOpInputs { /** * string specifying a hint about the algorithm used for - * decompression. Defaults to "" which maps to a system-specific - * default. Currently valid values are ["INTEGER_FAST", - * "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal + * decompression. Defaults to "" which maps to a system-specific + * default. Currently valid values are ["INTEGER_FAST", + * "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal * jpeg library changes to a version that does not have that specific * option.) */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodePng.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodePng.java index db44c3b3146..dd6384caf7c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodePng.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodePng.java @@ -51,8 +51,6 @@ * of color channels. *

      This op also supports decoding JPEGs and non-animated GIFs since the interface * is the same, though it is cleaner to use {@code tf.io.decode_image}. - * - * @param data type for {@code image} output */ @OpMetadata( opType = DecodePng.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DrawBoundingBoxes.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DrawBoundingBoxes.java index 8033cecb4c9..56c64a5e50c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DrawBoundingBoxes.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DrawBoundingBoxes.java @@ -45,8 +45,6 @@ * box is {@code [0.1, 0.2, 0.5, 0.9]}, the upper-left and bottom-right coordinates of * the bounding box will be {@code (40, 10)} to {@code (100, 50)} (in (x,y) coordinates). *

      Parts of the bounding box may fall outside the image. - * - * @param data type for {@code output} output */ @OpMetadata( opType = DrawBoundingBoxes.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodeJpeg.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodeJpeg.java index 6f3468d21d0..7ff1e224e6a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodeJpeg.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodeJpeg.java @@ -387,8 +387,8 @@ public static class Inputs extends RawOpInputs { public final boolean chromaDownsampling; /** - * Unit used to specify `x_density` and `y_density`: - * pixels per inch (`'in'`) or centimeter (`'cm'`). + * Unit used to specify {@code x_density} and {@code y_density}: + * pixels per inch ({@code 'in'}) or centimeter ({@code 'cm'}). */ public final String densityUnit; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractGlimpse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractGlimpse.java index f86cf256d0c..44daff67647 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractGlimpse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractGlimpse.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -58,6 +59,9 @@ opType = ExtractGlimpse.OP_NAME, inputsClass = ExtractGlimpse.Inputs.class ) +@Operator( + group = "image" +) public final class ExtractGlimpse extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -281,9 +285,9 @@ public static class 
Inputs extends RawOpInputs { public final boolean uniformNoise; /** - * indicates if the noise should `uniform`, `gaussian`, or - * `zero`. The default is `uniform` which means the noise type - * will be decided by `uniform_noise`. + * indicates if the noise should {@code uniform}, {@code gaussian}, or + * {@code zero}. The default is {@code uniform} which means the noise type + * will be decided by {@code uniform_noise}. */ public final String noise; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractImagePatches.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractImagePatches.java index 375abcd8371..54395a44acc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractImagePatches.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractImagePatches.java @@ -36,8 +36,6 @@ /** * Extract {@code patches} from {@code images} and put them in the "depth" output dimension. - * - * @param data type for {@code patches} output */ @OpMetadata( opType = ExtractImagePatches.OP_NAME, @@ -130,23 +128,23 @@ public static class Inputs extends RawOpInputs images; /** - * The size of the sliding window for each dimension of `images`. + * The size of the sliding window for each dimension of {@code images}. */ public final long[] ksizes; /** * How far the centers of two consecutive patches are in - * the images. Must be: `[1, stride_rows, stride_cols, 1]`. + * the images. Must be: {@code [1, stride_rows, stride_cols, 1]}. */ public final long[] strides; /** - * Must be: `[1, rate_rows, rate_cols, 1]`. This is the + * Must be: {@code [1, rate_rows, rate_cols, 1]}. This is the * input stride, specifying how far two consecutive patch samples are in the * input. Equivalent to extracting patches with - * `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by - * subsampling them spatially by a factor of `rates`. 
This is equivalent to - * `rate` in dilated (a.k.a. Atrous) convolutions. + * {@code patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)}, followed by + * subsampling them spatially by a factor of {@code rates}. This is equivalent to + * {@code rate} in dilated (a.k.a. Atrous) convolutions. */ public final long[] rates; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractJpegShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractJpegShape.java index 368fe5cfd02..4ca887e7e72 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractJpegShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractJpegShape.java @@ -39,8 +39,6 @@ /** * Extract the shape information of a JPEG-encoded image. * This op only parses the image header, so it is much faster than DecodeJpeg. - * - * @param data type for {@code image_shape} output */ @OpMetadata( opType = ExtractJpegShape.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/GenerateBoundingBoxProposals.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/GenerateBoundingBoxProposals.java index fddbed0f2a5..9b8b8c1d7dd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/GenerateBoundingBoxProposals.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/GenerateBoundingBoxProposals.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -52,6 +53,9 @@ opType = GenerateBoundingBoxProposals.OP_NAME, inputsClass = GenerateBoundingBoxProposals.Inputs.class ) +@Operator( + group = "image" +) public final class 
GenerateBoundingBoxProposals extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/HsvToRgb.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/HsvToRgb.java index 6e32b95ca11..abd3d53d884 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/HsvToRgb.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/HsvToRgb.java @@ -39,8 +39,6 @@ * value of the pixels. The output is only well defined if the value in {@code images} * are in {@code [0,1]}. *

      See {@code rgb_to_hsv} for a description of the HSV encoding. - * - * @param data type for {@code output} output */ @OpMetadata( opType = HsvToRgb.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV2.java index 715813065f8..cef590ad519 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV2.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -41,13 +42,14 @@ * {@code (x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)}, where * {@code k = c0 x + c1 y + 1}. If the transformed point lays outside of the input * image, the output pixel is set to 0. 
- * - * @param data type for {@code transformed_images} output */ @OpMetadata( opType = ImageProjectiveTransformV2.OP_NAME, inputsClass = ImageProjectiveTransformV2.Inputs.class ) +@Operator( + group = "image" +) public final class ImageProjectiveTransformV2 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -170,12 +172,12 @@ public static class Inputs extends RawOpInputs data type for {@code transformed_images} output */ @OpMetadata( opType = ImageProjectiveTransformV3.OP_NAME, inputsClass = ImageProjectiveTransformV3.Inputs.class ) +@Operator( + group = "image" +) public final class ImageProjectiveTransformV3 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -177,12 +179,12 @@ public static class Inputs extends RawOpInputs data type for {@code selected_scores} output */ @OpMetadata( opType = NonMaxSuppression.OP_NAME, @@ -242,8 +240,8 @@ public static class Inputs extends RawOpInputs data type for {@code resized_images} output */ @OpMetadata( opType = QuantizedResizeBilinear.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RandomCrop.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RandomCrop.java index 966401d271c..063b7b8f529 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RandomCrop.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RandomCrop.java @@ -41,8 +41,6 @@ *

      This Op picks a random location in {@code image} and crops a {@code height} by {@code width} * rectangle from that location. The random location is picked so the cropped * area will fit inside the original image. - * - * @param data type for {@code output} output */ @OpMetadata( opType = RandomCrop.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBicubicGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBicubicGrad.java index 2da91473929..c04fe6d13e0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBicubicGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBicubicGrad.java @@ -29,19 +29,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; /** * Computes the gradient of bicubic interpolation. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = ResizeBicubicGrad.OP_NAME, inputsClass = ResizeBicubicGrad.Inputs.class ) +@Operator( + group = "image" +) public final class ResizeBicubicGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBilinearGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBilinearGrad.java index 9e8d82364a6..166d6b46de6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBilinearGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBilinearGrad.java @@ -29,19 +29,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; /** * Computes the gradient of bilinear interpolation. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = ResizeBilinearGrad.OP_NAME, inputsClass = ResizeBilinearGrad.Inputs.class ) +@Operator( + group = "image" +) public final class ResizeBilinearGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighbor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighbor.java index 1fc40174782..355ac564de1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighbor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighbor.java @@ -36,8 +36,6 @@ /** * Resize {@code images} to {@code size} using nearest neighbor interpolation. - * - * @param data type for {@code resized_images} output */ @OpMetadata( opType = ResizeNearestNeighbor.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighborGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighborGrad.java index 6b5bbcd2c21..36df9e12b2d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighborGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighborGrad.java @@ -29,19 +29,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; /** * Computes the gradient of nearest neighbor interpolation. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = ResizeNearestNeighborGrad.OP_NAME, inputsClass = ResizeNearestNeighborGrad.Inputs.class ) +@Operator( + group = "image" +) public final class ResizeNearestNeighborGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RgbToHsv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RgbToHsv.java index 3709f0bd4f7..be3c84d9b66 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RgbToHsv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RgbToHsv.java @@ -56,8 +56,6 @@ * * * - * - * @param data type for {@code output} output */ @OpMetadata( opType = RgbToHsv.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/SampleDistortedBoundingBox.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/SampleDistortedBoundingBox.java index f19ccb004e2..a7378278309 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/SampleDistortedBoundingBox.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/SampleDistortedBoundingBox.java @@ -70,8 +70,6 @@ * {@code use_image_if_no_bounding_boxes = true} will assume there is a single implicit * bounding box covering the whole image. If {@code use_image_if_no_bounding_boxes} is * false and no bounding boxes are supplied, an error is raised. 
- * - * @param data type for {@code begin} output */ @OpMetadata( opType = SampleDistortedBoundingBox.OP_NAME, @@ -425,8 +423,8 @@ public static class Inputs extends RawOpInputs extends RawOpInputs data type for {@code output} output */ @OpMetadata( opType = ScaleAndTranslateGrad.OP_NAME, inputsClass = ScaleAndTranslateGrad.Inputs.class ) +@Operator( + group = "image" +) public final class ScaleAndTranslateGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/StatelessSampleDistortedBoundingBox.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/StatelessSampleDistortedBoundingBox.java index 49955247c14..31c4de5388d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/StatelessSampleDistortedBoundingBox.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/StatelessSampleDistortedBoundingBox.java @@ -95,8 +95,6 @@ * {@code use_image_if_no_bounding_boxes = true} will assume there is a single implicit * bounding box covering the whole image. If {@code use_image_if_no_bounding_boxes} is * false and no bounding boxes are supplied, an error is raised. - * - * @param data type for {@code begin} output */ @OpMetadata( opType = StatelessSampleDistortedBoundingBox.OP_NAME, @@ -421,7 +419,7 @@ public static class Inputs extends RawOpInputs { /** * A scalar containing either (i) the empty string (no - * compression), (ii) "ZLIB", or (iii) "GZIP". + * compression), (ii) "ZLIB", or (iii) "GZIP". 
*/ public final String compressionType; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodePaddedRaw.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodePaddedRaw.java index 90a8b357b66..07eac6679d4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodePaddedRaw.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodePaddedRaw.java @@ -38,8 +38,6 @@ /** * Reinterpret the bytes of a string as a vector of numbers. - * - * @param data type for {@code output} output */ @OpMetadata( opType = DecodePaddedRaw.OP_NAME, @@ -164,8 +162,8 @@ public static class Inputs extends RawOpInputs> { public final DataType outType; /** - * Whether the input `input_bytes` is in little-endian order. Ignored for - * `out_type` values that are stored in a single byte, like `uint8` + * Whether the input {@code input_bytes} is in little-endian order. Ignored for + * {@code out_type} values that are stored in a single byte, like {@code uint8} */ public final boolean littleEndian; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeRaw.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeRaw.java index f1405659a91..217c843796f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeRaw.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeRaw.java @@ -37,8 +37,6 @@ /** * Reinterpret the bytes of a string as a vector of numbers. - * - * @param data type for {@code output} output */ @OpMetadata( opType = DecodeRaw.OP_NAME, @@ -155,9 +153,9 @@ public static class Inputs extends RawOpInputs> { public final DataType outType; /** - * Whether the input `bytes` are in little-endian order. - * Ignored for `out_type` values that are stored in a single byte like - * `uint8`. + * Whether the input {@code bytes} are in little-endian order. 
+ * Ignored for {@code out_type} values that are stored in a single byte like + * {@code uint8}. */ public final boolean littleEndian; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DeserializeManySparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DeserializeManySparse.java index d86def49dd2..9704bd78d15 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DeserializeManySparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DeserializeManySparse.java @@ -77,8 +77,6 @@ * values = [1, 2, 3, 4, 5] * shape = [2 50] * - * - * @param data type for {@code sparse_values} output */ @OpMetadata( opType = DeserializeManySparse.OP_NAME, @@ -166,7 +164,7 @@ public static class Inputs extends RawOpInputs> { public final Operand serializedSparse; /** - * The `dtype` of the serialized `SparseTensor` objects. + * The {@code dtype} of the serialized {@code SparseTensor} objects. 
*/ public final DataType dtype; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DisableCopyOnRead.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DisableCopyOnRead.java index 0e64df025ff..b3117dbe119 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DisableCopyOnRead.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DisableCopyOnRead.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** @@ -38,6 +39,9 @@ opType = DisableCopyOnRead.OP_NAME, inputsClass = DisableCopyOnRead.Inputs.class ) +@Operator( + group = "io" +) public final class DisableCopyOnRead extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalOr.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/FakeQueue.java similarity index 53% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalOr.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/FakeQueue.java index 85ca9fc3508..9000deaef1a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalOr.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/FakeQueue.java @@ -15,7 +15,7 @@ // This class has been generated, DO NOT EDIT! 
-package org.tensorflow.op.risc; +package org.tensorflow.op.io; import java.util.Arrays; import org.tensorflow.GraphOperation; @@ -29,80 +29,77 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TBool; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.TString; +import org.tensorflow.types.family.TType; /** - * The RiscLogicalOr operation + * Deprecated. Do not use. */ @OpMetadata( - opType = RiscLogicalOr.OP_NAME, - inputsClass = RiscLogicalOr.Inputs.class + opType = FakeQueue.OP_NAME, + inputsClass = FakeQueue.Inputs.class ) -public final class RiscLogicalOr extends RawOp implements Operand { +@Operator( + group = "io" +) +public final class FakeQueue extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "RiscLogicalOr"; + public static final String OP_NAME = "FakeQueue"; - private Output z; + private Output handle; - public RiscLogicalOr(Operation operation) { + public FakeQueue(Operation operation) { super(operation, OP_NAME); int outputIdx = 0; - z = operation.output(outputIdx++); + handle = operation.output(outputIdx++); } /** - * Factory method to create a class wrapping a new RiscLogicalOr operation. + * Factory method to create a class wrapping a new FakeQueue operation. 
* * @param scope current scope - * @param x The x value - * @param y The y value - * @return a new instance of RiscLogicalOr + * @param resource The resource value + * @return a new instance of FakeQueue */ @Endpoint( describeByClass = true ) - public static RiscLogicalOr create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscLogicalOr"); - opBuilder.addInput(x.asOutput()); - opBuilder.addInput(y.asOutput()); - return new RiscLogicalOr(opBuilder.build()); + public static FakeQueue create(Scope scope, Operand resource) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "FakeQueue"); + opBuilder.addInput(resource.asOutput()); + return new FakeQueue(opBuilder.build()); } /** - * Gets z. + * Gets handle. * - * @return z. + * @return handle. */ - public Output z() { - return z; + public Output handle() { + return handle; } @Override - public Output asOutput() { - return z; + public Output asOutput() { + return handle; } @OpInputsMetadata( - outputsClass = RiscLogicalOr.class + outputsClass = FakeQueue.class ) - public static class Inputs extends RawOpInputs { - /** - * The x input - */ - public final Operand x; - + public static class Inputs extends RawOpInputs { /** - * The y input + * The resource input */ - public final Operand y; + public final Operand resource; public Inputs(GraphOperation op) { - super(new RiscLogicalOr(op), op, Arrays.asList()); + super(new FakeQueue(op), op, Arrays.asList()); int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - y = (Operand) op.input(inputIndex++); + resource = (Operand) op.input(inputIndex++); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseExample.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseExample.java index 6104c068643..624d698e828 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseExample.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseExample.java @@ -286,7 +286,7 @@ public static class Inputs extends RawOpInputs { public final DataType[] Tdense; /** - * A list of `num_sparse` types; the data types of data in each Feature + * A list of {@code num_sparse} types; the data types of data in each Feature * given in sparse_keys. * Currently the ParseExample supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). @@ -294,23 +294,23 @@ public static class Inputs extends RawOpInputs { public final DataType[] sparseTypes; /** - * A list of `num_ragged` types; the data types of data in each Feature - * given in ragged_keys (where `num_ragged = sparse_keys.size()`). + * A list of {@code num_ragged} types; the data types of data in each Feature + * given in ragged_keys (where {@code num_ragged = sparse_keys.size()}). * Currently the ParseExample supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). */ public final DataType[] raggedValueTypes; /** - * A list of `num_ragged` types; the data types of row_splits in each Feature - * given in ragged_keys (where `num_ragged = sparse_keys.size()`). + * A list of {@code num_ragged} types; the data types of row_splits in each Feature + * given in ragged_keys (where {@code num_ragged = sparse_keys.size()}). * May be DT_INT32 or DT_INT64. */ public final DataType[] raggedSplitTypes; /** - * A list of `num_dense` shapes; the shapes of data in each Feature - * given in dense_keys (where `num_dense = dense_keys.size()`). + * A list of {@code num_dense} shapes; the shapes of data in each Feature + * given in dense_keys (where {@code num_dense = dense_keys.size()}). * The number of elements in the Feature corresponding to dense_key[j] * must always equal dense_shapes[j].NumEntries(). 
* If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSingleExample.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSingleExample.java index cf9588ddf89..ace0959c7ad 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSingleExample.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSingleExample.java @@ -205,7 +205,7 @@ public static class Inputs extends RawOpInputs { public final Iterable> denseDefaults; /** - * A list of `num_sparse` strings. + * A list of {@code num_sparse} strings. * The keys expected in the Examples' features associated with sparse values. */ public final String[] sparseKeys; @@ -217,7 +217,7 @@ public static class Inputs extends RawOpInputs { public final String[] denseKeys; /** - * A list of `num_sparse` types; the data types of data in each + * A list of {@code num_sparse} types; the data types of data in each * Feature given in sparse_keys. * Currently the ParseSingleExample op supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). @@ -226,7 +226,7 @@ public static class Inputs extends RawOpInputs { /** * The data types of data in each Feature given in dense_keys. - * The length of this list must match the length of `dense_keys`. + * The length of this list must match the length of {@code dense_keys}. * Currently the ParseSingleExample op supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). */ @@ -234,7 +234,7 @@ public static class Inputs extends RawOpInputs { /** * The shapes of data in each Feature given in dense_keys. - * The length of this list must match the length of `dense_keys`. The + * The length of this list must match the length of {@code dense_keys}. The * number of elements in the Feature corresponding to dense_key[j] must * always equal dense_shapes[j].NumEntries(). 
If dense_shapes[j] == * (D0, D1, ..., DN) then the shape of output Tensor dense_values[j] diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseTensor.java index 66a64b13c0b..039ff1546f0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseTensor.java @@ -37,8 +37,6 @@ /** * Transforms a serialized tensorflow.TensorProto proto into a Tensor. - * - * @param data type for {@code output} output */ @OpMetadata( opType = ParseTensor.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeManySparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeManySparse.java index 253662f2cec..70f9327d112 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeManySparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeManySparse.java @@ -44,8 +44,6 @@ * {@code SparseTensor} objects going into each row of {@code serialized_sparse} will have * rank {@code R-1}. *

      The minibatch size {@code N} is extracted from {@code sparse_shape[0]}. - * - * @param data type for {@code serialized_sparse} output */ @OpMetadata( opType = SerializeManySparse.OP_NAME, @@ -150,8 +148,8 @@ public static class Inputs extends RawOpInputs> { public final DataType T; /** - * The `dtype` to use for serialization; the supported types are `string` - * (default) and `variant`. + * The {@code dtype} to use for serialization; the supported types are {@code string} + * (default) and {@code variant}. */ public final DataType outType; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeSparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeSparse.java index 500b0e055d5..b0c2b5935bf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeSparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeSparse.java @@ -38,8 +38,6 @@ /** * Serialize a {@code SparseTensor} into a {@code [3]} {@code Tensor} object. - * - * @param data type for {@code serialized_sparse} output */ @OpMetadata( opType = SerializeSparse.OP_NAME, @@ -144,8 +142,8 @@ public static class Inputs extends RawOpInputs> { public final DataType T; /** - * The `dtype` to use for serialization; the supported types are `string` - * (default) and `variant`. + * The {@code dtype} to use for serialization; the supported types are {@code string} + * (default) and {@code variant}. 
*/ public final DataType outType; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandPart.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandPart.java index 34f179ed2b0..a521e77b040 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandPart.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandPart.java @@ -65,8 +65,6 @@ * tf.linalg.band_part(input, -1, 0) ==> Lower triangular part. * tf.linalg.band_part(input, 0, 0) ==> Diagonal. * - * - * @param data type for {@code band} output */ @OpMetadata( opType = BandPart.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandedTriangularSolve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandedTriangularSolve.java index a5fba205939..532d4fe148b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandedTriangularSolve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandedTriangularSolve.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; /** * The BandedTriangularSolve operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = BandedTriangularSolve.OP_NAME, inputsClass = BandedTriangularSolve.Inputs.class ) +@Operator( + group = "linalg" +) public final class BandedTriangularSolve extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholesky.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholesky.java index 0016839b211..b43cf15b48e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholesky.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholesky.java @@ -35,8 +35,6 @@ /** * The BatchCholesky operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = BatchCholesky.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholeskyGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholeskyGrad.java index d9ce332f7e2..5e917e740b8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholeskyGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholeskyGrad.java @@ -35,8 +35,6 @@ /** * The BatchCholeskyGrad operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = BatchCholeskyGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixBandPart.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixBandPart.java index 55e8a0d6a75..99cb57ff97f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixBandPart.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixBandPart.java @@ -36,8 +36,6 @@ /** * The BatchMatrixBandPart operation - * - * @param data type for {@code band} output */ @OpMetadata( opType = BatchMatrixBandPart.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDeterminant.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDeterminant.java index c50a706e073..7f1bd32a749 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDeterminant.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDeterminant.java @@ -35,8 +35,6 @@ /** * The BatchMatrixDeterminant operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = BatchMatrixDeterminant.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiag.java index bba3cae6292..edc731b1f36 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiag.java @@ -35,8 +35,6 @@ /** * The BatchMatrixDiag operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = BatchMatrixDiag.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiagPart.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiagPart.java index 63e7e0e3026..ac379b960aa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiagPart.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiagPart.java @@ -35,8 +35,6 @@ /** * The BatchMatrixDiagPart operation - * - * @param data type for {@code diagonal} output */ @OpMetadata( opType = BatchMatrixDiagPart.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixInverse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixInverse.java index 081dab67e8b..009deec3658 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixInverse.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixInverse.java @@ -35,8 +35,10 @@ /** * The BatchMatrixInverse operation - * - * @param data type for {@code output} output + * DEPRECATED: This operation is deprecated and will be removed in a future version. + * Use tf.linalg.inv instead. + *

      Computes the inverse of one or more square invertible matrices or their + * adjoints (conjugate transposes). */ @OpMetadata( opType = BatchMatrixInverse.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSetDiag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSetDiag.java index 67a97a485c0..eaea0c7db31 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSetDiag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSetDiag.java @@ -35,8 +35,6 @@ /** * The BatchMatrixSetDiag operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = BatchMatrixSetDiag.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolve.java index dc65bb1dce1..5b6749c53e4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolve.java @@ -35,8 +35,6 @@ /** * The BatchMatrixSolve operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = BatchMatrixSolve.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolveLs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolveLs.java index 801c5262946..7cb6714696f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolveLs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolveLs.java @@ -36,8 +36,6 @@ /** * The BatchMatrixSolveLs operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = 
BatchMatrixSolveLs.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixTriangularSolve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixTriangularSolve.java index ae63e405dd7..d7b326bae21 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixTriangularSolve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixTriangularSolve.java @@ -35,8 +35,6 @@ /** * The BatchMatrixTriangularSolve operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = BatchMatrixTriangularSolve.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSelfAdjointEig.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSelfAdjointEig.java index 1d6588ac785..637625bd5db 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSelfAdjointEig.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSelfAdjointEig.java @@ -35,8 +35,6 @@ /** * The BatchSelfAdjointEigV2 operation - * - * @param data type for {@code e} output */ @OpMetadata( opType = BatchSelfAdjointEig.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSvd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSvd.java index cf723ceeedc..a2411601e63 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSvd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSvd.java @@ -35,8 +35,6 @@ /** * The BatchSvd operation - * - * @param data type for {@code s} output */ @OpMetadata( opType = BatchSvd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cholesky.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cholesky.java index 294a41889da..ef6d0ca1a3d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cholesky.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cholesky.java @@ -45,8 +45,6 @@ *

      Note: The gradient computation on GPU is faster for large matrices but * not for large batch dimensions when the submatrices are small. In this * case it might be faster to use the CPU. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Cholesky.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/CholeskyGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/CholeskyGrad.java index f2529b61318..ce7975bbb29 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/CholeskyGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/CholeskyGrad.java @@ -37,8 +37,6 @@ * Computes the reverse mode backpropagated gradient of the Cholesky algorithm. * For an explanation see "Differentiation of the Cholesky algorithm" by * Iain Murray http://arxiv.org/abs/1602.07527. - * - * @param data type for {@code output} output */ @OpMetadata( opType = CholeskyGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/ConjugateTranspose.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/ConjugateTranspose.java index e14f2e71ef9..561e4fecbf1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/ConjugateTranspose.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/ConjugateTranspose.java @@ -39,8 +39,6 @@ * The output {@code y} has the same rank as {@code x}. 
The shapes of {@code x} and {@code y} satisfy: * {@code y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]} * {@code y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])} - * - * @param data type for {@code y} output */ @OpMetadata( opType = ConjugateTranspose.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cross.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cross.java index 68ee2a65439..5c942c1e41b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cross.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cross.java @@ -38,8 +38,6 @@ * {@code a} and {@code b} must be the same shape; they can either be simple 3-element vectors, * or any shape where the innermost dimension is 3. In the latter case, each pair * of corresponding 3-element vectors is cross-multiplied independently. - * - * @param data type for {@code product} output */ @OpMetadata( opType = Cross.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Det.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Det.java index 62aafcde736..d63118c9f73 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Det.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Det.java @@ -38,8 +38,6 @@ * The input is a tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions * form square matrices. The output is a tensor containing the determinants * for all input submatrices {@code [..., :, :]}. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = Det.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Eig.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Eig.java index fb820e00ddf..3276bbb78fe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Eig.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Eig.java @@ -46,8 +46,6 @@ * e, v = eig(a) * e = eig(a, compute_v=False) * - * - * @param data type for {@code e} output */ @OpMetadata( opType = Eig.OP_NAME, @@ -162,7 +160,7 @@ public static class Inputs extends RawOpInputs> { public final Operand input; /** - * If `True` then eigenvectors will be computed and returned in `v`. + * If {@code True} then eigenvectors will be computed and returned in {@code v}. * Otherwise, only the eigenvalues will be computed. */ public final boolean computeV; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Einsum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Einsum.java index 51d3eeb3fa6..5b57bad8aa4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Einsum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Einsum.java @@ -99,8 +99,6 @@ * supported by {@code numpy.einsum}. *
      {@literal @}end_compatibility *

    - * - * @param data type for {@code output} output */ @OpMetadata( opType = Einsum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/EuclideanNorm.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/EuclideanNorm.java index ab6f58f4885..f544381e1a7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/EuclideanNorm.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/EuclideanNorm.java @@ -40,8 +40,6 @@ * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. - * - * @param data type for {@code output} output */ @OpMetadata( opType = EuclideanNorm.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Inv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Inv.java index 6b02bc2a059..93338f1df07 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Inv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Inv.java @@ -42,8 +42,6 @@ *

    If a matrix is not invertible there is no guarantee what the op does. It * may detect the condition and raise an exception or it may simply return a * garbage result. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Inv.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/LogMatrixDeterminant.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/LogMatrixDeterminant.java index 298e01306db..a144ac2d31c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/LogMatrixDeterminant.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/LogMatrixDeterminant.java @@ -43,8 +43,6 @@ * The {@code log_abs_determinant} is computed as {@code det(P)*sum(log(diag(LU)))} where {@code LU} * is the {@code LU} decomposition of the input and {@code P} is the corresponding * permutation matrix. - * - * @param data type for {@code sign} output */ @OpMetadata( opType = LogMatrixDeterminant.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Lu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Lu.java index 480ed23e696..9063fab1875 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Lu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Lu.java @@ -51,10 +51,6 @@ *

    P represents a permutation matrix encoded as a list of indices each between {@code 0} * and {@code M-1}, inclusive. If P_mat denotes the permutation matrix corresponding to * P, then the L, U and P satisfies P_mat * input = L * U. - * - * @param data type for {@code lu} output - * - * @param data type for {@code p} output */ @OpMetadata( opType = Lu.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatMul.java index 90a74daac9f..c817cbc9037 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatMul.java @@ -41,8 +41,6 @@ * true). *

    Note: The default kernel implementation for MatMul on GPUs uses * cublas. - * - * @param data type for {@code product} output */ @OpMetadata( opType = MatMul.OP_NAME, @@ -91,6 +89,12 @@ public static MatMul create(Scope scope, Operand a, Oper if (opts.transposeB != null) { opBuilder.setAttr("transpose_b", opts.transposeB); } + if (opts.gradA != null) { + opBuilder.setAttr("grad_a", opts.gradA); + } + if (opts.gradB != null) { + opBuilder.setAttr("grad_b", opts.gradB); + } } } return new MatMul<>(opBuilder.build()); @@ -116,6 +120,26 @@ public static Options transposeB(Boolean transposeB) { return new Options().transposeB(transposeB); } + /** + * Sets the gradA option. + * + * @param gradA the gradA option + * @return this Options instance. + */ + public static Options gradA(Boolean gradA) { + return new Options().gradA(gradA); + } + + /** + * Sets the gradB option. + * + * @param gradB the gradB option + * @return this Options instance. + */ + public static Options gradB(Boolean gradB) { + return new Options().gradB(gradB); + } + /** * Gets product. * @@ -138,6 +162,10 @@ public static class Options { private Boolean transposeB; + private Boolean gradA; + + private Boolean gradB; + private Options() { } @@ -162,6 +190,28 @@ public Options transposeB(Boolean transposeB) { this.transposeB = transposeB; return this; } + + /** + * Sets the gradA option. + * + * @param gradA the gradA option + * @return this Options instance. + */ + public Options gradA(Boolean gradA) { + this.gradA = gradA; + return this; + } + + /** + * Sets the gradB option. + * + * @param gradB the gradB option + * @return this Options instance. + */ + public Options gradB(Boolean gradB) { + this.gradB = gradB; + return this; + } } @OpInputsMetadata( @@ -179,12 +229,12 @@ public static class Inputs extends RawOpInputs> { public final Operand b; /** - * If true, "a" is transposed before multiplication. + * If true, "a" is transposed before multiplication. 
*/ public final boolean transposeA; /** - * If true, "b" is transposed before multiplication. + * If true, "b" is transposed before multiplication. */ public final boolean transposeB; @@ -193,14 +243,26 @@ public static class Inputs extends RawOpInputs> { */ public final DataType T; + /** + * The gradA attribute + */ + public final boolean gradA; + + /** + * The gradB attribute + */ + public final boolean gradB; + public Inputs(GraphOperation op) { - super(new MatMul<>(op), op, Arrays.asList("transpose_a", "transpose_b", "T")); + super(new MatMul<>(op), op, Arrays.asList("transpose_a", "transpose_b", "T", "grad_a", "grad_b")); int inputIndex = 0; a = (Operand) op.input(inputIndex++); b = (Operand) op.input(inputIndex++); transposeA = op.attributes().getAttrBool("transpose_a"); transposeB = op.attributes().getAttrBool("transpose_b"); T = op.attributes().getAttrType("T"); + gradA = op.attributes().getAttrBool("grad_a"); + gradB = op.attributes().getAttrBool("grad_b"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiag.java index 0a292c9d1b1..5241708f71a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiag.java @@ -116,8 +116,6 @@ * [1, 9], * [9, 2]] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = MatrixDiag.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPart.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPart.java index 084c946193e..a818b134cbe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPart.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPart.java @@ -96,8 
+96,6 @@ * [3, 4, 9], * [4, 3, 8]]] * - * - * @param data type for {@code diagonal} output */ @OpMetadata( opType = MatrixDiagPart.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPartV3.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPartV3.java index 931a277ad1a..c6ecab46bab 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPartV3.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPartV3.java @@ -126,8 +126,6 @@ * [4, 3, 8]]] * * - * - * @param data type for {@code diagonal} output */ @OpMetadata( opType = MatrixDiagPartV3.OP_NAME, @@ -270,12 +268,12 @@ public static class Inputs extends RawOpInputs - * - * @param data type for {@code output} output */ @OpMetadata( opType = MatrixDiagV3.OP_NAME, @@ -311,12 +309,12 @@ public static class Inputs extends RawOpInputs> public final DataType T; /** - * Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is + * Some diagonals are shorter than {@code max_diag_len} and need to be padded. {@code align} is * a string specifying how superdiagonals and subdiagonals should be aligned, - * respectively. There are four possible alignments: "RIGHT_LEFT" (default), - * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals + * respectively. There are four possible alignments: "RIGHT_LEFT" (default), + * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals * to the right (left-pads the row) and subdiagonals to the left (right-pads the - * row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is + * row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is * the opposite alignment. 
*/ public final String align; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFft.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixExponential.java similarity index 65% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFft.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixExponential.java index c8f1fe664c6..9332cd02b3e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFft.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixExponential.java @@ -15,7 +15,7 @@ // This class has been generated, DO NOT EDIT! -package org.tensorflow.op.risc; +package org.tensorflow.op.linalg; import java.util.Arrays; import org.tensorflow.GraphOperation; @@ -29,47 +29,49 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; /** - * The RiscFft operation - * - * @param data type for {@code output} output + * Deprecated, use python implementation tf.linalg.matrix_exponential. 
*/ @OpMetadata( - opType = RiscFft.OP_NAME, - inputsClass = RiscFft.Inputs.class + opType = MatrixExponential.OP_NAME, + inputsClass = MatrixExponential.Inputs.class ) -public final class RiscFft extends RawOp implements Operand { +@Operator( + group = "linalg" +) +public final class MatrixExponential extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "RiscFft"; + public static final String OP_NAME = "MatrixExponential"; private Output output; - public RiscFft(Operation operation) { + public MatrixExponential(Operation operation) { super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } /** - * Factory method to create a class wrapping a new RiscFft operation. + * Factory method to create a class wrapping a new MatrixExponential operation. * * @param scope current scope * @param input The input value - * @param data type for {@code RiscFft} output and operands - * @return a new instance of RiscFft + * @param data type for {@code MatrixExponential} output and operands + * @return a new instance of MatrixExponential */ @Endpoint( describeByClass = true ) - public static RiscFft create(Scope scope, Operand input) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscFft"); + public static MatrixExponential create(Scope scope, Operand input) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "MatrixExponential"); opBuilder.addInput(input.asOutput()); - return new RiscFft<>(opBuilder.build()); + return new MatrixExponential<>(opBuilder.build()); } /** @@ -87,24 +89,24 @@ public Output asOutput() { } @OpInputsMetadata( - outputsClass = RiscFft.class + outputsClass = MatrixExponential.class ) - public static class Inputs extends RawOpInputs> { + public static class Inputs extends RawOpInputs> { /** * The input input */ public final Operand input; /** - * The Tcomplex attribute + * The T attribute */ - public final DataType Tcomplex; + 
public final DataType T; public Inputs(GraphOperation op) { - super(new RiscFft<>(op), op, Arrays.asList("Tcomplex")); + super(new MatrixExponential<>(op), op, Arrays.asList("T")); int inputIndex = 0; input = (Operand) op.input(inputIndex++); - Tcomplex = op.attributes().getAttrType("Tcomplex"); + T = op.attributes().getAttrType("T"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixLogarithm.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixLogarithm.java index aa349aa1792..f1529a1c264 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixLogarithm.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixLogarithm.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -45,13 +46,14 @@ *

    The input is a tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions * form square matrices. The output is a tensor of the same shape as the input * containing the exponential for all input submatrices {@code [..., :, :]}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = MatrixLogarithm.OP_NAME, inputsClass = MatrixLogarithm.Inputs.class ) +@Operator( + group = "linalg" +) public final class MatrixLogarithm extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSetDiag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSetDiag.java index 059ac9cbe72..1ec3a1444f5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSetDiag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSetDiag.java @@ -132,8 +132,6 @@ * [7, 4, 2, 4]]] * * - * - * @param data type for {@code output} output */ @OpMetadata( opType = MatrixSetDiag.OP_NAME, @@ -276,12 +274,12 @@ public static class Inputs extends RawOpInputs public final DataType T; /** - * Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is + * Some diagonals are shorter than {@code max_diag_len} and need to be padded. {@code align} is * a string specifying how superdiagonals and subdiagonals should be aligned, - * respectively. There are four possible alignments: "RIGHT_LEFT" (default), - * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals + * respectively. There are four possible alignments: "RIGHT_LEFT" (default), + * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals * to the right (left-pads the row) and subdiagonals to the left (right-pads the - * row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is + * row). 
It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is * the opposite alignment. */ public final String align; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSolveLs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSolveLs.java index 3b340034827..d0601c6ee57 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSolveLs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSolveLs.java @@ -66,8 +66,6 @@ * least-squares solution, even when \(A\) is rank deficient. This path is * typically 6-7 times slower than the fast path. If {@code fast} is {@code False} then * {@code l2_regularizer} is ignored. - * - * @param data type for {@code output} output */ @OpMetadata( opType = MatrixSolveLs.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Qr.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Qr.java index da577b51660..037f024d04b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Qr.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Qr.java @@ -47,8 +47,6 @@ * q, r = qr(a) * q_full, r_full = qr(a, full_matrices=True) * - * - * @param data type for {@code q} output */ @OpMetadata( opType = Qr.OP_NAME, @@ -165,8 +163,8 @@ public static class Inputs extends RawOpInputs> { public final Operand input; /** - * If true, compute full-sized `q` and `r`. If false - * (the default), compute only the leading `P` columns of `q`. + * If true, compute full-sized {@code q} and {@code r}. If false + * (the default), compute only the leading {@code P} columns of {@code q}. 
*/ public final boolean fullMatrices; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMul.java index 871e9bb5590..d3136668a39 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMul.java @@ -41,8 +41,6 @@ * {@code a} (after being transposed if {@code transpose_a} is non-zero) must match the * outer dimension of {@code b} (after being transposed if {@code transposed_b} is * non-zero). - * - * @param data type for {@code out} output */ @OpMetadata( opType = QuantizedMatMul.OP_NAME, @@ -249,12 +247,12 @@ public static class Inputs extends RawOpInputs> { public final DataType Toutput; /** - * If true, `a` is transposed before multiplication. + * If true, {@code a} is transposed before multiplication. */ public final boolean transposeA; /** - * If true, `b` is transposed before multiplication. + * If true, {@code b} is transposed before multiplication. 
*/ public final boolean transposeB; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBias.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBias.java index 7d0559a981b..0cc43361bf4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBias.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBias.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -42,13 +43,14 @@ * match the outer dimension of {@code b} (after being transposed if {@code transposed_b} is * non-zero). Then do broadcast add operation with bias values on the matrix * multiplication result. The bias size must match inner dimension of {@code b}. - * - * @param data type for {@code out} output */ @OpMetadata( opType = QuantizedMatMulWithBias.OP_NAME, inputsClass = QuantizedMatMulWithBias.Inputs.class ) +@Operator( + group = "linalg" +) public final class QuantizedMatMulWithBias extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -283,12 +285,12 @@ public static class Inputs extends RawOpInputs> { public final DataType Toutput; /** - * If true, `a` is transposed before multiplication. + * If true, {@code a} is transposed before multiplication. */ public final boolean transposeA; /** - * If true, `b` is transposed before multiplication. + * If true, {@code b} is transposed before multiplication. 
*/ public final boolean transposeB; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndRelu.java index c857d310fde..eee116597b9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndRelu.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -43,13 +44,14 @@ * non-zero). Then do broadcast add operation with bias values on the matrix * multiplication result. The bias size must match inner dimension of {@code b}. Then do * relu activation to get non-negative result. - * - * @param data type for {@code out} output */ @OpMetadata( opType = QuantizedMatMulWithBiasAndRelu.OP_NAME, inputsClass = QuantizedMatMulWithBiasAndRelu.Inputs.class ) +@Operator( + group = "linalg" +) public final class QuantizedMatMulWithBiasAndRelu extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -279,12 +281,12 @@ public static class Inputs extends RawOpInputs public final DataType Toutput; /** - * If true, `a` is transposed before multiplication. + * If true, {@code a} is transposed before multiplication. */ public final boolean transposeA; /** - * If true, `b` is transposed before multiplication. + * If true, {@code b} is transposed before multiplication. 
*/ public final boolean transposeB; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndReluAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndReluAndRequantize.java index 035a5d89982..82bdde439f1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndReluAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndReluAndRequantize.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -44,13 +45,14 @@ * multiplication result. The bias size must match inner dimension of {@code b}. Then do * relu activation to get non-negative result. Then do requantize operation to get * final uint8 result. - * - * @param data type for {@code out} output */ @OpMetadata( opType = QuantizedMatMulWithBiasAndReluAndRequantize.OP_NAME, inputsClass = QuantizedMatMulWithBiasAndReluAndRequantize.Inputs.class ) +@Operator( + group = "linalg" +) public final class QuantizedMatMulWithBiasAndReluAndRequantize extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -300,12 +302,12 @@ public static class Inputs extends RawOpInputs - * - * @param data type for {@code e} output */ @OpMetadata( opType = SelfAdjointEig.OP_NAME, @@ -159,7 +157,7 @@ public static class Inputs extends RawOpInputs input; /** - * If `True` then eigenvectors will be computed and returned in `v`. + * If {@code True} then eigenvectors will be computed and returned in {@code v}. * Otherwise, only the eigenvalues will be computed. 
*/ public final boolean computeV; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Solve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Solve.java index b21d43f82a9..d1057183227 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Solve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Solve.java @@ -41,8 +41,6 @@ * satisfies {@code matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]}. * If {@code adjoint} is {@code True} then each output matrix satisfies * {@code adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Solve.OP_NAME, @@ -155,7 +153,7 @@ public static class Inputs extends RawOpInputs> { public final Operand rhs; /** - * Boolean indicating whether to solve with `matrix` or its (block-wise) + * Boolean indicating whether to solve with {@code matrix} or its (block-wise) * adjoint. */ public final boolean adjoint; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Sqrtm.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Sqrtm.java index 224688c8e1d..cf48c52605a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Sqrtm.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Sqrtm.java @@ -47,8 +47,6 @@ *

    The input is a tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions * form square matrices. The output is a tensor of the same shape as the input * containing the matrix square root for all input submatrices {@code [..., :, :]}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Sqrtm.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Svd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Svd.java index 7142f9fbe25..b11eafdccfc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Svd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Svd.java @@ -45,8 +45,6 @@ * s, u, v = svd(a) * s, _, _ = svd(a, compute_uv=False) * - * - * @param data type for {@code s} output */ @OpMetadata( opType = Svd.OP_NAME, @@ -209,15 +207,15 @@ public static class Inputs extends RawOpInputs> { /** * If true, left and right singular vectors will be - * computed and returned in `u` and `v`, respectively. - * If false, `u` and `v` are not set and should never referenced. + * computed and returned in {@code u} and {@code v}, respectively. + * If false, {@code u} and {@code v} are not set and should never referenced. */ public final boolean computeUv; /** - * If true, compute full-sized `u` and `v`. If false - * (the default), compute only the leading `P` singular vectors. - * Ignored if `compute_uv` is `False`. + * If true, compute full-sized {@code u} and {@code v}. If false + * (the default), compute only the leading {@code P} singular vectors. + * Ignored if {@code compute_uv} is {@code False}. 
*/ public final boolean fullMatrices; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiag.java index 6292194a118..69ee9258392 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiag.java @@ -48,8 +48,6 @@ * [0, 0, 3, 0] * [0, 0, 0, 4]] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = TensorDiag.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiagPart.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiagPart.java index ae21a73b071..838a036f84b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiagPart.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiagPart.java @@ -49,8 +49,6 @@ * * tf.diag_part(input) ==> [1, 2, 3, 4] * - * - * @param data type for {@code diagonal} output */ @OpMetadata( opType = TensorDiagPart.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Transpose.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Transpose.java index 65f22dfe32b..712576c0989 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Transpose.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Transpose.java @@ -38,8 +38,6 @@ * Shuffle dimensions of x according to a permutation. * The output {@code y} has the same rank as {@code x}. 
The shapes of {@code x} and {@code y} satisfy: * {@code y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]} - * - * @param data type for {@code y} output */ @OpMetadata( opType = Transpose.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TriangularSolve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TriangularSolve.java index cab9bc283a4..026fbfb70bf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TriangularSolve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TriangularSolve.java @@ -77,8 +77,6 @@ * # [4. ], * # [1.9999999]], dtype=float32)> * - * - * @param data type for {@code output} output */ @OpMetadata( opType = TriangularSolve.OP_NAME, @@ -225,18 +223,17 @@ public static class Inputs extends RawOpInputs rhs; /** - * Boolean indicating whether the innermost matrices in `matrix` are + * Boolean indicating whether the innermost matrices in {@code matrix} are * lower or upper triangular. */ public final boolean lower; /** - * Boolean indicating whether to solve with `matrix` or its (block-wise) - * adjoint. - * - * @compatibility(numpy) + * Boolean indicating whether to solve with {@code matrix} or its (block-wise) + * adjoint. + *

    {@literal @}compatibility(numpy)
    * Equivalent to scipy.linalg.solve_triangular - * @end_compatibility + *
    {@literal @}end_compatibility */ public final boolean adjoint; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalMatMul.java index e098ce81667..a6122dabc83 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalMatMul.java @@ -29,19 +29,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; /** * Calculate product with tridiagonal matrix. * Calculates product of two matrices, where left matrix is a tridiagonal matrix. - * - * @param data type for {@code output} output */ @OpMetadata( opType = TridiagonalMatMul.OP_NAME, inputsClass = TridiagonalMatMul.Inputs.class ) +@Operator( + group = "linalg" +) public final class TridiagonalMatMul extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalSolve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalSolve.java index 34d0b1ba4ac..6b0a890d12e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalSolve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalSolve.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import 
org.tensorflow.types.family.TType; @@ -41,13 +42,14 @@ * pivoting, depending on {@code partial_pivoting} attribute. On GPU, Nvidia's cuSPARSE * library is used: https://docs.nvidia.com/cuda/cusparse/index.html#gtsv * Partial pivoting is not yet supported by XLA backends. - * - * @param data type for {@code output} output */ @OpMetadata( opType = TridiagonalSolve.OP_NAME, inputsClass = TridiagonalSolve.Inputs.class ) +@Operator( + group = "linalg" +) public final class TridiagonalSolve extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixComponents.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixComponents.java index 08bccb5d80e..7fd47c7c6f5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixComponents.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixComponents.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -38,13 +39,14 @@ * Reads out the CSR components at batch {@code index}. * This op is meant only for debugging / testing, and its interface is not expected * to be stable. 
- * - * @param data type for {@code values} output */ @OpMetadata( opType = CSRSparseMatrixComponents.OP_NAME, inputsClass = CSRSparseMatrixComponents.Inputs.class ) +@Operator( + group = "linalg.sparse" +) public final class CSRSparseMatrixComponents extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToDense.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToDense.java index 71c35b31ad7..97fb87d7250 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToDense.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToDense.java @@ -30,18 +30,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; /** * Convert a (possibly batched) CSRSparseMatrix to dense. 
- * - * @param data type for {@code dense_output} output */ @OpMetadata( opType = CSRSparseMatrixToDense.OP_NAME, inputsClass = CSRSparseMatrixToDense.Inputs.class ) +@Operator( + group = "linalg.sparse" +) public final class CSRSparseMatrixToDense extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToSparseTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToSparseTensor.java index 34144514ad0..ad365783cea 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToSparseTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToSparseTensor.java @@ -30,19 +30,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; /** * Converts a (possibly batched) CSRSparesMatrix to a SparseTensor. 
- * - * @param data type for {@code values} output */ @OpMetadata( opType = CSRSparseMatrixToSparseTensor.OP_NAME, inputsClass = CSRSparseMatrixToSparseTensor.Inputs.class ) +@Operator( + group = "linalg.sparse" +) public final class CSRSparseMatrixToSparseTensor extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/DenseToCSRSparseMatrix.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/DenseToCSRSparseMatrix.java index f8181163ece..dfd4cdefccc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/DenseToCSRSparseMatrix.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/DenseToCSRSparseMatrix.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -40,6 +41,9 @@ opType = DenseToCSRSparseMatrix.OP_NAME, inputsClass = DenseToCSRSparseMatrix.Inputs.class ) +@Operator( + group = "linalg.sparse" +) public final class DenseToCSRSparseMatrix extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixAdd.java index 6c6b6ba285d..fab4f97ab71 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixAdd.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; 
import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -41,6 +42,9 @@ opType = SparseMatrixAdd.OP_NAME, inputsClass = SparseMatrixAdd.Inputs.class ) +@Operator( + group = "linalg.sparse" +) public final class SparseMatrixAdd extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixMatMul.java index a4d7ed766b9..5d9ed9bbbf2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixMatMul.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -55,13 +56,14 @@ * C = conjugate(transpose(A . B)) = conjugate(transpose(B)) . 
* conjugate(transpose(A)) * - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseMatrixMatMul.OP_NAME, inputsClass = SparseMatrixMatMul.Inputs.class ) +@Operator( + group = "linalg.sparse" +) public final class SparseMatrixMatMul extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -299,32 +301,32 @@ public static class Inputs extends RawOpInputs { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixNNZ.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixNNZ.java index c6f7974ff9c..2499a1060fc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixNNZ.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixNNZ.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -39,6 +40,9 @@ opType = SparseMatrixNNZ.OP_NAME, inputsClass = SparseMatrixNNZ.Inputs.class ) +@Operator( + group = "linalg.sparse" +) public final class SparseMatrixNNZ extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixOrderingAMD.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixOrderingAMD.java index c6144467b24..d9287ea0611 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixOrderingAMD.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixOrderingAMD.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -80,6 +81,9 @@ opType = SparseMatrixOrderingAMD.OP_NAME, inputsClass = SparseMatrixOrderingAMD.Inputs.class ) +@Operator( + group = "linalg.sparse" +) public final class SparseMatrixOrderingAMD extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSoftmax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSoftmax.java index 64b3648cea4..35bd0313c79 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSoftmax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSoftmax.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -45,6 +46,9 @@ opType = SparseMatrixSoftmax.OP_NAME, inputsClass = SparseMatrixSoftmax.Inputs.class ) +@Operator( + group = "linalg.sparse" +) public final class SparseMatrixSoftmax extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSoftmaxGrad.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSoftmaxGrad.java index 5ea02b16ecc..0dba5334bcb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSoftmaxGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSoftmaxGrad.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -41,6 +42,9 @@ opType = SparseMatrixSoftmaxGrad.OP_NAME, inputsClass = SparseMatrixSoftmaxGrad.Inputs.class ) +@Operator( + group = "linalg.sparse" +) public final class SparseMatrixSoftmaxGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSparseCholesky.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSparseCholesky.java index b589f323f5c..6542ed2c6cc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSparseCholesky.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSparseCholesky.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -105,6 +106,9 @@ opType = SparseMatrixSparseCholesky.OP_NAME, inputsClass = SparseMatrixSparseCholesky.Inputs.class ) +@Operator( + group 
= "linalg.sparse" +) public final class SparseMatrixSparseCholesky extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSparseMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSparseMatMul.java index 7cfd0afd589..0c7e049a352 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSparseMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSparseMatMul.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -106,6 +107,9 @@ opType = SparseMatrixSparseMatMul.OP_NAME, inputsClass = SparseMatrixSparseMatMul.Inputs.class ) +@Operator( + group = "linalg.sparse" +) public final class SparseMatrixSparseMatMul extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -295,22 +299,22 @@ public static class Inputs extends RawOpInputs { public final DataType type; /** - * Indicates whether `a` should be transposed. + * Indicates whether {@code a} should be transposed. */ public final boolean transposeA; /** - * Indicates whether `b` should be transposed. + * Indicates whether {@code b} should be transposed. */ public final boolean transposeB; /** - * Indicates whether `a` should be conjugate-transposed. + * Indicates whether {@code a} should be conjugate-transposed. */ public final boolean adjointA; /** - * Indicates whether `b` should be conjugate-transposed. + * Indicates whether {@code b} should be conjugate-transposed. 
*/ public final boolean adjointB; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixTranspose.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixTranspose.java index abb25dd4e2c..783fb5c2ebc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixTranspose.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixTranspose.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +43,9 @@ opType = SparseMatrixTranspose.OP_NAME, inputsClass = SparseMatrixTranspose.Inputs.class ) +@Operator( + group = "linalg.sparse" +) public final class SparseMatrixTranspose extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -141,7 +145,7 @@ public static class Inputs extends RawOpInputs { public final Operand input; /** - * Indicates whether `input` should be conjugated. + * Indicates whether {@code input} should be conjugated. 
*/ public final boolean conjugate; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixZeros.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixZeros.java index faede600cc5..7e4fecb5c60 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixZeros.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixZeros.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -41,6 +42,9 @@ opType = SparseMatrixZeros.OP_NAME, inputsClass = SparseMatrixZeros.Inputs.class ) +@Operator( + group = "linalg.sparse" +) public final class SparseMatrixZeros extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseTensorToCSRSparseMatrix.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseTensorToCSRSparseMatrix.java index 143ec3fd3aa..cec991e5159 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseTensorToCSRSparseMatrix.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseTensorToCSRSparseMatrix.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -40,6 +41,9 @@ 
opType = SparseTensorToCSRSparseMatrix.OP_NAME, inputsClass = SparseTensorToCSRSparseMatrix.Inputs.class ) +@Operator( + group = "linalg.sparse" +) public final class SparseTensorToCSRSparseMatrix extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Abs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Abs.java index ef53c5f5693..0f4ee840704 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Abs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Abs.java @@ -38,8 +38,6 @@ * Given a tensor {@code x}, this operation returns a tensor containing the absolute * value of each element in {@code x}. For example, if x is an input element and y is * an output element, this operation computes \(y = |x|\). - * - * @param data type for {@code y} output */ @OpMetadata( opType = Abs.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AccumulateN.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AccumulateN.java index fd1eaeafc4a..3a0e466e8cd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AccumulateN.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AccumulateN.java @@ -43,8 +43,6 @@ * storage is proportional to the output size rather than the inputs size. *

    Unlike the original {@code accumulate_n}, {@code accumulate_n_v2} is differentiable. *

    Returns a {@code Tensor} of same shape and type as the elements of {@code inputs}. - * - * @param data type for {@code sum} output */ @OpMetadata( opType = AccumulateN.OP_NAME, @@ -116,7 +114,7 @@ public static class Inputs extends RawOpInputs> public final DataType T; /** - * Shape of elements of `inputs`. + * Shape of elements of {@code inputs}. */ public final Shape shape; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acos.java index 078326e1891..915e5b98b63 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acos.java @@ -37,8 +37,6 @@ * Computes acos of x element-wise. * Provided an input tensor, the {@code tf.math.acos} operation returns the inverse cosine of each element of the tensor. If {@code y = tf.math.cos(x)} then, {@code x = tf.math.acos(y)}. *

    Input range is {@code [-1, 1]} and the output has a range of {@code [0, pi]}. - * - * @param data type for {@code y} output */ @OpMetadata( opType = Acos.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acosh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acosh.java index 60edbd7880f..8ade37b1990 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acosh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acosh.java @@ -41,8 +41,6 @@ * x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")]) * tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Acosh.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Add.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Add.java index 4f32acd9ee1..61db4d2e4ec 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Add.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Add.java @@ -39,8 +39,6 @@ * here *

    Given two input tensors, the {@code tf.add} operation computes the sum for every element in the tensor. *

    Both input and output have a range {@code (-inf, inf)}. - * - * @param data type for {@code z} output */ @OpMetadata( opType = Add.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AddN.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AddN.java index 6cd47212eef..f2ef9209796 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AddN.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AddN.java @@ -41,8 +41,6 @@ * x = [9, 7, 10] * tf.math.add_n(x) ==> 26 * - * - * @param data type for {@code sum} output */ @OpMetadata( opType = AddN.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Angle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Angle.java index a9c7814636f..6ad1ff84bba 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Angle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Angle.java @@ -51,8 +51,6 @@ *

    {@literal @}compatibility(numpy)
    * Equivalent to np.angle. *
    {@literal @}end_compatibility - * - * @param data type for {@code output} output */ @OpMetadata( opType = Angle.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMax.java index 5a7b5adec69..c222f3d54d5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMax.java @@ -48,8 +48,6 @@ * # c = 4 * # here a[4] = 166.32 which is the largest element of a across axis 0 * - * - * @param data type for {@code output} output */ @OpMetadata( opType = ArgMax.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMin.java index ff138655b1f..41aa45a10ad 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMin.java @@ -48,8 +48,6 @@ * # c = 0 * # here a[0] = 1 which is the smallest element of a across axis 0 * - * - * @param data type for {@code output} output */ @OpMetadata( opType = ArgMin.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asin.java index 050107db969..810aeb5fa3b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asin.java @@ -47,8 +47,6 @@ * * tf.math.asin(y) # [1.047, 0.785] = x * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Asin.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asinh.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asinh.java index d4170db292a..918518f2b82 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asinh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asinh.java @@ -42,8 +42,6 @@ * x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, float("inf")]) * tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Asinh.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan.java index aab73783c10..8979ab75d9e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan.java @@ -47,8 +47,6 @@ * * tf.math.atan(y) # [1.047, 0.785] = x * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Atan.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan2.java index dfff4a48676..2d566d3cc22 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan2.java @@ -51,8 +51,6 @@ * * * - * - * @param data type for {@code z} output */ @OpMetadata( opType = Atan2.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atanh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atanh.java index ea5729193bf..c4dd0f1ead2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atanh.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atanh.java @@ -44,8 +44,6 @@ * x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, float("inf")]) * tf.math.atanh(x) ==> [nan -inf -0.54930615 inf 0. 0.54930615 nan nan] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Atanh.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0.java index a39144d1e94..945d2107a39 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * The BesselI0 operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = BesselI0.OP_NAME, inputsClass = BesselI0.Inputs.class ) +@Operator( + group = "math" +) public final class BesselI0 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0e.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0e.java index dfebad42475..7e27d3e4263 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0e.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0e.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import 
org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * The BesselI0e operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = BesselI0e.OP_NAME, inputsClass = BesselI0e.Inputs.class ) +@Operator( + group = "math" +) public final class BesselI0e extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1.java index dec28f14920..28304567e86 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * The BesselI1 operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = BesselI1.OP_NAME, inputsClass = BesselI1.Inputs.class ) +@Operator( + group = "math" +) public final class BesselI1 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1e.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1e.java index 0cf7fcd63fb..df3b3f937e8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1e.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1e.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import 
org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * The BesselI1e operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = BesselI1e.OP_NAME, inputsClass = BesselI1e.Inputs.class ) +@Operator( + group = "math" +) public final class BesselI1e extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Betainc.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Betainc.java index f7b9904c100..1a895c89f00 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Betainc.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Betainc.java @@ -41,8 +41,6 @@ *

    \(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\) *

    is the incomplete beta function and \(B(a, b)\) is the complete * beta function. - * - * @param data type for {@code z} output */ @OpMetadata( opType = Betainc.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Bincount.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Bincount.java index 6e78f0799fc..463dc277eae 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Bincount.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Bincount.java @@ -42,8 +42,6 @@ * the value in {@code weights} at each index where the corresponding value in {@code arr} is * {@code i}. *

    Values in {@code arr} outside of the range [0, size) are ignored. - * - * @param data type for {@code bins} output */ @OpMetadata( opType = Bincount.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ceil.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ceil.java index 3db46461d7c..1a69b94a8e4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ceil.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ceil.java @@ -35,8 +35,6 @@ /** * Returns element-wise smallest integer not less than x. - * - * @param data type for {@code y} output */ @OpMetadata( opType = Ceil.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ComplexAbs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ComplexAbs.java index 798b2a9cb1a..9461d599888 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ComplexAbs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ComplexAbs.java @@ -52,8 +52,6 @@ * * * - * - * @param data type for {@code y} output */ @OpMetadata( opType = ComplexAbs.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Conj.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Conj.java index 266da810658..d46b7f2ae5b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Conj.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Conj.java @@ -45,8 +45,6 @@ * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Conj.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cos.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cos.java index 0ab0152ff02..b6b5b9595c5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cos.java @@ -43,8 +43,6 @@ * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")]) * tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 nan] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Cos.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cosh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cosh.java index 76a98abe533..391d2efd7ab 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cosh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cosh.java @@ -42,8 +42,6 @@ * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) * tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 3.7621956e+00 1.1013233e+04 inf] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Cosh.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumprod.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumprod.java index b4946c796ed..90bdcdc0038 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumprod.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumprod.java @@ -56,8 +56,6 @@ *

      * tf.cumprod([a, b, c], exclusive=True, reverse=True)  # => [b * c, c, 1]
      * 
    - * - * @param data type for {@code out} output */ @OpMetadata( opType = Cumprod.OP_NAME, @@ -200,12 +198,12 @@ public static class Inputs extends RawOpInputs> { public final Operand axis; /** - * If `True`, perform exclusive cumprod. + * If {@code True}, perform exclusive cumprod. */ public final boolean exclusive; /** - * A `bool` (default: False). + * A {@code bool} (default: False). */ public final boolean reverse; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumsum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumsum.java index 2dea6d0fbde..ff8dca235c9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumsum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumsum.java @@ -56,8 +56,6 @@ *
      * tf.cumsum([a, b, c], exclusive=True, reverse=True)  # => [b + c, c, 0]
      * 
    - * - * @param data type for {@code out} output */ @OpMetadata( opType = Cumsum.OP_NAME, @@ -200,12 +198,12 @@ public static class Inputs extends RawOpInputs> { public final Operand axis; /** - * If `True`, perform exclusive cumsum. + * If {@code True}, perform exclusive cumsum. */ public final boolean exclusive; /** - * A `bool` (default: False). + * A {@code bool} (default: False). */ public final boolean reverse; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/CumulativeLogsumexp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/CumulativeLogsumexp.java index 6cf26d3e4eb..f7367703a41 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/CumulativeLogsumexp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/CumulativeLogsumexp.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; @@ -50,13 +51,14 @@ * floating point type is used instead. *

    By setting the {@code reverse} kwarg to {@code True}, the cumulative log-sum-exp is performed in the * opposite direction. - * - * @param data type for {@code out} output */ @OpMetadata( opType = CumulativeLogsumexp.OP_NAME, inputsClass = CumulativeLogsumexp.Inputs.class ) +@Operator( + group = "math" +) public final class CumulativeLogsumexp extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -187,12 +189,12 @@ public static class Inputs extends RawOpInputs axis; /** - * If `True`, perform exclusive cumulative log-sum-exp. + * If {@code True}, perform exclusive cumulative log-sum-exp. */ public final boolean exclusive; /** - * A `bool` (default: False). + * A {@code bool} (default: False). */ public final boolean reverse; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DenseBincount.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DenseBincount.java index ff9a38ba24d..808be372c5f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DenseBincount.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DenseBincount.java @@ -41,8 +41,6 @@ * the value in {@code weights} at each index where the corresponding value in {@code arr} is * {@code i}. *

    Values in {@code arr} outside of the range [0, size) are ignored. - * - * @param data type for {@code output} output */ @OpMetadata( opType = DenseBincount.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Digamma.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Digamma.java index 37117f4e1b8..3a48d548bd4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Digamma.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Digamma.java @@ -36,8 +36,6 @@ /** * Computes Psi, the derivative of Lgamma (the log of the absolute value of * {@code Gamma(x)}), element-wise. - * - * @param data type for {@code y} output */ @OpMetadata( opType = Digamma.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Div.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Div.java index 62a15f37da7..8ad37113d3f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Div.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Div.java @@ -37,8 +37,6 @@ * Returns x / y element-wise. * NOTE: {@code math.Div} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = Div.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DivNoNan.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DivNoNan.java index bb098cfdf14..43047bad3c6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DivNoNan.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DivNoNan.java @@ -37,8 +37,6 @@ * Returns 0 if the denominator is zero. * NOTE: {@code math.DivNoNan} supports broadcasting. 
More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = DivNoNan.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erf.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erf.java index 1e2046e2892..ef607d7778b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erf.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erf.java @@ -35,8 +35,6 @@ /** * Computes the Gauss error function of {@code x} element-wise. In statistics, for non-negative values of $x$, the error function has the following interpretation: for a random variable $Y$ that is normally distributed with mean 0 and variance $1/\sqrt{2}$, $erf(x)$ is the probability that $Y$ falls in the range $[−x, x]$. - * - * @param data type for {@code y} output */ @OpMetadata( opType = Erf.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erfc.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erfc.java index b8d11327b94..25fdbcd648c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erfc.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erfc.java @@ -35,8 +35,6 @@ /** * Computes the complementary error function of {@code x} element-wise. 
- * - * @param data type for {@code y} output */ @OpMetadata( opType = Erfc.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Exp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Exp.java index 1a5c7456b51..fe1d6ed1515 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Exp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Exp.java @@ -56,8 +56,6 @@ * x = tf.constant(1 + 1j) * tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Exp.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Expm1.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Expm1.java index a6f8f64ab43..b9c80edf84b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Expm1.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Expm1.java @@ -47,8 +47,6 @@ * x = tf.constant(1 + 1j) * tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j) * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Expm1.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Floor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Floor.java index bb9dbc4aa32..27ed6af66ac 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Floor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Floor.java @@ -35,8 +35,6 @@ /** * Returns element-wise largest integer not greater than x. 
- * - * @param data type for {@code y} output */ @OpMetadata( opType = Floor.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorDiv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorDiv.java index 47887e1a4dd..61d57ac8c4f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorDiv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorDiv.java @@ -37,8 +37,6 @@ * Returns x // y element-wise. * NOTE: {@code math.FloorDiv} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = FloorDiv.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorMod.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorMod.java index 58c90f87123..b41e5d112b2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorMod.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorMod.java @@ -40,8 +40,6 @@ * {@code floor(x / y) * y + floormod(x, y) = x}, regardless of the signs of x and y. *

    NOTE: {@code math.FloorMod} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = FloorMod.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igamma.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igamma.java index 4f116ba6e63..224c434af9f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igamma.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igamma.java @@ -42,8 +42,6 @@ *

    is the lower incomplete Gamma function. *

    Note, above {@code Q(a, x)} ({@code Igammac}) is the upper regularized complete * Gamma function. - * - * @param data type for {@code z} output */ @OpMetadata( opType = Igamma.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IgammaGradA.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IgammaGradA.java index 8be3c723c18..a3c6c4f20ad 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IgammaGradA.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IgammaGradA.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * Computes the gradient of {@code igamma(a, x)} wrt {@code a}. - * - * @param data type for {@code z} output */ @OpMetadata( opType = IgammaGradA.OP_NAME, inputsClass = IgammaGradA.Inputs.class ) +@Operator( + group = "math" +) public final class IgammaGradA extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igammac.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igammac.java index 1cc0549ad00..80f2545ce69 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igammac.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igammac.java @@ -42,8 +42,6 @@ *

    is the upper incomplete Gamma function. *

    Note, above {@code P(a, x)} ({@code Igamma}) is the lower regularized complete * Gamma function. - * - * @param data type for {@code z} output */ @OpMetadata( opType = Igammac.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Imag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Imag.java index fe04cd17336..509de2b8c7b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Imag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Imag.java @@ -47,8 +47,6 @@ * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.imag(input) ==> [4.75, 5.75] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Imag.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/InvertPermutation.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/InvertPermutation.java index 3035d46e60c..a466109898c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/InvertPermutation.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/InvertPermutation.java @@ -46,8 +46,6 @@ * # tensor `x` is [3, 4, 0, 2, 1] * invert_permutation(x) ==> [2, 4, 3, 0, 1] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = InvertPermutation.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Lgamma.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Lgamma.java index d8c6b4889a2..4c5aea1de84 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Lgamma.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Lgamma.java @@ -42,8 +42,6 @@ * x = tf.constant([0, 0.5, 1, 4.5, -4, -5.6]) * tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685] * - * - * @param data type for {@code y} 
output */ @OpMetadata( opType = Lgamma.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log.java index 32ee589536a..911ab61ff0c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log.java @@ -41,8 +41,6 @@ * x = tf.constant([0, 0.5, 1, 5]) * tf.math.log(x) ==> [-inf, -0.6931472, 0. , 1.609438] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Log.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log1p.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log1p.java index f280d8b0062..05fe31ad376 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log1p.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log1p.java @@ -41,8 +41,6 @@ * x = tf.constant([0, 0.5, 1, 5]) * tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Log1p.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Maximum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Maximum.java index c46c8c6e384..0c864b79f5e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Maximum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Maximum.java @@ -37,8 +37,6 @@ * Returns the max of x and y (i.e. x > y ? x : y) element-wise. * NOTE: {@code math.Maximum} supports broadcasting. 
More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = Maximum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mean.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mean.java index 94de9fc5bd4..9018aa2bd6d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mean.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mean.java @@ -40,8 +40,6 @@ * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Mean.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Minimum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Minimum.java index 588bcb3328b..b516ee5c302 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Minimum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Minimum.java @@ -37,8 +37,6 @@ * Returns the min of x and y (i.e. x < y ? x : y) element-wise. * NOTE: {@code math.Minimum} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = Minimum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mod.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mod.java index d318de97c9c..60ccc32e855 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mod.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mod.java @@ -39,8 +39,6 @@ * {@code tf.truncatediv(x, y) * y + truncate_mod(x, y) = x}. *

    NOTE: {@code math.Mod} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = Mod.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mul.java index d7466085ada..d18a48a6472 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mul.java @@ -37,8 +37,6 @@ * Returns x * y element-wise. * NOTE: {@code math.Mul} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = Mul.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/MulNoNan.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/MulNoNan.java index 85429b70ca1..7e85f94c31d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/MulNoNan.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/MulNoNan.java @@ -37,8 +37,6 @@ * Returns x * y element-wise. Returns zero if y is zero, even if x if infinite or NaN. * NOTE: {@code math.MulNoNan} supports broadcasting. 
More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = MulNoNan.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ndtri.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ndtri.java index 37d1ffb8fc9..2c9b4f4719f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ndtri.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ndtri.java @@ -35,8 +35,6 @@ /** * The Ndtri operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = Ndtri.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Neg.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Neg.java index e0ec5783144..e11b274470a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Neg.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Neg.java @@ -36,8 +36,6 @@ /** * Computes numerical negative value element-wise. * I.e., \(y = -x\). - * - * @param data type for {@code y} output */ @OpMetadata( opType = Neg.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/NextAfter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/NextAfter.java index 45ff3a179ca..fef32810db3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/NextAfter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/NextAfter.java @@ -40,8 +40,6 @@ *

    {@literal @}compatibility(cpp)
    * Equivalent to C++ std::nextafter function. *
    {@literal @}end_compatibility - * - * @param data type for {@code output} output */ @OpMetadata( opType = NextAfter.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Polygamma.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Polygamma.java index b2fb442489b..f391fef2335 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Polygamma.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Polygamma.java @@ -39,8 +39,6 @@ *

    \(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\) *

    where \(\psi(x)\) is the digamma function. * The polygamma function is defined only for non-negative integer orders \a\. - * - * @param data type for {@code z} output */ @OpMetadata( opType = Polygamma.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Pow.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Pow.java index f50532e8d62..3a8f8acbb7a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Pow.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Pow.java @@ -42,8 +42,6 @@ * # tensor 'y' is [[8, 16], [2, 3]] * tf.pow(x, y) ==> [[256, 65536], [9, 27]] * - * - * @param data type for {@code z} output */ @OpMetadata( opType = Pow.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedAdd.java index ad59711dca9..cf02c4ad713 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedAdd.java @@ -37,8 +37,6 @@ /** * Returns x + y element-wise, working on quantized buffers. - * - * @param data type for {@code z} output */ @OpMetadata( opType = QuantizedAdd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedMul.java index 6b5c3d05579..b9f1e5b062c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedMul.java @@ -37,8 +37,6 @@ /** * Returns x * y element-wise, working on quantized buffers. 
- * - * @param data type for {@code z} output */ @OpMetadata( opType = QuantizedMul.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Real.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Real.java index 6217269b474..c85e0d73861 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Real.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Real.java @@ -47,8 +47,6 @@ * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.real(input) ==> [-2.25, 3.25] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Real.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RealDiv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RealDiv.java index c1aceba76d3..fb2e7e77d33 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RealDiv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RealDiv.java @@ -38,8 +38,6 @@ * If {@code x} and {@code y} are reals, this will return the floating-point division. *

    NOTE: {@code Div} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = RealDiv.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Reciprocal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Reciprocal.java index 97ae15f6015..c0e6b9c573a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Reciprocal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Reciprocal.java @@ -36,8 +36,6 @@ /** * Computes the reciprocal of x element-wise. * I.e., \(y = 1 / x\). - * - * @param data type for {@code y} output */ @OpMetadata( opType = Reciprocal.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ReciprocalGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ReciprocalGrad.java index 76522727a1b..9d1c672629f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ReciprocalGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ReciprocalGrad.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -36,13 +37,14 @@ * Computes the gradient for the inverse of {@code x} wrt its input. * Specifically, {@code grad = -dy * y*y}, where {@code y = 1/x}, and {@code dy} * is the corresponding input gradient. 
- * - * @param data type for {@code z} output */ @OpMetadata( opType = ReciprocalGrad.OP_NAME, inputsClass = ReciprocalGrad.Inputs.class ) +@Operator( + group = "math" +) public final class ReciprocalGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizationRangePerChannel.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizationRangePerChannel.java index 500e112079b..a0681e950ab 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizationRangePerChannel.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizationRangePerChannel.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -40,6 +41,9 @@ opType = RequantizationRangePerChannel.OP_NAME, inputsClass = RequantizationRangePerChannel.Inputs.class ) +@Operator( + group = "math" +) public final class RequantizationRangePerChannel extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizePerChannel.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizePerChannel.java index a08d758e335..f6dcf220ade 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizePerChannel.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizePerChannel.java @@ -30,19 +30,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import 
org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; /** * Requantizes input with min and max values known per channel. - * - * @param data type for {@code output} output */ @OpMetadata( opType = RequantizePerChannel.OP_NAME, inputsClass = RequantizePerChannel.Inputs.class ) +@Operator( + group = "math" +) public final class RequantizePerChannel extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rint.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rint.java index 716bc8be07b..62a48d4ecd0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rint.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rint.java @@ -43,8 +43,6 @@ * rint(0.5000001) ==> 1.0 * rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Rint.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Round.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Round.java index d8a5aff3d2d..0e7441efeb1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Round.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Round.java @@ -37,8 +37,6 @@ * Rounds the values of a tensor to the nearest integer, element-wise. * Rounds half to even. Also known as bankers rounding. If you want to round * according to the current system rounding mode use std::cint. 
- * - * @param data type for {@code y} output */ @OpMetadata( opType = Round.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rsqrt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rsqrt.java index 12ce75ef035..3d438f10f12 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rsqrt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rsqrt.java @@ -36,8 +36,6 @@ /** * Computes reciprocal of square root of x element-wise. * I.e., \(y = 1 / \sqrt{x}\). - * - * @param data type for {@code y} output */ @OpMetadata( opType = Rsqrt.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RsqrtGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RsqrtGrad.java index 6044722f85c..90fc4892083 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RsqrtGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RsqrtGrad.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -36,13 +37,14 @@ * Computes the gradient for the rsqrt of {@code x} wrt its input. * Specifically, {@code grad = dy * -0.5 * y^3}, where {@code y = rsqrt(x)}, and {@code dy} * is the corresponding input gradient. 
- * - * @param data type for {@code z} output */ @OpMetadata( opType = RsqrtGrad.OP_NAME, inputsClass = RsqrtGrad.Inputs.class ) +@Operator( + group = "math" +) public final class RsqrtGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMax.java index 1939c7a4d3a..44ec468eaf4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMax.java @@ -73,8 +73,6 @@ * * * - * - * @param data type for {@code output} output */ @OpMetadata( opType = SegmentMax.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMean.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMean.java index 7d0e2af1606..2e69b2bb8b5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMean.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMean.java @@ -64,8 +64,6 @@ * * * - * - * @param data type for {@code output} output */ @OpMetadata( opType = SegmentMean.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMin.java index cb5a312d3ff..9dce52fceed 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMin.java @@ -73,8 +73,6 @@ * * * - * - * @param data type for {@code output} output */ @OpMetadata( opType = SegmentMin.OP_NAME, diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentProd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentProd.java index 87738a1ac3a..77fd53d92a0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentProd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentProd.java @@ -66,8 +66,6 @@ * * * - * - * @param data type for {@code output} output */ @OpMetadata( opType = SegmentProd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentSum.java index 578d159e289..c47c3acd24f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentSum.java @@ -44,9 +44,6 @@ * that {@code segment_ids[j] == i}. *

    If the sum is empty for a given segment ID {@code i}, {@code output[i] = 0}. *

    Note that this op is currently only supported with jit_compile=True. - * - * - * @param data type for {@code output} output */ @OpMetadata( opType = SegmentSum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sigmoid.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sigmoid.java index bd93a0303eb..8e71006a2c0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sigmoid.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sigmoid.java @@ -36,8 +36,6 @@ /** * Computes sigmoid of {@code x} element-wise. * Specifically, {@code y = 1 / (1 + exp(-x))}. - * - * @param data type for {@code y} output */ @OpMetadata( opType = Sigmoid.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SigmoidGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SigmoidGrad.java index a787d25809d..a85b754cc61 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SigmoidGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SigmoidGrad.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -36,13 +37,14 @@ * Computes the gradient of the sigmoid of {@code x} wrt its input. * Specifically, {@code grad = dy * y * (1 - y)}, where {@code y = sigmoid(x)}, and * {@code dy} is the corresponding input gradient. 
- * - * @param data type for {@code z} output */ @OpMetadata( opType = SigmoidGrad.OP_NAME, inputsClass = SigmoidGrad.Inputs.class ) +@Operator( + group = "math" +) public final class SigmoidGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sign.java index 15f5e07b597..ee9d2d65154 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sign.java @@ -46,8 +46,6 @@ * * * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Sign.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sin.java index 06269cb6278..1a13ada1838 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sin.java @@ -42,8 +42,6 @@ * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10, float("inf")]) * tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 0.9320391 -0.87329733 -0.54402107 nan] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Sin.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sinh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sinh.java index 9e1a692df76..b4af201ab99 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sinh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sinh.java @@ -42,8 +42,6 @@ * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) * tf.math.sinh(x) ==> [-inf -4.0515420e+03 
-5.2109528e-01 1.1752012e+00 1.5094614e+00 3.6268604e+00 1.1013232e+04 inf] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Sinh.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SobolSample.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SobolSample.java index 75ca95262bf..5989ca78f57 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SobolSample.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SobolSample.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -39,13 +40,14 @@ * Generates points from the Sobol sequence. * Creates a Sobol sequence with {@code num_results} samples. Each sample has dimension * {@code dim}. Skips the first {@code skip} samples. - * - * @param data type for {@code samples} output */ @OpMetadata( opType = SobolSample.OP_NAME, inputsClass = SobolSample.Inputs.class ) +@Operator( + group = "math" +) public final class SobolSample extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -141,7 +143,7 @@ public static class Inputs extends RawOpInputs> { public final Operand skip; /** - * The type of the sample. One of: `float32` or `float64`. + * The type of the sample. One of: {@code float32} or {@code float64}. 
*/ public final DataType dtype; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Softplus.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Softplus.java index aa80f8d0840..cdb0aea4f9f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Softplus.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Softplus.java @@ -35,8 +35,6 @@ /** * The Softplus operation - * - * @param data type for {@code activations} output */ @OpMetadata( opType = Softplus.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SoftplusGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SoftplusGrad.java index c0718a4fdff..3f2901810ce 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SoftplusGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SoftplusGrad.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * Computes softplus gradients for a softplus operation. 
- * - * @param data type for {@code backprops} output */ @OpMetadata( opType = SoftplusGrad.OP_NAME, inputsClass = SoftplusGrad.Inputs.class ) +@Operator( + group = "math" +) public final class SoftplusGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sqrt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sqrt.java index ac6cd68b529..8c6edfc6e89 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sqrt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sqrt.java @@ -36,8 +36,6 @@ /** * Computes square root of x element-wise. * I.e., \(y = \sqrt{x} = x^{1/2}\). - * - * @param data type for {@code y} output */ @OpMetadata( opType = Sqrt.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SqrtGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SqrtGrad.java index 893814519ad..eed0209152b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SqrtGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SqrtGrad.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -36,13 +37,14 @@ * Computes the gradient for the sqrt of {@code x} wrt its input. * Specifically, {@code grad = dy * 0.5 / y}, where {@code y = sqrt(x)}, and {@code dy} * is the corresponding input gradient. 
- * - * @param data type for {@code z} output */ @OpMetadata( opType = SqrtGrad.OP_NAME, inputsClass = SqrtGrad.Inputs.class ) +@Operator( + group = "math" +) public final class SqrtGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Square.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Square.java index d5811d17c2a..2952af307d2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Square.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Square.java @@ -36,8 +36,6 @@ /** * Computes square of x element-wise. * I.e., \(y = x * x = x^2\). - * - * @param data type for {@code y} output */ @OpMetadata( opType = Square.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SquaredDifference.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SquaredDifference.java index 2af6fe083e3..4d880a79baa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SquaredDifference.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SquaredDifference.java @@ -37,8 +37,6 @@ * Returns conj(x - y)(x - y) element-wise. * NOTE: {@code math.SquaredDifference} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = SquaredDifference.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sub.java index 6313555f9f1..b48b311d80e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sub.java @@ -37,8 +37,6 @@ * Returns x - y element-wise. 
* NOTE: {@code math.Sub} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = Sub.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tan.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tan.java index 566b7d2b03f..c1073f8a5bb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tan.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tan.java @@ -43,8 +43,6 @@ * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")]) * tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 nan] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Tan.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tanh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tanh.java index ee24b4085df..706a8d90cd0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tanh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tanh.java @@ -49,8 +49,6 @@ * * * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Tanh.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TanhGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TanhGrad.java index ca83e939fe4..273adcf20a6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TanhGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TanhGrad.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import 
org.tensorflow.types.family.TType; @@ -36,13 +37,14 @@ * Computes the gradient for the tanh of {@code x} wrt its input. * Specifically, {@code grad = dy * (1 - y*y)}, where {@code y = tanh(x)}, and {@code dy} * is the corresponding input gradient. - * - * @param data type for {@code z} output */ @OpMetadata( opType = TanhGrad.OP_NAME, inputsClass = TanhGrad.Inputs.class ) +@Operator( + group = "math" +) public final class TanhGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateDiv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateDiv.java index 377eb5848d8..7857bd6221b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateDiv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateDiv.java @@ -41,8 +41,6 @@ * Python Semantics. *

    NOTE: {@code math.TruncateDiv} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = TruncateDiv.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateMod.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateMod.java index e80c75e5709..bd7a41fafd2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateMod.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateMod.java @@ -38,8 +38,6 @@ * the result here is consistent with a truncating divide. E.g. {@code truncate(x / y) * y + truncate_mod(x, y) = x}. *

    NOTE: {@code math.TruncateMod} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = TruncateMod.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UniformQuantizedAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UniformQuantizedAdd.java index 84c58201f70..312c712b44e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UniformQuantizedAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UniformQuantizedAdd.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -51,13 +52,14 @@ * i.e. For both operands {@code lhs} and {@code rhs}, * if {@code operand.quantization_axis} >= 0 and {@code output.quantization_axis} >= 0, * {@code operand.dims} - {@code operand.quantization_axis} must be equal to {@code output.dims} - {@code output.quantization_axis}. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = UniformQuantizedAdd.OP_NAME, inputsClass = UniformQuantizedAdd.Inputs.class ) +@Operator( + group = "math" +) public final class UniformQuantizedAdd extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -310,69 +312,69 @@ public static class Inputs extends RawOpInputs * * - * - * @param data type for {@code output} output */ @OpMetadata( opType = UnsortedSegmentMax.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMin.java index db83daaead7..af919665a56 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMin.java @@ -64,8 +64,6 @@ * result in safe but unspecified behavior, which may include ignoring * out-of-bound indices or outputting a tensor with a 0 stored in the first * dimension of its shape if {@code num_segments} is 0. - * - * @param data type for {@code output} output */ @OpMetadata( opType = UnsortedSegmentMin.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentProd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentProd.java index a36c653ef2a..fd3f76bc1e7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentProd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentProd.java @@ -64,8 +64,6 @@ * result in safe but unspecified behavior, which may include ignoring * out-of-bound indices or outputting a tensor with a 0 stored in the first * dimension of its shape if {@code num_segments} is 0. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = UnsortedSegmentProd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentSum.java index 14c0bef2293..af4dd57e39f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentSum.java @@ -67,8 +67,6 @@ * * * - * - * @param data type for {@code output} output */ @OpMetadata( opType = UnsortedSegmentSum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xdivy.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xdivy.java index 8be3546a9f0..0ba35ba8a83 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xdivy.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xdivy.java @@ -35,8 +35,6 @@ /** * Returns 0 if x == 0, and x / y otherwise, elementwise. - * - * @param data type for {@code z} output */ @OpMetadata( opType = Xdivy.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlog1py.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlog1py.java index b798c8ef598..c6e6184bed0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlog1py.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlog1py.java @@ -35,8 +35,6 @@ /** * Returns 0 if x == 0, and x * log1p(y) otherwise, elementwise. 
- * - * @param data type for {@code z} output */ @OpMetadata( opType = Xlog1py.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlogy.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlogy.java index b4ad543093f..e27ef9a210c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlogy.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlogy.java @@ -35,8 +35,6 @@ /** * Returns 0 if x == 0, and x * log(y) otherwise, elementwise. - * - * @param data type for {@code z} output */ @OpMetadata( opType = Xlogy.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Zeta.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Zeta.java index 887fb1af711..593507c4340 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Zeta.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Zeta.java @@ -37,8 +37,6 @@ * Compute the Hurwitz zeta function \(\zeta(x, q)\). * The Hurwitz zeta function is defined as: *

    \(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\) - * - * @param data type for {@code z} output */ @OpMetadata( opType = Zeta.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/erfinv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/erfinv.java index a4b68423646..a208c49973f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/erfinv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/erfinv.java @@ -35,8 +35,6 @@ /** * The Erfinv operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = erfinv.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ0.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ0.java index bda86750e13..839ca6179b3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ0.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ0.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * The BesselJ0 operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = BesselJ0.OP_NAME, inputsClass = BesselJ0.Inputs.class ) +@Operator( + group = "math.special" +) public final class BesselJ0 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ1.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ1.java index f193f6ffa69..6e125a29821 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ1.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ1.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * The BesselJ1 operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = BesselJ1.OP_NAME, inputsClass = BesselJ1.Inputs.class ) +@Operator( + group = "math.special" +) public final class BesselJ1 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0.java index 9b47bab8dc1..8ec9f528212 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * The BesselK0 operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = BesselK0.OP_NAME, inputsClass = BesselK0.Inputs.class ) +@Operator( + group = "math.special" +) public final class BesselK0 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0e.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0e.java index cc8c267674c..69d5995c59d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0e.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0e.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * The BesselK0e operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = BesselK0e.OP_NAME, inputsClass = BesselK0e.Inputs.class ) +@Operator( + group = "math.special" +) public final class BesselK0e extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1.java index 1247c91aeee..f26b95a8c53 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * The BesselK1 operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = BesselK1.OP_NAME, inputsClass = BesselK1.Inputs.class ) +@Operator( + group = "math.special" +) public final class BesselK1 extends RawOp implements Operand { /** * The name of this op, as known by 
TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1e.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1e.java index 578ad729543..995eaccd9dd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1e.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1e.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * The BesselK1e operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = BesselK1e.OP_NAME, inputsClass = BesselK1e.Inputs.class ) +@Operator( + group = "math.special" +) public final class BesselK1e extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY0.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY0.java index 9fda9433a0c..1beae63d61f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY0.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY0.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * The BesselY0 operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = BesselY0.OP_NAME, inputsClass = BesselY0.Inputs.class ) +@Operator( + 
group = "math.special" +) public final class BesselY0 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY1.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY1.java index dc8ddd9700d..3985dee42d0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY1.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY1.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * The BesselY1 operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = BesselY1.OP_NAME, inputsClass = BesselY1.Inputs.class ) +@Operator( + group = "math.special" +) public final class BesselY1 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Dawsn.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Dawsn.java index 0d920d5a275..e34e0376249 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Dawsn.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Dawsn.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * The Dawsn operation - * - * @param data type for {@code 
y} output */ @OpMetadata( opType = Dawsn.OP_NAME, inputsClass = Dawsn.Inputs.class ) +@Operator( + group = "math.special" +) public final class Dawsn extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Expint.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Expint.java index e5dc1567219..9b61e0fcb90 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Expint.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Expint.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * The Expint operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = Expint.OP_NAME, inputsClass = Expint.Inputs.class ) +@Operator( + group = "math.special" +) public final class Expint extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelCos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelCos.java index ec38bd4e34b..dffb6bda0f0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelCos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelCos.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import 
org.tensorflow.types.family.TNumber; /** * The FresnelCos operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = FresnelCos.OP_NAME, inputsClass = FresnelCos.Inputs.class ) +@Operator( + group = "math.special" +) public final class FresnelCos extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelSin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelSin.java index 6819ad7842b..23e7e1d4bbd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelSin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelSin.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * The FresnelSin operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = FresnelSin.OP_NAME, inputsClass = FresnelSin.Inputs.class ) +@Operator( + group = "math.special" +) public final class FresnelSin extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Spence.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Spence.java index 26e610a9a2c..0a012a3be6c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Spence.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Spence.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; 
import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * The Spence operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = Spence.OP_NAME, inputsClass = Spence.Inputs.class ) +@Operator( + group = "math.special" +) public final class Spence extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool.java index 9cbe9d8c9a3..3d6355679c8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool.java @@ -38,8 +38,6 @@ * Performs average pooling on the input. * Each entry in {@code output} is the mean of the corresponding size {@code ksize} * window in {@code value}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = AvgPool.OP_NAME, @@ -165,12 +163,12 @@ public static class Inputs extends RawOpInputs> { public final Operand value; /** - * The size of the sliding window for each dimension of `value`. + * The size of the sliding window for each dimension of {@code value}. */ public final long[] ksize; /** - * The stride of the sliding window for each dimension of `value`. + * The stride of the sliding window for each dimension of {@code value}. */ public final long[] strides; @@ -181,10 +179,10 @@ public static class Inputs extends RawOpInputs> { /** * Specify the data format of the input and output data. With the - * default format "NHWC", the data is stored in the order of: - * [batch, in_height, in_width, in_channels]. - * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, in_channels, in_height, in_width]. 
+ * default format "NHWC", the data is stored in the order of: + * [batch, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. */ public final String dataFormat; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3d.java index 5fd233ad4d6..5f5410d91d5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3d.java @@ -38,8 +38,6 @@ * Performs 3D average pooling on the input. * Each entry in {@code output} is the mean of the corresponding size {@code ksize} window in * {@code value}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = AvgPool3d.OP_NAME, @@ -168,13 +166,13 @@ public static class Inputs extends RawOpInputs> /** * 1-D tensor of length 5. The size of the window for each dimension of - * the input tensor. Must have `ksize[0] = ksize[4] = 1`. + * the input tensor. Must have {@code ksize[0] = ksize[4] = 1}. */ public final long[] ksize; /** * 1-D tensor of length 5. The stride of the sliding window for each - * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. */ public final long[] strides; @@ -185,10 +183,10 @@ public static class Inputs extends RawOpInputs> /** * The data format of the input and output data. With the - * default format "NDHWC", the data is stored in the order of: - * [batch, in_depth, in_height, in_width, in_channels]. - * Alternatively, the format could be "NCDHW", the data storage order is: - * [batch, in_channels, in_depth, in_height, in_width]. + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. 
+ * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. */ public final String dataFormat; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3dGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3dGrad.java index 7c4e8005968..4b41a0338b3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3dGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3dGrad.java @@ -37,8 +37,6 @@ /** * Computes gradients of average pooling function. - * - * @param data type for {@code output} output */ @OpMetadata( opType = AvgPool3dGrad.OP_NAME, @@ -175,13 +173,13 @@ public static class Inputs extends RawOpInputs extends RawOpInputs data type for {@code output} output */ @OpMetadata( opType = AvgPoolGrad.OP_NAME, inputsClass = AvgPoolGrad.Inputs.class ) +@Operator( + group = "nn" +) public final class AvgPoolGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -186,10 +188,10 @@ public static class Inputs extends RawOpInputs /** * Specify the data format of the input and output data. With the - * default format "NHWC", the data is stored in the order of: - * [batch, in_height, in_width, in_channels]. - * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, in_channels, in_height, in_width]. + * default format "NHWC", the data is stored in the order of: + * [batch, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. 
*/ public final String dataFormat; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalization.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalization.java index deaec7bdd3d..ef7ead8115e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalization.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalization.java @@ -36,8 +36,6 @@ /** * Batch normalization. * This op is deprecated. Prefer {@code tf.nn.batch_normalization}. - * - * @param data type for {@code result} output */ @OpMetadata( opType = BatchNormWithGlobalNormalization.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalizationGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalizationGrad.java index f75aebb0e4c..03e84d778c4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalizationGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalizationGrad.java @@ -36,8 +36,6 @@ /** * Gradients for batch normalization. * This op is deprecated. See {@code tf.nn.batch_normalization}. - * - * @param data type for {@code dx} output */ @OpMetadata( opType = BatchNormWithGlobalNormalizationGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAdd.java index d5c80bb1acd..5f826546b07 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAdd.java @@ -37,8 +37,6 @@ * Adds {@code bias} to {@code value}. 
* This is a special case of {@code tf.add} where {@code bias} is restricted to be 1-D. * Broadcasting is supported, so {@code value} may have any number of dimensions. - * - * @param data type for {@code output} output */ @OpMetadata( opType = BiasAdd.OP_NAME, @@ -167,12 +165,12 @@ public static class Inputs extends RawOpInputs> { /** * Specify the data format of the input and output data. With the - * default format "NHWC", the bias tensor will be added to the last dimension + * default format "NHWC", the bias tensor will be added to the last dimension * of the value tensor. - * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, in_channels, in_height, in_width]. - * The tensor will be added to "in_channels", the third-to-the-last - * dimension. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + * The tensor will be added to "in_channels", the third-to-the-last + * dimension. */ public final String dataFormat; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAddGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAddGrad.java index dffab9d2958..33c2829c271 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAddGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAddGrad.java @@ -38,8 +38,6 @@ * It accumulates all the values from out_backprop into the feature dimension. * For NHWC data format, the feature dimension is the last. For NCHW data format, * the feature dimension is the third-to-last. - * - * @param data type for {@code output} output */ @OpMetadata( opType = BiasAddGrad.OP_NAME, @@ -161,12 +159,12 @@ public static class Inputs extends RawOpInputs> /** * Specify the data format of the input and output data. 
With the - * default format "NHWC", the bias tensor will be added to the last dimension + * default format "NHWC", the bias tensor will be added to the last dimension * of the value tensor. - * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, in_channels, in_height, in_width]. - * The tensor will be added to "in_channels", the third-to-the-last - * dimension. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + * The tensor will be added to "in_channels", the third-to-the-last + * dimension. */ public final String dataFormat; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTM.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTM.java index 7afdca6853f..ef303c35efc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTM.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTM.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; @@ -55,13 +56,14 @@ * this op uses IFCO. So in order for the following snippet to be equivalent * all gate-related outputs should be reordered. 
* - * - * @param data type for {@code i} output */ @OpMetadata( opType = BlockLSTM.OP_NAME, inputsClass = BlockLSTM.Inputs.class ) +@Operator( + group = "nn" +) public final class BlockLSTM extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTMGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTMGrad.java index f65b4eba384..85bc08f38b6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTMGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTMGrad.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; @@ -36,13 +37,14 @@ /** * Computes the LSTM cell backward propagation for the entire time sequence. * This implementation is to be used in conjunction of BlockLSTMV2. 
- * - * @param data type for {@code x_grad} output */ @OpMetadata( opType = BlockLSTMGrad.OP_NAME, inputsClass = BlockLSTMGrad.Inputs.class ) +@Operator( + group = "nn" +) public final class BlockLSTMGrad extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CTCLossV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CTCLossV2.java index d28bc428eca..f64d2f7bc3d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CTCLossV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CTCLossV2.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; @@ -42,6 +43,9 @@ opType = CTCLossV2.OP_NAME, inputsClass = CTCLossV2.Inputs.class ) +@Operator( + group = "nn" +) public final class CTCLossV2 extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -240,7 +244,7 @@ public static class Inputs extends RawOpInputs { public final boolean preprocessCollapseRepeated; /** - * Scalar. If set to false, *during* CTC calculation + * Scalar. If set to false, during CTC calculation * repeated non-blank labels will not be merged and are interpreted as * individual labels. This is a simplified version of CTC. 
*/ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv.java index d1af59aeb93..096c8a3719f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv.java @@ -38,8 +38,6 @@ * Computes a N-D convolution given (N+1+batch_dims)-D {@code input} and (N+2)-D {@code filter} tensors. * General function for computing a N-D convolution. It is required that * {@code 1 <= N <= 3}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Conv.OP_NAME, @@ -373,8 +371,8 @@ public static class Inputs extends RawOpInputs> { public final DataType T; /** - * 1-D tensor of length `N+2`. The stride of the sliding window for each - * dimension of `input`. Must have `strides[0] = strides[N+1] = 1`. + * 1-D tensor of length {@code N+2}. The stride of the sliding window for each + * dimension of {@code input}. Must have {@code strides[0] = strides[N+1] = 1}. */ public final long[] strides; @@ -384,24 +382,24 @@ public static class Inputs extends RawOpInputs> { public final String padding; /** - * If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith + * If {@code padding} is {@code "EXPLICIT"}, the list of explicit padding amounts. For the ith * dimension, the amount of padding inserted before and after the dimension is - * `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If - * `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. + * {@code explicit_paddings[2 * i]} and {@code explicit_paddings[2 * i + 1]}, respectively. If + * {@code padding} is not {@code "EXPLICIT"}, {@code explicit_paddings} must be empty. */ public final long[] explicitPaddings; /** - * Used to set the data format. 
By default `CHANNELS_FIRST`, uses - * `NHWC (2D) / NDHWC (3D)` or if `CHANNELS_LAST`, uses `NCHW (2D) / NCDHW (3D)`. + * Used to set the data format. By default {@code CHANNELS_FIRST}, uses + * {@code NHWC (2D) / NDHWC (3D)} or if {@code CHANNELS_LAST}, uses {@code NCHW (2D) / NCDHW (3D)}. */ public final String dataFormat; /** - * 1-D tensor of length `N+2`. The dilation factor for each dimension of - * `input`. If set to `k > 1`, there will be `k-1` skipped cells between each + * 1-D tensor of length {@code N+2}. The dilation factor for each dimension of + * {@code input}. If set to {@code k > 1}, there will be {@code k-1} skipped cells between each * filter element on that dimension. The dimension order is determined by the - * value of `channels_last_format`, see above for details. Dilations in the batch + * value of {@code channels_last_format}, see above for details. Dilations in the batch * and depth dimensions must be 1. */ public final long[] dilations; @@ -415,7 +413,7 @@ public static class Inputs extends RawOpInputs> { /** * A positive integer specifying the number of groups in which the input is split * along the channel axis. Each group is convolved separately with - * `filters / groups` filters. The output is the concatenation of all the groups + * {@code filters / groups} filters. The output is the concatenation of all the groups * results along the channel axis. Input channels and filters must both be * divisible by groups. */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2d.java index b2199188473..6d7eb6e004e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2d.java @@ -56,8 +56,6 @@ * *

    Must have {@code strides[0] = strides[3] = 1}. For the most common case of the same * horizontal and vertices strides, {@code strides = [1, stride, stride, 1]}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Conv2d.OP_NAME, @@ -353,8 +351,8 @@ public static class Inputs extends RawOpInputs> { /** * 1-D tensor of length 4. The stride of the sliding window for each - * dimension of `input`. The dimension order is determined by the value of - * `data_format`, see below for details. + * dimension of {@code input}. The dimension order is determined by the value of + * {@code data_format}, see below for details. */ public final long[] strides; @@ -369,27 +367,27 @@ public static class Inputs extends RawOpInputs> { public final String padding; /** - * If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith + * If {@code padding} is {@code "EXPLICIT"}, the list of explicit padding amounts. For the ith * dimension, the amount of padding inserted before and after the dimension is - * `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If - * `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. + * {@code explicit_paddings[2 * i]} and {@code explicit_paddings[2 * i + 1]}, respectively. If + * {@code padding} is not {@code "EXPLICIT"}, {@code explicit_paddings} must be empty. */ public final long[] explicitPaddings; /** * Specify the data format of the input and output data. With the - * default format "NHWC", the data is stored in the order of: - * [batch, height, width, channels]. - * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, channels, height, width]. + * default format "NHWC", the data is stored in the order of: + * [batch, height, width, channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, channels, height, width]. */ public final String dataFormat; /** * 1-D tensor of length 4. 
The dilation factor for each dimension of - * `input`. If set to k > 1, there will be k-1 skipped cells between each + * {@code input}. If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. The dimension order is determined by the - * value of `data_format`, see above for details. Dilations in the batch and + * value of {@code data_format}, see above for details. Dilations in the batch and * depth dimensions must be 1. */ public final long[] dilations; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropFilter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropFilter.java index dbcaaf481cb..2d5af50d5e6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropFilter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropFilter.java @@ -37,8 +37,6 @@ /** * Computes the gradients of convolution with respect to the filter. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Conv2dBackpropFilter.OP_NAME, @@ -361,27 +359,27 @@ public static class Inputs extends RawOpInputs 1, there will be k-1 skipped cells between each filter + * {@code input}. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of - * `data_format`, see above for details. Dilations in the batch and depth + * {@code data_format}, see above for details. Dilations in the batch and depth * dimensions must be 1. 
*/ public final long[] dilations; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropFilterV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropFilterV2.java index a71091b235b..1b8a95c8728 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropFilterV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropFilterV2.java @@ -35,8 +35,6 @@ /** * Computes the gradients of convolution with respect to the filter. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Conv2dBackpropFilterV2.OP_NAME, @@ -354,27 +352,27 @@ public static class Inputs extends RawOpInputs 1, there will be k-1 skipped cells between each filter + * {@code input}. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of - * `data_format`, see above for details. Dilations in the batch and depth + * {@code data_format}, see above for details. Dilations in the batch and depth * dimensions must be 1. */ public final long[] dilations; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropInput.java index ff17146e323..fc0f5f296e1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropInput.java @@ -37,8 +37,6 @@ /** * Computes the gradients of convolution with respect to the input. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Conv2dBackpropInput.OP_NAME, @@ -360,27 +358,27 @@ public static class Inputs extends RawOpInputs 1, there will be k-1 skipped cells between each filter + * {@code input}. 
If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of - * `data_format`, see above for details. Dilations in the batch and depth + * {@code data_format}, see above for details. Dilations in the batch and depth * dimensions must be 1. */ public final long[] dilations; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropInputV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropInputV2.java index 44e9ef59aa0..04941640016 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropInputV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropInputV2.java @@ -35,8 +35,6 @@ /** * Computes the gradients of convolution with respect to the input. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Conv2dBackpropInputV2.OP_NAME, @@ -355,27 +353,27 @@ public static class Inputs extends RawOpInputs 1, there will be k-1 skipped cells between each filter + * {@code input}. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of - * `data_format`, see above for details. Dilations in the batch and depth + * {@code data_format}, see above for details. Dilations in the batch and depth * dimensions must be 1. */ public final long[] dilations; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3d.java index ebc6170eae0..7de4f93716d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3d.java @@ -40,8 +40,6 @@ * two waveforms as a function of a time-lag applied to one of them. 
This * is also known as a sliding dot product or sliding inner-product. *

    Our Conv3D implements a form of cross-correlation. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Conv3d.OP_NAME, @@ -242,7 +240,7 @@ public static class Inputs extends RawOpInputs> { /** * 1-D tensor of length 5. The stride of the sliding window for each - * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. */ public final long[] strides; @@ -253,18 +251,18 @@ public static class Inputs extends RawOpInputs> { /** * The data format of the input and output data. With the - * default format "NDHWC", the data is stored in the order of: - * [batch, in_depth, in_height, in_width, in_channels]. - * Alternatively, the format could be "NCDHW", the data storage order is: - * [batch, in_channels, in_depth, in_height, in_width]. + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. */ public final String dataFormat; /** * 1-D tensor of length 5. The dilation factor for each dimension of - * `input`. If set to k > 1, there will be k-1 skipped cells between each + * {@code input}. If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. The dimension order is determined by the - * value of `data_format`, see above for details. Dilations in the batch and + * value of {@code data_format}, see above for details. Dilations in the batch and * depth dimensions must be 1. 
*/ public final long[] dilations; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropFilter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropFilter.java index e7c35fbbe28..79970ac4d15 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropFilter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropFilter.java @@ -37,8 +37,6 @@ /** * Computes the gradients of 3-D convolution with respect to the filter. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Conv3dBackpropFilter.OP_NAME, @@ -253,7 +251,7 @@ public static class Inputs extends RawOpInputs extends RawOpInputs 1, there will be k-1 skipped cells between each + * {@code input}. If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. The dimension order is determined by the - * value of `data_format`, see above for details. Dilations in the batch and + * value of {@code data_format}, see above for details. Dilations in the batch and * depth dimensions must be 1. */ public final long[] dilations; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropInput.java index 4306849324d..d60306ab96d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropInput.java @@ -36,8 +36,6 @@ /** * Computes the gradients of 3-D convolution with respect to the input. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Conv3dBackpropInput.OP_NAME, @@ -252,7 +250,7 @@ public static class Inputs extends RawOpInputs extends RawOpInputs 1, there will be k-1 skipped cells between each + * {@code input}. 
If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. The dimension order is determined by the - * value of `data_format`, see above for details. Dilations in the batch and + * value of {@code data_format}, see above for details. Dilations in the batch and * depth dimensions must be 1. */ public final long[] dilations; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcBeamSearchDecoder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcBeamSearchDecoder.java index 56ad8a6d8a3..f270607bb50 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcBeamSearchDecoder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcBeamSearchDecoder.java @@ -43,8 +43,6 @@ * the first of these is emitted. That is, when the top path is "A B B B B", * "A B" is returned if merge_repeated = True but "A B B B B" is * returned if merge_repeated = False. - * - * @param data type for {@code log_probability} output */ @OpMetadata( opType = CtcBeamSearchDecoder.OP_NAME, @@ -204,7 +202,7 @@ public static class Inputs extends RawOpInputs sequenceLength; /** - * A scalar >= 0 (beam search beam width). + * A scalar >= 0 (beam search beam width). */ public final long beamWidth; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcGreedyDecoder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcGreedyDecoder.java index de01c874c33..688f60ab28e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcGreedyDecoder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcGreedyDecoder.java @@ -45,8 +45,6 @@ *

    Regardless of the value of merge_repeated, if the maximum index of a given * time and batch corresponds to the blank, index {@code (num_classes - 1)}, no new * element is emitted. - * - * @param data type for {@code log_probability} output */ @OpMetadata( opType = CtcGreedyDecoder.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcLoss.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcLoss.java index 3933b6dd5f8..8369dae6c75 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcLoss.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcLoss.java @@ -39,8 +39,6 @@ * Calculates the CTC Loss (log probability) for each batch entry. Also calculates * the gradient. This class performs the softmax operation for you, so inputs * should be e.g. linear projections of outputs by an LSTM. - * - * @param data type for {@code loss} output */ @OpMetadata( opType = CtcLoss.OP_NAME, @@ -246,7 +244,7 @@ public static class Inputs extends RawOpInputs> { public final boolean preprocessCollapseRepeated; /** - * Scalar. If set to false, *during* CTC calculation + * Scalar. If set to false, during CTC calculation * repeated non-blank labels will not be merged and are interpreted as * individual labels. This is a simplified version of CTC. 
*/ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNN.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNN.java index 60ad5093171..8845090aa6e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNN.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNN.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -72,13 +73,14 @@ * major. * reserve_space: An opaque tensor that can be used in backprop calculation. It * is only produced if is_training is true. - * - * @param data type for {@code output} output */ @OpMetadata( opType = CudnnRNN.OP_NAME, inputsClass = CudnnRNN.Inputs.class ) +@Operator( + group = "nn" +) public final class CudnnRNN extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNBackprop.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNBackprop.java index 2e0300fb057..a1e09f597ac 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNBackprop.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNBackprop.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -82,13 +83,14 @@ * shape as input_c. 
* params_backprop: The backprop to the params buffer in the forward pass. Has the * same shape as params. - * - * @param data type for {@code input_backprop} output */ @OpMetadata( opType = CudnnRNNBackprop.OP_NAME, inputsClass = CudnnRNNBackprop.Inputs.class ) +@Operator( + group = "nn" +) public final class CudnnRNNBackprop extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNCanonicalToParams.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNCanonicalToParams.java index a513cf67d66..0c38a68a23e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNCanonicalToParams.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNCanonicalToParams.java @@ -65,8 +65,6 @@ * seed2: the 2nd part of a seed to initialize dropout. * num_proj: The output dimensionality for the projection matrices. If None or 0, * no projection is performed. - * - * @param data type for {@code params} output */ @OpMetadata( opType = CudnnRNNCanonicalToParams.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNParamsToCanonical.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNParamsToCanonical.java index 6a1e55f34e2..b85a3568412 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNParamsToCanonical.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNParamsToCanonical.java @@ -65,8 +65,6 @@ * seed2: the 2nd part of a seed to initialize dropout. * num_proj: The output dimensionality for the projection matrices. If None or 0, * no projection is performed. 
- * - * @param data type for {@code weights} output */ @OpMetadata( opType = CudnnRNNParamsToCanonical.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRnnParamsSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRnnParamsSize.java index 051c792e878..1dbc4d48ad8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRnnParamsSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRnnParamsSize.java @@ -57,8 +57,6 @@ * compatible across GPUs. Please use CudnnRNNParamsWeights and * CudnnRNNParamsBiases to save and restore them in a way that is compatible * across different runs. - * - * @param data type for {@code params_size} output */ @OpMetadata( opType = CudnnRnnParamsSize.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatDimMap.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatDimMap.java index 3376ad9ed6e..6e83cd0c867 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatDimMap.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatDimMap.java @@ -36,8 +36,6 @@ /** * Returns the dimension index in the destination data format given the one in * the source data format. - * - * @param data type for {@code y} output */ @OpMetadata( opType = DataFormatDimMap.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatVecPermute.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatVecPermute.java index e02890a40ce..f719f7cc7ce 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatVecPermute.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatVecPermute.java @@ -64,8 +64,6 @@ *

      * [1, 2]
      * 
    - * - * @param data type for {@code y} output */ @OpMetadata( opType = DataFormatVecPermute.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthToSpace.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthToSpace.java index cceb78d27d1..2f1880cda02 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthToSpace.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthToSpace.java @@ -109,8 +109,6 @@ * [ [11], [12], [15], [16]]]] * * - * - * @param data type for {@code output} output */ @OpMetadata( opType = DepthToSpace.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNative.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNative.java index 57e6db5154f..93a0b744513 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNative.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNative.java @@ -52,8 +52,6 @@ * *

    Must have {@code strides[0] = strides[3] = 1}. For the most common case of the same * horizontal and vertices strides, {@code strides = [1, stride, stride, 1]}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = DepthwiseConv2dNative.OP_NAME, @@ -305,7 +303,7 @@ public static class Inputs extends RawOpInputs extends RawOpInputs 1, there will be k-1 skipped cells between each filter + * {@code input}. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of - * `data_format`, see above for details. Dilations in the batch and depth + * {@code data_format}, see above for details. Dilations in the batch and depth * dimensions must be 1. */ public final long[] dilations; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropFilter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropFilter.java index d53cd3b03b4..66eb190debf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropFilter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropFilter.java @@ -37,8 +37,6 @@ /** * Computes the gradients of depthwise convolution with respect to the filter. - * - * @param data type for {@code output} output */ @OpMetadata( opType = DepthwiseConv2dNativeBackpropFilter.OP_NAME, @@ -328,18 +326,18 @@ public static class Inputs extends RawOpInputs 1, there will be k-1 skipped cells between each filter + * {@code input}. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of - * `data_format`, see above for details. Dilations in the batch and depth + * {@code data_format}, see above for details. Dilations in the batch and depth * dimensions must be 1. 
*/ public final long[] dilations; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropInput.java index bea9149d3e4..287b29abba1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropInput.java @@ -37,8 +37,6 @@ /** * Computes the gradients of depthwise convolution with respect to the input. - * - * @param data type for {@code output} output */ @OpMetadata( opType = DepthwiseConv2dNativeBackpropInput.OP_NAME, @@ -328,18 +326,18 @@ public static class Inputs extends RawOpInputs 1, there will be k-1 skipped cells between each filter + * {@code input}. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of - * `data_format`, see above for details. Dilations in the batch and depth + * {@code data_format}, see above for details. Dilations in the batch and depth * dimensions must be 1. */ public final long[] dilations; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2d.java index 5354aefa6bb..019c786873c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2d.java @@ -57,8 +57,6 @@ * kernel size and contains all zeros. *

    Note on duality: The dilation of {@code input} by the {@code filter} is equal to the * negation of the erosion of {@code -input} by the reflected {@code filter}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Dilation2d.OP_NAME, @@ -152,13 +150,13 @@ public static class Inputs extends RawOpInputs> /** * The stride of the sliding window for each dimension of the input - * tensor. Must be: `[1, stride_height, stride_width, 1]`. + * tensor. Must be: {@code [1, stride_height, stride_width, 1]}. */ public final long[] strides; /** * The input stride for atrous morphological dilation. Must be: - * `[1, rate_height, rate_width, 1]`. + * {@code [1, rate_height, rate_width, 1]}. */ public final long[] rates; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropFilter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropFilter.java index b88393b8e73..cae841aee0d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropFilter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropFilter.java @@ -36,8 +36,6 @@ /** * Computes the gradient of morphological 2-D dilation with respect to the filter. - * - * @param data type for {@code filter_backprop} output */ @OpMetadata( opType = Dilation2dBackpropFilter.OP_NAME, @@ -139,13 +137,13 @@ public static class Inputs extends RawOpInputs data type for {@code in_backprop} output */ @OpMetadata( opType = Dilation2dBackpropInput.OP_NAME, @@ -139,13 +137,13 @@ public static class Inputs extends RawOpInputs *

    See Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) * - * - * @param data type for {@code activations} output */ @OpMetadata( opType = Elu.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/EluGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/EluGrad.java index 911c0b92978..4d32b6d365f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/EluGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/EluGrad.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * Computes gradients for the exponential linear (Elu) operation. - * - * @param data type for {@code backprops} output */ @OpMetadata( opType = EluGrad.OP_NAME, inputsClass = EluGrad.Inputs.class ) +@Operator( + group = "nn" +) public final class EluGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPool.java index fda9bdd66ae..bb525aac295 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPool.java @@ -41,8 +41,6 @@ * region generation step. The only difference is that after pooling regions are * generated, a mean operation is performed instead of a max operation in each * pooling region. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = FractionalAvgPool.OP_NAME, @@ -304,8 +302,8 @@ public static class Inputs extends RawOpInputs value; /** - * Pooling ratio for each dimension of `value`, currently only - * supports row and col dimension and should be >= 1.0. For example, a valid + * Pooling ratio for each dimension of {@code value}, currently only + * supports row and col dimension and should be >= 1.0. For example, a valid * pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements * must be 1.0 because we don't allow pooling on batch and channels * dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions @@ -315,8 +313,8 @@ public static class Inputs extends RawOpInputsBenjamin + * Graham, Fractional Max-Pooling for * difference between pseudorandom and random. */ public final boolean pseudoRandom; @@ -324,12 +322,9 @@ public static class Inputs extends RawOpInputs{@code index 0 1 2 3 4} + *

    {@code value 20 5 16 3 7} + *

    If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. * The result would be [41/3, 26/3] for fractional avg pooling. */ public final boolean overlapping; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPoolGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPoolGrad.java index b5abcf98128..eee42886ab1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPoolGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPoolGrad.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; @@ -40,13 +41,14 @@ * out_backprop to those indices that form the same pooling cell. Therefore, we * just need to know the shape of original input tensor, instead of the whole * tensor. - * - * @param data type for {@code output} output */ @OpMetadata( opType = FractionalAvgPoolGrad.OP_NAME, inputsClass = FractionalAvgPoolGrad.Inputs.class ) +@Operator( + group = "nn" +) public final class FractionalAvgPoolGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -182,12 +184,9 @@ public static class Inputs extends RawOpInputs{@code index 0 1 2 3 4} + *

    {@code value 20 5 16 3 7} + *

    If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. * The result would be [41/3, 26/3] for fractional avg pooling. */ public final boolean overlapping; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPool.java index fdbe02fc4ed..08bcbd1a63d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPool.java @@ -63,8 +63,6 @@ * *

    For more details on fractional max pooling, see this paper: * Benjamin Graham, Fractional Max-Pooling - * - * @param data type for {@code output} output */ @OpMetadata( opType = FractionalMaxPool.OP_NAME, @@ -326,8 +324,8 @@ public static class Inputs extends RawOpInputs value; /** - * Pooling ratio for each dimension of `value`, currently only - * supports row and col dimension and should be >= 1.0. For example, a valid + * Pooling ratio for each dimension of {@code value}, currently only + * supports row and col dimension and should be >= 1.0. For example, a valid * pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements * must be 1.0 because we don't allow pooling on batch and channels * dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions @@ -337,8 +335,8 @@ public static class Inputs extends RawOpInputsBenjamin + * Graham, Fractional Max-Pooling for * difference between pseudorandom and random. */ public final boolean pseudoRandom; @@ -346,12 +344,9 @@ public static class Inputs extends RawOpInputs{@code index 0 1 2 3 4} + *

    {@code value 20 5 16 3 7} + *

    If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. * The result would be [20, 16] for fractional max pooling. */ public final boolean overlapping; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPoolGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPoolGrad.java index 29c318e3770..d44e062ccf7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPoolGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPoolGrad.java @@ -29,19 +29,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; /** * Computes gradient of the FractionalMaxPool function. - * - * @param data type for {@code output} output */ @OpMetadata( opType = FractionalMaxPoolGrad.OP_NAME, inputsClass = FractionalMaxPoolGrad.Inputs.class ) +@Operator( + group = "nn" +) public final class FractionalMaxPoolGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -184,12 +186,9 @@ public static class Inputs extends RawOpInputs{@code index 0 1 2 3 4} + *

    {@code value 20 5 16 3 7} + *

    If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. * The result would be [20, 16] for fractional max pooling. */ public final boolean overlapping; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNorm.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNorm.java index 50a39f70b02..f5cede8855e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNorm.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNorm.java @@ -37,10 +37,6 @@ * Batch normalization. * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". * The size of 1D Tensors matches the dimension C of the 4D Tensors. - * - * @param data type for {@code y} output - * - * @param data type for {@code batch_mean} output */ @OpMetadata( opType = FusedBatchNorm.OP_NAME, @@ -338,7 +334,7 @@ public static class Inputs extends RawOpIn public final float exponentialAvgFactor; /** - * The data format for x and y. Either "NHWC" (default) or "NCHW". + * The data format for x and y. Either "NHWC" (default) or "NCHW". */ public final String dataFormat; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNormGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNormGrad.java index d6094f54d10..985249a19fe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNormGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNormGrad.java @@ -38,10 +38,6 @@ * Gradient for batch normalization. * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". * The size of 1D Tensors matches the dimension C of the 4D Tensors. 
- * - * @param data type for {@code x_backprop} output - * - * @param data type for {@code scale_backprop} output */ @OpMetadata( opType = FusedBatchNormGrad.OP_NAME, @@ -316,7 +312,7 @@ public static class Inputs extends RawOpIn /** * The data format for y_backprop, x, x_backprop. - * Either "NHWC" (default) or "NCHW". + * Either "NHWC" (default) or "NCHW". */ public final String dataFormat; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedPadConv2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedPadConv2d.java index aad10b64fea..336419f92ad 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedPadConv2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedPadConv2d.java @@ -48,8 +48,6 @@ * Internally this op uses a single per-graph scratch buffer, which means that it * will block if multiple versions are being run in parallel. This is because this * operator is primarily an optimization to minimize memory usage. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = FusedPadConv2d.OP_NAME, @@ -155,7 +153,7 @@ public static class Inputs extends RawOpInputs data type for {@code output} output */ @OpMetadata( opType = FusedResizeAndPadConv2d.OP_NAME, @@ -210,7 +208,7 @@ public static class Inputs extends RawOpInputs - * - * @param data type for {@code r} output */ @OpMetadata( opType = GRUBlockCell.OP_NAME, inputsClass = GRUBlockCell.Inputs.class ) +@Operator( + group = "nn" +) public final class GRUBlockCell extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCellGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCellGrad.java index 07959198356..7379a2790ba 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCellGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCellGrad.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; @@ -107,13 +108,14 @@ * * d_b_c = sum of d_c_bar along axis = 0 * - * - * @param data type for {@code d_x} output */ @OpMetadata( opType = GRUBlockCellGrad.OP_NAME, inputsClass = GRUBlockCellGrad.Inputs.class ) +@Operator( + group = "nn" +) public final class GRUBlockCellGrad extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/InvGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/InvGrad.java index 44fb6f47765..5f178f53e50 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/InvGrad.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/InvGrad.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -36,13 +37,14 @@ * Computes the gradient for the inverse of {@code x} wrt its input. * Specifically, {@code grad = -dy * y*y}, where {@code y = 1/x}, and {@code dy} * is the corresponding input gradient. - * - * @param data type for {@code z} output */ @OpMetadata( opType = InvGrad.OP_NAME, inputsClass = InvGrad.Inputs.class ) +@Operator( + group = "nn" +) public final class InvGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/IsotonicRegression.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/IsotonicRegression.java index 25508047cf7..ecd511253e8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/IsotonicRegression.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/IsotonicRegression.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -37,13 +38,14 @@ /** * Solves a batch of isotonic regression problems. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = IsotonicRegression.OP_NAME, inputsClass = IsotonicRegression.Inputs.class ) +@Operator( + group = "nn" +) public final class IsotonicRegression extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/L2Loss.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/L2Loss.java index e3b179e440c..9cc952c05cb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/L2Loss.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/L2Loss.java @@ -39,8 +39,6 @@ *

      * output = sum(t ** 2) / 2
      * 
    - * - * @param data type for {@code output} output */ @OpMetadata( opType = L2Loss.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCell.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCell.java index e0fe0976803..5b1e38d3fbe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCell.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCell.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; @@ -56,13 +57,14 @@ * co = tanh(cs) * h = co .* o * - * - * @param data type for {@code i} output */ @OpMetadata( opType = LSTMBlockCell.OP_NAME, inputsClass = LSTMBlockCell.Inputs.class ) +@Operator( + group = "nn" +) public final class LSTMBlockCell extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCellGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCellGrad.java index 7b96ccfbbc0..931e4bf2381 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCellGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCellGrad.java @@ -29,19 +29,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * Computes the LSTM cell backward propagation for 1 timestep. 
* This implementation is to be used in conjunction of LSTMBlockCell. - * - * @param data type for {@code cs_prev_grad} output */ @OpMetadata( opType = LSTMBlockCellGrad.OP_NAME, inputsClass = LSTMBlockCellGrad.Inputs.class ) +@Operator( + group = "nn" +) public final class LSTMBlockCellGrad extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LeakyRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LeakyRelu.java index 022b81f82da..a0f088f9a03 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LeakyRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LeakyRelu.java @@ -35,8 +35,6 @@ /** * Computes rectified linear: {@code max(features, features * alpha)}. - * - * @param data type for {@code activations} output */ @OpMetadata( opType = LeakyRelu.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalization.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalization.java index f0bb2b5017b..17c1e5c0d04 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalization.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalization.java @@ -46,8 +46,6 @@ * *

    For details, see Krizhevsky et al., ImageNet classification with deep * convolutional neural networks (NIPS 2012) . - * - * @param data type for {@code output} output */ @OpMetadata( opType = LocalResponseNormalization.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalizationGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalizationGrad.java index 2f32d0cb241..c0b795094aa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalizationGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalizationGrad.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * Gradients for Local Response Normalization. - * - * @param data type for {@code output} output */ @OpMetadata( opType = LocalResponseNormalizationGrad.OP_NAME, inputsClass = LocalResponseNormalizationGrad.Inputs.class ) +@Operator( + group = "nn" +) public final class LocalResponseNormalizationGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -233,7 +235,7 @@ public static class Inputs extends RawOpInputs 0 to avoid dividing by 0). + * An offset (usually > 0 to avoid dividing by 0). 
*/ public final float bias; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LogSoftmax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LogSoftmax.java index 1f9ee440140..1e19b56c19f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LogSoftmax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LogSoftmax.java @@ -39,8 +39,6 @@ *

      * logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
      * 
    - * - * @param data type for {@code logsoftmax} output */ @OpMetadata( opType = LogSoftmax.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool.java index b3dd99bd3bf..75b432b8ba3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool.java @@ -36,8 +36,6 @@ /** * Performs max pooling on the input. - * - * @param data type for {@code output} output */ @OpMetadata( opType = MaxPool.OP_NAME, @@ -178,10 +176,10 @@ public static class Inputs extends RawOpInputs> { /** * Specify the data format of the input and output data. With the - * default format "NHWC", the data is stored in the order of: - * [batch, in_height, in_width, in_channels]. - * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, in_channels, in_height, in_width]. + * default format "NHWC", the data is stored in the order of: + * [batch, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. */ public final String dataFormat; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3d.java index 5aacb5f1c17..d701189d5e1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3d.java @@ -36,8 +36,6 @@ /** * Performs 3D max pooling on the input. - * - * @param data type for {@code output} output */ @OpMetadata( opType = MaxPool3d.OP_NAME, @@ -166,13 +164,13 @@ public static class Inputs extends RawOpInputs> /** * 1-D tensor of length 5. 
The size of the window for each dimension of - * the input tensor. Must have `ksize[0] = ksize[4] = 1`. + * the input tensor. Must have {@code ksize[0] = ksize[4] = 1}. */ public final long[] ksize; /** * 1-D tensor of length 5. The stride of the sliding window for each - * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. */ public final long[] strides; @@ -183,10 +181,10 @@ public static class Inputs extends RawOpInputs> /** * The data format of the input and output data. With the - * default format "NDHWC", the data is stored in the order of: - * [batch, in_depth, in_height, in_width, in_channels]. - * Alternatively, the format could be "NCDHW", the data storage order is: - * [batch, in_channels, in_depth, in_height, in_width]. + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. */ public final String dataFormat; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGrad.java index ac313f4d45a..932399be80b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGrad.java @@ -36,8 +36,6 @@ /** * Computes gradients of 3D max pooling function. - * - * @param data type for {@code output} output */ @OpMetadata( opType = MaxPool3dGrad.OP_NAME, @@ -182,13 +180,13 @@ public static class Inputs extends RawOpIn /** * 1-D tensor of length 5. The size of the window for each dimension of - * the input tensor. Must have `ksize[0] = ksize[4] = 1`. + * the input tensor. Must have {@code ksize[0] = ksize[4] = 1}. 
*/ public final long[] ksize; /** * 1-D tensor of length 5. The stride of the sliding window for each - * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. */ public final long[] strides; @@ -199,10 +197,10 @@ public static class Inputs extends RawOpIn /** * The data format of the input and output data. With the - * default format "NDHWC", the data is stored in the order of: - * [batch, in_depth, in_height, in_width, in_channels]. - * Alternatively, the format could be "NCDHW", the data storage order is: - * [batch, in_channels, in_depth, in_height, in_width]. + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. */ public final String dataFormat; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGradGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGradGrad.java index 585f30e5aa9..74dbc598b35 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGradGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGradGrad.java @@ -36,8 +36,6 @@ /** * Computes second-order gradients of the maxpooling function. - * - * @param data type for {@code output} output */ @OpMetadata( opType = MaxPool3dGradGrad.OP_NAME, @@ -181,13 +179,13 @@ public static class Inputs extends RawOpInputs extends RawOpInputs data type for {@code output} output */ @OpMetadata( opType = MaxPoolGrad.OP_NAME, @@ -188,10 +186,10 @@ public static class Inputs extends RawOpInputs /** * Specify the data format of the input and output data. With the - * default format "NHWC", the data is stored in the order of: - * [batch, in_height, in_width, in_channels]. 
- * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, in_channels, in_height, in_width]. + * default format "NHWC", the data is stored in the order of: + * [batch, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. */ public final String dataFormat; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGrad.java index 7189844a8d6..0b0f0f616b7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGrad.java @@ -36,8 +36,6 @@ /** * Computes second-order gradients of the maxpooling function. - * - * @param data type for {@code output} output */ @OpMetadata( opType = MaxPoolGradGrad.OP_NAME, @@ -188,10 +186,10 @@ public static class Inputs extends RawOpInputs data type for {@code output} output */ @OpMetadata( opType = MaxPoolGradGradWithArgmax.OP_NAME, @@ -189,7 +187,7 @@ public static class Inputs extends RawOpInputs data type for {@code output} output */ @OpMetadata( opType = MaxPoolGradWithArgmax.OP_NAME, inputsClass = MaxPoolGradWithArgmax.Inputs.class ) +@Operator( + group = "nn" +) public final class MaxPoolGradWithArgmax extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -185,7 +187,7 @@ public static class Inputs extends RawOpInputs data type for {@code output} output - * - * @param data type for {@code argmax} output */ @OpMetadata( opType = MaxPoolWithArgmax.OP_NAME, @@ -221,7 +217,7 @@ public static class Inputs extends RawOpInputs * values.shape = input.shape[:-1] * - * - * @param data type for {@code values} output */ @OpMetadata( opType = NthElement.OP_NAME, diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedAvgPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedAvgPool.java index 2e27d649947..8987fcd7d55 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedAvgPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedAvgPool.java @@ -37,8 +37,6 @@ /** * Produces the average pool of the input tensor for quantized types. - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedAvgPool.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBatchNormWithGlobalNormalization.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBatchNormWithGlobalNormalization.java index 0b9e3b27b55..7f22995509c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBatchNormWithGlobalNormalization.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBatchNormWithGlobalNormalization.java @@ -39,8 +39,6 @@ * Quantized Batch normalization. * This op is deprecated and will be removed in the future. Prefer * {@code tf.nn.batch_normalization}. - * - * @param data type for {@code result} output */ @OpMetadata( opType = QuantizedBatchNormWithGlobalNormalization.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBiasAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBiasAdd.java index c95300fa493..744eb1397eb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBiasAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBiasAdd.java @@ -38,8 +38,6 @@ /** * Adds Tensor 'bias' to Tensor 'input' for Quantized types. 
* Broadcasts the values of bias on dimensions 0..N-2 of 'input'. - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedBiasAdd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRelu.java index a09b4b1ccdf..9226b7b697e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRelu.java @@ -31,19 +31,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; /** * The QuantizedConv2DAndRelu operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConv2DAndRelu.OP_NAME, inputsClass = QuantizedConv2DAndRelu.Inputs.class ) +@Operator( + group = "nn" +) public final class QuantizedConv2DAndRelu extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndReluAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndReluAndRequantize.java index 4c39a3d850b..f02eba09012 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndReluAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndReluAndRequantize.java @@ -31,19 +31,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import 
org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; /** * The QuantizedConv2DAndReluAndRequantize operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConv2DAndReluAndRequantize.OP_NAME, inputsClass = QuantizedConv2DAndReluAndRequantize.Inputs.class ) +@Operator( + group = "nn" +) public final class QuantizedConv2DAndReluAndRequantize extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRequantize.java index 52ad5aad565..66344508160 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRequantize.java @@ -31,19 +31,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; /** * The QuantizedConv2DAndRequantize operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConv2DAndRequantize.OP_NAME, inputsClass = QuantizedConv2DAndRequantize.Inputs.class ) +@Operator( + group = "nn" +) public final class QuantizedConv2DAndRequantize extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DPerChannel.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DPerChannel.java index 
bbc48628147..bfd108c34d3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DPerChannel.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DPerChannel.java @@ -31,19 +31,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; /** * Computes QuantizedConv2D per channel. - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConv2DPerChannel.OP_NAME, inputsClass = QuantizedConv2DPerChannel.Inputs.class ) +@Operator( + group = "nn" +) public final class QuantizedConv2DPerChannel extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBias.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBias.java index a1b8bba235e..fe5566ac7e9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBias.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBias.java @@ -31,19 +31,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; /** * The QuantizedConv2DWithBias operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConv2DWithBias.OP_NAME, inputsClass = QuantizedConv2DWithBias.Inputs.class ) +@Operator( + group = "nn" +) public final class 
QuantizedConv2DWithBias extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRelu.java index e9e3fc45325..ff7d157a846 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRelu.java @@ -31,19 +31,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; /** * The QuantizedConv2DWithBiasAndRelu operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConv2DWithBiasAndRelu.OP_NAME, inputsClass = QuantizedConv2DWithBiasAndRelu.Inputs.class ) +@Operator( + group = "nn" +) public final class QuantizedConv2DWithBiasAndRelu extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndReluAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndReluAndRequantize.java index 84787fb55ca..b68080cc72c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndReluAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndReluAndRequantize.java @@ -31,19 +31,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import 
org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; /** * The QuantizedConv2DWithBiasAndReluAndRequantize operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConv2DWithBiasAndReluAndRequantize.OP_NAME, inputsClass = QuantizedConv2DWithBiasAndReluAndRequantize.Inputs.class ) +@Operator( + group = "nn" +) public final class QuantizedConv2DWithBiasAndReluAndRequantize extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRequantize.java index 82bbd0dab73..5301017e666 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRequantize.java @@ -31,19 +31,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; /** * The QuantizedConv2DWithBiasAndRequantize operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConv2DWithBiasAndRequantize.OP_NAME, inputsClass = QuantizedConv2DWithBiasAndRequantize.Inputs.class ) +@Operator( + group = "nn" +) public final class QuantizedConv2DWithBiasAndRequantize extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.java index 38e1c2b09d3..687e41485d4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.java @@ -31,19 +31,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; /** * The QuantizedConv2DWithBiasSignedSumAndReluAndRequantize operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.OP_NAME, inputsClass = QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.Inputs.class ) +@Operator( + group = "nn" +) public final class QuantizedConv2DWithBiasSignedSumAndReluAndRequantize extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndRelu.java index 0a982087a43..34ceb6e7898 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndRelu.java @@ -31,19 +31,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; 
import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; /** * The QuantizedConv2DWithBiasSumAndRelu operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConv2DWithBiasSumAndRelu.OP_NAME, inputsClass = QuantizedConv2DWithBiasSumAndRelu.Inputs.class ) +@Operator( + group = "nn" +) public final class QuantizedConv2DWithBiasSumAndRelu extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndReluAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndReluAndRequantize.java index 67a018207b2..021873d6885 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndReluAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndReluAndRequantize.java @@ -31,19 +31,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; /** * The QuantizedConv2DWithBiasSumAndReluAndRequantize operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConv2DWithBiasSumAndReluAndRequantize.OP_NAME, inputsClass = QuantizedConv2DWithBiasSumAndReluAndRequantize.Inputs.class ) +@Operator( + group = "nn" +) public final class QuantizedConv2DWithBiasSumAndReluAndRequantize extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2d.java index ff2b1bb989a..77d21ba9794 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2d.java @@ -42,8 +42,6 @@ * number of the associated minimum, and the highest represents the maximum. * This means that you can only interpret the quantized output in the same way, by * taking the returned minimum and maximum values into account. - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConv2d.OP_NAME, @@ -283,9 +281,9 @@ public static class Inputs extends RawOpInputs> { /** * 1-D tensor of length 4. The dilation factor for each dimension of - * `input`. If set to k > 1, there will be k-1 skipped cells between each + * {@code input}. If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. The dimension order is determined by the - * value of `data_format`, see above for details. Dilations in the batch and + * value of {@code data_format}, see above for details. Dilations in the batch and * depth dimensions must be 1. 
*/ public final long[] dilations; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2D.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2D.java index 87106dc7ecd..3281b31698b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2D.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2D.java @@ -31,19 +31,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; /** * Computes quantized depthwise Conv2D. - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedDepthwiseConv2D.OP_NAME, inputsClass = QuantizedDepthwiseConv2D.Inputs.class ) +@Operator( + group = "nn" +) public final class QuantizedDepthwiseConv2D extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBias.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBias.java index c85c050c25b..70314ace0b0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBias.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBias.java @@ -31,19 +31,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import 
org.tensorflow.types.family.TNumber; /** * Computes quantized depthwise Conv2D with Bias. - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedDepthwiseConv2DWithBias.OP_NAME, inputsClass = QuantizedDepthwiseConv2DWithBias.Inputs.class ) +@Operator( + group = "nn" +) public final class QuantizedDepthwiseConv2DWithBias extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndRelu.java index d85101d528e..76b0917f709 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndRelu.java @@ -31,19 +31,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; /** * Computes quantized depthwise Conv2D with Bias and Relu. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedDepthwiseConv2DWithBiasAndRelu.OP_NAME, inputsClass = QuantizedDepthwiseConv2DWithBiasAndRelu.Inputs.class ) +@Operator( + group = "nn" +) public final class QuantizedDepthwiseConv2DWithBiasAndRelu extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.java index 2a70d8f3bd2..55dfdecdb39 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.java @@ -31,19 +31,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; /** * Computes quantized depthwise Conv2D with Bias, Relu and Requantize. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.OP_NAME, inputsClass = QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.Inputs.class ) +@Operator( + group = "nn" +) public final class QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedInstanceNorm.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedInstanceNorm.java index af8f32f2353..48aedde6806 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedInstanceNorm.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedInstanceNorm.java @@ -36,8 +36,6 @@ /** * Quantized Instance normalization. - * - * @param data type for {@code y} output */ @OpMetadata( opType = QuantizedInstanceNorm.OP_NAME, @@ -287,19 +285,19 @@ public static class Inputs extends RawOpInputs extends RawOpInputs data type for {@code output} output */ @OpMetadata( opType = QuantizedMaxPool.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu.java index b80e07346d9..ad55085ab6f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu.java @@ -37,8 +37,6 @@ /** * Computes Quantized Rectified Linear: {@code max(features, 0)} - * - * @param data type for {@code activations} output */ @OpMetadata( opType = QuantizedRelu.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu6.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu6.java 
index d820e51188a..2b2f21a6b45 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu6.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu6.java @@ -37,8 +37,6 @@ /** * Computes Quantized Rectified Linear 6: {@code min(max(features, 0), 6)} - * - * @param data type for {@code activations} output */ @OpMetadata( opType = QuantizedRelu6.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedReluX.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedReluX.java index 577df61b8dd..41daae389b6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedReluX.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedReluX.java @@ -37,8 +37,6 @@ /** * Computes Quantized Rectified Linear X: {@code min(max(features, 0), max_value)} - * - * @param data type for {@code activations} output */ @OpMetadata( opType = QuantizedReluX.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu.java index 218fee4f3d2..126eb0c4c56 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu.java @@ -45,8 +45,6 @@ * * * - * - * @param data type for {@code activations} output */ @OpMetadata( opType = Relu.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6.java index 19de03d7f8e..5500229b21c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6.java @@ -35,8 +35,6 @@ /** * Computes 
rectified linear 6: {@code min(max(features, 0), 6)}. - * - * @param data type for {@code activations} output */ @OpMetadata( opType = Relu6.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6Grad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6Grad.java index 34ccb4a740f..9af8b816d87 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6Grad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6Grad.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * Computes rectified linear 6 gradients for a Relu6 operation. - * - * @param data type for {@code backprops} output */ @OpMetadata( opType = Relu6Grad.OP_NAME, inputsClass = Relu6Grad.Inputs.class ) +@Operator( + group = "nn" +) public final class Relu6Grad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/ReluGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/ReluGrad.java index 15b361b3924..b15132dd583 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/ReluGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/ReluGrad.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * Computes rectified linear gradients for a Relu operation. 
- * - * @param data type for {@code backprops} output */ @OpMetadata( opType = ReluGrad.OP_NAME, inputsClass = ReluGrad.Inputs.class ) +@Operator( + group = "nn" +) public final class ReluGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Selu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Selu.java index d382a2f5a75..33d504105ec 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Selu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Selu.java @@ -40,8 +40,6 @@ * {@code initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')}. * For correct dropout, use {@code tf.contrib.nn.alpha_dropout}. *

    See Self-Normalizing Neural Networks - * - * @param data type for {@code activations} output */ @OpMetadata( opType = Selu.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SeluGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SeluGrad.java index a1f1f50785a..bd2d2203f69 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SeluGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SeluGrad.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * Computes gradients for the scaled exponential linear (Selu) operation. - * - * @param data type for {@code backprops} output */ @OpMetadata( opType = SeluGrad.OP_NAME, inputsClass = SeluGrad.Inputs.class ) +@Operator( + group = "nn" +) public final class SeluGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softmax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softmax.java index 36ef20f21fd..dd6b9ecb2b5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softmax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softmax.java @@ -39,8 +39,6 @@ *

      * $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$
      * 
    - * - * @param data type for {@code softmax} output */ @OpMetadata( opType = Softmax.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java index 9a17188c048..a7836f24051 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java @@ -36,8 +36,6 @@ /** * Computes softmax cross entropy cost and gradients to backpropagate. * Inputs are the logits, not probabilities. - * - * @param data type for {@code loss} output */ @OpMetadata( opType = SoftmaxCrossEntropyWithLogits.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softsign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softsign.java index 1345a1ffd11..1144c4c21be 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softsign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softsign.java @@ -35,8 +35,6 @@ /** * Computes softsign: {@code features / (abs(features) + 1)}. 
- * - * @param data type for {@code activations} output */ @OpMetadata( opType = Softsign.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftsignGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftsignGrad.java index 129e475474c..3ebe407b08e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftsignGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftsignGrad.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * Computes softsign gradients for a softsign operation. - * - * @param data type for {@code backprops} output */ @OpMetadata( opType = SoftsignGrad.OP_NAME, inputsClass = SoftsignGrad.Inputs.class ) +@Operator( + group = "nn" +) public final class SoftsignGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToBatch.java index 050a12e7f98..e35f65ee574 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToBatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToBatch.java @@ -100,8 +100,6 @@ * *

    Among others, this operation is useful for reducing atrous convolution into * regular convolution. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SpaceToBatch.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToDepth.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToDepth.java index 18449c4627c..aaaddf55663 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToDepth.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToDepth.java @@ -103,8 +103,6 @@ * [[9, 10, 11, 12], * [13, 14, 15, 16]]]] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = SpaceToDepth.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 043587de9b5..1b7c99a694e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -40,8 +40,6 @@ * of features. This label is considered to have probability 1.0 for the * given row. *

    Inputs are the logits, not probabilities. - * - * @param data type for {@code loss} output */ @OpMetadata( opType = SparseSoftmaxCrossEntropyWithLogits.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/TopK.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/TopK.java index 49552b20020..5185b5fd785 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/TopK.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/TopK.java @@ -46,10 +46,6 @@ * values.shape = indices.shape = input.shape[:-1] + [k] * *

    If two elements are equal, the lower-index element appears first. - * - * @param data type for {@code values} output - * - * @param data type for {@code indices} output */ @OpMetadata( opType = TopK.OP_NAME, @@ -122,7 +118,7 @@ public static TopK create(Scope sco describeByClass = true ) public static TopK create(Scope scope, Operand input, - Operand k, Options[] options) { + Operand k, Options... options) { return create(scope, input, k, TInt32.class, options); } @@ -193,7 +189,7 @@ public static class Inputs extends RawOpInputs> { public final Operand k; /** - * If true the resulting `k` elements will be sorted by the values in + * If true the resulting {@code k} elements will be sorted by the values in * descending order. */ public final boolean sorted; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/UniformQuantizedConvolution.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/UniformQuantizedConvolution.java index fbc664f3e68..124c2b062f0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/UniformQuantizedConvolution.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/UniformQuantizedConvolution.java @@ -31,6 +31,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -54,13 +55,14 @@ * *

    {@code output} is also quantized, using the same formula. * If {@code rhs} is per-tensor quantized, {@code output} must be also per-tensor quantized. - * - * @param data type for {@code output} output */ @OpMetadata( opType = UniformQuantizedConvolution.OP_NAME, inputsClass = UniformQuantizedConvolution.Inputs.class ) +@Operator( + group = "nn" +) public final class UniformQuantizedConvolution extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -673,132 +675,131 @@ public static class Inputs extends RawOpInputs outputZeroPoints; /** - * The type of `lhs` and `rhs` input `Tensor`. + * The type of {@code lhs} and {@code rhs} input {@code Tensor}. */ public final DataType Tin; /** - * The type of `output` `Tensor`. + * The type of {@code output} {@code Tensor}. */ public final DataType Tout; /** - * The stride of the sliding window for each spatial dimension of `lhs`. + * The stride of the sliding window for each spatial dimension of {@code lhs}. * Must be an empty list (default) or a list of size (number of spatial dimensions). * If an empty list is provided, the stride for each spatial dimension is set to 1. */ public final long[] windowStrides; /** - * string from: `"SAME"`, `"VALID"`, or `"EXPLICIT"`, indicating the type of padding algorithm to use. + * string from: {@code "SAME"}, {@code "VALID"}, or {@code "EXPLICIT"}, indicating the type of padding algorithm to use. */ public final String padding; /** - * If `padding` is `"EXPLICIT"`, must be set as a list indicating - * the explicit paddings at the start and end of each `lhs` spatial dimension. + * If {@code padding} is {@code "EXPLICIT"}, must be set as a list indicating + * the explicit paddings at the start and end of each {@code lhs} spatial dimension. * Otherwise, this must be empty. 
- * - * (If used,) Must be a list of size `2 * (number of lhs spatial dimensions)`, - * where `(explicit_padding[2 * i], explicit_padding[2 * i + 1])` indicates - * `(start_padding, end_padding)` of `spatial_dimensions[i]`. + *

    (If used,) Must be a list of size {@code 2 * (number of lhs spatial dimensions)}, + * where {@code (explicit_padding[2 * i], explicit_padding[2 * i + 1])} indicates + * {@code (start_padding, end_padding)} of {@code spatial_dimensions[i]}. */ public final long[] explicitPadding; /** - * The dilation factor to apply in each spatial dimension of `lhs`. - * Must be an empty list (default) or a list of size (number of `lhs` spatial dimensions). - * If empty list, the dilation for each `lhs` spatial dimension is set to 1. + * The dilation factor to apply in each spatial dimension of {@code lhs}. + * Must be an empty list (default) or a list of size (number of {@code lhs} spatial dimensions). + * If empty list, the dilation for each {@code lhs} spatial dimension is set to 1. */ public final long[] lhsDilation; /** - * The dilation factor to apply in each spatial dimension of `rhs`. - * Must be an empty list (default) or a list of size (number of `rhs` spatial dimensions). - * If empty list, the dilation for each `rhs` spatial dimension is set to 1. + * The dilation factor to apply in each spatial dimension of {@code rhs}. + * Must be an empty list (default) or a list of size (number of {@code rhs} spatial dimensions). + * If empty list, the dilation for each {@code rhs} spatial dimension is set to 1. */ public final long[] rhsDilation; /** * The number of batch groups. Used for grouped filters. - * Must be a divisor of `output_feature`. + * Must be a divisor of {@code output_feature}. */ public final long batchGroupCount; /** * The number of feature groups. Used for grouped convolutions. - * Must be a divisor of both `lhs_feature` and `output_feature`. + * Must be a divisor of both {@code lhs_feature} and {@code output_feature}. */ public final long featureGroupCount; /** * Structure of dimension information for the convolution op. - * Must be an empty string (default) or a serialized string of `tensorflow.UniformQuantizedConvolutionDimensionNumbersAttr` proto. 
- * If empty string, the default is `("NCHW", "OIHW", "NCHW")` (for a 2D convolution). + * Must be an empty string (default) or a serialized string of {@code tensorflow.UniformQuantizedConvolutionDimensionNumbersAttr} proto. + * If empty string, the default is {@code ("NCHW", "OIHW", "NCHW")} (for a 2D convolution). */ public final String dimensionNumbers; /** * Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. * If set to -1 (default), this indicates per-tensor quantization. - * For the `lhs`, only per-tensor quantization is supported. + * For the {@code lhs}, only per-tensor quantization is supported. * Thus, this must be set to -1. * Other values will raise error at OpKernel construction. */ public final long lhsQuantizationAxis; /** - * The min value of the quantized data stored in `lhs`. - * For example, if `Tin` is `qint8`, this must be set to -127 if narrow range quantized or -128 if not. + * The min value of the quantized data stored in {@code lhs}. + * For example, if {@code Tin} is {@code qint8}, this must be set to -127 if narrow range quantized or -128 if not. */ public final long lhsQuantizationMinVal; /** - * The max value of the quantized data stored in `lhs`. - * For example, if `Tin` is `qint8`, this must be set to 127. + * The max value of the quantized data stored in {@code lhs}. + * For example, if {@code Tin} is {@code qint8}, this must be set to 127. */ public final long lhsQuantizationMaxVal; /** * Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. * If set to -1 (default), this indicates per-tensor quantization. - * For the `rhs`, only per-tensor quantization - * or per-channel quantization along `kernel_output_feature_dimension` is supported. - * Thus, this must be set to -1 or `dimension_numbers.kernel_output_feature_dimension`. 
+ * For the {@code rhs}, only per-tensor quantization + * or per-channel quantization along {@code kernel_output_feature_dimension} is supported. + * Thus, this must be set to -1 or {@code dimension_numbers.kernel_output_feature_dimension}. * Other values will raise error at OpKernel construction. */ public final long rhsQuantizationAxis; /** - * The min value of the quantized data stored in `rhs`. - * For example, if `Tin` is `qint8`, this must be set to -127 if narrow range quantized or -128 if not. + * The min value of the quantized data stored in {@code rhs}. + * For example, if {@code Tin} is {@code qint8}, this must be set to -127 if narrow range quantized or -128 if not. */ public final long rhsQuantizationMinVal; /** - * The max value of the quantized data stored in `rhs`. - * For example, if `Tin` is `qint8`, this must be set to 127. + * The max value of the quantized data stored in {@code rhs}. + * For example, if {@code Tin} is {@code qint8}, this must be set to 127. */ public final long rhsQuantizationMaxVal; /** * Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. * If set to -1 (default), this indicates per-tensor quantization. - * For the `output`, only per-tensor quantization or per-channel quantization along `output_feature_dimension` is supported. - * Thus, this must be set to -1 or `dimension_numbers.output_feature_dimension`. + * For the {@code output}, only per-tensor quantization or per-channel quantization along {@code output_feature_dimension} is supported. + * Thus, this must be set to -1 or {@code dimension_numbers.output_feature_dimension}. * Other values will raise error at OpKernel construction. */ public final long outputQuantizationAxis; /** - * The min value of the quantized data stored in `output`. - * For example, if `Tout` is `qint8`, this must be set to -127 if narrow range quantized or -128 if not. 
+ * The min value of the quantized data stored in {@code output}. + * For example, if {@code Tout} is {@code qint8}, this must be set to -127 if narrow range quantized or -128 if not. */ public final long outputQuantizationMinVal; /** - * The max value of the quantized data stored in `output`. - * For example, if `Tout` is `qint8`, this must be set to 127. + * The max value of the quantized data stored in {@code output}. + * For example, if {@code Tout} is {@code qint8}, this must be set to 127. */ public final long outputQuantizationMaxVal; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/UniformQuantizedConvolutionHybrid.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/UniformQuantizedConvolutionHybrid.java index 5a40bfa376f..8510272759e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/UniformQuantizedConvolutionHybrid.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/UniformQuantizedConvolutionHybrid.java @@ -31,6 +31,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -54,13 +55,14 @@ * *

    {@code rhs} must be quantized Tensor, where its data value is quantized using the formula: * quantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val). - * - * @param data type for {@code output} output */ @OpMetadata( opType = UniformQuantizedConvolutionHybrid.OP_NAME, inputsClass = UniformQuantizedConvolutionHybrid.Inputs.class ) +@Operator( + group = "nn" +) public final class UniformQuantizedConvolutionHybrid extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -540,12 +542,12 @@ public static class Inputs extends RawOpInputs rhsZeroPoints; /** - * The type of `lhs` input Tensor. + * The type of {@code lhs} input Tensor. */ public final DataType Tlhs; /** - * The type of `rhs` (quantized) input Tensor. + * The type of {@code rhs} (quantized) input Tensor. */ public final DataType Trhs; @@ -555,37 +557,36 @@ public static class Inputs extends RawOpInputs(If used,) Must be a list of size 2 * (number of lhs spatial dimensions), * where (explicit_padding[2 * i], explicit_padding[2 * i + 1]) indicates * spatial_dimensions[i] (start_padding, end_padding). */ public final long[] explicitPadding; /** - * The dilation factor to apply in each spatial dimension of `lhs`. + * The dilation factor to apply in each spatial dimension of {@code lhs}. * Must be an empty list (default) or a list of size (number of lhs spatial dimensions). * If empty list, the dilation for each lhs spatial dimension is set to 1. */ public final long[] lhsDilation; /** - * The dilation factor to apply in each spatial dimension of `rhs`. + * The dilation factor to apply in each spatial dimension of {@code rhs}. * Must be an empty list (default) or a list of size (number of rhs spatial dimensions). * If empty list, the dilation for each rhs spatial dimension is set to 1. 
*/ @@ -606,29 +607,29 @@ public static class Inputs extends RawOpInputs - * - * @param data type for {@code output} output */ @OpMetadata( opType = Dequantize.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxArgsGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxArgsGradient.java index 4f3b11ce977..87007c73d6b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxArgsGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxArgsGradient.java @@ -136,6 +136,38 @@ public static Options narrowRange(Boolean narrowRange) { * Gets backprops. * Backpropagated gradients below the FakeQuantWithMinMaxArgs operation: * {@code gradients * (inputs >= min && inputs <= max)}. + *

    +   * import tensorflow as tf
    +   *
    +   * # Define some sample data
    +   * gradients = tf.random.uniform((2, 3), minval=-5.0, maxval=5.0, dtype=tf.float32)
    +   * inputs = tf.random.uniform((2, 3), minval=-10.0, maxval=10.0, dtype=tf.float32)
    +   *
    +   * # Define quantization parameters (adjust as needed)
    +   * min_val = -2.0
    +   * max_val = 8.0
    +   * num_bits = 4  # Number of bits for quantization
    +   *
    +   * # Calculate gradients for fake quantization with specified parameters
    +   * output_gradients = tf.quantization.fake_quant_with_min_max_args_gradient(
    +   *     gradients=gradients, inputs=inputs, min=min_val, max=max_val, num_bits=num_bits, narrow_range = False, name=None
    +   * )
    +   *
    +   * # Print the original gradients and the gradients after the fake-quant operation
    +   * print("Original Gradients:")
    +   * print(gradients)
    +   * print("\nGradients after Fake-Quantization:")
    +   * print(output_gradients)
    +   *
    +   * 
    + *

    #Original Gradients: + * #tf.Tensor( + * #[[ 1.242547 3.217492 3.568469 ] + * #[-0.55371046 0.23130894 2.608243 ]], shape=(2, 3), dtype=float32) + *

    #Gradients after Fake-Quantization: + * #tf.Tensor( + * #[[ 0. 3.217492 3.568469 ] + * [-0.55371046 0.23130894 2.608243 ]], shape=(2, 3), dtype=float32)
    * @return backprops. */ public Output backprops() { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVars.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVars.java index faa38bb0585..e78b22ca6d5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVars.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVars.java @@ -57,6 +57,28 @@ * *

    This operation has a gradient and thus allows for training {@code min} and {@code max} * values. + *

    + *
    + *
    + *

    constant_input = tf.constant([[1.2, -0.3, 0.7], [2.1, 0.5, -1.0]], dtype=tf.float32) + *

    min_val = -0.5 + * max_val = 0.8 + * num_bits = 8 + * narrow_range = False #False:for the quantization range [0; 2^num_bits - 1] + *

    quantized_data = tf.quantization.fake_quant_with_min_max_vars( + * ... inputs=constant_input, min=min_val, max=max_val, num_bits=num_bits, narrow_range=narrow_range + * ... ) + *

    print("Input:\n", constant_input.numpy()) + * Input: + * [[ 1.2 -0.3 0.7] + * [ 2.1 0.5 -1. ]] + * print("Output:\n", quantized_data.numpy()) + * Output: + * [[ 0.8003921 -0.3007843 0.6984313] + * [ 0.8003921 0.4996078 -0.4996078]] + *

    + *
    + *
    */ @OpMetadata( opType = FakeQuantWithMinMaxVars.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Quantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Quantize.java index a6a5df07a8a..ed34d301ec7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Quantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Quantize.java @@ -128,8 +128,6 @@ *

    Ensures the minimum quantization range is at least this value. * The legacy default value for this is 0.01, but it is strongly suggested to * set it to 0 for new uses. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Quantize.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantize.java index b6552257828..eeb9f05536c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantize.java @@ -38,8 +38,6 @@ * Quantizes then dequantizes a tensor. * This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a * tensor, so its value can change during training. - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizeAndDequantize.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV3.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV3.java index a715ecdb8e5..e1de6cd2ab7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV3.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV3.java @@ -38,8 +38,6 @@ * Quantizes then dequantizes a tensor. * This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a * tensor, so its value can change during training. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizeAndDequantizeV3.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4.java index 161eae5c2f2..7de2e59c64b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4.java @@ -37,8 +37,6 @@ * Quantizes then dequantizes a tensor. * This is almost identical to QuantizeAndDequantizeV2, except that it returns a * gradient of 1 for inputs that are within the quantization range, or 0 otherwise. - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizeAndDequantizeV4.OP_NAME, @@ -114,7 +112,7 @@ public static QuantizeAndDequantizeV4 create(Scope scope, * Sets the signedInput option. * * @param signedInput Whether the quantization is signed or unsigned. (actually this parameter should - * have been called {@code signed_output}</b>) + * have been called {@code signed_output}) * @return this Options instance. */ public static Options signedInput(Boolean signedInput) { @@ -218,7 +216,7 @@ private Options() { * Sets the signedInput option. * * @param signedInput Whether the quantization is signed or unsigned. (actually this parameter should - * have been called {@code signed_output}</b>) + * have been called {@code signed_output}) * @return this Options instance. */ public Options signedInput(Boolean signedInput) { @@ -317,7 +315,7 @@ public static class Inputs extends RawOpInputs`signed_output`) + * have been called {@code signed_output}) */ public final boolean signedInput; @@ -327,7 +325,7 @@ public static class Inputs extends RawOpInputs extends RawOpInputs + *

  • HALF_TO_EVEN: this is the default round_mode.
  • + *
  • HALF_UP: round towards positive. In this mode 7.5 rounds up to 8 and -7.5 + * rounds up to -7.
  • + * */ public final String roundMode; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4Grad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4Grad.java index d2d9d9e6035..65cf77c43ca 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4Grad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4Grad.java @@ -37,8 +37,6 @@ * Returns the gradient of {@code QuantizeAndDequantizeV4}. * Returns a gradient of 1 for inputs that are within the quantization range, * or 0 otherwise. - * - * @param data type for {@code input_backprop} output */ @OpMetadata( opType = QuantizeAndDequantizeV4Grad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeDownAndShrinkRange.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeDownAndShrinkRange.java index d8aee82efb2..77aaa257758 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeDownAndShrinkRange.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeDownAndShrinkRange.java @@ -56,8 +56,6 @@ * input values that only uses a small fraction of the possible range. By feeding * that output into this operator, we can reduce it from 32 bits down to 8 with * minimal loss of accuracy. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizeDownAndShrinkRange.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedConcat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedConcat.java index cae65990d35..a52e49b8080 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedConcat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedConcat.java @@ -38,8 +38,6 @@ /** * Concatenates quantized tensors along one dimension. - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConcat.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndDequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndDequantize.java index bf2f1e9193c..c03a82caf5c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndDequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndDequantize.java @@ -30,19 +30,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; /** * The QuantizedMatMulWithBiasAndDequantize operation - * - * @param data type for {@code out} output */ @OpMetadata( opType = QuantizedMatMulWithBiasAndDequantize.OP_NAME, inputsClass = QuantizedMatMulWithBiasAndDequantize.Inputs.class ) +@Operator( + group = "quantization" +) public final class QuantizedMatMulWithBiasAndDequantize extends RawOp 
implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndRequantize.java index 32f81098f3c..b848d068a15 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndRequantize.java @@ -30,19 +30,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; /** * The QuantizedMatMulWithBiasAndRequantize operation - * - * @param data type for {@code out} output */ @OpMetadata( opType = QuantizedMatMulWithBiasAndRequantize.OP_NAME, inputsClass = QuantizedMatMulWithBiasAndRequantize.Inputs.class ) +@Operator( + group = "quantization" +) public final class QuantizedMatMulWithBiasAndRequantize extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Requantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Requantize.java index 48bfa78ab74..0ebd2ce0e3a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Requantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Requantize.java @@ -43,8 +43,6 @@ * interpretation of the {@code input} data. 
For example, if {@code input_min} is -1.0f and * {@code input_max} is 1.0f, and we are dealing with {@code quint16} quantized data, then a 0 * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Requantize.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformDequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformDequantize.java index 6a960b8cc44..8f5d44bf663 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformDequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformDequantize.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -39,13 +40,14 @@ * Perform dequantization on the quantized Tensor {@code input}. * Given quantized {@code input} which was quantized using {@code scales} and {@code zero_points}, performs dequantization using the formula: * dequantized_data = (quantized_data - zero_point) * scale. - * - * @param data type for {@code output} output */ @OpMetadata( opType = UniformDequantize.OP_NAME, inputsClass = UniformDequantize.Inputs.class ) +@Operator( + group = "quantization" +) public final class UniformDequantize extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -192,7 +194,7 @@ public static class Inputs extends RawOpInputs> { /** * The quantization min value that was used when input was quantized. 
* The purpose of this attribute is typically (but not limited to) to indicate narrow range, where this is set to: - * `(Tin lowest) + 1` if narrow range, and `(Tin lowest)` otherwise. + * {@code (Tin lowest) + 1} if narrow range, and {@code (Tin lowest)} otherwise. * For example, if Tin is qint8, this is set to -127 if narrow range quantized or -128 if not. */ public final long quantizationMinVal; @@ -200,7 +202,7 @@ public static class Inputs extends RawOpInputs> { /** * The quantization max value that was used when input was quantized. * The purpose of this attribute is typically (but not limited to) indicate narrow range, where this is set to: - * `(Tout max)` for both narrow range and not narrow range. + * {@code (Tout max)} for both narrow range and not narrow range. * For example, if Tin is qint8, this is set to 127. */ public final long quantizationMaxVal; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformQuantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformQuantize.java index 866745f64ad..390ceb83d8a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformQuantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformQuantize.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -39,13 +40,14 @@ * Perform quantization on Tensor {@code input}. 
* Given {@code input}, {@code scales} and {@code zero_points}, performs quantization using the formula: * quantized_data = floor(input_data * (1.0f / scale) + 0.5f) + zero_point - * - * @param data type for {@code output} output */ @OpMetadata( opType = UniformQuantize.OP_NAME, inputsClass = UniformQuantize.Inputs.class ) +@Operator( + group = "quantization" +) public final class UniformQuantize extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -190,17 +192,17 @@ public static class Inputs extends RawOpInputs> { public final long quantizationAxis; /** - * The quantization min value to quantize `input`. + * The quantization min value to quantize {@code input}. * The purpose of this attribute is typically (but not limited to) to indicate narrow range, where this is set to: - * `(Tin lowest) + 1` if narrow range, and `(Tin lowest)` otherwise. + * {@code (Tin lowest) + 1} if narrow range, and {@code (Tin lowest)} otherwise. * For example, if Tin is qint8, this is set to -127 if narrow range quantized or -128 if not. */ public final long quantizationMinVal; /** - * The quantization max value to quantize `input`. + * The quantization max value to quantize {@code input}. * The purpose of this attribute is typically (but not limited to) indicate narrow range, where this is set to: - * `(Tout max)` for both narrow range and not narrow range. + * {@code (Tout max)} for both narrow range and not narrow range. * For example, if Tin is qint8, this is set to 127. 
*/ public final long quantizationMaxVal; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformQuantizedDot.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformQuantizedDot.java index 7ad36ff574c..eff33c22ce7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformQuantizedDot.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformQuantizedDot.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -43,13 +44,14 @@ * quantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val). * {@code output} is also quantized, using the same formula. * If {@code rhs} is per-tensor quantized, {@code output} must be also per-tensor quantized. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = UniformQuantizedDot.OP_NAME, inputsClass = UniformQuantizedDot.Inputs.class ) +@Operator( + group = "quantization" +) public final class UniformQuantizedDot extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformQuantizedDotHybrid.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformQuantizedDotHybrid.java index c690ea43e4d..1f30f7a1a4c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformQuantizedDotHybrid.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformQuantizedDotHybrid.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -42,13 +43,14 @@ * {@code lhs} and {@code rhs} must be 2D Tensors and the lhs.dim_size(1) must match rhs.dim_size(0). * {@code rhs} must be quantized Tensor, where its data value is quantized using the formula: * quantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val). 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = UniformQuantizedDotHybrid.OP_NAME, inputsClass = UniformQuantizedDotHybrid.Inputs.class ) +@Operator( + group = "quantization" +) public final class UniformQuantizedDotHybrid extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformRequantize.java index 8f97998d23c..eb4c511b567 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformRequantize.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -51,13 +52,14 @@ *
  • per-axis -> per-axis where input_quantization_axis equals output_quantization_axis. * i.e. At least one among input_quantization_axis and output_quantization_axis must be -1, or two must be equal.
  • * - * - * @param data type for {@code output} output */ @OpMetadata( opType = UniformRequantize.OP_NAME, inputsClass = UniformRequantize.Inputs.class ) +@Operator( + group = "quantization" +) public final class UniformRequantize extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -249,40 +251,40 @@ public static class Inputs extends RawOpInputs> { public final DataType Tout; /** - * The quantization axis that was used when quantizing original data that `input` represents. + * The quantization axis that was used when quantizing original data that {@code input} represents. * Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. * If set to -1 (default), this indicates per-tensor quantization. Otherwise, it must be set within range [0, input.dims()). */ public final long inputQuantizationAxis; /** - * The quantization min value that was used when quantizing original data that `input` represents. + * The quantization min value that was used when quantizing original data that {@code input} represents. * The purpose of this attribute is typically (but not limited to) to indicate narrow range, where this is set to: - * `(Tin lowest) + 1` if narrow range, and `(Tin lowest)` otherwise. + * {@code (Tin lowest) + 1} if narrow range, and {@code (Tin lowest)} otherwise. * For example, if Tin is qint8, this is set to -127 if narrow range quantized or -128 if not. */ public final long inputQuantizationMinVal; /** - * The quantization max value that was used when quantizing original data that `input` represents. + * The quantization max value that was used when quantizing original data that {@code input} represents. * The purpose of this attribute is typically (but not limited to) indicate narrow range, where this is set to: - * `(Tout max)` for both narrow range and not narrow range. + * {@code (Tout max)} for both narrow range and not narrow range. 
* For example, if Tin is qint8, this is set to 127. */ public final long inputQuantizationMaxVal; /** - * The new quantization axis to use to quantize original data that `input` represents. + * The new quantization axis to use to quantize original data that {@code input} represents. */ public final long outputQuantizationAxis; /** - * The new quantization min value to quantize original data that `input` represents. + * The new quantization min value to quantize original data that {@code input} represents. */ public final long outputQuantizationMinVal; /** - * The new quantization max value to quantize original data that `input` represents. + * The new quantization max value to quantize original data that {@code input} represents. */ public final long outputQuantizationMaxVal; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedBincount.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedBincount.java index 2607b8e0fcf..0aadded3990 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedBincount.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedBincount.java @@ -42,8 +42,6 @@ * the value in {@code weights} at each index where the corresponding value in {@code arr} is * {@code i}. *

    Values in {@code arr} outside of the range [0, size) are ignored. - * - * @param data type for {@code output} output */ @OpMetadata( opType = RaggedBincount.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCountSparseOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCountSparseOutput.java index d060e7baa74..720919e6873 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCountSparseOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCountSparseOutput.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; @@ -36,13 +37,14 @@ /** * Performs sparse-output bin counting for a ragged tensor input. * Counts the number of times each value occurs in the input. 
- * - * @param data type for {@code output_values} output */ @OpMetadata( opType = RaggedCountSparseOutput.OP_NAME, inputsClass = RaggedCountSparseOutput.Inputs.class ) +@Operator( + group = "ragged" +) public final class RaggedCountSparseOutput extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCross.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCross.java index 821c0f609d4..3b356804b4f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCross.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCross.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; @@ -38,15 +39,14 @@ /** * Generates a feature cross from a list of tensors, and returns it as a * RaggedTensor. See {@code tf.ragged.cross} for more details. - * - * @param data type for {@code output_values} output - * - * @param data type for {@code output_row_splits} output */ @OpMetadata( opType = RaggedCross.OP_NAME, inputsClass = RaggedCross.Inputs.class ) +@Operator( + group = "ragged" +) public final class RaggedCross extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -165,8 +165,8 @@ public static class Inputs extends RawOpInputs> { public final Iterable> denseInputs; /** - * String specifying the tensor type for each input. The `i`th character in - * this string specifies the type of the `i`th input, and is one of: 'R' (ragged), + * String specifying the tensor type for each input. 
The {@code i}th character in + * this string specifies the type of the {@code i}th input, and is one of: 'R' (ragged), * 'D' (dense), or 'S' (sparse). This attr is used to ensure that the crossed * values are combined in the order of the inputs from the call to tf.ragged.cross. */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedFillEmptyRows.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedFillEmptyRows.java index 5f1b9cf66ec..d8414fd1ae3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedFillEmptyRows.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedFillEmptyRows.java @@ -37,8 +37,6 @@ /** * The RaggedFillEmptyRows operation - * - * @param data type for {@code output_values} output */ @OpMetadata( opType = RaggedFillEmptyRows.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedFillEmptyRowsGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedFillEmptyRowsGrad.java index 9ea15d1320a..314e4a689af 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedFillEmptyRowsGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedFillEmptyRowsGrad.java @@ -36,8 +36,6 @@ /** * The RaggedFillEmptyRowsGrad operation - * - * @param data type for {@code d_values} output */ @OpMetadata( opType = RaggedFillEmptyRowsGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedGather.java index 98a29562a70..3c71b9987c4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedGather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedGather.java @@ 
-31,6 +31,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -55,15 +56,14 @@ * *

    (Note: This c++ op is used to implement the higher-level python * {@code tf.ragged.gather} op, which also supports ragged indices.) - * - * @param data type for {@code output_nested_splits} output - * - * @param data type for {@code output_dense_values} output */ @OpMetadata( opType = RaggedGather.OP_NAME, inputsClass = RaggedGather.Inputs.class ) +@Operator( + group = "ragged" +) public final class RaggedGather extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedRange.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedRange.java index 67426f99801..39a6487398e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedRange.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedRange.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; @@ -49,15 +50,14 @@ *

    The input tensors {@code starts}, {@code limits}, and {@code deltas} may be scalars or vectors. * The vector inputs must all have the same size. Scalar inputs are broadcast * to match the size of the vector inputs. - * - * @param data type for {@code rt_nested_splits} output - * - * @param data type for {@code rt_dense_values} output */ @OpMetadata( opType = RaggedRange.OP_NAME, inputsClass = RaggedRange.Inputs.class ) +@Operator( + group = "ragged" +) public final class RaggedRange extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorFromVariant.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorFromVariant.java index 3f3146fe837..5e9e6cae9a7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorFromVariant.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorFromVariant.java @@ -31,6 +31,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; @@ -49,15 +50,14 @@ * values of the decoded {@code RaggedTensor}. If {@code input_ragged_rank} is -1, then it is * inferred as {@code output_ragged_rank} - {@code rank(encoded_ragged)}. See * {@code RaggedTensorToVariant} for the corresponding encoding logic. 
- * - * @param data type for {@code output_nested_splits} output - * - * @param data type for {@code output_dense_values} output */ @OpMetadata( opType = RaggedTensorFromVariant.OP_NAME, inputsClass = RaggedTensorFromVariant.Inputs.class ) +@Operator( + group = "ragged" +) public final class RaggedTensorFromVariant extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -159,8 +159,8 @@ public static class Inputs extends RawOpInputs> { public final Operand encodedRagged; /** - * The ragged rank of each encoded `RaggedTensor` component in the input. If set to - * -1, this is inferred as `output_ragged_rank` - `rank(encoded_ragged)` + * The ragged rank of each encoded {@code RaggedTensor} component in the input. If set to + * -1, this is inferred as {@code output_ragged_rank} - {@code rank(encoded_ragged)} */ public final long inputRaggedRank; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToSparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToSparse.java index 4472f907b24..e765d995332 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToSparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToSparse.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; @@ -40,13 +41,14 @@ * input=ragged.from_nested_row_splits(rt_dense_values, rt_nested_splits) * output=SparseTensor(indices=sparse_indices, values=sparse_values, * dense_shape=sparse_dense_shape) - * - * @param data type for {@code sparse_values} output */ @OpMetadata( opType = RaggedTensorToSparse.OP_NAME, inputsClass = 
RaggedTensorToSparse.Inputs.class ) +@Operator( + group = "ragged" +) public final class RaggedTensorToSparse extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToTensor.java index 0c0f474e590..1bbb93a9327 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToTensor.java @@ -31,6 +31,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -53,13 +54,14 @@ *

  • "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then it * is preceded by "FIRST_DIM_SIZE".
  • * - * - * @param data type for {@code result} output */ @OpMetadata( opType = RaggedTensorToTensor.OP_NAME, inputsClass = RaggedTensorToTensor.Inputs.class ) +@Operator( + group = "ragged" +) public final class RaggedTensorToTensor extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -205,11 +207,13 @@ public static class Inputs extends RawOpInputs + *
  • "ROW_SPLITS": the row_splits tensor from the ragged tensor.
  • + *
  • "VALUE_ROWIDS": the value_rowids tensor from the ragged tensor.
  • + *
  • "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then it + * is preceeded by "FIRST_DIM_SIZE". + * The tensors are in the order of the dimensions.
  • + * */ public final String[] rowPartitionTypes; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariant.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariant.java index a3d57f0c9b1..48918309097 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariant.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariant.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -51,6 +52,9 @@ opType = RaggedTensorToVariant.OP_NAME, inputsClass = RaggedTensorToVariant.Inputs.class ) +@Operator( + group = "ragged" +) public final class RaggedTensorToVariant extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -130,7 +134,7 @@ public static class Inputs extends RawOpInputs { public final DataType Tsplits; /** - * A `bool` denoting whether the input is a batched `RaggedTensor`. + * A {@code bool} denoting whether the input is a batched {@code RaggedTensor}. 
*/ public final boolean batchedInput; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariantGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariantGradient.java index 42836ec7aeb..ca254cd1cf5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariantGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariantGradient.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -41,13 +42,14 @@ * op, given the variant-encoded ragged gradients of the outputs, along with * the outer row-splits and the shape of the dense-values that were provided as * inputs to the RaggedTensorToVariant op. 
- * - * @param data type for {@code dense_values_grad} output */ @OpMetadata( opType = RaggedTensorToVariantGradient.OP_NAME, inputsClass = RaggedTensorToVariantGradient.Inputs.class ) +@Operator( + group = "ragged" +) public final class RaggedTensorToVariantGradient extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AnonymousRandomSeedGenerator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AnonymousRandomSeedGenerator.java index 525be34462d..e7557a8d4c8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AnonymousRandomSeedGenerator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AnonymousRandomSeedGenerator.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -39,6 +40,9 @@ opType = AnonymousRandomSeedGenerator.OP_NAME, inputsClass = AnonymousRandomSeedGenerator.Inputs.class ) +@Operator( + group = "random" +) public final class AnonymousRandomSeedGenerator extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AnonymousSeedGenerator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AnonymousSeedGenerator.java index 976012740cc..1344c1ad9e4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AnonymousSeedGenerator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AnonymousSeedGenerator.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import 
org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TBool; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -40,6 +41,9 @@ opType = AnonymousSeedGenerator.OP_NAME, inputsClass = AnonymousSeedGenerator.Inputs.class ) +@Operator( + group = "random" +) public final class AnonymousSeedGenerator extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DeleteRandomSeedGenerator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DeleteRandomSeedGenerator.java index dd2caddc2f4..32b867106f9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DeleteRandomSeedGenerator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DeleteRandomSeedGenerator.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** @@ -37,6 +38,9 @@ opType = DeleteRandomSeedGenerator.OP_NAME, inputsClass = DeleteRandomSeedGenerator.Inputs.class ) +@Operator( + group = "random" +) public final class DeleteRandomSeedGenerator extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DeleteSeedGenerator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DeleteSeedGenerator.java index 81e91b1242c..463a9ba87a0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DeleteSeedGenerator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DeleteSeedGenerator.java 
@@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** @@ -37,6 +38,9 @@ opType = DeleteSeedGenerator.OP_NAME, inputsClass = DeleteSeedGenerator.Inputs.class ) +@Operator( + group = "random" +) public final class DeleteSeedGenerator extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DummySeedGenerator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DummySeedGenerator.java index 2fef5b989e7..a73f248e303 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DummySeedGenerator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DummySeedGenerator.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** @@ -38,6 +39,9 @@ opType = DummySeedGenerator.OP_NAME, inputsClass = DummySeedGenerator.Inputs.class ) +@Operator( + group = "random" +) public final class DummySeedGenerator extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/Multinomial.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/Multinomial.java index 6412651e6ac..a213609fca6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/Multinomial.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/Multinomial.java @@ -38,8 +38,6 @@ /** * Draws samples from a multinomial 
distribution. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Multinomial.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/NonDeterministicInts.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/NonDeterministicInts.java index a417ab3d6f7..83f81ee6c51 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/NonDeterministicInts.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/NonDeterministicInts.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -37,13 +38,14 @@ /** * Non-deterministically generates some integers. * This op may use some OS-provided source of non-determinism (e.g. an RNG), so each execution will give different results. - * - * @param data type for {@code output} output */ @OpMetadata( opType = NonDeterministicInts.OP_NAME, inputsClass = NonDeterministicInts.Inputs.class ) +@Operator( + group = "random" +) public final class NonDeterministicInts extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/ParameterizedTruncatedNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/ParameterizedTruncatedNormal.java index 262cb79226d..4bc87b4da51 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/ParameterizedTruncatedNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/ParameterizedTruncatedNormal.java @@ -37,8 +37,6 @@ * Outputs random values from a normal distribution. 
The parameters may each be a * scalar which applies to the entire output, or a vector of length shape[0] which * stores the parameters for each batch. - * - * @param data type for {@code output} output */ @OpMetadata( opType = ParameterizedTruncatedNormal.OP_NAME, @@ -204,7 +202,7 @@ public static class Inputs extends RawOpInputs maxvals; /** - * If either `seed` or `seed2` are set to be non-zero, the random number + * If either {@code seed} or {@code seed2} are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGamma.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGamma.java index 6b47bf1cb15..cc1a0ab9ba6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGamma.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGamma.java @@ -38,8 +38,6 @@ * This op uses the algorithm by Marsaglia et al. to acquire samples via * transformation-rejection from pairs of uniform and normal random variables. * See http://dl.acm.org/citation.cfm?id=358414 - * - * @param data type for {@code output} output */ @OpMetadata( opType = RandomGamma.OP_NAME, @@ -186,7 +184,7 @@ public static class Inputs extends RawOpInputs public final Operand alpha; /** - * If either `seed` or `seed2` are set to be non-zero, the random number + * If either {@code seed} or {@code seed2} are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. 
*/ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGammaGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGammaGrad.java index 9f1e99374f9..4ab62242717 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGammaGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGammaGrad.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * Computes the derivative of a Gamma random sample w.r.t. {@code alpha}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = RandomGammaGrad.OP_NAME, inputsClass = RandomGammaGrad.Inputs.class ) +@Operator( + group = "random" +) public final class RandomGammaGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomPoisson.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomPoisson.java index 963377774cd..3e5fc40fc2f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomPoisson.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomPoisson.java @@ -45,8 +45,6 @@ * random variables. * See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer * Programming, Volume 2. 
Addison Wesley - * - * @param data type for {@code output} output */ @OpMetadata( opType = RandomPoisson.OP_NAME, @@ -215,7 +213,7 @@ public static class Inputs extends RawOpInputs> { public final Operand rate; /** - * If either `seed` or `seed2` are set to be non-zero, the random number + * If either {@code seed} or {@code seed2} are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomShuffle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomShuffle.java index 6e5259d7cd3..517900e7df1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomShuffle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomShuffle.java @@ -43,8 +43,6 @@ * [3, 4], ==> [1, 2], * [5, 6]] [3, 4]] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = RandomShuffle.OP_NAME, @@ -179,7 +177,7 @@ public static class Inputs extends RawOpInputs public final Operand value; /** - * If either `seed` or `seed2` are set to be non-zero, the random number + * If either {@code seed} or {@code seed2} are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomStandardNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomStandardNormal.java index 70a133cd5a4..322fe10883c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomStandardNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomStandardNormal.java @@ -37,8 +37,6 @@ /** * Outputs random values from a normal distribution. 
* The generated values will have mean 0 and standard deviation 1. - * - * @param data type for {@code output} output */ @OpMetadata( opType = RandomStandardNormal.OP_NAME, @@ -174,7 +172,7 @@ public static class Inputs extends RawOpInputs> { public final Operand shape; /** - * If either `seed` or `seed2` are set to be non-zero, the random number + * If either {@code seed} or {@code seed2} are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniform.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniform.java index 9306d1d7fcf..5940994392c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniform.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniform.java @@ -38,8 +38,6 @@ * Outputs random values from a uniform distribution. * The generated values follow a uniform distribution in the range {@code [0, 1)}. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. - * - * @param data type for {@code output} output */ @OpMetadata( opType = RandomUniform.OP_NAME, @@ -175,7 +173,7 @@ public static class Inputs extends RawOpInputs> { public final Operand shape; /** - * If either `seed` or `seed2` are set to be non-zero, the random number + * If either {@code seed} or {@code seed2} are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. 
*/ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniformInt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniformInt.java index dc227152afe..6eba6a6c8b8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniformInt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniformInt.java @@ -41,8 +41,6 @@ *

    The random integers are slightly biased unless {@code maxval - minval} is an exact * power of two. The bias is small for values of {@code maxval - minval} significantly * smaller than the range of the output (either {@code 2^32} or {@code 2^64}). - * - * @param data type for {@code output} output */ @OpMetadata( opType = RandomUniformInt.OP_NAME, @@ -190,7 +188,7 @@ public static class Inputs extends RawOpInputs maxval; /** - * If either `seed` or `seed2` are set to be non-zero, the random number + * If either {@code seed} or {@code seed2} are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RngReadAndSkip.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RngReadAndSkip.java index d54a45c272f..78b596678fa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RngReadAndSkip.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RngReadAndSkip.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -45,6 +46,9 @@ opType = RngReadAndSkip.OP_NAME, inputsClass = RngReadAndSkip.Inputs.class ) +@Operator( + group = "random" +) public final class RngReadAndSkip extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RngSkip.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RngSkip.java index 0ecbe4d0c43..41cd0aeff19 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RngSkip.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RngSkip.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -42,6 +43,9 @@ opType = RngSkip.OP_NAME, inputsClass = RngSkip.Inputs.class ) +@Operator( + group = "random" +) public final class RngSkip extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulRandomBinomial.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulRandomBinomial.java index fc03e7feddb..67bc6bf1167 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulRandomBinomial.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulRandomBinomial.java @@ -38,8 +38,6 @@ /** * The StatefulRandomBinomial operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatefulRandomBinomial.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulStandardNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulStandardNormal.java index 8330a9f4b49..ff905308114 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulStandardNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulStandardNormal.java @@ -39,8 +39,6 @@ /** * Outputs random values from a normal distribution. * The generated values will have mean 0 and standard deviation 1. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = StatefulStandardNormal.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulTruncatedNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulTruncatedNormal.java index 110501e2a87..409dff36de6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulTruncatedNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulTruncatedNormal.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; @@ -40,13 +41,14 @@ * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = StatefulTruncatedNormal.OP_NAME, inputsClass = StatefulTruncatedNormal.Inputs.class ) +@Operator( + group = "random" +) public final class StatefulTruncatedNormal extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniform.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniform.java index 3321a260aa3..65f86463b06 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniform.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniform.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; @@ -39,13 +40,14 @@ * Outputs random values from a uniform distribution. * The generated values follow a uniform distribution in the range {@code [0, 1)}. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = StatefulUniform.OP_NAME, inputsClass = StatefulUniform.Inputs.class ) +@Operator( + group = "random" +) public final class StatefulUniform extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformFullInt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformFullInt.java index 6f22d3a3ec7..80f425ff575 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformFullInt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformFullInt.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -37,13 +38,14 @@ /** * Outputs random integers from a uniform distribution. * The generated values are uniform integers covering the whole range of {@code dtype}. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = StatefulUniformFullInt.OP_NAME, inputsClass = StatefulUniformFullInt.Inputs.class ) +@Operator( + group = "random" +) public final class StatefulUniformFullInt extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformInt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformInt.java index acf5028ed32..d2854aea992 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformInt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformInt.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -41,13 +42,14 @@ *

    The random integers are slightly biased unless {@code maxval - minval} is an exact * power of two. The bias is small for values of {@code maxval - minval} significantly * smaller than the range of the output (either {@code 2^32} or {@code 2^64}). - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatefulUniformInt.OP_NAME, inputsClass = StatefulUniformInt.Inputs.class ) +@Operator( + group = "random" +) public final class StatefulUniformInt extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessMultinomial.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessMultinomial.java index 1c306047fd5..45a902b2da8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessMultinomial.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessMultinomial.java @@ -38,8 +38,6 @@ /** * Draws samples from a multinomial distribution. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessMultinomial.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessParameterizedTruncatedNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessParameterizedTruncatedNormal.java index 9b72e0ad1b9..64f85682701 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessParameterizedTruncatedNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessParameterizedTruncatedNormal.java @@ -29,18 +29,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; /** * The StatelessParameterizedTruncatedNormal operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessParameterizedTruncatedNormal.OP_NAME, inputsClass = StatelessParameterizedTruncatedNormal.Inputs.class ) +@Operator( + group = "random" +) public final class StatelessParameterizedTruncatedNormal extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomBinomial.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomBinomial.java index 171fcd66f25..ebd295592eb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomBinomial.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomBinomial.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import 
org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; @@ -38,13 +39,14 @@ * Outputs deterministic pseudorandom random numbers from a binomial distribution. * Outputs random values from a binomial distribution. *

    The outputs are a deterministic function of {@code shape}, {@code seed}, {@code counts}, and {@code probs}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessRandomBinomial.OP_NAME, inputsClass = StatelessRandomBinomial.Inputs.class ) +@Operator( + group = "random" +) public final class StatelessRandomBinomial extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGamma.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGamma.java index 36b8e0ffa39..69bd0d03ddd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGamma.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGamma.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -38,13 +39,14 @@ * Outputs deterministic pseudorandom random numbers from a gamma distribution. * Outputs random values from a gamma distribution. *

    The outputs are a deterministic function of the inputs. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessRandomGamma.OP_NAME, inputsClass = StatelessRandomGamma.Inputs.class ) +@Operator( + group = "random" +) public final class StatelessRandomGamma extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetAlg.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetAlg.java index cc16090aef1..30e7dd10837 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetAlg.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetAlg.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; /** @@ -39,6 +40,9 @@ opType = StatelessRandomGetAlg.OP_NAME, inputsClass = StatelessRandomGetAlg.Inputs.class ) +@Operator( + group = "random" +) public final class StatelessRandomGetAlg extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetKeyCounter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetKeyCounter.java index 23d1bcad77f..db52e5ba0d4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetKeyCounter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetKeyCounter.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import 
org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -41,6 +42,9 @@ opType = StatelessRandomGetKeyCounter.OP_NAME, inputsClass = StatelessRandomGetKeyCounter.Inputs.class ) +@Operator( + group = "random" +) public final class StatelessRandomGetKeyCounter extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetKeyCounterAlg.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetKeyCounterAlg.java index 212a30743b7..d98e7a1e935 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetKeyCounterAlg.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetKeyCounterAlg.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -42,6 +43,9 @@ opType = StatelessRandomGetKeyCounterAlg.OP_NAME, inputsClass = StatelessRandomGetKeyCounterAlg.Inputs.class ) +@Operator( + group = "random" +) public final class StatelessRandomGetKeyCounterAlg extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormal.java index 7081e980beb..bf0fa718d0e 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormal.java @@ -39,8 +39,6 @@ * Outputs deterministic pseudorandom values from a normal distribution. * The generated values will have mean 0 and standard deviation 1. *

    The outputs are a deterministic function of {@code shape} and {@code seed}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessRandomNormal.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormalV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormalV2.java index 9dcd8e490ea..ef4f9aafee6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormalV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormalV2.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -40,13 +41,14 @@ * Outputs deterministic pseudorandom values from a normal distribution. * The generated values will have mean 0 and standard deviation 1. *

    The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter} and {@code alg}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessRandomNormalV2.OP_NAME, inputsClass = StatelessRandomNormalV2.Inputs.class ) +@Operator( + group = "random" +) public final class StatelessRandomNormalV2 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomPoisson.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomPoisson.java index a3a152635aa..c617e49f652 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomPoisson.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomPoisson.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; @@ -37,13 +38,14 @@ * Outputs deterministic pseudorandom random numbers from a Poisson distribution. * Outputs random values from a Poisson distribution. *

    The outputs are a deterministic function of {@code shape}, {@code seed}, and {@code lam}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessRandomPoisson.OP_NAME, inputsClass = StatelessRandomPoisson.Inputs.class ) +@Operator( + group = "random" +) public final class StatelessRandomPoisson extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniform.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniform.java index 6e18ceffb6f..86c24f1e171 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniform.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniform.java @@ -40,8 +40,6 @@ * The generated values follow a uniform distribution in the range {@code [0, 1)}. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. *

    The outputs are a deterministic function of {@code shape} and {@code seed}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessRandomUniform.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullInt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullInt.java index 76bd5fb79ee..41e703d9ddf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullInt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullInt.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; @@ -37,13 +38,14 @@ * Outputs deterministic pseudorandom random integers from a uniform distribution. * The generated values are uniform integers covering the whole range of {@code dtype}. *

    The outputs are a deterministic function of {@code shape} and {@code seed}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessRandomUniformFullInt.OP_NAME, inputsClass = StatelessRandomUniformFullInt.Inputs.class ) +@Operator( + group = "random" +) public final class StatelessRandomUniformFullInt extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullIntV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullIntV2.java index 2d94bfc0d0e..7a910d86feb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullIntV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullIntV2.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -39,13 +40,14 @@ * Outputs deterministic pseudorandom random integers from a uniform distribution. * The generated values are uniform integers covering the whole range of {@code dtype}. *

    The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter} and {@code alg}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessRandomUniformFullIntV2.OP_NAME, inputsClass = StatelessRandomUniformFullIntV2.Inputs.class ) +@Operator( + group = "random" +) public final class StatelessRandomUniformFullIntV2 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformInt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformInt.java index 335e2700a56..5c792f75e51 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformInt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformInt.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; @@ -36,13 +37,14 @@ * Outputs deterministic pseudorandom random integers from a uniform distribution. * The generated values follow a uniform distribution in the range {@code [minval, maxval)}. *

    The outputs are a deterministic function of {@code shape}, {@code seed}, {@code minval}, and {@code maxval}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessRandomUniformInt.OP_NAME, inputsClass = StatelessRandomUniformInt.Inputs.class ) +@Operator( + group = "random" +) public final class StatelessRandomUniformInt extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformIntV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformIntV2.java index 93fcc9b2685..ae538d14050 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformIntV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformIntV2.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -38,13 +39,14 @@ * Outputs deterministic pseudorandom random integers from a uniform distribution. * The generated values follow a uniform distribution in the range {@code [minval, maxval)}. *

    The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter}, {@code alg}, {@code minval} and {@code maxval}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessRandomUniformIntV2.OP_NAME, inputsClass = StatelessRandomUniformIntV2.Inputs.class ) +@Operator( + group = "random" +) public final class StatelessRandomUniformIntV2 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformV2.java index 74f97ca2dc6..86bb5202639 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformV2.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -41,13 +42,14 @@ * The generated values follow a uniform distribution in the range {@code [0, 1)}. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. *

    The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter} and {@code alg}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessRandomUniformV2.OP_NAME, inputsClass = StatelessRandomUniformV2.Inputs.class ) +@Operator( + group = "random" +) public final class StatelessRandomUniformV2 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormal.java index 2ddedee0436..83c4ebdab9c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormal.java @@ -41,8 +41,6 @@ * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. *

    The outputs are a deterministic function of {@code shape} and {@code seed}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessTruncatedNormal.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormalV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormalV2.java index 11685b3b2fc..ae8b00ae1df 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormalV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormalV2.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -42,13 +43,14 @@ * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. *

    The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter} and {@code alg}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessTruncatedNormalV2.OP_NAME, inputsClass = StatelessTruncatedNormalV2.Inputs.class ) +@Operator( + group = "random" +) public final class StatelessTruncatedNormalV2 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/ThreadUnsafeUnigramCandidateSampler.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/ThreadUnsafeUnigramCandidateSampler.java new file mode 100644 index 00000000000..1aff75975d1 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/ThreadUnsafeUnigramCandidateSampler.java @@ -0,0 +1,259 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.random; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt64; + +/** + * Generates labels for candidate sampling with a learned unigram distribution. + * See explanations of candidate sampling and the data formats at + * go/candidate-sampling. + *

    For each batch, this op picks a single set of sampled candidate labels. + *

    The advantages of sampling candidates per-batch are simplicity and the + * possibility of efficient dense matrix multiplication. The disadvantage is that + * the sampled candidates must be chosen independently of the context and of the + * true labels. + */ +@OpMetadata( + opType = ThreadUnsafeUnigramCandidateSampler.OP_NAME, + inputsClass = ThreadUnsafeUnigramCandidateSampler.Inputs.class +) +@Operator( + group = "random" +) +public final class ThreadUnsafeUnigramCandidateSampler extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "ThreadUnsafeUnigramCandidateSampler"; + + private Output sampledCandidates; + + private Output trueExpectedCount; + + private Output sampledExpectedCount; + + public ThreadUnsafeUnigramCandidateSampler(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + sampledCandidates = operation.output(outputIdx++); + trueExpectedCount = operation.output(outputIdx++); + sampledExpectedCount = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new ThreadUnsafeUnigramCandidateSampler operation. + * + * @param scope current scope + * @param trueClasses A batch_size * num_true matrix, in which each row contains the + * IDs of the num_true target_classes in the corresponding original label. + * @param numTrue Number of true labels per context. + * @param numSampled Number of candidates to randomly sample. + * @param unique If unique is true, we sample with rejection, so that all sampled + * candidates in a batch are unique. This requires some approximation to + * estimate the post-rejection sampling probabilities. + * @param rangeMax The sampler will sample integers from the interval [0, range_max). 
+ * @param options carries optional attribute values + * @return a new instance of ThreadUnsafeUnigramCandidateSampler + */ + @Endpoint( + describeByClass = true + ) + public static ThreadUnsafeUnigramCandidateSampler create(Scope scope, Operand trueClasses, + Long numTrue, Long numSampled, Boolean unique, Long rangeMax, Options... options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "ThreadUnsafeUnigramCandidateSampler"); + opBuilder.addInput(trueClasses.asOutput()); + opBuilder.setAttr("num_true", numTrue); + opBuilder.setAttr("num_sampled", numSampled); + opBuilder.setAttr("unique", unique); + opBuilder.setAttr("range_max", rangeMax); + if (options != null) { + for (Options opts : options) { + if (opts.seed != null) { + opBuilder.setAttr("seed", opts.seed); + } + if (opts.seed2 != null) { + opBuilder.setAttr("seed2", opts.seed2); + } + } + } + return new ThreadUnsafeUnigramCandidateSampler(opBuilder.build()); + } + + /** + * Sets the seed option. + * + * @param seed If either seed or seed2 are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @return this Options instance. + */ + public static Options seed(Long seed) { + return new Options().seed(seed); + } + + /** + * Sets the seed2 option. + * + * @param seed2 An second seed to avoid seed collision. + * @return this Options instance. + */ + public static Options seed2(Long seed2) { + return new Options().seed2(seed2); + } + + /** + * Gets sampledCandidates. + * A vector of length num_sampled, in which each element is + * the ID of a sampled candidate. + * @return sampledCandidates. + */ + public Output sampledCandidates() { + return sampledCandidates; + } + + /** + * Gets trueExpectedCount. + * A batch_size * num_true matrix, representing + * the number of times each candidate is expected to occur in a batch + * of sampled candidates. If unique=true, then this is a probability. + * @return trueExpectedCount. 
+ */ + public Output trueExpectedCount() { + return trueExpectedCount; + } + + /** + * Gets sampledExpectedCount. + * A vector of length num_sampled, for each sampled + * candidate representing the number of times the candidate is expected + * to occur in a batch of sampled candidates. If unique=true, then this is a + * probability. + * @return sampledExpectedCount. + */ + public Output sampledExpectedCount() { + return sampledExpectedCount; + } + + /** + * Optional attributes for {@link org.tensorflow.op.random.ThreadUnsafeUnigramCandidateSampler} + */ + public static class Options { + private Long seed; + + private Long seed2; + + private Options() { + } + + /** + * Sets the seed option. + * + * @param seed If either seed or seed2 are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @return this Options instance. + */ + public Options seed(Long seed) { + this.seed = seed; + return this; + } + + /** + * Sets the seed2 option. + * + * @param seed2 An second seed to avoid seed collision. + * @return this Options instance. + */ + public Options seed2(Long seed2) { + this.seed2 = seed2; + return this; + } + } + + @OpInputsMetadata( + outputsClass = ThreadUnsafeUnigramCandidateSampler.class + ) + public static class Inputs extends RawOpInputs { + /** + * A batch_size * num_true matrix, in which each row contains the + * IDs of the num_true target_classes in the corresponding original label. + */ + public final Operand trueClasses; + + /** + * Number of true labels per context. + */ + public final long numTrue; + + /** + * Number of candidates to randomly sample. + */ + public final long numSampled; + + /** + * If unique is true, we sample with rejection, so that all sampled + * candidates in a batch are unique. This requires some approximation to + * estimate the post-rejection sampling probabilities. 
+ */ + public final boolean unique; + + /** + * The sampler will sample integers from the interval [0, range_max). + */ + public final long rangeMax; + + /** + * If either seed or seed2 are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + */ + public final long seed; + + /** + * An second seed to avoid seed collision. + */ + public final long seed2; + + public Inputs(GraphOperation op) { + super(new ThreadUnsafeUnigramCandidateSampler(op), op, Arrays.asList("num_true", "num_sampled", "unique", "range_max", "seed", "seed2")); + int inputIndex = 0; + trueClasses = (Operand) op.input(inputIndex++); + numTrue = op.attributes().getAttrInt("num_true"); + numSampled = op.attributes().getAttrInt("num_sampled"); + unique = op.attributes().getAttrBool("unique"); + rangeMax = op.attributes().getAttrInt("range_max"); + seed = op.attributes().getAttrInt("seed"); + seed2 = op.attributes().getAttrInt("seed2"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/TruncatedNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/TruncatedNormal.java index 6b986b4e08e..36fbe8a2a05 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/TruncatedNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/TruncatedNormal.java @@ -39,8 +39,6 @@ * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = TruncatedNormal.OP_NAME, @@ -177,7 +175,7 @@ public static class Inputs extends RawOpInputs> { public final Operand shape; /** - * If either `seed` or `seed2` are set to be non-zero, the random number + * If either {@code seed} or {@code seed2} are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/experimental/StatelessShuffle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/experimental/StatelessShuffle.java index 44d41784c45..dc17294084b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/experimental/StatelessShuffle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/experimental/StatelessShuffle.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -44,13 +45,14 @@ * [5, 6]] [3, 4]] * *

    The outputs are a deterministic function of {@code value}, {@code key}, {@code counter} and {@code alg}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessShuffle.OP_NAME, inputsClass = StatelessShuffle.Inputs.class ) +@Operator( + group = "random.experimental" +) public final class StatelessShuffle extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAbs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAbs.java deleted file mode 100644 index 17034cab166..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAbs.java +++ /dev/null @@ -1,110 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscAbs operation - * - * @param data type for {@code y} output - */ -@OpMetadata( - opType = RiscAbs.OP_NAME, - inputsClass = RiscAbs.Inputs.class -) -public final class RiscAbs extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscAbs"; - - private Output y; - - public RiscAbs(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - y = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscAbs operation. - * - * @param scope current scope - * @param x The x value - * @param data type for {@code RiscAbs} output and operands - * @return a new instance of RiscAbs - */ - @Endpoint( - describeByClass = true - ) - public static RiscAbs create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscAbs"); - opBuilder.addInput(x.asOutput()); - return new RiscAbs<>(opBuilder.build()); - } - - /** - * Gets y. - * - * @return y. 
- */ - public Output y() { - return y; - } - - @Override - public Output asOutput() { - return y; - } - - @OpInputsMetadata( - outputsClass = RiscAbs.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The x input - */ - public final Operand x; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscAbs<>(op), op, Arrays.asList("T")); - int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAdd.java deleted file mode 100644 index aaa550bc070..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAdd.java +++ /dev/null @@ -1,121 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * Returns x + y element-wise. - * NOTE: {@code risc.RiscAdd} does not supports broadcasting. - *

    Given two input tensors, the {@code tf.risc_add} operation computes the sum for every element in the tensor. - *

    Both input and output have a range {@code (-inf, inf)}. - * - * @param data type for {@code z} output - */ -@OpMetadata( - opType = RiscAdd.OP_NAME, - inputsClass = RiscAdd.Inputs.class -) -public final class RiscAdd extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscAdd"; - - private Output z; - - public RiscAdd(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - z = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscAdd operation. - * - * @param scope current scope - * @param x The x value - * @param y The y value - * @param data type for {@code RiscAdd} output and operands - * @return a new instance of RiscAdd - */ - @Endpoint( - describeByClass = true - ) - public static RiscAdd create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscAdd"); - opBuilder.addInput(x.asOutput()); - opBuilder.addInput(y.asOutput()); - return new RiscAdd<>(opBuilder.build()); - } - - /** - * Gets z. - * - * @return z. 
- */ - public Output z() { - return z; - } - - @Override - public Output asOutput() { - return z; - } - - @OpInputsMetadata( - outputsClass = RiscAdd.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The x input - */ - public final Operand x; - - /** - * The y input - */ - public final Operand y; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscAdd<>(op), op, Arrays.asList("T")); - int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - y = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryArithmetic.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryArithmetic.java deleted file mode 100644 index 5ab050f4c6b..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryArithmetic.java +++ /dev/null @@ -1,127 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscBinaryArithmetic operation - * - * @param data type for {@code z} output - */ -@OpMetadata( - opType = RiscBinaryArithmetic.OP_NAME, - inputsClass = RiscBinaryArithmetic.Inputs.class -) -public final class RiscBinaryArithmetic extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscBinaryArithmetic"; - - private Output z; - - public RiscBinaryArithmetic(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - z = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscBinaryArithmetic operation. - * - * @param scope current scope - * @param x The x value - * @param y The y value - * @param opType The value of the opType attribute - * @param data type for {@code RiscBinaryArithmetic} output and operands - * @return a new instance of RiscBinaryArithmetic - */ - @Endpoint( - describeByClass = true - ) - public static RiscBinaryArithmetic create(Scope scope, Operand x, - Operand y, String opType) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscBinaryArithmetic"); - opBuilder.addInput(x.asOutput()); - opBuilder.addInput(y.asOutput()); - opBuilder.setAttr("op_type", opType); - return new RiscBinaryArithmetic<>(opBuilder.build()); - } - - /** - * Gets z. - * - * @return z. 
- */ - public Output z() { - return z; - } - - @Override - public Output asOutput() { - return z; - } - - @OpInputsMetadata( - outputsClass = RiscBinaryArithmetic.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The x input - */ - public final Operand x; - - /** - * The y input - */ - public final Operand y; - - /** - * The opType attribute - */ - public final String opType; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscBinaryArithmetic<>(op), op, Arrays.asList("op_type", "T")); - int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - y = (Operand) op.input(inputIndex++); - opType = op.attributes().getAttrString("op_type"); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryComparison.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryComparison.java deleted file mode 100644 index 5881c9bf342..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryComparison.java +++ /dev/null @@ -1,126 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.TBool; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscBinaryComparison operation - */ -@OpMetadata( - opType = RiscBinaryComparison.OP_NAME, - inputsClass = RiscBinaryComparison.Inputs.class -) -public final class RiscBinaryComparison extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscBinaryComparison"; - - private Output z; - - public RiscBinaryComparison(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - z = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscBinaryComparison operation. - * - * @param scope current scope - * @param x The x value - * @param y The y value - * @param opType The value of the opType attribute - * @param data type for {@code RiscBinaryComparison} output and operands - * @return a new instance of RiscBinaryComparison - */ - @Endpoint( - describeByClass = true - ) - public static RiscBinaryComparison create(Scope scope, Operand x, - Operand y, String opType) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscBinaryComparison"); - opBuilder.addInput(x.asOutput()); - opBuilder.addInput(y.asOutput()); - opBuilder.setAttr("op_type", opType); - return new RiscBinaryComparison(opBuilder.build()); - } - - /** - * Gets z. - * - * @return z. 
- */ - public Output z() { - return z; - } - - @Override - public Output asOutput() { - return z; - } - - @OpInputsMetadata( - outputsClass = RiscBinaryComparison.class - ) - public static class Inputs extends RawOpInputs { - /** - * The x input - */ - public final Operand x; - - /** - * The y input - */ - public final Operand y; - - /** - * The opType attribute - */ - public final String opType; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscBinaryComparison(op), op, Arrays.asList("op_type", "T")); - int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - y = (Operand) op.input(inputIndex++); - opType = op.attributes().getAttrString("op_type"); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBitcast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBitcast.java deleted file mode 100644 index 89d34941e52..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBitcast.java +++ /dev/null @@ -1,120 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.Operands; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TType; - -/** - * The RiscBitcast operation - * - * @param data type for {@code y} output - */ -@OpMetadata( - opType = RiscBitcast.OP_NAME, - inputsClass = RiscBitcast.Inputs.class -) -public final class RiscBitcast extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscBitcast"; - - private Output y; - - public RiscBitcast(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - y = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscBitcast operation. - * - * @param scope current scope - * @param x The x value - * @param DstT The value of the DstT attribute - * @param data type for {@code RiscBitcast} output and operands - * @return a new instance of RiscBitcast - */ - @Endpoint( - describeByClass = true - ) - public static RiscBitcast create(Scope scope, Operand x, - Class DstT) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscBitcast"); - opBuilder.addInput(x.asOutput()); - opBuilder.setAttr("DstT", Operands.toDataType(DstT)); - return new RiscBitcast<>(opBuilder.build()); - } - - /** - * Gets y. - * - * @return y. 
- */ - public Output y() { - return y; - } - - @Override - public Output asOutput() { - return y; - } - - @OpInputsMetadata( - outputsClass = RiscBitcast.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The x input - */ - public final Operand x; - - /** - * The SrcT attribute - */ - public final DataType SrcT; - - /** - * The DstT attribute - */ - public final DataType DstT; - - public Inputs(GraphOperation op) { - super(new RiscBitcast<>(op), op, Arrays.asList("SrcT", "DstT")); - int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - SrcT = op.attributes().getAttrType("SrcT"); - DstT = op.attributes().getAttrType("DstT"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBroadcast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBroadcast.java deleted file mode 100644 index 873f1ddde89..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBroadcast.java +++ /dev/null @@ -1,126 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; -import org.tensorflow.types.family.TType; - -/** - * The RiscBroadcast operation - * - * @param data type for {@code output} output - */ -@OpMetadata( - opType = RiscBroadcast.OP_NAME, - inputsClass = RiscBroadcast.Inputs.class -) -public final class RiscBroadcast extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscBroadcast"; - - private Output output; - - public RiscBroadcast(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - output = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscBroadcast operation. - * - * @param scope current scope - * @param input The input value - * @param shape The shape value - * @param data type for {@code RiscBroadcast} output and operands - * @return a new instance of RiscBroadcast - */ - @Endpoint( - describeByClass = true - ) - public static RiscBroadcast create(Scope scope, Operand input, - Operand shape) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscBroadcast"); - opBuilder.addInput(input.asOutput()); - opBuilder.addInput(shape.asOutput()); - return new RiscBroadcast<>(opBuilder.build()); - } - - /** - * Gets output. - * - * @return output. 
- */ - public Output output() { - return output; - } - - @Override - public Output asOutput() { - return output; - } - - @OpInputsMetadata( - outputsClass = RiscBroadcast.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The input input - */ - public final Operand input; - - /** - * The shape input - */ - public final Operand shape; - - /** - * The T attribute - */ - public final DataType T; - - /** - * The Tidx attribute - */ - public final DataType Tidx; - - public Inputs(GraphOperation op) { - super(new RiscBroadcast<>(op), op, Arrays.asList("T", "Tidx")); - int inputIndex = 0; - input = (Operand) op.input(inputIndex++); - shape = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - Tidx = op.attributes().getAttrType("Tidx"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCast.java deleted file mode 100644 index 764a86428d8..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCast.java +++ /dev/null @@ -1,120 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.Operands; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TType; - -/** - * The RiscCast operation - * - * @param data type for {@code y} output - */ -@OpMetadata( - opType = RiscCast.OP_NAME, - inputsClass = RiscCast.Inputs.class -) -public final class RiscCast extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscCast"; - - private Output y; - - public RiscCast(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - y = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscCast operation. - * - * @param scope current scope - * @param x The x value - * @param DstT The value of the DstT attribute - * @param data type for {@code RiscCast} output and operands - * @return a new instance of RiscCast - */ - @Endpoint( - describeByClass = true - ) - public static RiscCast create(Scope scope, Operand x, - Class DstT) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscCast"); - opBuilder.addInput(x.asOutput()); - opBuilder.setAttr("DstT", Operands.toDataType(DstT)); - return new RiscCast<>(opBuilder.build()); - } - - /** - * Gets y. - * - * @return y. 
- */ - public Output y() { - return y; - } - - @Override - public Output asOutput() { - return y; - } - - @OpInputsMetadata( - outputsClass = RiscCast.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The x input - */ - public final Operand x; - - /** - * The SrcT attribute - */ - public final DataType SrcT; - - /** - * The DstT attribute - */ - public final DataType DstT; - - public Inputs(GraphOperation op) { - super(new RiscCast<>(op), op, Arrays.asList("SrcT", "DstT")); - int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - SrcT = op.attributes().getAttrType("SrcT"); - DstT = op.attributes().getAttrType("DstT"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCeil.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCeil.java deleted file mode 100644 index c252b4e31d2..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCeil.java +++ /dev/null @@ -1,110 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscCeil operation - * - * @param data type for {@code y} output - */ -@OpMetadata( - opType = RiscCeil.OP_NAME, - inputsClass = RiscCeil.Inputs.class -) -public final class RiscCeil extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscCeil"; - - private Output y; - - public RiscCeil(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - y = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscCeil operation. - * - * @param scope current scope - * @param x The x value - * @param data type for {@code RiscCeil} output and operands - * @return a new instance of RiscCeil - */ - @Endpoint( - describeByClass = true - ) - public static RiscCeil create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscCeil"); - opBuilder.addInput(x.asOutput()); - return new RiscCeil<>(opBuilder.build()); - } - - /** - * Gets y. - * - * @return y. 
- */ - public Output y() { - return y; - } - - @Override - public Output asOutput() { - return y; - } - - @OpInputsMetadata( - outputsClass = RiscCeil.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The x input - */ - public final Operand x; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscCeil<>(op), op, Arrays.asList("T")); - int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConcat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConcat.java deleted file mode 100644 index 4fb947711c0..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConcat.java +++ /dev/null @@ -1,129 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.Operands; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; -import org.tensorflow.types.family.TType; - -/** - * The RiscConcat operation - * - * @param data type for {@code output} output - */ -@OpMetadata( - opType = RiscConcat.OP_NAME, - inputsClass = RiscConcat.Inputs.class -) -public final class RiscConcat extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscConcat"; - - private Output output; - - public RiscConcat(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - output = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscConcat operation. - * - * @param scope current scope - * @param values The values value - * @param axis The axis value - * @param data type for {@code RiscConcat} output and operands - * @return a new instance of RiscConcat - */ - @Endpoint( - describeByClass = true - ) - public static RiscConcat create(Scope scope, Iterable> values, - Operand axis) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscConcat"); - opBuilder.addInputList(Operands.asOutputs(values)); - opBuilder.addInput(axis.asOutput()); - return new RiscConcat<>(opBuilder.build()); - } - - /** - * Gets output. - * - * @return output. 
- */ - public Output output() { - return output; - } - - @Override - public Output asOutput() { - return output; - } - - @OpInputsMetadata( - outputsClass = RiscConcat.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The values input - */ - public final Iterable> values; - - /** - * The axis input - */ - public final Operand axis; - - /** - * The T attribute - */ - public final DataType T; - - /** - * The Tidx attribute - */ - public final DataType Tidx; - - public Inputs(GraphOperation op) { - super(new RiscConcat<>(op), op, Arrays.asList("T", "Tidx")); - int inputIndex = 0; - int valuesLength = op.inputListLength("values"); - values = Arrays.asList((Operand[]) op.inputList(inputIndex, valuesLength)); - inputIndex += valuesLength; - axis = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - Tidx = op.attributes().getAttrType("Tidx"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCondition.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCondition.java deleted file mode 100644 index 53e2c186ba3..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCondition.java +++ /dev/null @@ -1,144 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.ConcreteFunction; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.Operands; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.TBool; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscCondition operation - * - * @param data type for {@code output} output - */ -@OpMetadata( - opType = RiscCondition.OP_NAME, - inputsClass = RiscCondition.Inputs.class -) -public final class RiscCondition extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscCondition"; - - private Output output; - - public RiscCondition(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - output = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscCondition operation. 
- * - * @param scope current scope - * @param pred The pred value - * @param inputTrue The inputTrue value - * @param inputFalse The inputFalse value - * @param funcTrue The value of the funcTrue attribute - * @param funcFalse The value of the funcFalse attribute - * @param DstT The value of the DstT attribute - * @param data type for {@code RiscCondition} output and operands - * @param data type for {@code RiscCondition} output and operands - * @return a new instance of RiscCondition - */ - @Endpoint( - describeByClass = true - ) - public static RiscCondition create(Scope scope, - Operand pred, Operand inputTrue, Operand inputFalse, ConcreteFunction funcTrue, - ConcreteFunction funcFalse, Class DstT) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscCondition"); - opBuilder.addInput(pred.asOutput()); - opBuilder.addInput(inputTrue.asOutput()); - opBuilder.addInput(inputFalse.asOutput()); - opBuilder.setAttr("func_true", funcTrue); - opBuilder.setAttr("func_false", funcFalse); - opBuilder.setAttr("DstT", Operands.toDataType(DstT)); - return new RiscCondition<>(opBuilder.build()); - } - - /** - * Gets output. - * - * @return output. 
- */ - public Output output() { - return output; - } - - @Override - public Output asOutput() { - return output; - } - - @OpInputsMetadata( - outputsClass = RiscCondition.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The pred input - */ - public final Operand pred; - - /** - * The inputTrue input - */ - public final Operand inputTrue; - - /** - * The inputFalse input - */ - public final Operand inputFalse; - - /** - * The SrcT attribute - */ - public final DataType SrcT; - - /** - * The DstT attribute - */ - public final DataType DstT; - - public Inputs(GraphOperation op) { - super(new RiscCondition<>(op), op, Arrays.asList("SrcT", "DstT")); - int inputIndex = 0; - pred = (Operand) op.input(inputIndex++); - inputTrue = (Operand) op.input(inputIndex++); - inputFalse = (Operand) op.input(inputIndex++); - SrcT = op.attributes().getAttrType("SrcT"); - DstT = op.attributes().getAttrType("DstT"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConv.java deleted file mode 100644 index 0b910dc463b..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConv.java +++ /dev/null @@ -1,234 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.risc; - -import java.util.Arrays; -import java.util.List; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscConv operation - * - * @param data type for {@code output} output - */ -@OpMetadata( - opType = RiscConv.OP_NAME, - inputsClass = RiscConv.Inputs.class -) -public final class RiscConv extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscConv"; - - private Output output; - - public RiscConv(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - output = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscConv operation. - * - * @param scope current scope - * @param input The input value - * @param filter The filter value - * @param strides The value of the strides attribute - * @param options carries optional attribute values - * @param data type for {@code RiscConv} output and operands - * @return a new instance of RiscConv - */ - @Endpoint( - describeByClass = true - ) - public static RiscConv create(Scope scope, Operand input, - Operand filter, List strides, Options... 
options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscConv"); - opBuilder.addInput(input.asOutput()); - opBuilder.addInput(filter.asOutput()); - long[] stridesArray = new long[strides.size()]; - for (int i = 0 ; i < stridesArray.length ; i++) { - stridesArray[i] = strides.get(i); - } - opBuilder.setAttr("strides", stridesArray); - if (options != null) { - for (Options opts : options) { - if (opts.dataFormat != null) { - opBuilder.setAttr("data_format", opts.dataFormat); - } - if (opts.dilations != null) { - long[] dilationsArray = new long[opts.dilations.size()]; - for (int i = 0 ; i < dilationsArray.length ; i++) { - dilationsArray[i] = opts.dilations.get(i); - } - opBuilder.setAttr("dilations", dilationsArray); - } - } - } - return new RiscConv<>(opBuilder.build()); - } - - /** - * Sets the dataFormat option. - * - * @param dataFormat the dataFormat option - * @return this Options instance. - */ - public static Options dataFormat(String dataFormat) { - return new Options().dataFormat(dataFormat); - } - - /** - * Sets the dilations option. - * - * @param dilations the dilations option - * @return this Options instance. - */ - public static Options dilations(List dilations) { - return new Options().dilations(dilations); - } - - /** - * Sets the dilations option. - * - * @param dilations the dilations option - * @return this Options instance. - */ - public static Options dilations(Long... dilations) { - return new Options().dilations(dilations); - } - - /** - * Gets output. - * - * @return output. - */ - public Output output() { - return output; - } - - @Override - public Output asOutput() { - return output; - } - - /** - * Optional attributes for {@link org.tensorflow.op.risc.RiscConv} - */ - public static class Options { - private String dataFormat; - - private List dilations; - - private Options() { - } - - /** - * Sets the dataFormat option. - * - * @param dataFormat the dataFormat option - * @return this Options instance. 
- */ - public Options dataFormat(String dataFormat) { - this.dataFormat = dataFormat; - return this; - } - - /** - * Sets the dilations option. - * - * @param dilations the dilations option - * @return this Options instance. - */ - public Options dilations(List dilations) { - this.dilations = dilations; - return this; - } - - /** - * Sets the dilations option. - * - * @param dilations the dilations option - * @return this Options instance. - */ - public Options dilations(Long... dilations) { - this.dilations = Arrays.asList(dilations); - return this; - } - } - - @OpInputsMetadata( - outputsClass = RiscConv.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The input input - */ - public final Operand input; - - /** - * The filter input - */ - public final Operand filter; - - /** - * The T attribute - */ - public final DataType T; - - /** - * The strides attribute - */ - public final long[] strides; - - /** - * The dataFormat attribute - */ - public final String dataFormat; - - /** - * The dilations attribute - */ - public final long[] dilations; - - public Inputs(GraphOperation op) { - super(new RiscConv<>(op), op, Arrays.asList("T", "strides", "data_format", "dilations")); - int inputIndex = 0; - input = (Operand) op.input(inputIndex++); - filter = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - strides = op.attributes().getAttrIntList("strides"); - dataFormat = op.attributes().getAttrString("data_format"); - dilations = op.attributes().getAttrIntList("dilations"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCos.java deleted file mode 100644 index c150d5397b2..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCos.java +++ /dev/null @@ -1,110 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscCos operation - * - * @param data type for {@code y} output - */ -@OpMetadata( - opType = RiscCos.OP_NAME, - inputsClass = RiscCos.Inputs.class -) -public final class RiscCos extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscCos"; - - private Output y; - - public RiscCos(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - y = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscCos operation. 
- * - * @param scope current scope - * @param x The x value - * @param data type for {@code RiscCos} output and operands - * @return a new instance of RiscCos - */ - @Endpoint( - describeByClass = true - ) - public static RiscCos create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscCos"); - opBuilder.addInput(x.asOutput()); - return new RiscCos<>(opBuilder.build()); - } - - /** - * Gets y. - * - * @return y. - */ - public Output y() { - return y; - } - - @Override - public Output asOutput() { - return y; - } - - @OpInputsMetadata( - outputsClass = RiscCos.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The x input - */ - public final Operand x; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscCos<>(op), op, Arrays.asList("T")); - int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDiv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDiv.java deleted file mode 100644 index 6556fe5c2dd..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDiv.java +++ /dev/null @@ -1,118 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscDiv operation - * - * @param data type for {@code z} output - */ -@OpMetadata( - opType = RiscDiv.OP_NAME, - inputsClass = RiscDiv.Inputs.class -) -public final class RiscDiv extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscDiv"; - - private Output z; - - public RiscDiv(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - z = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscDiv operation. - * - * @param scope current scope - * @param x The x value - * @param y The y value - * @param data type for {@code RiscDiv} output and operands - * @return a new instance of RiscDiv - */ - @Endpoint( - describeByClass = true - ) - public static RiscDiv create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscDiv"); - opBuilder.addInput(x.asOutput()); - opBuilder.addInput(y.asOutput()); - return new RiscDiv<>(opBuilder.build()); - } - - /** - * Gets z. - * - * @return z. 
- */ - public Output z() { - return z; - } - - @Override - public Output asOutput() { - return z; - } - - @OpInputsMetadata( - outputsClass = RiscDiv.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The x input - */ - public final Operand x; - - /** - * The y input - */ - public final Operand y; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscDiv<>(op), op, Arrays.asList("T")); - int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - y = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDot.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDot.java deleted file mode 100644 index 97d34e57a55..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDot.java +++ /dev/null @@ -1,196 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscDot operation - * - * @param data type for {@code product} output - */ -@OpMetadata( - opType = RiscDot.OP_NAME, - inputsClass = RiscDot.Inputs.class -) -public final class RiscDot extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscDot"; - - private Output product; - - public RiscDot(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - product = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscDot operation. - * - * @param scope current scope - * @param a The a value - * @param b The b value - * @param options carries optional attribute values - * @param data type for {@code RiscDot} output and operands - * @return a new instance of RiscDot - */ - @Endpoint( - describeByClass = true - ) - public static RiscDot create(Scope scope, Operand a, Operand b, - Options... 
options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscDot"); - opBuilder.addInput(a.asOutput()); - opBuilder.addInput(b.asOutput()); - if (options != null) { - for (Options opts : options) { - if (opts.transposeA != null) { - opBuilder.setAttr("transpose_a", opts.transposeA); - } - if (opts.transposeB != null) { - opBuilder.setAttr("transpose_b", opts.transposeB); - } - } - } - return new RiscDot<>(opBuilder.build()); - } - - /** - * Sets the transposeA option. - * - * @param transposeA the transposeA option - * @return this Options instance. - */ - public static Options transposeA(Boolean transposeA) { - return new Options().transposeA(transposeA); - } - - /** - * Sets the transposeB option. - * - * @param transposeB the transposeB option - * @return this Options instance. - */ - public static Options transposeB(Boolean transposeB) { - return new Options().transposeB(transposeB); - } - - /** - * Gets product. - * - * @return product. - */ - public Output product() { - return product; - } - - @Override - public Output asOutput() { - return product; - } - - /** - * Optional attributes for {@link org.tensorflow.op.risc.RiscDot} - */ - public static class Options { - private Boolean transposeA; - - private Boolean transposeB; - - private Options() { - } - - /** - * Sets the transposeA option. - * - * @param transposeA the transposeA option - * @return this Options instance. - */ - public Options transposeA(Boolean transposeA) { - this.transposeA = transposeA; - return this; - } - - /** - * Sets the transposeB option. - * - * @param transposeB the transposeB option - * @return this Options instance. 
- */ - public Options transposeB(Boolean transposeB) { - this.transposeB = transposeB; - return this; - } - } - - @OpInputsMetadata( - outputsClass = RiscDot.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The a input - */ - public final Operand a; - - /** - * The b input - */ - public final Operand b; - - /** - * The transposeA attribute - */ - public final boolean transposeA; - - /** - * The transposeB attribute - */ - public final boolean transposeB; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscDot<>(op), op, Arrays.asList("transpose_a", "transpose_b", "T")); - int inputIndex = 0; - a = (Operand) op.input(inputIndex++); - b = (Operand) op.input(inputIndex++); - transposeA = op.attributes().getAttrBool("transpose_a"); - transposeB = op.attributes().getAttrBool("transpose_b"); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscExp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscExp.java deleted file mode 100644 index f0b7b04ac98..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscExp.java +++ /dev/null @@ -1,110 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscExp operation - * - * @param data type for {@code y} output - */ -@OpMetadata( - opType = RiscExp.OP_NAME, - inputsClass = RiscExp.Inputs.class -) -public final class RiscExp extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscExp"; - - private Output y; - - public RiscExp(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - y = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscExp operation. - * - * @param scope current scope - * @param x The x value - * @param data type for {@code RiscExp} output and operands - * @return a new instance of RiscExp - */ - @Endpoint( - describeByClass = true - ) - public static RiscExp create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscExp"); - opBuilder.addInput(x.asOutput()); - return new RiscExp<>(opBuilder.build()); - } - - /** - * Gets y. - * - * @return y. 
- */ - public Output y() { - return y; - } - - @Override - public Output asOutput() { - return y; - } - - @OpInputsMetadata( - outputsClass = RiscExp.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The x input - */ - public final Operand x; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscExp<>(op), op, Arrays.asList("T")); - int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFloor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFloor.java deleted file mode 100644 index 20ec1a8ba17..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFloor.java +++ /dev/null @@ -1,110 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscFloor operation - * - * @param data type for {@code y} output - */ -@OpMetadata( - opType = RiscFloor.OP_NAME, - inputsClass = RiscFloor.Inputs.class -) -public final class RiscFloor extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscFloor"; - - private Output y; - - public RiscFloor(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - y = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscFloor operation. - * - * @param scope current scope - * @param x The x value - * @param data type for {@code RiscFloor} output and operands - * @return a new instance of RiscFloor - */ - @Endpoint( - describeByClass = true - ) - public static RiscFloor create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscFloor"); - opBuilder.addInput(x.asOutput()); - return new RiscFloor<>(opBuilder.build()); - } - - /** - * Gets y. - * - * @return y. 
- */ - public Output y() { - return y; - } - - @Override - public Output asOutput() { - return y; - } - - @OpInputsMetadata( - outputsClass = RiscFloor.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The x input - */ - public final Operand x; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscFloor<>(op), op, Arrays.asList("T")); - int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscGather.java deleted file mode 100644 index ff6b5418760..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscGather.java +++ /dev/null @@ -1,185 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; -import org.tensorflow.types.family.TType; - -/** - * The RiscGather operation - * - * @param data type for {@code output} output - */ -@OpMetadata( - opType = RiscGather.OP_NAME, - inputsClass = RiscGather.Inputs.class -) -public final class RiscGather extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscGather"; - - private Output output; - - public RiscGather(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - output = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscGather operation. - * - * @param scope current scope - * @param params The params value - * @param indices The indices value - * @param axis The axis value - * @param options carries optional attribute values - * @param data type for {@code RiscGather} output and operands - * @return a new instance of RiscGather - */ - @Endpoint( - describeByClass = true - ) - public static RiscGather create(Scope scope, Operand params, - Operand indices, Operand axis, Options... 
options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscGather"); - opBuilder.addInput(params.asOutput()); - opBuilder.addInput(indices.asOutput()); - opBuilder.addInput(axis.asOutput()); - if (options != null) { - for (Options opts : options) { - if (opts.batchDims != null) { - opBuilder.setAttr("batch_dims", opts.batchDims); - } - } - } - return new RiscGather<>(opBuilder.build()); - } - - /** - * Sets the batchDims option. - * - * @param batchDims the batchDims option - * @return this Options instance. - */ - public static Options batchDims(Long batchDims) { - return new Options().batchDims(batchDims); - } - - /** - * Gets output. - * - * @return output. - */ - public Output output() { - return output; - } - - @Override - public Output asOutput() { - return output; - } - - /** - * Optional attributes for {@link org.tensorflow.op.risc.RiscGather} - */ - public static class Options { - private Long batchDims; - - private Options() { - } - - /** - * Sets the batchDims option. - * - * @param batchDims the batchDims option - * @return this Options instance. 
- */ - public Options batchDims(Long batchDims) { - this.batchDims = batchDims; - return this; - } - } - - @OpInputsMetadata( - outputsClass = RiscGather.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The params input - */ - public final Operand params; - - /** - * The indices input - */ - public final Operand indices; - - /** - * The axis input - */ - public final Operand axis; - - /** - * The batchDims attribute - */ - public final long batchDims; - - /** - * The Tparams attribute - */ - public final DataType Tparams; - - /** - * The Tindices attribute - */ - public final DataType Tindices; - - /** - * The Taxis attribute - */ - public final DataType Taxis; - - public Inputs(GraphOperation op) { - super(new RiscGather<>(op), op, Arrays.asList("batch_dims", "Tparams", "Tindices", "Taxis")); - int inputIndex = 0; - params = (Operand) op.input(inputIndex++); - indices = (Operand) op.input(inputIndex++); - axis = (Operand) op.input(inputIndex++); - batchDims = op.attributes().getAttrInt("batch_dims"); - Tparams = op.attributes().getAttrType("Tparams"); - Tindices = op.attributes().getAttrType("Tindices"); - Taxis = op.attributes().getAttrType("Taxis"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscImag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscImag.java deleted file mode 100644 index 6fa65b2f301..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscImag.java +++ /dev/null @@ -1,136 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.Operands; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.TFloat32; -import org.tensorflow.types.family.TNumber; -import org.tensorflow.types.family.TType; - -/** - * The RiscImag operation - * - * @param data type for {@code output} output - */ -@OpMetadata( - opType = RiscImag.OP_NAME, - inputsClass = RiscImag.Inputs.class -) -public final class RiscImag extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscImag"; - - private Output output; - - public RiscImag(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - output = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscImag operation. 
- * - * @param scope current scope - * @param input The input value - * @param Tout The value of the Tout attribute - * @param data type for {@code RiscImag} output and operands - * @return a new instance of RiscImag - */ - @Endpoint( - describeByClass = true - ) - public static RiscImag create(Scope scope, Operand input, - Class Tout) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscImag"); - opBuilder.addInput(input.asOutput()); - opBuilder.setAttr("Tout", Operands.toDataType(Tout)); - return new RiscImag<>(opBuilder.build()); - } - - /** - * Factory method to create a class wrapping a new RiscImag operation, with the default output types. - * - * @param scope current scope - * @param input The input value - * @return a new instance of RiscImag, with default output types - */ - @Endpoint( - describeByClass = true - ) - public static RiscImag create(Scope scope, Operand input) { - return create(scope, input, TFloat32.class); - } - - /** - * Gets output. - * - * @return output. 
- */ - public Output output() { - return output; - } - - @Override - public Output asOutput() { - return output; - } - - @OpInputsMetadata( - outputsClass = RiscImag.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The input input - */ - public final Operand input; - - /** - * The T attribute - */ - public final DataType T; - - /** - * The Tout attribute - */ - public final DataType Tout; - - public Inputs(GraphOperation op) { - super(new RiscImag<>(op), op, Arrays.asList("T", "Tout")); - int inputIndex = 0; - input = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - Tout = op.attributes().getAttrType("Tout"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscIsFinite.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscIsFinite.java deleted file mode 100644 index 45772d5b001..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscIsFinite.java +++ /dev/null @@ -1,108 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.TBool; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscIsFinite operation - */ -@OpMetadata( - opType = RiscIsFinite.OP_NAME, - inputsClass = RiscIsFinite.Inputs.class -) -public final class RiscIsFinite extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscIsFinite"; - - private Output y; - - public RiscIsFinite(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - y = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscIsFinite operation. - * - * @param scope current scope - * @param x The x value - * @return a new instance of RiscIsFinite - */ - @Endpoint( - describeByClass = true - ) - public static RiscIsFinite create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscIsFinite"); - opBuilder.addInput(x.asOutput()); - return new RiscIsFinite(opBuilder.build()); - } - - /** - * Gets y. - * - * @return y. 
- */ - public Output y() { - return y; - } - - @Override - public Output asOutput() { - return y; - } - - @OpInputsMetadata( - outputsClass = RiscIsFinite.class - ) - public static class Inputs extends RawOpInputs { - /** - * The x input - */ - public final Operand x; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscIsFinite(op), op, Arrays.asList("T")); - int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLog.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLog.java deleted file mode 100644 index 2418350f067..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLog.java +++ /dev/null @@ -1,110 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscLog operation - * - * @param data type for {@code y} output - */ -@OpMetadata( - opType = RiscLog.OP_NAME, - inputsClass = RiscLog.Inputs.class -) -public final class RiscLog extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscLog"; - - private Output y; - - public RiscLog(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - y = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscLog operation. - * - * @param scope current scope - * @param x The x value - * @param data type for {@code RiscLog} output and operands - * @return a new instance of RiscLog - */ - @Endpoint( - describeByClass = true - ) - public static RiscLog create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscLog"); - opBuilder.addInput(x.asOutput()); - return new RiscLog<>(opBuilder.build()); - } - - /** - * Gets y. - * - * @return y. 
- */ - public Output y() { - return y; - } - - @Override - public Output asOutput() { - return y; - } - - @OpInputsMetadata( - outputsClass = RiscLog.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The x input - */ - public final Operand x; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscLog<>(op), op, Arrays.asList("T")); - int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMax.java deleted file mode 100644 index f3709d295b6..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMax.java +++ /dev/null @@ -1,120 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * Returns max(x, y) element-wise. - * NOTE: {@code risc.RiscMax} does not supports broadcasting. - *

    Given two input tensors, the {@code tf.risc_max} operation computes the maximum for every element in the tensor. - * - * @param data type for {@code max} output - */ -@OpMetadata( - opType = RiscMax.OP_NAME, - inputsClass = RiscMax.Inputs.class -) -public final class RiscMax extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscMax"; - - private Output max; - - public RiscMax(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - max = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscMax operation. - * - * @param scope current scope - * @param x The x value - * @param y The y value - * @param data type for {@code RiscMax} output and operands - * @return a new instance of RiscMax - */ - @Endpoint( - describeByClass = true - ) - public static RiscMax create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscMax"); - opBuilder.addInput(x.asOutput()); - opBuilder.addInput(y.asOutput()); - return new RiscMax<>(opBuilder.build()); - } - - /** - * Gets max. - * - * @return max. 
- */ - public Output max() { - return max; - } - - @Override - public Output asOutput() { - return max; - } - - @OpInputsMetadata( - outputsClass = RiscMax.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The x input - */ - public final Operand x; - - /** - * The y input - */ - public final Operand y; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscMax<>(op), op, Arrays.asList("T")); - int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - y = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMin.java deleted file mode 100644 index 33b088561b7..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMin.java +++ /dev/null @@ -1,118 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscMin operation - * - * @param data type for {@code z} output - */ -@OpMetadata( - opType = RiscMin.OP_NAME, - inputsClass = RiscMin.Inputs.class -) -public final class RiscMin extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscMin"; - - private Output z; - - public RiscMin(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - z = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscMin operation. - * - * @param scope current scope - * @param x The x value - * @param y The y value - * @param data type for {@code RiscMin} output and operands - * @return a new instance of RiscMin - */ - @Endpoint( - describeByClass = true - ) - public static RiscMin create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscMin"); - opBuilder.addInput(x.asOutput()); - opBuilder.addInput(y.asOutput()); - return new RiscMin<>(opBuilder.build()); - } - - /** - * Gets z. - * - * @return z. 
- */ - public Output z() { - return z; - } - - @Override - public Output asOutput() { - return z; - } - - @OpInputsMetadata( - outputsClass = RiscMin.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The x input - */ - public final Operand x; - - /** - * The y input - */ - public final Operand y; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscMin<>(op), op, Arrays.asList("T")); - int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - y = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMul.java deleted file mode 100644 index bc51b7fd9ca..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMul.java +++ /dev/null @@ -1,118 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscMul operation - * - * @param data type for {@code z} output - */ -@OpMetadata( - opType = RiscMul.OP_NAME, - inputsClass = RiscMul.Inputs.class -) -public final class RiscMul extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscMul"; - - private Output z; - - public RiscMul(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - z = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscMul operation. - * - * @param scope current scope - * @param x The x value - * @param y The y value - * @param data type for {@code RiscMul} output and operands - * @return a new instance of RiscMul - */ - @Endpoint( - describeByClass = true - ) - public static RiscMul create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscMul"); - opBuilder.addInput(x.asOutput()); - opBuilder.addInput(y.asOutput()); - return new RiscMul<>(opBuilder.build()); - } - - /** - * Gets z. - * - * @return z. 
- */ - public Output z() { - return z; - } - - @Override - public Output asOutput() { - return z; - } - - @OpInputsMetadata( - outputsClass = RiscMul.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The x input - */ - public final Operand x; - - /** - * The y input - */ - public final Operand y; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscMul<>(op), op, Arrays.asList("T")); - int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - y = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscNeg.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscNeg.java deleted file mode 100644 index 0eacdcd5c08..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscNeg.java +++ /dev/null @@ -1,110 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscNeg operation - * - * @param data type for {@code y} output - */ -@OpMetadata( - opType = RiscNeg.OP_NAME, - inputsClass = RiscNeg.Inputs.class -) -public final class RiscNeg extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscNeg"; - - private Output y; - - public RiscNeg(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - y = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscNeg operation. - * - * @param scope current scope - * @param x The x value - * @param data type for {@code RiscNeg} output and operands - * @return a new instance of RiscNeg - */ - @Endpoint( - describeByClass = true - ) - public static RiscNeg create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscNeg"); - opBuilder.addInput(x.asOutput()); - return new RiscNeg<>(opBuilder.build()); - } - - /** - * Gets y. - * - * @return y. 
- */ - public Output y() { - return y; - } - - @Override - public Output asOutput() { - return y; - } - - @OpInputsMetadata( - outputsClass = RiscNeg.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The x input - */ - public final Operand x; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscNeg<>(op), op, Arrays.asList("T")); - int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPad.java deleted file mode 100644 index 7c9178eb5cf..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPad.java +++ /dev/null @@ -1,133 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscPad operation - * - * @param data type for {@code output} output - */ -@OpMetadata( - opType = RiscPad.OP_NAME, - inputsClass = RiscPad.Inputs.class -) -public final class RiscPad extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscPad"; - - private Output output; - - public RiscPad(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - output = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscPad operation. - * - * @param scope current scope - * @param input The input value - * @param paddings The paddings value - * @param constantValues The constantValues value - * @param data type for {@code RiscPad} output and operands - * @return a new instance of RiscPad - */ - @Endpoint( - describeByClass = true - ) - public static RiscPad create(Scope scope, Operand input, - Operand paddings, Operand constantValues) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscPad"); - opBuilder.addInput(input.asOutput()); - opBuilder.addInput(paddings.asOutput()); - opBuilder.addInput(constantValues.asOutput()); - return new RiscPad<>(opBuilder.build()); - } - - /** - * Gets output. - * - * @return output. 
- */ - public Output output() { - return output; - } - - @Override - public Output asOutput() { - return output; - } - - @OpInputsMetadata( - outputsClass = RiscPad.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The input input - */ - public final Operand input; - - /** - * The paddings input - */ - public final Operand paddings; - - /** - * The constantValues input - */ - public final Operand constantValues; - - /** - * The T attribute - */ - public final DataType T; - - /** - * The Tpaddings attribute - */ - public final DataType Tpaddings; - - public Inputs(GraphOperation op) { - super(new RiscPad<>(op), op, Arrays.asList("T", "Tpaddings")); - int inputIndex = 0; - input = (Operand) op.input(inputIndex++); - paddings = (Operand) op.input(inputIndex++); - constantValues = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - Tpaddings = op.attributes().getAttrType("Tpaddings"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPool.java deleted file mode 100644 index fe9574f672d..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPool.java +++ /dev/null @@ -1,189 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.risc; - -import java.util.Arrays; -import java.util.List; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscPool operation - * - * @param data type for {@code output} output - */ -@OpMetadata( - opType = RiscPool.OP_NAME, - inputsClass = RiscPool.Inputs.class -) -public final class RiscPool extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscPool"; - - private Output output; - - public RiscPool(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - output = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscPool operation. - * - * @param scope current scope - * @param value The value value - * @param ksize The value of the ksize attribute - * @param strides The value of the strides attribute - * @param poolingType The value of the poolingType attribute - * @param options carries optional attribute values - * @param data type for {@code RiscPool} output and operands - * @return a new instance of RiscPool - */ - @Endpoint( - describeByClass = true - ) - public static RiscPool create(Scope scope, Operand value, - List ksize, List strides, String poolingType, Options... 
options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscPool"); - opBuilder.addInput(value.asOutput()); - long[] ksizeArray = new long[ksize.size()]; - for (int i = 0 ; i < ksizeArray.length ; i++) { - ksizeArray[i] = ksize.get(i); - } - opBuilder.setAttr("ksize", ksizeArray); - long[] stridesArray = new long[strides.size()]; - for (int i = 0 ; i < stridesArray.length ; i++) { - stridesArray[i] = strides.get(i); - } - opBuilder.setAttr("strides", stridesArray); - opBuilder.setAttr("pooling_type", poolingType); - if (options != null) { - for (Options opts : options) { - if (opts.dataFormat != null) { - opBuilder.setAttr("data_format", opts.dataFormat); - } - } - } - return new RiscPool<>(opBuilder.build()); - } - - /** - * Sets the dataFormat option. - * - * @param dataFormat the dataFormat option - * @return this Options instance. - */ - public static Options dataFormat(String dataFormat) { - return new Options().dataFormat(dataFormat); - } - - /** - * Gets output. - * - * @return output. - */ - public Output output() { - return output; - } - - @Override - public Output asOutput() { - return output; - } - - /** - * Optional attributes for {@link org.tensorflow.op.risc.RiscPool} - */ - public static class Options { - private String dataFormat; - - private Options() { - } - - /** - * Sets the dataFormat option. - * - * @param dataFormat the dataFormat option - * @return this Options instance. 
- */ - public Options dataFormat(String dataFormat) { - this.dataFormat = dataFormat; - return this; - } - } - - @OpInputsMetadata( - outputsClass = RiscPool.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The value input - */ - public final Operand value; - - /** - * The ksize attribute - */ - public final long[] ksize; - - /** - * The strides attribute - */ - public final long[] strides; - - /** - * The poolingType attribute - */ - public final String poolingType; - - /** - * The dataFormat attribute - */ - public final String dataFormat; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscPool<>(op), op, Arrays.asList("ksize", "strides", "pooling_type", "data_format", "T")); - int inputIndex = 0; - value = (Operand) op.input(inputIndex++); - ksize = op.attributes().getAttrIntList("ksize"); - strides = op.attributes().getAttrIntList("strides"); - poolingType = op.attributes().getAttrString("pooling_type"); - dataFormat = op.attributes().getAttrString("data_format"); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPow.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPow.java deleted file mode 100644 index c33db0add83..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPow.java +++ /dev/null @@ -1,118 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscPow operation - * - * @param data type for {@code z} output - */ -@OpMetadata( - opType = RiscPow.OP_NAME, - inputsClass = RiscPow.Inputs.class -) -public final class RiscPow extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscPow"; - - private Output z; - - public RiscPow(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - z = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscPow operation. - * - * @param scope current scope - * @param x The x value - * @param y The y value - * @param data type for {@code RiscPow} output and operands - * @return a new instance of RiscPow - */ - @Endpoint( - describeByClass = true - ) - public static RiscPow create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscPow"); - opBuilder.addInput(x.asOutput()); - opBuilder.addInput(y.asOutput()); - return new RiscPow<>(opBuilder.build()); - } - - /** - * Gets z. - * - * @return z. 
- */ - public Output z() { - return z; - } - - @Override - public Output asOutput() { - return z; - } - - @OpInputsMetadata( - outputsClass = RiscPow.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The x input - */ - public final Operand x; - - /** - * The y input - */ - public final Operand y; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscPow<>(op), op, Arrays.asList("T")); - int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - y = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRandomUniform.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRandomUniform.java deleted file mode 100644 index 5c7bee6d0fe..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRandomUniform.java +++ /dev/null @@ -1,154 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.TFloat32; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscRandomUniform operation - */ -@OpMetadata( - opType = RiscRandomUniform.OP_NAME, - inputsClass = RiscRandomUniform.Inputs.class -) -public final class RiscRandomUniform extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscRandomUniform"; - - private Output output; - - public RiscRandomUniform(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - output = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscRandomUniform operation. - * - * @param scope current scope - * @param shape The shape value - * @param options carries optional attribute values - * @return a new instance of RiscRandomUniform - */ - @Endpoint( - describeByClass = true - ) - public static RiscRandomUniform create(Scope scope, Operand shape, - Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscRandomUniform"); - opBuilder.addInput(shape.asOutput()); - if (options != null) { - for (Options opts : options) { - if (opts.seed != null) { - opBuilder.setAttr("seed", opts.seed); - } - } - } - return new RiscRandomUniform(opBuilder.build()); - } - - /** - * Sets the seed option. - * - * @param seed the seed option - * @return this Options instance. 
- */ - public static Options seed(Long seed) { - return new Options().seed(seed); - } - - /** - * Gets output. - * - * @return output. - */ - public Output output() { - return output; - } - - @Override - public Output asOutput() { - return output; - } - - /** - * Optional attributes for {@link org.tensorflow.op.risc.RiscRandomUniform} - */ - public static class Options { - private Long seed; - - private Options() { - } - - /** - * Sets the seed option. - * - * @param seed the seed option - * @return this Options instance. - */ - public Options seed(Long seed) { - this.seed = seed; - return this; - } - } - - @OpInputsMetadata( - outputsClass = RiscRandomUniform.class - ) - public static class Inputs extends RawOpInputs { - /** - * The shape input - */ - public final Operand shape; - - /** - * The seed attribute - */ - public final long seed; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscRandomUniform(op), op, Arrays.asList("seed", "T")); - int inputIndex = 0; - shape = (Operand) op.input(inputIndex++); - seed = op.attributes().getAttrInt("seed"); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReal.java deleted file mode 100644 index 66e96fabc27..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReal.java +++ /dev/null @@ -1,136 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.Operands; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.TFloat32; -import org.tensorflow.types.family.TNumber; -import org.tensorflow.types.family.TType; - -/** - * The RiscReal operation - * - * @param data type for {@code output} output - */ -@OpMetadata( - opType = RiscReal.OP_NAME, - inputsClass = RiscReal.Inputs.class -) -public final class RiscReal extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscReal"; - - private Output output; - - public RiscReal(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - output = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscReal operation. 
- * - * @param scope current scope - * @param input The input value - * @param Tout The value of the Tout attribute - * @param data type for {@code RiscReal} output and operands - * @return a new instance of RiscReal - */ - @Endpoint( - describeByClass = true - ) - public static RiscReal create(Scope scope, Operand input, - Class Tout) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscReal"); - opBuilder.addInput(input.asOutput()); - opBuilder.setAttr("Tout", Operands.toDataType(Tout)); - return new RiscReal<>(opBuilder.build()); - } - - /** - * Factory method to create a class wrapping a new RiscReal operation, with the default output types. - * - * @param scope current scope - * @param input The input value - * @return a new instance of RiscReal, with default output types - */ - @Endpoint( - describeByClass = true - ) - public static RiscReal create(Scope scope, Operand input) { - return create(scope, input, TFloat32.class); - } - - /** - * Gets output. - * - * @return output. 
- */ - public Output output() { - return output; - } - - @Override - public Output asOutput() { - return output; - } - - @OpInputsMetadata( - outputsClass = RiscReal.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The input input - */ - public final Operand input; - - /** - * The T attribute - */ - public final DataType T; - - /** - * The Tout attribute - */ - public final DataType Tout; - - public Inputs(GraphOperation op) { - super(new RiscReal<>(op), op, Arrays.asList("T", "Tout")); - int inputIndex = 0; - input = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - Tout = op.attributes().getAttrType("Tout"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReduce.java deleted file mode 100644 index a978e967ab5..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReduce.java +++ /dev/null @@ -1,133 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscReduce operation - * - * @param data type for {@code output} output - */ -@OpMetadata( - opType = RiscReduce.OP_NAME, - inputsClass = RiscReduce.Inputs.class -) -public final class RiscReduce extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscReduce"; - - private Output output; - - public RiscReduce(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - output = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscReduce operation. - * - * @param scope current scope - * @param tensor The tensor value - * @param axis The axis value - * @param reduceType The value of the reduceType attribute - * @param data type for {@code RiscReduce} output and operands - * @return a new instance of RiscReduce - */ - @Endpoint( - describeByClass = true - ) - public static RiscReduce create(Scope scope, Operand tensor, - Operand axis, String reduceType) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscReduce"); - opBuilder.addInput(tensor.asOutput()); - opBuilder.addInput(axis.asOutput()); - opBuilder.setAttr("reduce_type", reduceType); - return new RiscReduce<>(opBuilder.build()); - } - - /** - * Gets output. - * - * @return output. 
- */ - public Output output() { - return output; - } - - @Override - public Output asOutput() { - return output; - } - - @OpInputsMetadata( - outputsClass = RiscReduce.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The tensor input - */ - public final Operand tensor; - - /** - * The axis input - */ - public final Operand axis; - - /** - * The reduceType attribute - */ - public final String reduceType; - - /** - * The Index attribute - */ - public final DataType Index; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscReduce<>(op), op, Arrays.asList("reduce_type", "Index", "T")); - int inputIndex = 0; - tensor = (Operand) op.input(inputIndex++); - axis = (Operand) op.input(inputIndex++); - reduceType = op.attributes().getAttrString("reduce_type"); - Index = op.attributes().getAttrType("Index"); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRem.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRem.java deleted file mode 100644 index 4e87ed630c9..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRem.java +++ /dev/null @@ -1,118 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscRem operation - * - * @param data type for {@code z} output - */ -@OpMetadata( - opType = RiscRem.OP_NAME, - inputsClass = RiscRem.Inputs.class -) -public final class RiscRem extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscRem"; - - private Output z; - - public RiscRem(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - z = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscRem operation. - * - * @param scope current scope - * @param x The x value - * @param y The y value - * @param data type for {@code RiscRem} output and operands - * @return a new instance of RiscRem - */ - @Endpoint( - describeByClass = true - ) - public static RiscRem create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscRem"); - opBuilder.addInput(x.asOutput()); - opBuilder.addInput(y.asOutput()); - return new RiscRem<>(opBuilder.build()); - } - - /** - * Gets z. - * - * @return z. 
- */ - public Output z() { - return z; - } - - @Override - public Output asOutput() { - return z; - } - - @OpInputsMetadata( - outputsClass = RiscRem.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The x input - */ - public final Operand x; - - /** - * The y input - */ - public final Operand y; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscRem<>(op), op, Arrays.asList("T")); - int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - y = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReshape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReshape.java deleted file mode 100644 index 89fe6b2f6fe..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReshape.java +++ /dev/null @@ -1,125 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscReshape operation - * - * @param data type for {@code output} output - */ -@OpMetadata( - opType = RiscReshape.OP_NAME, - inputsClass = RiscReshape.Inputs.class -) -public final class RiscReshape extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscReshape"; - - private Output output; - - public RiscReshape(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - output = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscReshape operation. - * - * @param scope current scope - * @param tensor The tensor value - * @param shape The shape value - * @param data type for {@code RiscReshape} output and operands - * @return a new instance of RiscReshape - */ - @Endpoint( - describeByClass = true - ) - public static RiscReshape create(Scope scope, Operand tensor, - Operand shape) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscReshape"); - opBuilder.addInput(tensor.asOutput()); - opBuilder.addInput(shape.asOutput()); - return new RiscReshape<>(opBuilder.build()); - } - - /** - * Gets output. - * - * @return output. 
- */ - public Output output() { - return output; - } - - @Override - public Output asOutput() { - return output; - } - - @OpInputsMetadata( - outputsClass = RiscReshape.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The tensor input - */ - public final Operand tensor; - - /** - * The shape input - */ - public final Operand shape; - - /** - * The T attribute - */ - public final DataType T; - - /** - * The Tshape attribute - */ - public final DataType Tshape; - - public Inputs(GraphOperation op) { - super(new RiscReshape<>(op), op, Arrays.asList("T", "Tshape")); - int inputIndex = 0; - tensor = (Operand) op.input(inputIndex++); - shape = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - Tshape = op.attributes().getAttrType("Tshape"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscScatter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscScatter.java deleted file mode 100644 index 37b5c69d7c6..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscScatter.java +++ /dev/null @@ -1,134 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscScatter operation - * - * @param data type for {@code output} output - */ -@OpMetadata( - opType = RiscScatter.OP_NAME, - inputsClass = RiscScatter.Inputs.class -) -public final class RiscScatter extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscScatter"; - - private Output output; - - public RiscScatter(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - output = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscScatter operation. - * - * @param scope current scope - * @param indices The indices value - * @param updates The updates value - * @param shape The shape value - * @param data type for {@code RiscScatter} output and operands - * @param data type for {@code RiscScatter} output and operands - * @return a new instance of RiscScatter - */ - @Endpoint( - describeByClass = true - ) - public static RiscScatter create(Scope scope, - Operand indices, Operand updates, Operand shape) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscScatter"); - opBuilder.addInput(indices.asOutput()); - opBuilder.addInput(updates.asOutput()); - opBuilder.addInput(shape.asOutput()); - return new RiscScatter<>(opBuilder.build()); - } - - /** - * Gets output. - * - * @return output. 
- */ - public Output output() { - return output; - } - - @Override - public Output asOutput() { - return output; - } - - @OpInputsMetadata( - outputsClass = RiscScatter.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The indices input - */ - public final Operand indices; - - /** - * The updates input - */ - public final Operand updates; - - /** - * The shape input - */ - public final Operand shape; - - /** - * The T attribute - */ - public final DataType T; - - /** - * The Tindices attribute - */ - public final DataType Tindices; - - public Inputs(GraphOperation op) { - super(new RiscScatter<>(op), op, Arrays.asList("T", "Tindices")); - int inputIndex = 0; - indices = (Operand) op.input(inputIndex++); - updates = (Operand) op.input(inputIndex++); - shape = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - Tindices = op.attributes().getAttrType("Tindices"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscShape.java deleted file mode 100644 index 51614887315..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscShape.java +++ /dev/null @@ -1,135 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.Operands; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.TInt32; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscShape operation - * - * @param data type for {@code output} output - */ -@OpMetadata( - opType = RiscShape.OP_NAME, - inputsClass = RiscShape.Inputs.class -) -public final class RiscShape extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscShape"; - - private Output output; - - public RiscShape(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - output = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscShape operation. 
- * - * @param scope current scope - * @param input The input value - * @param outType The value of the outType attribute - * @param data type for {@code RiscShape} output and operands - * @return a new instance of RiscShape - */ - @Endpoint( - describeByClass = true - ) - public static RiscShape create(Scope scope, - Operand input, Class outType) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscShape"); - opBuilder.addInput(input.asOutput()); - opBuilder.setAttr("out_type", Operands.toDataType(outType)); - return new RiscShape<>(opBuilder.build()); - } - - /** - * Factory method to create a class wrapping a new RiscShape operation, with the default output types. - * - * @param scope current scope - * @param input The input value - * @return a new instance of RiscShape, with default output types - */ - @Endpoint( - describeByClass = true - ) - public static RiscShape create(Scope scope, Operand input) { - return create(scope, input, TInt32.class); - } - - /** - * Gets output. - * - * @return output. 
- */ - public Output output() { - return output; - } - - @Override - public Output asOutput() { - return output; - } - - @OpInputsMetadata( - outputsClass = RiscShape.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The input input - */ - public final Operand input; - - /** - * The T attribute - */ - public final DataType T; - - /** - * The outType attribute - */ - public final DataType outType; - - public Inputs(GraphOperation op) { - super(new RiscShape<>(op), op, Arrays.asList("T", "out_type")); - int inputIndex = 0; - input = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - outType = op.attributes().getAttrType("out_type"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSign.java deleted file mode 100644 index 8b84667e605..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSign.java +++ /dev/null @@ -1,110 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscSign operation - * - * @param data type for {@code y} output - */ -@OpMetadata( - opType = RiscSign.OP_NAME, - inputsClass = RiscSign.Inputs.class -) -public final class RiscSign extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscSign"; - - private Output y; - - public RiscSign(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - y = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscSign operation. - * - * @param scope current scope - * @param x The x value - * @param data type for {@code RiscSign} output and operands - * @return a new instance of RiscSign - */ - @Endpoint( - describeByClass = true - ) - public static RiscSign create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscSign"); - opBuilder.addInput(x.asOutput()); - return new RiscSign<>(opBuilder.build()); - } - - /** - * Gets y. - * - * @return y. 
- */ - public Output y() { - return y; - } - - @Override - public Output asOutput() { - return y; - } - - @OpInputsMetadata( - outputsClass = RiscSign.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The x input - */ - public final Operand x; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscSign<>(op), op, Arrays.asList("T")); - int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSlice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSlice.java deleted file mode 100644 index 808b9645db1..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSlice.java +++ /dev/null @@ -1,134 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscSlice operation - * - * @param data type for {@code output} output - */ -@OpMetadata( - opType = RiscSlice.OP_NAME, - inputsClass = RiscSlice.Inputs.class -) -public final class RiscSlice extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscSlice"; - - private Output output; - - public RiscSlice(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - output = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscSlice operation. - * - * @param scope current scope - * @param input The input value - * @param begin The begin value - * @param sizeOutput The sizeOutput value - * @param data type for {@code RiscSlice} output and operands - * @param data type for {@code RiscSlice} output and operands - * @return a new instance of RiscSlice - */ - @Endpoint( - describeByClass = true - ) - public static RiscSlice create(Scope scope, - Operand input, Operand begin, Operand sizeOutput) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscSlice"); - opBuilder.addInput(input.asOutput()); - opBuilder.addInput(begin.asOutput()); - opBuilder.addInput(sizeOutput.asOutput()); - return new RiscSlice<>(opBuilder.build()); - } - - /** - * Gets output. - * - * @return output. 
- */ - public Output output() { - return output; - } - - @Override - public Output asOutput() { - return output; - } - - @OpInputsMetadata( - outputsClass = RiscSlice.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The input input - */ - public final Operand input; - - /** - * The begin input - */ - public final Operand begin; - - /** - * The sizeOutput input - */ - public final Operand sizeOutput; - - /** - * The T attribute - */ - public final DataType T; - - /** - * The Index attribute - */ - public final DataType Index; - - public Inputs(GraphOperation op) { - super(new RiscSlice<>(op), op, Arrays.asList("T", "Index")); - int inputIndex = 0; - input = (Operand) op.input(inputIndex++); - begin = (Operand) op.input(inputIndex++); - sizeOutput = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - Index = op.attributes().getAttrType("Index"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSort.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSort.java deleted file mode 100644 index 9cd18955703..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSort.java +++ /dev/null @@ -1,133 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscSort operation - * - * @param data type for {@code output} output - */ -@OpMetadata( - opType = RiscSort.OP_NAME, - inputsClass = RiscSort.Inputs.class -) -public final class RiscSort extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscSort"; - - private Output output; - - public RiscSort(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - output = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscSort operation. - * - * @param scope current scope - * @param input The input value - * @param axis The axis value - * @param direction The value of the direction attribute - * @param data type for {@code RiscSort} output and operands - * @return a new instance of RiscSort - */ - @Endpoint( - describeByClass = true - ) - public static RiscSort create(Scope scope, Operand input, - Operand axis, String direction) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscSort"); - opBuilder.addInput(input.asOutput()); - opBuilder.addInput(axis.asOutput()); - opBuilder.setAttr("direction", direction); - return new RiscSort<>(opBuilder.build()); - } - - /** - * Gets output. 
- * - * @return output. - */ - public Output output() { - return output; - } - - @Override - public Output asOutput() { - return output; - } - - @OpInputsMetadata( - outputsClass = RiscSort.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The input input - */ - public final Operand input; - - /** - * The axis input - */ - public final Operand axis; - - /** - * The Index attribute - */ - public final DataType Index; - - /** - * The T attribute - */ - public final DataType T; - - /** - * The direction attribute - */ - public final String direction; - - public Inputs(GraphOperation op) { - super(new RiscSort<>(op), op, Arrays.asList("Index", "T", "direction")); - int inputIndex = 0; - input = (Operand) op.input(inputIndex++); - axis = (Operand) op.input(inputIndex++); - Index = op.attributes().getAttrType("Index"); - T = op.attributes().getAttrType("T"); - direction = op.attributes().getAttrString("direction"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSqueeze.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSqueeze.java deleted file mode 100644 index d63a3018ff0..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSqueeze.java +++ /dev/null @@ -1,182 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.risc; - -import java.util.Arrays; -import java.util.List; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TType; - -/** - * The RiscSqueeze operation - * - * @param data type for {@code output} output - */ -@OpMetadata( - opType = RiscSqueeze.OP_NAME, - inputsClass = RiscSqueeze.Inputs.class -) -public final class RiscSqueeze extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscSqueeze"; - - private Output output; - - public RiscSqueeze(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - output = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscSqueeze operation. - * - * @param scope current scope - * @param input The input value - * @param options carries optional attribute values - * @param data type for {@code RiscSqueeze} output and operands - * @return a new instance of RiscSqueeze - */ - @Endpoint( - describeByClass = true - ) - public static RiscSqueeze create(Scope scope, Operand input, - Options... 
options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscSqueeze"); - opBuilder.addInput(input.asOutput()); - if (options != null) { - for (Options opts : options) { - if (opts.squeezeDims != null) { - long[] squeezeDimsArray = new long[opts.squeezeDims.size()]; - for (int i = 0 ; i < squeezeDimsArray.length ; i++) { - squeezeDimsArray[i] = opts.squeezeDims.get(i); - } - opBuilder.setAttr("squeeze_dims", squeezeDimsArray); - } - } - } - return new RiscSqueeze<>(opBuilder.build()); - } - - /** - * Sets the squeezeDims option. - * - * @param squeezeDims the squeezeDims option - * @return this Options instance. - */ - public static Options squeezeDims(List squeezeDims) { - return new Options().squeezeDims(squeezeDims); - } - - /** - * Sets the squeezeDims option. - * - * @param squeezeDims the squeezeDims option - * @return this Options instance. - */ - public static Options squeezeDims(Long... squeezeDims) { - return new Options().squeezeDims(squeezeDims); - } - - /** - * Gets output. - * - * @return output. - */ - public Output output() { - return output; - } - - @Override - public Output asOutput() { - return output; - } - - /** - * Optional attributes for {@link org.tensorflow.op.risc.RiscSqueeze} - */ - public static class Options { - private List squeezeDims; - - private Options() { - } - - /** - * Sets the squeezeDims option. - * - * @param squeezeDims the squeezeDims option - * @return this Options instance. - */ - public Options squeezeDims(List squeezeDims) { - this.squeezeDims = squeezeDims; - return this; - } - - /** - * Sets the squeezeDims option. - * - * @param squeezeDims the squeezeDims option - * @return this Options instance. - */ - public Options squeezeDims(Long... 
squeezeDims) { - this.squeezeDims = Arrays.asList(squeezeDims); - return this; - } - } - - @OpInputsMetadata( - outputsClass = RiscSqueeze.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The input input - */ - public final Operand input; - - /** - * The T attribute - */ - public final DataType T; - - /** - * The squeezeDims attribute - */ - public final long[] squeezeDims; - - public Inputs(GraphOperation op) { - super(new RiscSqueeze<>(op), op, Arrays.asList("T", "squeeze_dims")); - int inputIndex = 0; - input = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - squeezeDims = op.attributes().getAttrIntList("squeeze_dims"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSub.java deleted file mode 100644 index 55277160351..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSub.java +++ /dev/null @@ -1,118 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscSub operation - * - * @param data type for {@code z} output - */ -@OpMetadata( - opType = RiscSub.OP_NAME, - inputsClass = RiscSub.Inputs.class -) -public final class RiscSub extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscSub"; - - private Output z; - - public RiscSub(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - z = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscSub operation. - * - * @param scope current scope - * @param x The x value - * @param y The y value - * @param data type for {@code RiscSub} output and operands - * @return a new instance of RiscSub - */ - @Endpoint( - describeByClass = true - ) - public static RiscSub create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscSub"); - opBuilder.addInput(x.asOutput()); - opBuilder.addInput(y.asOutput()); - return new RiscSub<>(opBuilder.build()); - } - - /** - * Gets z. - * - * @return z. 
- */ - public Output z() { - return z; - } - - @Override - public Output asOutput() { - return z; - } - - @OpInputsMetadata( - outputsClass = RiscSub.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The x input - */ - public final Operand x; - - /** - * The y input - */ - public final Operand y; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscSub<>(op), op, Arrays.asList("T")); - int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - y = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTranspose.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTranspose.java deleted file mode 100644 index 32be8a3725c..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTranspose.java +++ /dev/null @@ -1,126 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; -import org.tensorflow.types.family.TType; - -/** - * The RiscTranspose operation - * - * @param data type for {@code y} output - */ -@OpMetadata( - opType = RiscTranspose.OP_NAME, - inputsClass = RiscTranspose.Inputs.class -) -public final class RiscTranspose extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscTranspose"; - - private Output y; - - public RiscTranspose(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - y = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscTranspose operation. - * - * @param scope current scope - * @param x The x value - * @param perm The perm value - * @param data type for {@code RiscTranspose} output and operands - * @return a new instance of RiscTranspose - */ - @Endpoint( - describeByClass = true - ) - public static RiscTranspose create(Scope scope, Operand x, - Operand perm) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscTranspose"); - opBuilder.addInput(x.asOutput()); - opBuilder.addInput(perm.asOutput()); - return new RiscTranspose<>(opBuilder.build()); - } - - /** - * Gets y. - * - * @return y. 
- */ - public Output y() { - return y; - } - - @Override - public Output asOutput() { - return y; - } - - @OpInputsMetadata( - outputsClass = RiscTranspose.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The x input - */ - public final Operand x; - - /** - * The perm input - */ - public final Operand perm; - - /** - * The T attribute - */ - public final DataType T; - - /** - * The Tperm attribute - */ - public final DataType Tperm; - - public Inputs(GraphOperation op) { - super(new RiscTranspose<>(op), op, Arrays.asList("T", "Tperm")); - int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - perm = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - Tperm = op.attributes().getAttrType("Tperm"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTriangularSolve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTriangularSolve.java deleted file mode 100644 index 8895cf7e1c1..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTriangularSolve.java +++ /dev/null @@ -1,196 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscTriangularSolve operation - * - * @param data type for {@code output} output - */ -@OpMetadata( - opType = RiscTriangularSolve.OP_NAME, - inputsClass = RiscTriangularSolve.Inputs.class -) -public final class RiscTriangularSolve extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscTriangularSolve"; - - private Output output; - - public RiscTriangularSolve(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - output = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscTriangularSolve operation. - * - * @param scope current scope - * @param matrix The matrix value - * @param rhs The rhs value - * @param options carries optional attribute values - * @param data type for {@code RiscTriangularSolve} output and operands - * @return a new instance of RiscTriangularSolve - */ - @Endpoint( - describeByClass = true - ) - public static RiscTriangularSolve create(Scope scope, Operand matrix, - Operand rhs, Options... 
options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscTriangularSolve"); - opBuilder.addInput(matrix.asOutput()); - opBuilder.addInput(rhs.asOutput()); - if (options != null) { - for (Options opts : options) { - if (opts.lower != null) { - opBuilder.setAttr("lower", opts.lower); - } - if (opts.adjoint != null) { - opBuilder.setAttr("adjoint", opts.adjoint); - } - } - } - return new RiscTriangularSolve<>(opBuilder.build()); - } - - /** - * Sets the lower option. - * - * @param lower the lower option - * @return this Options instance. - */ - public static Options lower(Boolean lower) { - return new Options().lower(lower); - } - - /** - * Sets the adjoint option. - * - * @param adjoint the adjoint option - * @return this Options instance. - */ - public static Options adjoint(Boolean adjoint) { - return new Options().adjoint(adjoint); - } - - /** - * Gets output. - * - * @return output. - */ - public Output output() { - return output; - } - - @Override - public Output asOutput() { - return output; - } - - /** - * Optional attributes for {@link org.tensorflow.op.risc.RiscTriangularSolve} - */ - public static class Options { - private Boolean lower; - - private Boolean adjoint; - - private Options() { - } - - /** - * Sets the lower option. - * - * @param lower the lower option - * @return this Options instance. - */ - public Options lower(Boolean lower) { - this.lower = lower; - return this; - } - - /** - * Sets the adjoint option. - * - * @param adjoint the adjoint option - * @return this Options instance. 
- */ - public Options adjoint(Boolean adjoint) { - this.adjoint = adjoint; - return this; - } - } - - @OpInputsMetadata( - outputsClass = RiscTriangularSolve.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The matrix input - */ - public final Operand matrix; - - /** - * The rhs input - */ - public final Operand rhs; - - /** - * The lower attribute - */ - public final boolean lower; - - /** - * The adjoint attribute - */ - public final boolean adjoint; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscTriangularSolve<>(op), op, Arrays.asList("lower", "adjoint", "T")); - int inputIndex = 0; - matrix = (Operand) op.input(inputIndex++); - rhs = (Operand) op.input(inputIndex++); - lower = op.attributes().getAttrBool("lower"); - adjoint = op.attributes().getAttrBool("adjoint"); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscUnary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscUnary.java deleted file mode 100644 index 1e9a8f38454..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscUnary.java +++ /dev/null @@ -1,118 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.risc; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * The RiscUnary operation - * - * @param data type for {@code y} output - */ -@OpMetadata( - opType = RiscUnary.OP_NAME, - inputsClass = RiscUnary.Inputs.class -) -public final class RiscUnary extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscUnary"; - - private Output y; - - public RiscUnary(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - y = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RiscUnary operation. - * - * @param scope current scope - * @param x The x value - * @param opType The value of the opType attribute - * @param data type for {@code RiscUnary} output and operands - * @return a new instance of RiscUnary - */ - @Endpoint( - describeByClass = true - ) - public static RiscUnary create(Scope scope, Operand x, String opType) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscUnary"); - opBuilder.addInput(x.asOutput()); - opBuilder.setAttr("op_type", opType); - return new RiscUnary<>(opBuilder.build()); - } - - /** - * Gets y. - * - * @return y. 
- */ - public Output y() { - return y; - } - - @Override - public Output asOutput() { - return y; - } - - @OpInputsMetadata( - outputsClass = RiscUnary.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The x input - */ - public final Operand x; - - /** - * The opType attribute - */ - public final String opType; - - /** - * The T attribute - */ - public final DataType T; - - public Inputs(GraphOperation op) { - super(new RiscUnary<>(op), op, Arrays.asList("op_type", "T")); - int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - opType = op.attributes().getAttrString("op_type"); - T = op.attributes().getAttrType("T"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscWhile.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscWhile.java deleted file mode 100644 index 11fe3f85744..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscWhile.java +++ /dev/null @@ -1,225 +0,0 @@ -/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.risc; - -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import org.tensorflow.ConcreteFunction; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.ndarray.Shape; -import org.tensorflow.op.Operands; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.DataType; -import org.tensorflow.types.family.TType; - -/** - * The RiscWhile operation - */ -@OpMetadata( - opType = RiscWhile.OP_NAME, - inputsClass = RiscWhile.Inputs.class -) -public final class RiscWhile extends RawOp implements Iterable> { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RiscWhile"; - - private List> output; - - @SuppressWarnings("unchecked") - public RiscWhile(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - int outputLength = operation.outputListLength("output"); - output = Arrays.asList(operation.outputList(outputIdx, outputLength)); - outputIdx += outputLength; - } - - /** - * Factory method to create a class wrapping a new RiscWhile operation. - * - * @param scope current scope - * @param input The input value - * @param cond The value of the cond attribute - * @param body The value of the body attribute - * @param options carries optional attribute values - * @return a new instance of RiscWhile - */ - @Endpoint( - describeByClass = true - ) - public static RiscWhile create(Scope scope, Iterable> input, ConcreteFunction cond, - ConcreteFunction body, Options... 
options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscWhile"); - opBuilder.addInputList(Operands.asOutputs(input)); - opBuilder.setAttr("cond", cond); - opBuilder.setAttr("body", body); - if (options != null) { - for (Options opts : options) { - if (opts.outputShapes != null) { - Shape[] outputShapesArray = new Shape[opts.outputShapes.size()]; - for (int i = 0 ; i < outputShapesArray.length ; i++) { - outputShapesArray[i] = opts.outputShapes.get(i); - } - opBuilder.setAttr("output_shapes", outputShapesArray); - } - if (opts.parallelIterations != null) { - opBuilder.setAttr("parallel_iterations", opts.parallelIterations); - } - } - } - return new RiscWhile(opBuilder.build()); - } - - /** - * Sets the outputShapes option. - * - * @param outputShapes the outputShapes option - * @return this Options instance. - */ - public static Options outputShapes(List outputShapes) { - return new Options().outputShapes(outputShapes); - } - - /** - * Sets the outputShapes option. - * - * @param outputShapes the outputShapes option - * @return this Options instance. - */ - public static Options outputShapes(Shape... outputShapes) { - return new Options().outputShapes(outputShapes); - } - - /** - * Sets the parallelIterations option. - * - * @param parallelIterations the parallelIterations option - * @return this Options instance. - */ - public static Options parallelIterations(Long parallelIterations) { - return new Options().parallelIterations(parallelIterations); - } - - /** - * Gets output. - * - * @return output. - */ - public List> output() { - return output; - } - - @Override - @SuppressWarnings({"rawtypes", "unchecked"}) - public Iterator> iterator() { - return (Iterator) output.iterator(); - } - - /** - * Optional attributes for {@link org.tensorflow.op.risc.RiscWhile} - */ - public static class Options { - private List outputShapes; - - private Long parallelIterations; - - private Options() { - } - - /** - * Sets the outputShapes option. 
- * - * @param outputShapes the outputShapes option - * @return this Options instance. - */ - public Options outputShapes(List outputShapes) { - this.outputShapes = outputShapes; - return this; - } - - /** - * Sets the outputShapes option. - * - * @param outputShapes the outputShapes option - * @return this Options instance. - */ - public Options outputShapes(Shape... outputShapes) { - this.outputShapes = Arrays.asList(outputShapes); - return this; - } - - /** - * Sets the parallelIterations option. - * - * @param parallelIterations the parallelIterations option - * @return this Options instance. - */ - public Options parallelIterations(Long parallelIterations) { - this.parallelIterations = parallelIterations; - return this; - } - } - - @OpInputsMetadata( - outputsClass = RiscWhile.class - ) - public static class Inputs extends RawOpInputs { - /** - * The input input - */ - public final Iterable> input; - - /** - * The T attribute - */ - public final DataType[] T; - - /** - * The outputShapes attribute - */ - public final Shape[] outputShapes; - - /** - * The parallelIterations attribute - */ - public final long parallelIterations; - - public Inputs(GraphOperation op) { - super(new RiscWhile(op), op, Arrays.asList("T", "output_shapes", "parallel_iterations")); - int inputIndex = 0; - int inputLength = op.inputListLength("input"); - input = Arrays.asList((Operand[]) op.inputList(inputIndex, inputLength)); - inputIndex += inputLength; - T = op.attributes().getAttrTypeList("T"); - outputShapes = op.attributes().getAttrShapeList("output_shapes"); - parallelIterations = op.attributes().getAttrInt("parallel_iterations"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft.java index 42ef1e6bdf9..220c72d1723 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft.java @@ -37,8 +37,6 @@ * Fast Fourier transform. * Computes the 1-dimensional discrete Fourier transform over the inner-most * dimension of {@code input}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Fft.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft2d.java index 118d2db63e0..4f78086027b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft2d.java @@ -37,8 +37,6 @@ * 2D fast Fourier transform. * Computes the 2-dimensional discrete Fourier transform over the inner-most * 2 dimensions of {@code input}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Fft2d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft3d.java index 6195de0eae8..7f5478e228a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft3d.java @@ -37,8 +37,6 @@ * 3D fast Fourier transform. * Computes the 3-dimensional discrete Fourier transform over the inner-most 3 * dimensions of {@code input}. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = Fft3d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/FftNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/FftNd.java index b7f4268150c..8f530229379 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/FftNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/FftNd.java @@ -44,8 +44,6 @@ * is not given, the default shape(input) is used. *

    Axes mean the dimensions to perform the transform on. Default is to perform on * all axes. - * - * @param data type for {@code output} output */ @OpMetadata( opType = FftNd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft.java index 3a313a6f23e..6b1f6fa6d8c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft.java @@ -37,8 +37,6 @@ * Inverse fast Fourier transform. * Computes the inverse 1-dimensional discrete Fourier transform over the * inner-most dimension of {@code input}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Ifft.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft2d.java index ad0902bf3a1..2c4c19b2ead 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft2d.java @@ -37,8 +37,6 @@ * Inverse 2D fast Fourier transform. * Computes the inverse 2-dimensional discrete Fourier transform over the * inner-most 2 dimensions of {@code input}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Ifft2d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft3d.java index 82251ed232c..efcb06fafcd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft3d.java @@ -37,8 +37,6 @@ * Inverse 3D fast Fourier transform. 
* Computes the inverse 3-dimensional discrete Fourier transform over the * inner-most 3 dimensions of {@code input}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Ifft3d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/IfftNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/IfftNd.java index 82855d2bab4..181e3756015 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/IfftNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/IfftNd.java @@ -44,8 +44,6 @@ * is not given, the default shape(input) is used. *

    Axes mean the dimensions to perform the transform on. Default is to perform on * all axes. - * - * @param data type for {@code output} output */ @OpMetadata( opType = IfftNd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft.java index ecf2703b6e8..50f6daef0a0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft.java @@ -50,8 +50,6 @@ *

    Along the axis {@code signal.Irfft} is computed on, if {@code fft_length / 2 + 1} is smaller * than the corresponding dimension of {@code input}, the dimension is cropped. If it is * larger, the dimension is padded with zeros. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Irfft.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft2d.java index 8a448fd2a52..01214bfec41 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft2d.java @@ -51,8 +51,6 @@ * {@code fft_length / 2 + 1} for the inner-most dimension) is smaller than the * corresponding dimension of {@code input}, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Irfft2d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft3d.java index a336791cb83..c83389668b4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft3d.java @@ -51,8 +51,6 @@ * {@code fft_length / 2 + 1} for the inner-most dimension) is smaller than the * corresponding dimension of {@code input}, the dimension is cropped. If it is larger, * the dimension is padded with zeros. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = Irfft3d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/IrfftNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/IrfftNd.java index 93006aea156..5e83c9f4dc3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/IrfftNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/IrfftNd.java @@ -48,8 +48,6 @@ * is not given, the default shape(input) is used. *

    Axes mean the dimensions to perform the transform on. Default is to perform on * all axes. - * - * @param data type for {@code output} output */ @OpMetadata( opType = IrfftNd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft.java index f5c14f6eec7..c4d7b74e39a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft.java @@ -46,8 +46,6 @@ *

    Along the axis {@code signal.Rfft} is computed on, if {@code fft_length} is smaller than the * corresponding dimension of {@code input}, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Rfft.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft2d.java index 6587b7378c1..314d16f4eec 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft2d.java @@ -47,8 +47,6 @@ *

    Along each axis {@code signal.Rfft2d} is computed on, if {@code fft_length} is smaller than the * corresponding dimension of {@code input}, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Rfft2d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft3d.java index 35746c0f93b..282c4b7386e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft3d.java @@ -47,8 +47,6 @@ *

    Along each axis {@code signal.Rfft3d} is computed on, if {@code fft_length} is smaller than the * corresponding dimension of {@code input}, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Rfft3d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/RfftNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/RfftNd.java index 85e48957ee4..17bf1368600 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/RfftNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/RfftNd.java @@ -47,8 +47,6 @@ * is not given, the default shape(input) is used. *

    Axes mean the dimensions to perform the transform on. Default is to perform on * all axes. - * - * @param data type for {@code output} output */ @OpMetadata( opType = RfftNd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/AddManySparseToTensorsMap.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/AddManySparseToTensorsMap.java index d0ad3bed95e..59d8449fa2e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/AddManySparseToTensorsMap.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/AddManySparseToTensorsMap.java @@ -208,12 +208,12 @@ public static class Inputs extends RawOpInputs { public final DataType T; /** - * The container name for the `SparseTensorsMap` created by this op. + * The container name for the {@code SparseTensorsMap} created by this op. */ public final String container; /** - * The shared name for the `SparseTensorsMap` created by this op. + * The shared name for the {@code SparseTensorsMap} created by this op. * If blank, the new Operation's unique name is used. */ public final String sharedName; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/AddSparseToTensorsMap.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/AddSparseToTensorsMap.java index c15afe711c9..ddfba840a21 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/AddSparseToTensorsMap.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/AddSparseToTensorsMap.java @@ -198,12 +198,12 @@ public static class Inputs extends RawOpInputs { public final DataType T; /** - * The container name for the `SparseTensorsMap` created by this op. + * The container name for the {@code SparseTensorsMap} created by this op. 
*/ public final String container; /** - * The shared name for the `SparseTensorsMap` created by this op. + * The shared name for the {@code SparseTensorsMap} created by this op. * If blank, the new Operation's unique name is used. */ public final String sharedName; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/ConvertToListOfSparseCoreCooTensors.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/ConvertToListOfSparseCoreCooTensors.java new file mode 100644 index 00000000000..7ed71c4c316 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/ConvertToListOfSparseCoreCooTensors.java @@ -0,0 +1,209 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.sparse; + +import java.util.Arrays; +import java.util.List; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; + +/** + * The ConvertToListOfSparseCoreCooTensors operation + */ +@OpMetadata( + opType = ConvertToListOfSparseCoreCooTensors.OP_NAME, + inputsClass = ConvertToListOfSparseCoreCooTensors.Inputs.class +) +@Operator( + group = "sparse" +) +public final class ConvertToListOfSparseCoreCooTensors extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "ConvertToListOfSparseCoreCooTensors"; + + private List> rowIdsList; + + private List> colIdsList; + + private List> gainsList; + + @SuppressWarnings("unchecked") + public ConvertToListOfSparseCoreCooTensors(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + int rowIdsListLength = operation.outputListLength("row_ids_list"); + rowIdsList = Arrays.asList((Output[]) operation.outputList(outputIdx, rowIdsListLength)); + outputIdx += rowIdsListLength; + int colIdsListLength = operation.outputListLength("col_ids_list"); + colIdsList = Arrays.asList((Output[]) operation.outputList(outputIdx, colIdsListLength)); + outputIdx += colIdsListLength; + int gainsListLength = operation.outputListLength("gains_list"); + gainsList = Arrays.asList((Output[]) operation.outputList(outputIdx, gainsListLength)); + outputIdx += gainsListLength; + } + + /** + * Factory method to create a class wrapping a new 
ConvertToListOfSparseCoreCooTensors operation. + * + * @param scope current scope + * @param indicesOrRowSplits The indicesOrRowSplits value + * @param values The values value + * @param weights The weights value + * @param sampleCount The value of the sampleCount attribute + * @param numScPerChip The value of the numScPerChip attribute + * @param rowOffset The value of the rowOffset attribute + * @param colOffset The value of the colOffset attribute + * @param colShift The value of the colShift attribute + * @param numScShards The value of the numScShards attribute + * @param stackedTableSampleCount The value of the stackedTableSampleCount attribute + * @param combiner The value of the combiner attribute + * @return a new instance of ConvertToListOfSparseCoreCooTensors + */ + @Endpoint( + describeByClass = true + ) + public static ConvertToListOfSparseCoreCooTensors create(Scope scope, + Operand indicesOrRowSplits, Operand values, Operand weights, + Long sampleCount, Long numScPerChip, Long rowOffset, Long colOffset, Long colShift, + Long numScShards, Long stackedTableSampleCount, String combiner) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "ConvertToListOfSparseCoreCooTensors"); + opBuilder.addInput(indicesOrRowSplits.asOutput()); + opBuilder.addInput(values.asOutput()); + opBuilder.addInput(weights.asOutput()); + opBuilder.setAttr("sample_count", sampleCount); + opBuilder.setAttr("num_sc_per_chip", numScPerChip); + opBuilder.setAttr("row_offset", rowOffset); + opBuilder.setAttr("col_offset", colOffset); + opBuilder.setAttr("col_shift", colShift); + opBuilder.setAttr("num_sc_shards", numScShards); + opBuilder.setAttr("stacked_table_sample_count", stackedTableSampleCount); + opBuilder.setAttr("combiner", combiner); + return new ConvertToListOfSparseCoreCooTensors(opBuilder.build()); + } + + /** + * Gets rowIdsList. + * + * @return rowIdsList. + */ + public List> rowIdsList() { + return rowIdsList; + } + + /** + * Gets colIdsList. 
+ * + * @return colIdsList. + */ + public List> colIdsList() { + return colIdsList; + } + + /** + * Gets gainsList. + * + * @return gainsList. + */ + public List> gainsList() { + return gainsList; + } + + @OpInputsMetadata( + outputsClass = ConvertToListOfSparseCoreCooTensors.class + ) + public static class Inputs extends RawOpInputs { + /** + * The indicesOrRowSplits input + */ + public final Operand indicesOrRowSplits; + + /** + * The values input + */ + public final Operand values; + + /** + * The weights input + */ + public final Operand weights; + + /** + * The sampleCount attribute + */ + public final long sampleCount; + + /** + * The rowOffset attribute + */ + public final long rowOffset; + + /** + * The colOffset attribute + */ + public final long colOffset; + + /** + * The colShift attribute + */ + public final long colShift; + + /** + * The numScShards attribute + */ + public final long numScShards; + + /** + * The stackedTableSampleCount attribute + */ + public final long stackedTableSampleCount; + + /** + * The combiner attribute + */ + public final String combiner; + + public Inputs(GraphOperation op) { + super(new ConvertToListOfSparseCoreCooTensors(op), op, Arrays.asList("sample_count", "row_offset", "col_offset", "col_shift", "num_sc_shards", "stacked_table_sample_count", "combiner")); + int inputIndex = 0; + indicesOrRowSplits = (Operand) op.input(inputIndex++); + values = (Operand) op.input(inputIndex++); + weights = (Operand) op.input(inputIndex++); + sampleCount = op.attributes().getAttrInt("sample_count"); + rowOffset = op.attributes().getAttrInt("row_offset"); + colOffset = op.attributes().getAttrInt("col_offset"); + colShift = op.attributes().getAttrInt("col_shift"); + numScShards = op.attributes().getAttrInt("num_sc_shards"); + stackedTableSampleCount = op.attributes().getAttrInt("stacked_table_sample_count"); + combiner = op.attributes().getAttrString("combiner"); + } + } +} diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/ConvertToSparseCoreCsrWrappedCooTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/ConvertToSparseCoreCsrWrappedCooTensor.java new file mode 100644 index 00000000000..6590a927699 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/ConvertToSparseCoreCsrWrappedCooTensor.java @@ -0,0 +1,283 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.sparse; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.TInt64; + +/** + * The ConvertToSparseCoreCsrWrappedCooTensor operation + */ +@OpMetadata( + opType = ConvertToSparseCoreCsrWrappedCooTensor.OP_NAME, + inputsClass = ConvertToSparseCoreCsrWrappedCooTensor.Inputs.class +) +@Operator( + group = "sparse" +) +public final class ConvertToSparseCoreCsrWrappedCooTensor extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "ConvertToSparseCoreCsrWrappedCooTensor"; + + private Output rowPointers; + + private Output sortedSampleIds; + + private Output sortedTokenIds; + + private Output sortedGains; + + private Output rowPointersUnpaddedSize; + + private Output idsUnpaddedSize; + + private Output numMinibatchesPerSc; + + public ConvertToSparseCoreCsrWrappedCooTensor(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + rowPointers = operation.output(outputIdx++); + sortedSampleIds = operation.output(outputIdx++); + sortedTokenIds = operation.output(outputIdx++); + sortedGains = operation.output(outputIdx++); + rowPointersUnpaddedSize = operation.output(outputIdx++); + idsUnpaddedSize = operation.output(outputIdx++); + numMinibatchesPerSc = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new 
ConvertToSparseCoreCsrWrappedCooTensor operation. + * + * @param scope current scope + * @param sortedRowIdsList The sortedRowIdsList value + * @param sortedColIdsList The sortedColIdsList value + * @param sortedGainsList The sortedGainsList value + * @param idCountsList The idCountsList value + * @param splits The splits value + * @param sampleCountPerSc The value of the sampleCountPerSc attribute + * @param numReplica The value of the numReplica attribute + * @param maxMinibatchesPerSc The value of the maxMinibatchesPerSc attribute + * @param maxIdsPerChipPerSample The value of the maxIdsPerChipPerSample attribute + * @param tableVocabSize The value of the tableVocabSize attribute + * @param featureWidth The value of the featureWidth attribute + * @param tableName The value of the tableName attribute + * @param allowIdDropping The value of the allowIdDropping attribute + * @return a new instance of ConvertToSparseCoreCsrWrappedCooTensor + */ + @Endpoint( + describeByClass = true + ) + public static ConvertToSparseCoreCsrWrappedCooTensor create(Scope scope, + Iterable> sortedRowIdsList, Iterable> sortedColIdsList, + Iterable> sortedGainsList, Iterable> idCountsList, + Operand splits, Long sampleCountPerSc, Long numReplica, Long maxMinibatchesPerSc, + Long maxIdsPerChipPerSample, Long tableVocabSize, Long featureWidth, String tableName, + Boolean allowIdDropping) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "ConvertToSparseCoreCsrWrappedCooTensor"); + opBuilder.addInputList(Operands.asOutputs(sortedRowIdsList)); + opBuilder.addInputList(Operands.asOutputs(sortedColIdsList)); + opBuilder.addInputList(Operands.asOutputs(sortedGainsList)); + opBuilder.addInputList(Operands.asOutputs(idCountsList)); + opBuilder.addInput(splits.asOutput()); + opBuilder.setAttr("sample_count_per_sc", sampleCountPerSc); + opBuilder.setAttr("num_replica", numReplica); + opBuilder.setAttr("max_minibatches_per_sc", maxMinibatchesPerSc); + 
opBuilder.setAttr("max_ids_per_chip_per_sample", maxIdsPerChipPerSample); + opBuilder.setAttr("table_vocab_size", tableVocabSize); + opBuilder.setAttr("feature_width", featureWidth); + opBuilder.setAttr("table_name", tableName); + opBuilder.setAttr("allow_id_dropping", allowIdDropping); + return new ConvertToSparseCoreCsrWrappedCooTensor(opBuilder.build()); + } + + /** + * Gets rowPointers. + * + * @return rowPointers. + */ + public Output rowPointers() { + return rowPointers; + } + + /** + * Gets sortedSampleIds. + * + * @return sortedSampleIds. + */ + public Output sortedSampleIds() { + return sortedSampleIds; + } + + /** + * Gets sortedTokenIds. + * + * @return sortedTokenIds. + */ + public Output sortedTokenIds() { + return sortedTokenIds; + } + + /** + * Gets sortedGains. + * + * @return sortedGains. + */ + public Output sortedGains() { + return sortedGains; + } + + /** + * Gets rowPointersUnpaddedSize. + * + * @return rowPointersUnpaddedSize. + */ + public Output rowPointersUnpaddedSize() { + return rowPointersUnpaddedSize; + } + + /** + * Gets idsUnpaddedSize. + * + * @return idsUnpaddedSize. + */ + public Output idsUnpaddedSize() { + return idsUnpaddedSize; + } + + /** + * Gets numMinibatchesPerSc. + * + * @return numMinibatchesPerSc. 
+ */ + public Output numMinibatchesPerSc() { + return numMinibatchesPerSc; + } + + @OpInputsMetadata( + outputsClass = ConvertToSparseCoreCsrWrappedCooTensor.class + ) + public static class Inputs extends RawOpInputs { + /** + * The sortedRowIdsList input + */ + public final Iterable> sortedRowIdsList; + + /** + * The sortedColIdsList input + */ + public final Iterable> sortedColIdsList; + + /** + * The sortedGainsList input + */ + public final Iterable> sortedGainsList; + + /** + * The idCountsList input + */ + public final Iterable> idCountsList; + + /** + * The splits input + */ + public final Operand splits; + + /** + * The sampleCountPerSc attribute + */ + public final long sampleCountPerSc; + + /** + * The numReplica attribute + */ + public final long numReplica; + + /** + * The maxMinibatchesPerSc attribute + */ + public final long maxMinibatchesPerSc; + + /** + * The maxIdsPerChipPerSample attribute + */ + public final long maxIdsPerChipPerSample; + + /** + * The tableVocabSize attribute + */ + public final long tableVocabSize; + + /** + * The featureWidth attribute + */ + public final long featureWidth; + + /** + * The tableName attribute + */ + public final String tableName; + + /** + * The allowIdDropping attribute + */ + public final boolean allowIdDropping; + + public Inputs(GraphOperation op) { + super(new ConvertToSparseCoreCsrWrappedCooTensor(op), op, Arrays.asList("sample_count_per_sc", "num_replica", "max_minibatches_per_sc", "max_ids_per_chip_per_sample", "table_vocab_size", "feature_width", "table_name", "allow_id_dropping")); + int inputIndex = 0; + int sortedRowIdsListLength = op.inputListLength("sorted_row_ids_list"); + sortedRowIdsList = Arrays.asList((Operand[]) op.inputList(inputIndex, sortedRowIdsListLength)); + inputIndex += sortedRowIdsListLength; + int sortedColIdsListLength = op.inputListLength("sorted_col_ids_list"); + sortedColIdsList = Arrays.asList((Operand[]) op.inputList(inputIndex, sortedColIdsListLength)); + inputIndex += 
sortedColIdsListLength; + int sortedGainsListLength = op.inputListLength("sorted_gains_list"); + sortedGainsList = Arrays.asList((Operand[]) op.inputList(inputIndex, sortedGainsListLength)); + inputIndex += sortedGainsListLength; + int idCountsListLength = op.inputListLength("id_counts_list"); + idCountsList = Arrays.asList((Operand[]) op.inputList(inputIndex, idCountsListLength)); + inputIndex += idCountsListLength; + splits = (Operand) op.input(inputIndex++); + sampleCountPerSc = op.attributes().getAttrInt("sample_count_per_sc"); + numReplica = op.attributes().getAttrInt("num_replica"); + maxMinibatchesPerSc = op.attributes().getAttrInt("max_minibatches_per_sc"); + maxIdsPerChipPerSample = op.attributes().getAttrInt("max_ids_per_chip_per_sample"); + tableVocabSize = op.attributes().getAttrInt("table_vocab_size"); + featureWidth = op.attributes().getAttrInt("feature_width"); + tableName = op.attributes().getAttrString("table_name"); + allowIdDropping = op.attributes().getAttrBool("allow_id_dropping"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseCountSparseOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseCountSparseOutput.java index 51c60674ed1..5cf78a2a0a6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseCountSparseOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseCountSparseOutput.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; @@ -36,13 +37,14 @@ /** * Performs sparse-output bin counting for a tf.tensor input. * Counts the number of times each value occurs in the input. 
- * - * @param data type for {@code output_values} output */ @OpMetadata( opType = DenseCountSparseOutput.OP_NAME, inputsClass = DenseCountSparseOutput.Inputs.class ) +@Operator( + group = "sparse" +) public final class DenseCountSparseOutput extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToDenseSetOperation.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToDenseSetOperation.java index 2ea6aa671d1..546adba1a9d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToDenseSetOperation.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToDenseSetOperation.java @@ -42,8 +42,6 @@ * has rank {@code n} and the same 1st {@code n-1} dimensions as {@code set1} and {@code set2}. The {@code nth} * dimension contains the result of {@code set_operation} applied to the corresponding * {@code [0...n-1]} dimension of {@code set}. - * - * @param data type for {@code result_values} output */ @OpMetadata( opType = DenseToDenseSetOperation.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToSparseSetOperation.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToSparseSetOperation.java index bb75893bfd4..1b8cbcaee50 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToSparseSetOperation.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToSparseSetOperation.java @@ -48,8 +48,6 @@ * has rank {@code n} and the same 1st {@code n-1} dimensions as {@code set1} and {@code set2}. The {@code nth} * dimension contains the result of {@code set_operation} applied to the corresponding * {@code [0...n-1]} dimension of {@code set}. 
- * - * @param data type for {@code result_values} output */ @OpMetadata( opType = DenseToSparseSetOperation.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DeserializeSparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DeserializeSparse.java index 851b3eb9856..ba0c51f9a1e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DeserializeSparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DeserializeSparse.java @@ -76,8 +76,6 @@ * values = [1, 2, 3, 4, 5] * shape = [2 50] * - * - * @param data type for {@code sparse_values} output */ @OpMetadata( opType = DeserializeSparse.OP_NAME, @@ -165,7 +163,7 @@ public static class Inputs extends RawOpInputs> { public final Operand serializedSparse; /** - * The `dtype` of the serialized `SparseTensor` objects. + * The {@code dtype} of the serialized {@code SparseTensor} objects. */ public final DataType dtype; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/GetStatsFromListOfSparseCoreCooTensors.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/GetStatsFromListOfSparseCoreCooTensors.java new file mode 100644 index 00000000000..51f5c33d66b --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/GetStatsFromListOfSparseCoreCooTensors.java @@ -0,0 +1,204 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.sparse; + +import java.util.Arrays; +import java.util.List; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; + +/** + * The GetStatsFromListOfSparseCoreCooTensors operation + */ +@OpMetadata( + opType = GetStatsFromListOfSparseCoreCooTensors.OP_NAME, + inputsClass = GetStatsFromListOfSparseCoreCooTensors.Inputs.class +) +@Operator( + group = "sparse" +) +public final class GetStatsFromListOfSparseCoreCooTensors extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "GetStatsFromListOfSparseCoreCooTensors"; + + private Output maxIdsPerSparseCore; + + private Output maxUniqueIdsPerSparseCore; + + public GetStatsFromListOfSparseCoreCooTensors(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + maxIdsPerSparseCore = operation.output(outputIdx++); + maxUniqueIdsPerSparseCore = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new GetStatsFromListOfSparseCoreCooTensors operation. 
+ * + * @param scope current scope + * @param rowIdsList The rowIdsList value + * @param colIdsList The colIdsList value + * @param gainsList The gainsList value + * @param sampleCountList The value of the sampleCountList attribute + * @param colOffsetList The value of the colOffsetList attribute + * @param numReplica The value of the numReplica attribute + * @param tableVocabSize The value of the tableVocabSize attribute + * @param featureWidth The value of the featureWidth attribute + * @param numScPerChip The value of the numScPerChip attribute + * @param tableName The value of the tableName attribute + * @return a new instance of GetStatsFromListOfSparseCoreCooTensors + */ + @Endpoint( + describeByClass = true + ) + public static GetStatsFromListOfSparseCoreCooTensors create(Scope scope, + Iterable> rowIdsList, Iterable> colIdsList, + Iterable> gainsList, List sampleCountList, List colOffsetList, + Long numReplica, Long tableVocabSize, Long featureWidth, Long numScPerChip, + String tableName) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "GetStatsFromListOfSparseCoreCooTensors"); + opBuilder.addInputList(Operands.asOutputs(rowIdsList)); + opBuilder.addInputList(Operands.asOutputs(colIdsList)); + opBuilder.addInputList(Operands.asOutputs(gainsList)); + long[] sampleCountListArray = new long[sampleCountList.size()]; + for (int i = 0 ; i < sampleCountListArray.length ; i++) { + sampleCountListArray[i] = sampleCountList.get(i); + } + opBuilder.setAttr("sample_count_list", sampleCountListArray); + long[] colOffsetListArray = new long[colOffsetList.size()]; + for (int i = 0 ; i < colOffsetListArray.length ; i++) { + colOffsetListArray[i] = colOffsetList.get(i); + } + opBuilder.setAttr("col_offset_list", colOffsetListArray); + opBuilder.setAttr("num_replica", numReplica); + opBuilder.setAttr("table_vocab_size", tableVocabSize); + opBuilder.setAttr("feature_width", featureWidth); + opBuilder.setAttr("num_sc_per_chip", numScPerChip); + 
opBuilder.setAttr("table_name", tableName); + return new GetStatsFromListOfSparseCoreCooTensors(opBuilder.build()); + } + + /** + * Gets maxIdsPerSparseCore. + * + * @return maxIdsPerSparseCore. + */ + public Output maxIdsPerSparseCore() { + return maxIdsPerSparseCore; + } + + /** + * Gets maxUniqueIdsPerSparseCore. + * + * @return maxUniqueIdsPerSparseCore. + */ + public Output maxUniqueIdsPerSparseCore() { + return maxUniqueIdsPerSparseCore; + } + + @OpInputsMetadata( + outputsClass = GetStatsFromListOfSparseCoreCooTensors.class + ) + public static class Inputs extends RawOpInputs { + /** + * The rowIdsList input + */ + public final Iterable> rowIdsList; + + /** + * The colIdsList input + */ + public final Iterable> colIdsList; + + /** + * The gainsList input + */ + public final Iterable> gainsList; + + /** + * The sampleCountList attribute + */ + public final long[] sampleCountList; + + /** + * The colOffsetList attribute + */ + public final long[] colOffsetList; + + /** + * The numReplica attribute + */ + public final long numReplica; + + /** + * The tableVocabSize attribute + */ + public final long tableVocabSize; + + /** + * The featureWidth attribute + */ + public final long featureWidth; + + /** + * The numScPerChip attribute + */ + public final long numScPerChip; + + /** + * The tableName attribute + */ + public final String tableName; + + public Inputs(GraphOperation op) { + super(new GetStatsFromListOfSparseCoreCooTensors(op), op, Arrays.asList("sample_count_list", "col_offset_list", "num_replica", "table_vocab_size", "feature_width", "num_sc_per_chip", "table_name")); + int inputIndex = 0; + int rowIdsListLength = op.inputListLength("row_ids_list"); + rowIdsList = Arrays.asList((Operand[]) op.inputList(inputIndex, rowIdsListLength)); + inputIndex += rowIdsListLength; + int colIdsListLength = op.inputListLength("col_ids_list"); + colIdsList = Arrays.asList((Operand[]) op.inputList(inputIndex, colIdsListLength)); + inputIndex += colIdsListLength; + int 
gainsListLength = op.inputListLength("gains_list"); + gainsList = Arrays.asList((Operand[]) op.inputList(inputIndex, gainsListLength)); + inputIndex += gainsListLength; + sampleCountList = op.attributes().getAttrIntList("sample_count_list"); + colOffsetList = op.attributes().getAttrIntList("col_offset_list"); + numReplica = op.attributes().getAttrInt("num_replica"); + tableVocabSize = op.attributes().getAttrInt("table_vocab_size"); + featureWidth = op.attributes().getAttrInt("feature_width"); + numScPerChip = op.attributes().getAttrInt("num_sc_per_chip"); + tableName = op.attributes().getAttrString("table_name"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SortListOfSparseCoreCooTensors.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SortListOfSparseCoreCooTensors.java new file mode 100644 index 00000000000..fb26033cfd2 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SortListOfSparseCoreCooTensors.java @@ -0,0 +1,240 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.sparse; + +import java.util.Arrays; +import java.util.List; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; + +/** + * The SortListOfSparseCoreCooTensors operation + */ +@OpMetadata( + opType = SortListOfSparseCoreCooTensors.OP_NAME, + inputsClass = SortListOfSparseCoreCooTensors.Inputs.class +) +public final class SortListOfSparseCoreCooTensors extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "SortListOfSparseCoreCooTensors"; + + private Output sortedRowIds; + + private Output sortedColIds; + + private Output sortedGains; + + private Output idCounts; + + public SortListOfSparseCoreCooTensors(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + sortedRowIds = operation.output(outputIdx++); + sortedColIds = operation.output(outputIdx++); + sortedGains = operation.output(outputIdx++); + idCounts = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new SortListOfSparseCoreCooTensors operation. 
+ * + * @param scope current scope + * @param rowIdsList The rowIdsList value + * @param colIdsList The colIdsList value + * @param gainsList The gainsList value + * @param sampleCountList The value of the sampleCountList attribute + * @param colOffsetList The value of the colOffsetList attribute + * @param numReplica The value of the numReplica attribute + * @param tableVocabSize The value of the tableVocabSize attribute + * @param featureWidth The value of the featureWidth attribute + * @param numScPerChip The value of the numScPerChip attribute + * @param maxIdsPerSparseCore The value of the maxIdsPerSparseCore attribute + * @param maxUniqueIdsPerSparseCore The value of the maxUniqueIdsPerSparseCore attribute + * @param tableName The value of the tableName attribute + * @return a new instance of SortListOfSparseCoreCooTensors + */ + @Endpoint( + describeByClass = true + ) + public static SortListOfSparseCoreCooTensors create(Scope scope, + Iterable> rowIdsList, Iterable> colIdsList, + Iterable> gainsList, List sampleCountList, List colOffsetList, + Long numReplica, Long tableVocabSize, Long featureWidth, Long numScPerChip, + Long maxIdsPerSparseCore, Long maxUniqueIdsPerSparseCore, String tableName) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "SortListOfSparseCoreCooTensors"); + opBuilder.addInputList(Operands.asOutputs(rowIdsList)); + opBuilder.addInputList(Operands.asOutputs(colIdsList)); + opBuilder.addInputList(Operands.asOutputs(gainsList)); + long[] sampleCountListArray = new long[sampleCountList.size()]; + for (int i = 0 ; i < sampleCountListArray.length ; i++) { + sampleCountListArray[i] = sampleCountList.get(i); + } + opBuilder.setAttr("sample_count_list", sampleCountListArray); + long[] colOffsetListArray = new long[colOffsetList.size()]; + for (int i = 0 ; i < colOffsetListArray.length ; i++) { + colOffsetListArray[i] = colOffsetList.get(i); + } + opBuilder.setAttr("col_offset_list", colOffsetListArray); + 
opBuilder.setAttr("num_replica", numReplica); + opBuilder.setAttr("table_vocab_size", tableVocabSize); + opBuilder.setAttr("feature_width", featureWidth); + opBuilder.setAttr("num_sc_per_chip", numScPerChip); + opBuilder.setAttr("max_ids_per_sparse_core", maxIdsPerSparseCore); + opBuilder.setAttr("max_unique_ids_per_sparse_core", maxUniqueIdsPerSparseCore); + opBuilder.setAttr("table_name", tableName); + return new SortListOfSparseCoreCooTensors(opBuilder.build()); + } + + /** + * Gets sortedRowIds. + * + * @return sortedRowIds. + */ + public Output sortedRowIds() { + return sortedRowIds; + } + + /** + * Gets sortedColIds. + * + * @return sortedColIds. + */ + public Output sortedColIds() { + return sortedColIds; + } + + /** + * Gets sortedGains. + * + * @return sortedGains. + */ + public Output sortedGains() { + return sortedGains; + } + + /** + * Gets idCounts. + * + * @return idCounts. + */ + public Output idCounts() { + return idCounts; + } + + @OpInputsMetadata( + outputsClass = SortListOfSparseCoreCooTensors.class + ) + public static class Inputs extends RawOpInputs { + /** + * The rowIdsList input + */ + public final Iterable> rowIdsList; + + /** + * The colIdsList input + */ + public final Iterable> colIdsList; + + /** + * The gainsList input + */ + public final Iterable> gainsList; + + /** + * The sampleCountList attribute + */ + public final long[] sampleCountList; + + /** + * The colOffsetList attribute + */ + public final long[] colOffsetList; + + /** + * The numReplica attribute + */ + public final long numReplica; + + /** + * The tableVocabSize attribute + */ + public final long tableVocabSize; + + /** + * The featureWidth attribute + */ + public final long featureWidth; + + /** + * The numScPerChip attribute + */ + public final long numScPerChip; + + /** + * The maxIdsPerSparseCore attribute + */ + public final long maxIdsPerSparseCore; + + /** + * The maxUniqueIdsPerSparseCore attribute + */ + public final long maxUniqueIdsPerSparseCore; + + /** + * 
The tableName attribute + */ + public final String tableName; + + public Inputs(GraphOperation op) { + super(new SortListOfSparseCoreCooTensors(op), op, Arrays.asList("sample_count_list", "col_offset_list", "num_replica", "table_vocab_size", "feature_width", "num_sc_per_chip", "max_ids_per_sparse_core", "max_unique_ids_per_sparse_core", "table_name")); + int inputIndex = 0; + int rowIdsListLength = op.inputListLength("row_ids_list"); + rowIdsList = Arrays.asList((Operand[]) op.inputList(inputIndex, rowIdsListLength)); + inputIndex += rowIdsListLength; + int colIdsListLength = op.inputListLength("col_ids_list"); + colIdsList = Arrays.asList((Operand[]) op.inputList(inputIndex, colIdsListLength)); + inputIndex += colIdsListLength; + int gainsListLength = op.inputListLength("gains_list"); + gainsList = Arrays.asList((Operand[]) op.inputList(inputIndex, gainsListLength)); + inputIndex += gainsListLength; + sampleCountList = op.attributes().getAttrIntList("sample_count_list"); + colOffsetList = op.attributes().getAttrIntList("col_offset_list"); + numReplica = op.attributes().getAttrInt("num_replica"); + tableVocabSize = op.attributes().getAttrInt("table_vocab_size"); + featureWidth = op.attributes().getAttrInt("feature_width"); + numScPerChip = op.attributes().getAttrInt("num_sc_per_chip"); + maxIdsPerSparseCore = op.attributes().getAttrInt("max_ids_per_sparse_core"); + maxUniqueIdsPerSparseCore = op.attributes().getAttrInt("max_unique_ids_per_sparse_core"); + tableName = op.attributes().getAttrString("table_name"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAccumulatorTakeGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAccumulatorTakeGradient.java index aeb639d2d6e..fb8a868349d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAccumulatorTakeGradient.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAccumulatorTakeGradient.java @@ -45,8 +45,6 @@ * average of the accumulated gradients. Also automatically increments * the recorded global_step in the accumulator by 1, and resets the * aggregate to 0. - * - * @param data type for {@code values} output */ @OpMetadata( opType = SparseAccumulatorTakeGradient.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAdd.java index 1591773a20c..88ef61b78a1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAdd.java @@ -48,8 +48,6 @@ * {@code thresh == 0} (default) means everything is kept and actual thresholding happens * only for a positive value. *

    In the following shapes, {@code nnz} is the count after taking {@code thresh} into account. - * - * @param data type for {@code sum_values} output */ @OpMetadata( opType = SparseAdd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAddGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAddGrad.java index 7d6c0923f4f..8a844c96eff 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAddGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAddGrad.java @@ -40,8 +40,6 @@ * as {@code SparseTensor} objects. This op takes in the upstream gradient w.r.t. * non-empty values of the sum, and outputs the gradients w.r.t. the non-empty * values of A and B. - * - * @param data type for {@code a_val_grad} output */ @OpMetadata( opType = SparseAddGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseBincount.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseBincount.java index b7414e4ab54..9eca1295d45 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseBincount.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseBincount.java @@ -42,8 +42,6 @@ * the value in {@code weights} at each index where the corresponding value in {@code arr} is * {@code i}. *

    Values in {@code arr} outside of the range [0, size) are ignored. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseBincount.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseConcat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseConcat.java index 2213a990292..016f010647b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseConcat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseConcat.java @@ -74,8 +74,6 @@ * [ a] concat [ d e ] = [ a d e ] * [b c ] [ ] [b c ] * - * - * @param data type for {@code output_values} output */ @OpMetadata( opType = SparseConcat.OP_NAME, @@ -178,7 +176,7 @@ public static class Inputs extends RawOpInputs> /** * Dimension to concatenate along. Must be in range [-rank, rank), - * where rank is the number of dimensions in each input `SparseTensor`. + * where rank is the number of dimensions in each input {@code SparseTensor}. 
*/ public final long concatDim; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCountSparseOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCountSparseOutput.java index 5e8ec528202..4c59b4e2774 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCountSparseOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCountSparseOutput.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; @@ -36,13 +37,14 @@ /** * Performs sparse-output bin counting for a sparse tensor input. * Counts the number of times each value occurs in the input. - * - * @param data type for {@code output_values} output */ @OpMetadata( opType = SparseCountSparseOutput.OP_NAME, inputsClass = SparseCountSparseOutput.Inputs.class ) +@Operator( + group = "sparse" +) public final class SparseCountSparseOutput extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseAdd.java index 261d292d3b0..10ac8721d98 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseAdd.java @@ -43,8 +43,6 @@ *

    By these rules, the result is a logical SparseTensor with exactly the same * indices and shape, but possibly with different non-zero values. The output of * this Op is the resultant non-zero values. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseDenseCwiseAdd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseDiv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseDiv.java index e0b56d6827c..724997892b1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseDiv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseDiv.java @@ -38,8 +38,6 @@ * Component-wise divides a SparseTensor by a dense Tensor. * Limitation: this Op only broadcasts the dense side to the sparse side, but not * the other direction. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseDenseCwiseDiv.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseMul.java index 3fb7a03c683..fe8386f0838 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseMul.java @@ -41,8 +41,6 @@ * contents of the dense tensor (even if it's +/-INF and that INF*0 == NaN). *

    Limitation: this Op only broadcasts the dense side to the sparse side, but not * the other direction. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseDenseCwiseMul.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRows.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRows.java index 989fda03492..ef0d2f85afa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRows.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRows.java @@ -71,8 +71,6 @@ *

      * reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]
      * 
    - * - * @param data type for {@code output_values} output */ @OpMetadata( opType = SparseFillEmptyRows.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRowsGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRowsGrad.java index 21d4e2f099f..3b1c80bb5b1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRowsGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRowsGrad.java @@ -43,8 +43,6 @@ *

    d_values[j] = grad_values[reverse_index_map[j]] * d_default_value = sum_{k : 0 .. N_full - 1} ( * grad_values[k] * 1{k not in reverse_index_map}) - * - * @param data type for {@code d_values} output */ @OpMetadata( opType = SparseFillEmptyRowsGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMax.java index 1e48a53ea82..256695f0acd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMax.java @@ -47,8 +47,6 @@ *

    If {@code reduction_axes} has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseReduceMax.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMaxSparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMaxSparse.java index 8f337f0c19e..b0a65daea67 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMaxSparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMaxSparse.java @@ -47,8 +47,6 @@ *

    If {@code reduction_axes} has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. - * - * @param data type for {@code output_values} output */ @OpMetadata( opType = SparseReduceMaxSparse.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSum.java index 26e0ecbfc45..3589487bece 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSum.java @@ -47,8 +47,6 @@ *

    If {@code reduction_axes} has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseReduceSum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSumSparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSumSparse.java index bb434694ccf..ef58eac0af1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSumSparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSumSparse.java @@ -47,8 +47,6 @@ *

    If {@code reduction_axes} has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. - * - * @param data type for {@code output_values} output */ @OpMetadata( opType = SparseReduceSumSparse.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReorder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReorder.java index 9e963285d77..4e2883435f9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReorder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReorder.java @@ -42,8 +42,6 @@ *

    Reordering does not affect the shape of the SparseTensor. *

    If the tensor has rank {@code R} and {@code N} non-empty values, {@code input_indices} has * shape {@code [N, R]}, input_values has length {@code N}, and input_shape has length {@code R}. - * - * @param data type for {@code output_values} output */ @OpMetadata( opType = SparseReorder.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMean.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMean.java index c1899b2fbf6..4703ba10fca 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMean.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMean.java @@ -38,8 +38,6 @@ * See {@code tf.sparse.segment_sum} for usage examples. *

    Like {@code SegmentMean}, but {@code segment_ids} can have rank less than {@code data}'s first * dimension, selecting a subset of dimension 0, specified by {@code indices}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseSegmentMean.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanGrad.java index 50f29512a23..9da8038eee9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanGrad.java @@ -39,10 +39,6 @@ * Returns tensor "output" with same shape as grad, except for dimension 0 whose * value is the number of unique indexes in "indices". Also returns vector * "sorted_unique_indices" containing the corresponding indexes from "indices". - * - * @param data type for {@code output} output - * - * @param data type for {@code sorted_unique_indices} output */ @OpMetadata( opType = SparseSegmentMeanGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanWithNumSegments.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanWithNumSegments.java index d1c0e07c099..99cf33231a5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanWithNumSegments.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanWithNumSegments.java @@ -40,8 +40,6 @@ *

    Read * the section on segmentation * for an explanation of segments. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseSegmentMeanWithNumSegments.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtN.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtN.java index ee0dc4238fc..5e299d7d124 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtN.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtN.java @@ -37,8 +37,6 @@ * Computes the sum along sparse segments of a tensor divided by the sqrt of N. * N is the size of the segment being reduced. *

    See {@code tf.sparse.segment_sum} for usage examples. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseSegmentSqrtN.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNGrad.java index 075cbacbcfb..b458c7daff9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNGrad.java @@ -39,10 +39,6 @@ * Returns tensor "output" with same shape as grad, except for dimension 0 whose * value is the number of unique indexes in "indices". Also returns vector * "sorted_unique_indices" containing the corresponding indexes from "indices". - * - * @param data type for {@code output} output - * - * @param data type for {@code sorted_unique_indices} output */ @OpMetadata( opType = SparseSegmentSqrtNGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNWithNumSegments.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNWithNumSegments.java index 84ccc501312..146dd696d6e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNWithNumSegments.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNWithNumSegments.java @@ -41,8 +41,6 @@ *

    Read * the section on segmentation * for an explanation of segments. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseSegmentSqrtNWithNumSegments.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSum.java index cf2ce2c9851..2f28386d05c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSum.java @@ -61,8 +61,6 @@ * # Which is equivalent to: * tf.segment_sum(c, tf.constant([0, 0, 1])) * - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseSegmentSum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumGrad.java index 71b8f92448e..1372d6f7089 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumGrad.java @@ -39,10 +39,6 @@ * Returns tensor "output" with same shape as grad, except for dimension 0 whose * value is the number of unique indexes in "indices". Also returns vector * "sorted_unique_indices" containing the corresponding indexes from "indices". 
- * - * @param data type for {@code output} output - * - * @param data type for {@code sorted_unique_indices} output */ @OpMetadata( opType = SparseSegmentSumGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumWithNumSegments.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumWithNumSegments.java index 4c44377244d..88b577afec1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumWithNumSegments.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumWithNumSegments.java @@ -59,8 +59,6 @@ * # [-1 -2 -3 -4] * # [ 0 0 0 0]] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseSegmentSumWithNumSegments.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSlice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSlice.java index 58c794dfb2f..a3718f1a7e0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSlice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSlice.java @@ -52,8 +52,6 @@ * [ d e ] * [ ] * - * - * @param data type for {@code output_values} output */ @OpMetadata( opType = SparseSlice.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSliceGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSliceGrad.java index 4cfa41a7e45..969ef935dc7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSliceGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSliceGrad.java @@ -39,8 +39,6 @@ * This op takes in the upstream gradient w.r.t. 
non-empty values of * the sliced {@code SparseTensor}, and outputs the gradients w.r.t. * the non-empty values of input {@code SparseTensor}. - * - * @param data type for {@code val_grad} output */ @OpMetadata( opType = SparseSliceGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSoftmax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSoftmax.java index be61533da26..43cd85b5a9f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSoftmax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSoftmax.java @@ -48,8 +48,6 @@ * (3) Renormalizes the remaining elements. *

    Hence, the {@code SparseTensor} result has exactly the same non-zero indices and * shape. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseSoftmax.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMaximum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMaximum.java index 22a1d407274..80b44623ca8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMaximum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMaximum.java @@ -37,8 +37,6 @@ /** * Returns the element-wise max of two SparseTensors. * Assumes the two SparseTensors have the same shape, i.e., no broadcasting. - * - * @param data type for {@code output_values} output */ @OpMetadata( opType = SparseSparseMaximum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMinimum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMinimum.java index 8dd8978c627..ecbc022d09d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMinimum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMinimum.java @@ -37,8 +37,6 @@ /** * Returns the element-wise min of two SparseTensors. * Assumes the two SparseTensors have the same shape, i.e., no broadcasting. 
- * - * @param data type for {@code output_values} output */ @OpMetadata( opType = SparseSparseMinimum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSplit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSplit.java index a09e9ff9d38..da66d34d134 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSplit.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSplit.java @@ -55,8 +55,6 @@ * [ d e ] * [ ] * - * - * @param data type for {@code output_values} output */ @OpMetadata( opType = SparseSplit.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseAdd.java index c153cf68776..7f73769030b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseAdd.java @@ -37,8 +37,6 @@ /** * Adds up a {@code SparseTensor} and a dense {@code Tensor}, producing a dense {@code Tensor}. * This Op does not require {@code a_indices} be sorted in standard lexicographic order. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseTensorDenseAdd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseMatMul.java index 346c9297596..0425354268c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseMatMul.java @@ -45,8 +45,6 @@ * if adjoint_a == true: * A should be sorted in order of increasing dimension 1 (i.e., "column major" * order instead of "row major" order). - * - * @param data type for {@code product} output */ @OpMetadata( opType = SparseTensorDenseMatMul.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToDense.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToDense.java index 95c8f189d48..448a7c4ec83 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToDense.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToDense.java @@ -52,8 +52,6 @@ *

    Indices should be sorted in lexicographic order, and indices must not * contain any repeats. If {@code validate_indices} is true, these properties * are checked during execution. - * - * @param data type for {@code dense} output */ @OpMetadata( opType = SparseToDense.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToSparseSetOperation.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToSparseSetOperation.java index 8a71016a669..e658f88abb7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToSparseSetOperation.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToSparseSetOperation.java @@ -54,8 +54,6 @@ * has rank {@code n} and the same 1st {@code n-1} dimensions as {@code set1} and {@code set2}. The {@code nth} * dimension contains the result of {@code set_operation} applied to the corresponding * {@code [0...n-1]} dimension of {@code set}. 
- * - * @param data type for {@code result_values} output */ @OpMetadata( opType = SparseToSparseSetOperation.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/TakeManySparseFromTensorsMap.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/TakeManySparseFromTensorsMap.java index 7db6538b380..2c6293f402d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/TakeManySparseFromTensorsMap.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/TakeManySparseFromTensorsMap.java @@ -77,8 +77,6 @@ * values = [1, 2, 3, 4, 5] * shape = [2 50] * - * - * @param data type for {@code sparse_values} output */ @OpMetadata( opType = TakeManySparseFromTensorsMap.OP_NAME, @@ -236,20 +234,20 @@ public static class Inputs extends RawOpInputs> public final Operand sparseHandles; /** - * The `dtype` of the `SparseTensor` objects stored in the - * `SparseTensorsMap`. + * The {@code dtype} of the {@code SparseTensor} objects stored in the + * {@code SparseTensorsMap}. */ public final DataType dtype; /** - * The container name for the `SparseTensorsMap` read by this op. + * The container name for the {@code SparseTensorsMap} read by this op. */ public final String container; /** - * The shared name for the `SparseTensorsMap` read by this op. - * It should not be blank; rather the `shared_name` or unique Operation name - * of the Op that created the original `SparseTensorsMap` should be used. + * The shared name for the {@code SparseTensorsMap} read by this op. + * It should not be blank; rather the {@code shared_name} or unique Operation name + * of the Op that created the original {@code SparseTensorsMap} should be used. 
*/ public final String sharedName; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Lower.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Lower.java index f2571768b5c..62c18ce5098 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Lower.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Lower.java @@ -146,7 +146,7 @@ public static class Inputs extends RawOpInputs { public final Operand input; /** - * Character encoding of `input`. Allowed values are '' and 'utf-8'. + * Character encoding of {@code input}. Allowed values are '' and 'utf-8'. * Value '' is interpreted as ASCII. */ public final String encoding; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ReduceJoin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ReduceJoin.java index c3315f28064..5bbc3e772ef 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ReduceJoin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ReduceJoin.java @@ -196,7 +196,7 @@ public static class Inputs extends RawOpInputs { public final Operand reductionIndices; /** - * If `True`, retain reduced dimensions with length `1`. + * If {@code True}, retain reduced dimensions with length {@code 1}. 
*/ public final boolean keepDims; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/RegexReplace.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/RegexReplace.java index af9bad8df72..bec4a4a7219 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/RegexReplace.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/RegexReplace.java @@ -158,9 +158,9 @@ public static class Inputs extends RawOpInputs { public final Operand rewrite; /** - * If True, the replacement is global (that is, all matches of the `pattern` regular - * expression in each input string are rewritten), otherwise the `rewrite` - * substitution is only made for the first `pattern` match. + * If True, the replacement is global (that is, all matches of the {@code pattern} regular + * expression in each input string are rewritten), otherwise the {@code rewrite} + * substitution is only made for the first {@code pattern} match. 
*/ public final boolean replaceGlobal; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StaticRegexFullMatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StaticRegexFullMatch.java index c4692670796..8ba7591ee79 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StaticRegexFullMatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StaticRegexFullMatch.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TBool; import org.tensorflow.types.TString; @@ -44,6 +45,9 @@ opType = StaticRegexFullMatch.OP_NAME, inputsClass = StaticRegexFullMatch.Inputs.class ) +@Operator( + group = "strings" +) public final class StaticRegexFullMatch extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StaticRegexReplace.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StaticRegexReplace.java index a43d580e2be..a7a156a731f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StaticRegexReplace.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StaticRegexReplace.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; /** @@ -39,6 +40,9 @@ opType = StaticRegexReplace.OP_NAME, inputsClass = StaticRegexReplace.Inputs.class ) +@Operator( + group = "strings" +) public final class StaticRegexReplace extends RawOp implements Operand { 
/** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringLength.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringLength.java index 1e720453925..0a1ee1dff0d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringLength.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringLength.java @@ -157,10 +157,10 @@ public static class Inputs extends RawOpInputs { public final Operand input; /** - * The unit that is counted to compute string length. One of: `"BYTE"` (for - * the number of bytes in each string) or `"UTF8_CHAR"` (for the number of UTF-8 + * The unit that is counted to compute string length. One of: {@code "BYTE"} (for + * the number of bytes in each string) or {@code "UTF8_CHAR"} (for the number of UTF-8 * encoded Unicode code points in each string). Results are undefined - * if `unit=UTF8_CHAR` and the `input` strings do not contain structurally + * if {@code unit=UTF8_CHAR} and the {@code input} strings do not contain structurally * valid UTF-8. */ public final String unit; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringNGrams.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringNGrams.java index ac5c914bdb1..c04fa6cd987 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringNGrams.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringNGrams.java @@ -40,8 +40,6 @@ * This op accepts a ragged tensor with 1 ragged dimension containing only * strings and outputs a ragged tensor with 1 ragged dimension containing ngrams * of that string, joined along the innermost axis. 
- * - * @param data type for {@code ngrams_splits} output */ @OpMetadata( opType = StringNGrams.OP_NAME, @@ -144,7 +142,7 @@ public static class Inputs extends RawOpInputs dataSplits; /** - * The string to append between elements of the token. Use "" for no separator. + * The string to append between elements of the token. Use "" for no separator. */ public final String separator; @@ -168,7 +166,7 @@ public static class Inputs extends RawOpInputs { public final Operand sep; /** - * An `int`. If `maxsplit > 0`, limit of the split of the result. + * An {@code int}. If {@code maxsplit > 0}, limit of the split of the result. */ public final long maxsplit; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Substr.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Substr.java index 536a8b7f9c2..868f2bc239b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Substr.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Substr.java @@ -233,10 +233,10 @@ public static class Inputs extends RawOpInputs { public final DataType T; /** - * The unit that is used to create the substring. One of: `"BYTE"` (for - * defining position and length by bytes) or `"UTF8_CHAR"` (for the UTF-8 - * encoded Unicode code points). The default is `"BYTE"`. Results are undefined if - * `unit=UTF8_CHAR` and the `input` strings do not contain structurally valid + * The unit that is used to create the substring. One of: {@code "BYTE"} (for + * defining position and length by bytes) or {@code "UTF8_CHAR"} (for the UTF-8 + * encoded Unicode code points). The default is {@code "BYTE"}. Results are undefined if + * {@code unit=UTF8_CHAR} and the {@code input} strings do not contain structurally valid * UTF-8. 
*/ public final String unit; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToNumber.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToNumber.java index c1032a598a7..74e4816ed43 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToNumber.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToNumber.java @@ -50,8 +50,6 @@ * * * - * - * @param data type for {@code output} output */ @OpMetadata( opType = ToNumber.OP_NAME, @@ -132,7 +130,7 @@ public static class Inputs extends RawOpInputs> { public final Operand stringTensor; /** - * The numeric type to interpret each string in `string_tensor` as. + * The numeric type to interpret each string in {@code string_tensor} as. */ public final DataType outType; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecode.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecode.java index b11541057cf..40624c66adf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecode.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecode.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; @@ -51,13 +52,14 @@ *

  • {@code row_splits[i+1] - row_splits[i]} is the number of characters in the {@code i}th * string (in row-major order).
  • * - * - * @param data type for {@code row_splits} output */ @OpMetadata( opType = UnicodeDecode.OP_NAME, inputsClass = UnicodeDecode.Inputs.class ) +@Operator( + group = "strings" +) public final class UnicodeDecode extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -259,7 +261,7 @@ public static class Inputs extends RawOpInputs> { /** * Text encoding of the input strings. This is any of the encodings supported - * by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`. + * by ICU ucnv algorithmic converters. Examples: {@code "UTF-16", "US ASCII", "UTF-8"}. */ public final String inputEncoding; @@ -268,7 +270,7 @@ public static class Inputs extends RawOpInputs> { * The value of 'strict' will cause the operation to produce a InvalidArgument * error on any invalid input formatting. A value of 'replace' (the default) will * cause the operation to replace any invalid formatting in the input with the - * `replacement_char` codepoint. A value of 'ignore' will cause the operation to + * {@code replacement_char} codepoint. A value of 'ignore' will cause the operation to * skip any invalid formatting in the input and produce no corresponding output * character. */ @@ -276,7 +278,7 @@ public static class Inputs extends RawOpInputs> { /** * The replacement character codepoint to be used in place of any invalid - * formatting in the input when `errors='replace'`. Any valid unicode codepoint may + * formatting in the input when {@code errors='replace'}. Any valid unicode codepoint may * be used. The default value is the default unicode replacement character is * 0xFFFD or U+65533.) */ @@ -284,7 +286,7 @@ public static class Inputs extends RawOpInputs> { /** * Whether to replace the C0 control characters (00-1F) with the - * `replacement_char`. Default is false. + * {@code replacement_char}. Default is false. 
*/ public final boolean replaceControlCharacters; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecodeWithOffsets.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecodeWithOffsets.java index a90e55171de..5989e8e7106 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecodeWithOffsets.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecodeWithOffsets.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; @@ -55,13 +56,14 @@ *
  • {@code row_splits[i+1] - row_splits[i]} is the number of characters in the {@code i}th * string (in row-major order).
  • * - * - * @param data type for {@code row_splits} output */ @OpMetadata( opType = UnicodeDecodeWithOffsets.OP_NAME, inputsClass = UnicodeDecodeWithOffsets.Inputs.class ) +@Operator( + group = "strings" +) public final class UnicodeDecodeWithOffsets extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -276,7 +278,7 @@ public static class Inputs extends RawOpInputs> { /** * Text encoding of the input strings. This is any of the encodings supported - * by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`. + * by ICU ucnv algorithmic converters. Examples: {@code "UTF-16", "US ASCII", "UTF-8"}. */ public final String inputEncoding; @@ -285,7 +287,7 @@ public static class Inputs extends RawOpInputs> { * The value of 'strict' will cause the operation to produce a InvalidArgument * error on any invalid input formatting. A value of 'replace' (the default) will * cause the operation to replace any invalid formatting in the input with the - * `replacement_char` codepoint. A value of 'ignore' will cause the operation to + * {@code replacement_char} codepoint. A value of 'ignore' will cause the operation to * skip any invalid formatting in the input and produce no corresponding output * character. */ @@ -293,7 +295,7 @@ public static class Inputs extends RawOpInputs> { /** * The replacement character codepoint to be used in place of any invalid - * formatting in the input when `errors='replace'`. Any valid unicode codepoint may + * formatting in the input when {@code errors='replace'}. Any valid unicode codepoint may * be used. The default value is the default unicode replacement character is * 0xFFFD or U+65533.) */ @@ -301,7 +303,7 @@ public static class Inputs extends RawOpInputs> { /** * Whether to replace the C0 control characters (00-1F) with the - * `replacement_char`. Default is false. + * {@code replacement_char}. Default is false. 
*/ public final boolean replaceControlCharacters; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeEncode.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeEncode.java index 7f30b6eb99a..074ff7af61b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeEncode.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeEncode.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.TString; @@ -53,6 +54,9 @@ opType = UnicodeEncode.OP_NAME, inputsClass = UnicodeEncode.Inputs.class ) +@Operator( + group = "strings" +) public final class UnicodeEncode extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -208,21 +212,20 @@ public static class Inputs extends RawOpInputs { * The value of 'strict' will cause the operation to produce a InvalidArgument * error on any invalid input formatting. A value of 'replace' (the default) will * cause the operation to replace any invalid formatting in the input with the - * `replacement_char` codepoint. A value of 'ignore' will cause the operation to + * {@code replacement_char} codepoint. A value of 'ignore' will cause the operation to * skip any invalid formatting in the input and produce no corresponding output * character. */ public final String errors; /** - * Unicode encoding of the output strings. Valid encodings are: `"UTF-8", - * "UTF-16-BE", and "UTF-32-BE"`. + * Unicode encoding of the output strings. Valid encodings are: {@code "UTF-8", "UTF-16-BE", and "UTF-32-BE"}. 
*/ public final String outputEncoding; /** * The replacement character codepoint to be used in place of any invalid - * formatting in the input when `errors='replace'`. Any valid unicode codepoint may + * formatting in the input when {@code errors='replace'}. Any valid unicode codepoint may * be used. The default value is the default unicode replacement character is * 0xFFFD (U+65533). */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeTranscode.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeTranscode.java index 7dee74bb545..56bfc2e406e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeTranscode.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeTranscode.java @@ -258,13 +258,13 @@ public static class Inputs extends RawOpInputs { /** * Text encoding of the input strings. This is any of the encodings supported - * by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`. + * by ICU ucnv algorithmic converters. Examples: {@code "UTF-16", "US ASCII", "UTF-8"}. */ public final String inputEncoding; /** * The unicode encoding to use in the output. Must be one of - * `"UTF-8", "UTF-16-BE", "UTF-32-BE"`. Multi-byte encodings will be big-endian. + * {@code "UTF-8", "UTF-16-BE", "UTF-32-BE"}. Multi-byte encodings will be big-endian. */ public final String outputEncoding; @@ -273,7 +273,7 @@ public static class Inputs extends RawOpInputs { * The value of 'strict' will cause the operation to produce a InvalidArgument * error on any invalid input formatting. A value of 'replace' (the default) will * cause the operation to replace any invalid formatting in the input with the - * `replacement_char` codepoint. A value of 'ignore' will cause the operation to + * {@code replacement_char} codepoint. 
A value of 'ignore' will cause the operation to * skip any invalid formatting in the input and produce no corresponding output * character. */ @@ -281,11 +281,10 @@ public static class Inputs extends RawOpInputs { /** * The replacement character codepoint to be used in place of any invalid - * formatting in the input when `errors='replace'`. Any valid unicode codepoint may + * formatting in the input when {@code errors='replace'}. Any valid unicode codepoint may * be used. The default value is the default unicode replacement character is * 0xFFFD or U+65533.) - * - * Note that for UTF-8, passing a replacement character expressible in 1 byte, such + *

    Note that for UTF-8, passing a replacement character expressible in 1 byte, such * as ' ', will preserve string alignment to the source since invalid bytes will be * replaced with a 1-byte replacement. For UTF-16-BE and UTF-16-LE, any 1 or 2 byte * replacement character will preserve byte alignment to the source. @@ -294,7 +293,7 @@ public static class Inputs extends RawOpInputs { /** * Whether to replace the C0 control characters (00-1F) with the - * `replacement_char`. Default is false. + * {@code replacement_char}. Default is false. */ public final boolean replaceControlCharacters; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnsortedSegmentJoin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnsortedSegmentJoin.java index 9b4be85a8c6..b9d82efab59 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnsortedSegmentJoin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnsortedSegmentJoin.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TNumber; @@ -40,6 +41,9 @@ opType = UnsortedSegmentJoin.OP_NAME, inputsClass = UnsortedSegmentJoin.Inputs.class ) +@Operator( + group = "strings" +) public final class UnsortedSegmentJoin extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Upper.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Upper.java index f7854678822..230e5a618a4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Upper.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Upper.java @@ -146,7 +146,7 @@ public static class Inputs extends RawOpInputs { public final Operand input; /** - * Character encoding of `input`. Allowed values are '' and 'utf-8'. + * Character encoding of {@code input}. Allowed values are '' and 'utf-8'. * Value '' is interpreted as ASCII. */ public final String encoding; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CloseSummaryWriter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CloseSummaryWriter.java index f8415b30828..6621499b9ad 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CloseSummaryWriter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CloseSummaryWriter.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** @@ -37,6 +38,9 @@ opType = CloseSummaryWriter.OP_NAME, inputsClass = CloseSummaryWriter.Inputs.class ) +@Operator( + group = "summary" +) public final class CloseSummaryWriter extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CreateSummaryDbWriter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CreateSummaryDbWriter.java index b3ea51287c2..5a24e79a5f3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CreateSummaryDbWriter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CreateSummaryDbWriter.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import 
org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -38,6 +39,9 @@ opType = CreateSummaryDbWriter.OP_NAME, inputsClass = CreateSummaryDbWriter.Inputs.class ) +@Operator( + group = "summary" +) public final class CreateSummaryDbWriter extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CreateSummaryFileWriter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CreateSummaryFileWriter.java index 806e764f186..1e62bfe05c2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CreateSummaryFileWriter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CreateSummaryFileWriter.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -39,6 +40,9 @@ opType = CreateSummaryFileWriter.OP_NAME, inputsClass = CreateSummaryFileWriter.Inputs.class ) +@Operator( + group = "summary" +) public final class CreateSummaryFileWriter extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/FlushSummaryWriter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/FlushSummaryWriter.java index da07070af44..3faedb9a03b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/FlushSummaryWriter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/FlushSummaryWriter.java @@ -28,6 +28,7 @@ import 
org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** @@ -37,6 +38,9 @@ opType = FlushSummaryWriter.OP_NAME, inputsClass = FlushSummaryWriter.Inputs.class ) +@Operator( + group = "summary" +) public final class FlushSummaryWriter extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ImportEvent.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ImportEvent.java index 38a842be645..a9723ef2fcb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ImportEvent.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ImportEvent.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -38,6 +39,9 @@ opType = ImportEvent.OP_NAME, inputsClass = ImportEvent.Inputs.class ) +@Operator( + group = "summary" +) public final class ImportEvent extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/StatsAggregatorSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/StatsAggregatorSummary.java index f60238e781c..b604a6b85ce 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/StatsAggregatorSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/StatsAggregatorSummary.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import 
org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -39,6 +40,9 @@ opType = StatsAggregatorSummary.OP_NAME, inputsClass = StatsAggregatorSummary.Inputs.class ) +@Operator( + group = "summary" +) public final class StatsAggregatorSummary extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/SummaryWriter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/SummaryWriter.java index c5b7d4100c7..d891f9b35a1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/SummaryWriter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/SummaryWriter.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** @@ -38,6 +39,9 @@ opType = SummaryWriter.OP_NAME, inputsClass = SummaryWriter.Inputs.class ) +@Operator( + group = "summary" +) public final class SummaryWriter extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteAudioSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteAudioSummary.java index 559b1cfb69b..9ba1858b59e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteAudioSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteAudioSummary.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import 
org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -42,6 +43,9 @@ opType = WriteAudioSummary.OP_NAME, inputsClass = WriteAudioSummary.Inputs.class ) +@Operator( + group = "summary" +) public final class WriteAudioSummary extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteGraphSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteGraphSummary.java index feef8cad613..565ef3940f3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteGraphSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteGraphSummary.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -40,6 +41,9 @@ opType = WriteGraphSummary.OP_NAME, inputsClass = WriteGraphSummary.Inputs.class ) +@Operator( + group = "summary" +) public final class WriteGraphSummary extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteHistogramSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteHistogramSummary.java index 52f3deb5adf..ba431bc0d7e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteHistogramSummary.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteHistogramSummary.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -41,6 +42,9 @@ opType = WriteHistogramSummary.OP_NAME, inputsClass = WriteHistogramSummary.Inputs.class ) +@Operator( + group = "summary" +) public final class WriteHistogramSummary extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteImageSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteImageSummary.java index 1b6676414a0..12d4578a8b3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteImageSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteImageSummary.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -44,6 +45,9 @@ opType = WriteImageSummary.OP_NAME, inputsClass = WriteImageSummary.Inputs.class ) +@Operator( + group = "summary" +) public final class WriteImageSummary extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteRawProtoSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteRawProtoSummary.java index fd559e1d210..d56f66e11c6 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteRawProtoSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteRawProtoSummary.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -40,6 +41,9 @@ opType = WriteRawProtoSummary.OP_NAME, inputsClass = WriteRawProtoSummary.Inputs.class ) +@Operator( + group = "summary" +) public final class WriteRawProtoSummary extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteScalarSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteScalarSummary.java index 11c4f3978b0..d7055fb14dc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteScalarSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteScalarSummary.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -42,6 +43,9 @@ opType = WriteScalarSummary.OP_NAME, inputsClass = WriteScalarSummary.Inputs.class ) +@Operator( + group = "summary" +) public final class WriteScalarSummary extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteSummary.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteSummary.java index a4d040a550d..31a5a470394 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteSummary.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -41,6 +42,9 @@ opType = WriteSummary.OP_NAME, inputsClass = WriteSummary.Inputs.class ) +@Operator( + group = "summary" +) public final class WriteSummary extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/AllToAll.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/AllToAll.java index 4ff3db65f88..3bd1592cbc7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/AllToAll.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/AllToAll.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -48,13 +49,14 @@ * split_count=2 *

    replica 0's output: {@code [[A], [C]]} * replica 1's output: {@code [[B], [D]]} - * - * @param data type for {@code output} output */ @OpMetadata( opType = AllToAll.OP_NAME, inputsClass = AllToAll.Inputs.class ) +@Operator( + group = "tpu" +) public final class AllToAll extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CompilationResult.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CompilationResult.java index e4272d3223b..f6253a3f89f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CompilationResult.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CompilationResult.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; /** @@ -41,6 +42,9 @@ opType = CompilationResult.OP_NAME, inputsClass = CompilationResult.Inputs.class ) +@Operator( + group = "tpu" +) public final class CompilationResult extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ComputeDedupDataSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ComputeDedupDataSize.java new file mode 100644 index 00000000000..6ff27567e92 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ComputeDedupDataSize.java @@ -0,0 +1,128 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.tpu; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.types.TInt32; + +/** + * An op computes the size of the deduplication data from embedding core and returns the updated config. + * This op is to compute size of the deduplication data so to provide this + * information to the op that computes the tuple mask of deduplication data can + * have static output shape. + */ +@OpMetadata( + opType = ComputeDedupDataSize.OP_NAME, + inputsClass = ComputeDedupDataSize.Inputs.class +) +public final class ComputeDedupDataSize extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "ComputeDedupDataSizeV2"; + + private Output numElements; + + public ComputeDedupDataSize(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + numElements = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new ComputeDedupDataSizeV2 operation. 
+ * + * @param scope current scope + * @param config Serialized TPUEmbeddingConfiguration proto. + * @param embeddingPartitions Serialized EmbeddingPartitionsProto proto. + * @param hbmBuffersConfig Serialized HbmBuffersConfig proto. + * @param tpuTopology Serialized TpuTopologyArgsProto proto. + * @return a new instance of ComputeDedupDataSize + */ + @Endpoint( + describeByClass = true + ) + public static ComputeDedupDataSize create(Scope scope, String config, String embeddingPartitions, + String hbmBuffersConfig, String tpuTopology) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "ComputeDedupDataSize"); + opBuilder.setAttr("config", config); + opBuilder.setAttr("embedding_partitions", embeddingPartitions); + opBuilder.setAttr("hbm_buffers_config", hbmBuffersConfig); + opBuilder.setAttr("tpu_topology", tpuTopology); + return new ComputeDedupDataSize(opBuilder.build()); + } + + /** + * Gets numElements. + * The size of the deduplicated data from infeed. + * @return numElements. + */ + public Output numElements() { + return numElements; + } + + @Override + public Output asOutput() { + return numElements; + } + + @OpInputsMetadata( + outputsClass = ComputeDedupDataSize.class + ) + public static class Inputs extends RawOpInputs { + /** + * Serialized TPUEmbeddingConfiguration proto. + */ + public final String config; + + /** + * Serialized EmbeddingPartitionsProto proto. + */ + public final String embeddingPartitions; + + /** + * Serialized HbmBuffersConfig proto. + */ + public final String hbmBuffersConfig; + + /** + * Serialized TpuTopologyArgsProto proto. 
+ */ + public final String tpuTopology; + + public Inputs(GraphOperation op) { + super(new ComputeDedupDataSize(op), op, Arrays.asList("config", "embedding_partitions", "hbm_buffers_config", "tpu_topology")); + int inputIndex = 0; + config = op.attributes().getAttrString("config"); + embeddingPartitions = op.attributes().getAttrString("embedding_partitions"); + hbmBuffersConfig = op.attributes().getAttrString("hbm_buffers_config"); + tpuTopology = op.attributes().getAttrString("tpu_topology"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ComputeDedupDataTupleMask.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ComputeDedupDataTupleMask.java index 1eceb3fb43e..1160a8536a2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ComputeDedupDataTupleMask.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ComputeDedupDataTupleMask.java @@ -45,7 +45,7 @@ public final class ComputeDedupDataTupleMask extends RawOp implements Operand outputShape; @@ -56,18 +56,25 @@ public ComputeDedupDataTupleMask(Operation operation) { } /** - * Factory method to create a class wrapping a new ComputeDedupDataTupleMask operation. + * Factory method to create a class wrapping a new ComputeDedupDataTupleMaskV2 operation. * * @param scope current scope * @param config Serialized TPUEmbeddingConfiguration proto. + * @param embeddingPartitions Serialized EmbeddingPartitionsProto proto. + * @param hbmBuffersConfig Serialized HbmBuffersConfig proto. + * @param tpuTopology Serialized TpuTopologyArgsProto proto. 
* @return a new instance of ComputeDedupDataTupleMask */ @Endpoint( describeByClass = true ) - public static ComputeDedupDataTupleMask create(Scope scope, String config) { + public static ComputeDedupDataTupleMask create(Scope scope, String config, + String embeddingPartitions, String hbmBuffersConfig, String tpuTopology) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "ComputeDedupDataTupleMask"); opBuilder.setAttr("config", config); + opBuilder.setAttr("embedding_partitions", embeddingPartitions); + opBuilder.setAttr("hbm_buffers_config", hbmBuffersConfig); + opBuilder.setAttr("tpu_topology", tpuTopology); return new ComputeDedupDataTupleMask(opBuilder.build()); } @@ -99,10 +106,28 @@ public static class Inputs extends RawOpInputs { */ public final String config; + /** + * Serialized EmbeddingPartitionsProto proto. + */ + public final String embeddingPartitions; + + /** + * Serialized HbmBuffersConfig proto. + */ + public final String hbmBuffersConfig; + + /** + * Serialized TpuTopologyArgsProto proto. 
+ */ + public final String tpuTopology; + public Inputs(GraphOperation op) { - super(new ComputeDedupDataTupleMask(op), op, Arrays.asList("config")); + super(new ComputeDedupDataTupleMask(op), op, Arrays.asList("config", "embedding_partitions", "hbm_buffers_config", "tpu_topology")); int inputIndex = 0; config = op.attributes().getAttrString("config"); + embeddingPartitions = op.attributes().getAttrString("embedding_partitions"); + hbmBuffersConfig = op.attributes().getAttrString("hbm_buffers_config"); + tpuTopology = op.attributes().getAttrString("tpu_topology"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureDistributedTPU.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureDistributedTPU.java index 7c2ce500a30..058a08bcba8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureDistributedTPU.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureDistributedTPU.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; /** @@ -38,6 +39,9 @@ opType = ConfigureDistributedTPU.OP_NAME, inputsClass = ConfigureDistributedTPU.Inputs.class ) +@Operator( + group = "tpu" +) public final class ConfigureDistributedTPU extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureTPUEmbedding.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureTPUEmbedding.java index 1720e9bcc46..99f92e64fc1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureTPUEmbedding.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureTPUEmbedding.java @@ -27,6 +27,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; /** * Sets up TPUEmbedding in a distributed TPU system. @@ -35,6 +36,9 @@ opType = ConfigureTPUEmbedding.OP_NAME, inputsClass = ConfigureTPUEmbedding.Inputs.class ) +@Operator( + group = "tpu" +) public final class ConfigureTPUEmbedding extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConvertToCooTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConvertToCooTensor.java new file mode 100644 index 00000000000..efec4caa44a --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConvertToCooTensor.java @@ -0,0 +1,157 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.tpu; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; + +/** + * The ConvertToCooTensor operation + */ +@OpMetadata( + opType = ConvertToCooTensor.OP_NAME, + inputsClass = ConvertToCooTensor.Inputs.class +) +@Operator( + group = "tpu" +) +public final class ConvertToCooTensor extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "ConvertToCooTensor"; + + private Output rowIds; + + private Output colIds; + + private Output gains; + + public ConvertToCooTensor(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + rowIds = operation.output(outputIdx++); + colIds = operation.output(outputIdx++); + gains = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new ConvertToCooTensor operation. 
+ * + * @param scope current scope + * @param indicesOrRowSplits The indicesOrRowSplits value + * @param values The values value + * @param weights The weights value + * @param sampleCount The value of the sampleCount attribute + * @param combiner The value of the combiner attribute + * @return a new instance of ConvertToCooTensor + */ + @Endpoint( + describeByClass = true + ) + public static ConvertToCooTensor create(Scope scope, Operand indicesOrRowSplits, + Operand values, Operand weights, Long sampleCount, String combiner) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "ConvertToCooTensor"); + opBuilder.addInput(indicesOrRowSplits.asOutput()); + opBuilder.addInput(values.asOutput()); + opBuilder.addInput(weights.asOutput()); + opBuilder.setAttr("sample_count", sampleCount); + opBuilder.setAttr("combiner", combiner); + return new ConvertToCooTensor(opBuilder.build()); + } + + /** + * Gets rowIds. + * + * @return rowIds. + */ + public Output rowIds() { + return rowIds; + } + + /** + * Gets colIds. + * + * @return colIds. + */ + public Output colIds() { + return colIds; + } + + /** + * Gets gains. + * + * @return gains. 
+ */ + public Output gains() { + return gains; + } + + @OpInputsMetadata( + outputsClass = ConvertToCooTensor.class + ) + public static class Inputs extends RawOpInputs { + /** + * The indicesOrRowSplits input + */ + public final Operand indicesOrRowSplits; + + /** + * The values input + */ + public final Operand values; + + /** + * The weights input + */ + public final Operand weights; + + /** + * The sampleCount attribute + */ + public final long sampleCount; + + /** + * The combiner attribute + */ + public final String combiner; + + public Inputs(GraphOperation op) { + super(new ConvertToCooTensor(op), op, Arrays.asList("sample_count", "combiner")); + int inputIndex = 0; + indicesOrRowSplits = (Operand) op.input(inputIndex++); + values = (Operand) op.input(inputIndex++); + weights = (Operand) op.input(inputIndex++); + sampleCount = op.attributes().getAttrInt("sample_count"); + combiner = op.attributes().getAttrString("combiner"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CrossReplicaSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CrossReplicaSum.java index 0e573ddaa2b..15e942cac31 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CrossReplicaSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CrossReplicaSum.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -40,13 +41,14 @@ * Passing group_assignment={@code [[0,2,4,6],[1,3,5,7]]} sets {@code A, C, E, G} as group 0, * and {@code B, D, F, H} as group 1. Thus we get the outputs: * {@code [A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H]}. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = CrossReplicaSum.OP_NAME, inputsClass = CrossReplicaSum.Inputs.class ) +@Operator( + group = "tpu" +) public final class CrossReplicaSum extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.java index a2df3c57f6c..8badd319ee1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.TString; @@ -48,6 +49,9 @@ opType = DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.OP_NAME, inputsClass = DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.Inputs.class ) +@Operator( + group = "tpu" +) public final class DynamicEnqueueTPUEmbeddingArbitraryTensorBatch extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/DynamicEnqueueTPUEmbeddingRaggedTensorBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/DynamicEnqueueTPUEmbeddingRaggedTensorBatch.java index 50490bb72ec..48cd749fe92 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/DynamicEnqueueTPUEmbeddingRaggedTensorBatch.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/DynamicEnqueueTPUEmbeddingRaggedTensorBatch.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.TString; @@ -42,6 +43,9 @@ opType = DynamicEnqueueTPUEmbeddingRaggedTensorBatch.OP_NAME, inputsClass = DynamicEnqueueTPUEmbeddingRaggedTensorBatch.Inputs.class ) +@Operator( + group = "tpu" +) public final class DynamicEnqueueTPUEmbeddingRaggedTensorBatch extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EmbeddingActivations.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EmbeddingActivations.java index 9d8d9ea2947..b952f8edcae 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EmbeddingActivations.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EmbeddingActivations.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -43,6 +44,9 @@ opType = EmbeddingActivations.OP_NAME, inputsClass = EmbeddingActivations.Inputs.class ) +@Operator( + group = "tpu" +) public final class EmbeddingActivations extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingArbitraryTensorBatch.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingArbitraryTensorBatch.java index ce3b1358616..248871292ed 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingArbitraryTensorBatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingArbitraryTensorBatch.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TNumber; @@ -47,6 +48,9 @@ opType = EnqueueTPUEmbeddingArbitraryTensorBatch.OP_NAME, inputsClass = EnqueueTPUEmbeddingArbitraryTensorBatch.Inputs.class ) +@Operator( + group = "tpu" +) public final class EnqueueTPUEmbeddingArbitraryTensorBatch extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -271,7 +275,7 @@ public static class Inputs extends RawOpInputs= 0 and less than the number + * The TPU device to use. Should be >= 0 and less than the number * of TPU cores in the task on which the node is placed. 
*/ public final long deviceOrdinal; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingBatch.java index 7171cbe5da5..811be9baee8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingBatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingBatch.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; /** @@ -40,6 +41,9 @@ opType = EnqueueTPUEmbeddingBatch.OP_NAME, inputsClass = EnqueueTPUEmbeddingBatch.Inputs.class ) +@Operator( + group = "tpu" +) public final class EnqueueTPUEmbeddingBatch extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -211,7 +215,7 @@ public static class Inputs extends RawOpInputs { /** * The TPU device to use. This should be -1 when the Op - * is running on a TPU device, and >= 0 when the Op is running on the CPU + * is running on a TPU device, and >= 0 when the Op is running on the CPU * device. 
*/ public final long deviceOrdinal; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingIntegerBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingIntegerBatch.java index 4c23f2c972c..e68de9fd067 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingIntegerBatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingIntegerBatch.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; import org.tensorflow.types.TString; @@ -39,6 +40,9 @@ opType = EnqueueTPUEmbeddingIntegerBatch.OP_NAME, inputsClass = EnqueueTPUEmbeddingIntegerBatch.Inputs.class ) +@Operator( + group = "tpu" +) public final class EnqueueTPUEmbeddingIntegerBatch extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -132,7 +136,7 @@ public static class Inputs extends RawOpInputs public final Operand modeOverride; /** - * The TPU device to use. Should be >= 0 and less than the number + * The TPU device to use. Should be >= 0 and less than the number * of TPU cores in the task on which the node is placed. 
*/ public final long deviceOrdinal; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingRaggedTensorBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingRaggedTensorBatch.java index 055da6fb91c..14853b93ac7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingRaggedTensorBatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingRaggedTensorBatch.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TNumber; @@ -48,6 +49,9 @@ opType = EnqueueTPUEmbeddingRaggedTensorBatch.OP_NAME, inputsClass = EnqueueTPUEmbeddingRaggedTensorBatch.Inputs.class ) +@Operator( + group = "tpu" +) public final class EnqueueTPUEmbeddingRaggedTensorBatch extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -368,7 +372,7 @@ public static class Inputs extends RawOpInputs= 0 and less than the number + * The TPU device to use. Should be >= 0 and less than the number * of TPU cores in the task on which the node is placed. 
*/ public final long deviceOrdinal; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseBatch.java index efb6acb56fd..9c78459892d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseBatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseBatch.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TNumber; @@ -49,6 +50,9 @@ opType = EnqueueTPUEmbeddingSparseBatch.OP_NAME, inputsClass = EnqueueTPUEmbeddingSparseBatch.Inputs.class ) +@Operator( + group = "tpu" +) public final class EnqueueTPUEmbeddingSparseBatch extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -253,7 +257,7 @@ public static class Inputs extends RawOpInputs { public final DataType T3; /** - * The TPU device to use. Should be >= 0 and less than the number + * The TPU device to use. Should be >= 0 and less than the number * of TPU cores in the task on which the node is placed. 
*/ public final long deviceOrdinal; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseTensorBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseTensorBatch.java index 1f95961867f..40c6df43f92 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseTensorBatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseTensorBatch.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TNumber; @@ -48,6 +49,9 @@ opType = EnqueueTPUEmbeddingSparseTensorBatch.OP_NAME, inputsClass = EnqueueTPUEmbeddingSparseTensorBatch.Inputs.class ) +@Operator( + group = "tpu" +) public final class EnqueueTPUEmbeddingSparseTensorBatch extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -364,7 +368,7 @@ public static class Inputs extends RawOpInputs= 0 and less than the number + * The TPU device to use. Should be >= 0 and less than the number * of TPU cores in the task on which the node is placed. 
*/ public final long deviceOrdinal; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/FinalizeTPUEmbedding.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/FinalizeTPUEmbedding.java index 6ce405ea522..db44a52d05b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/FinalizeTPUEmbedding.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/FinalizeTPUEmbedding.java @@ -22,6 +22,7 @@ import org.tensorflow.Operand; import org.tensorflow.Operation; import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; import org.tensorflow.op.RawOp; import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; @@ -45,14 +46,21 @@ public final class FinalizeTPUEmbedding extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "FinalizeTPUEmbedding"; + public static final String OP_NAME = "FinalizeTPUEmbeddingV2"; + + private Output embeddingPartitions; + + private Output hbmBuffersConfig; public FinalizeTPUEmbedding(Operation operation) { super(operation, OP_NAME); + int outputIdx = 0; + embeddingPartitions = operation.output(outputIdx++); + hbmBuffersConfig = operation.output(outputIdx++); } /** - * Factory method to create a class wrapping a new FinalizeTPUEmbedding operation. + * Factory method to create a class wrapping a new FinalizeTPUEmbeddingV2 operation. * * @param scope current scope * @param commonConfig A string-encoded common configuration proto containing metadata @@ -73,6 +81,26 @@ public static FinalizeTPUEmbedding create(Scope scope, Operand commonCo return new FinalizeTPUEmbedding(opBuilder.build()); } + /** + * Gets embeddingPartitions. + * A string-encoded embedding partitions proto describing how embedding tables are + * partitioned along their feature and ID. + * @return embeddingPartitions. 
+ */ + public Output embeddingPartitions() { + return embeddingPartitions; + } + + /** + * Gets hbmBuffersConfig. + * A string-encoded HBM buffers config proto specifies where HBM buffers are + * located. + * @return hbmBuffersConfig. + */ + public Output hbmBuffersConfig() { + return hbmBuffersConfig; + } + @OpInputsMetadata( outputsClass = FinalizeTPUEmbedding.class ) diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/GetMinibatchSplitsWithPhysicalReplica.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/GetMinibatchSplitsWithPhysicalReplica.java new file mode 100644 index 00000000000..7746ebadb48 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/GetMinibatchSplitsWithPhysicalReplica.java @@ -0,0 +1,257 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.tpu; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.TInt64; +import org.tensorflow.types.TString; + +/** + * The GetMinibatchSplitsWithPhysicalReplica operation + */ +@OpMetadata( + opType = GetMinibatchSplitsWithPhysicalReplica.OP_NAME, + inputsClass = GetMinibatchSplitsWithPhysicalReplica.Inputs.class +) +@Operator( + group = "tpu" +) +public final class GetMinibatchSplitsWithPhysicalReplica extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "GetMinibatchSplitsWithPhysicalReplica"; + + private Output sortedRowIds; + + private Output sortedColIds; + + private Output sortedGains; + + private Output splits; + + private Output idCounts; + + private Output maxIds; + + private Output maxUniques; + + public GetMinibatchSplitsWithPhysicalReplica(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + sortedRowIds = operation.output(outputIdx++); + sortedColIds = operation.output(outputIdx++); + sortedGains = operation.output(outputIdx++); + splits = operation.output(outputIdx++); + idCounts = operation.output(outputIdx++); + maxIds = operation.output(outputIdx++); + maxUniques = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new GetMinibatchSplitsWithPhysicalReplica operation. 
+ * + * @param scope current scope + * @param programKey The programKey value + * @param rowIds The rowIds value + * @param colIds The colIds value + * @param gains The gains value + * @param sampleCount The value of the sampleCount attribute + * @param numReplica The value of the numReplica attribute + * @param tableVocabSize The value of the tableVocabSize attribute + * @param featureWidth The value of the featureWidth attribute + * @param numScPerChip The value of the numScPerChip attribute + * @param tableName The value of the tableName attribute + * @param miniBatchSplits The value of the miniBatchSplits attribute + * @return a new instance of GetMinibatchSplitsWithPhysicalReplica + */ + @Endpoint( + describeByClass = true + ) + public static GetMinibatchSplitsWithPhysicalReplica create(Scope scope, + Operand programKey, Operand rowIds, Operand colIds, + Operand gains, Long sampleCount, Long numReplica, Long tableVocabSize, + Long featureWidth, Long numScPerChip, String tableName, String miniBatchSplits) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "GetMinibatchSplitsWithPhysicalReplica"); + opBuilder.addInput(programKey.asOutput()); + opBuilder.addInput(rowIds.asOutput()); + opBuilder.addInput(colIds.asOutput()); + opBuilder.addInput(gains.asOutput()); + opBuilder.setAttr("sample_count", sampleCount); + opBuilder.setAttr("num_replica", numReplica); + opBuilder.setAttr("table_vocab_size", tableVocabSize); + opBuilder.setAttr("feature_width", featureWidth); + opBuilder.setAttr("num_sc_per_chip", numScPerChip); + opBuilder.setAttr("table_name", tableName); + opBuilder.setAttr("mini_batch_splits", miniBatchSplits); + return new GetMinibatchSplitsWithPhysicalReplica(opBuilder.build()); + } + + /** + * Gets sortedRowIds. + * + * @return sortedRowIds. + */ + public Output sortedRowIds() { + return sortedRowIds; + } + + /** + * Gets sortedColIds. + * + * @return sortedColIds. 
+ */ + public Output sortedColIds() { + return sortedColIds; + } + + /** + * Gets sortedGains. + * + * @return sortedGains. + */ + public Output sortedGains() { + return sortedGains; + } + + /** + * Gets splits. + * + * @return splits. + */ + public Output splits() { + return splits; + } + + /** + * Gets idCounts. + * + * @return idCounts. + */ + public Output idCounts() { + return idCounts; + } + + /** + * Gets maxIds. + * + * @return maxIds. + */ + public Output maxIds() { + return maxIds; + } + + /** + * Gets maxUniques. + * + * @return maxUniques. + */ + public Output maxUniques() { + return maxUniques; + } + + @OpInputsMetadata( + outputsClass = GetMinibatchSplitsWithPhysicalReplica.class + ) + public static class Inputs extends RawOpInputs { + /** + * The programKey input + */ + public final Operand programKey; + + /** + * The rowIds input + */ + public final Operand rowIds; + + /** + * The colIds input + */ + public final Operand colIds; + + /** + * The gains input + */ + public final Operand gains; + + /** + * The sampleCount attribute + */ + public final long sampleCount; + + /** + * The numReplica attribute + */ + public final long numReplica; + + /** + * The tableVocabSize attribute + */ + public final long tableVocabSize; + + /** + * The featureWidth attribute + */ + public final long featureWidth; + + /** + * The numScPerChip attribute + */ + public final long numScPerChip; + + /** + * The tableName attribute + */ + public final String tableName; + + /** + * The miniBatchSplits attribute + */ + public final String miniBatchSplits; + + public Inputs(GraphOperation op) { + super(new GetMinibatchSplitsWithPhysicalReplica(op), op, Arrays.asList("sample_count", "num_replica", "table_vocab_size", "feature_width", "num_sc_per_chip", "table_name", "mini_batch_splits")); + int inputIndex = 0; + programKey = (Operand) op.input(inputIndex++); + rowIds = (Operand) op.input(inputIndex++); + colIds = (Operand) op.input(inputIndex++); + gains = (Operand) 
op.input(inputIndex++); + sampleCount = op.attributes().getAttrInt("sample_count"); + numReplica = op.attributes().getAttrInt("num_replica"); + tableVocabSize = op.attributes().getAttrInt("table_vocab_size"); + featureWidth = op.attributes().getAttrInt("feature_width"); + numScPerChip = op.attributes().getAttrInt("num_sc_per_chip"); + tableName = op.attributes().getAttrString("table_name"); + miniBatchSplits = op.attributes().getAttrString("mini_batch_splits"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/GetMinibatchesInCsrWithPhysicalReplica.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/GetMinibatchesInCsrWithPhysicalReplica.java new file mode 100644 index 00000000000..d51f1c3959f --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/GetMinibatchesInCsrWithPhysicalReplica.java @@ -0,0 +1,290 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.tpu; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.TInt64; +import org.tensorflow.types.TString; + +/** + * The GetMinibatchesInCsrWithPhysicalReplica operation + */ +@OpMetadata( + opType = GetMinibatchesInCsrWithPhysicalReplica.OP_NAME, + inputsClass = GetMinibatchesInCsrWithPhysicalReplica.Inputs.class +) +@Operator( + group = "tpu" +) +public final class GetMinibatchesInCsrWithPhysicalReplica extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "GetMinibatchesInCsrWithPhysicalReplica"; + + private Output rowPointers; + + private Output sortedSampleIds; + + private Output sortedTokenIds; + + private Output sortedGains; + + private Output rowPointersUnpaddedSize; + + private Output idsUnpaddedSize; + + private Output numMinibatchesPerPhysicalSparseCore; + + public GetMinibatchesInCsrWithPhysicalReplica(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + rowPointers = operation.output(outputIdx++); + sortedSampleIds = operation.output(outputIdx++); + sortedTokenIds = operation.output(outputIdx++); + sortedGains = operation.output(outputIdx++); + rowPointersUnpaddedSize = operation.output(outputIdx++); + idsUnpaddedSize = operation.output(outputIdx++); + numMinibatchesPerPhysicalSparseCore = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new 
GetMinibatchesInCsrWithPhysicalReplica operation. + * + * @param scope current scope + * @param programKey The programKey value + * @param rowIds The rowIds value + * @param colIds The colIds value + * @param gains The gains value + * @param splits The splits value + * @param idCounts The idCounts value + * @param sampleCount The value of the sampleCount attribute + * @param numReplica The value of the numReplica attribute + * @param maxMinibatchesPerSc The value of the maxMinibatchesPerSc attribute + * @param maxIdsPerChipPerSample The value of the maxIdsPerChipPerSample attribute + * @param tableVocabSize The value of the tableVocabSize attribute + * @param featureWidth The value of the featureWidth attribute + * @param numScPerChip The value of the numScPerChip attribute + * @param tableName The value of the tableName attribute + * @param miniBatchInCsr The value of the miniBatchInCsr attribute + * @return a new instance of GetMinibatchesInCsrWithPhysicalReplica + */ + @Endpoint( + describeByClass = true + ) + public static GetMinibatchesInCsrWithPhysicalReplica create(Scope scope, + Operand programKey, Operand rowIds, Operand colIds, + Operand gains, Operand splits, Operand idCounts, Long sampleCount, + Long numReplica, Long maxMinibatchesPerSc, Long maxIdsPerChipPerSample, Long tableVocabSize, + Long featureWidth, Long numScPerChip, String tableName, String miniBatchInCsr) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "GetMinibatchesInCsrWithPhysicalReplica"); + opBuilder.addInput(programKey.asOutput()); + opBuilder.addInput(rowIds.asOutput()); + opBuilder.addInput(colIds.asOutput()); + opBuilder.addInput(gains.asOutput()); + opBuilder.addInput(splits.asOutput()); + opBuilder.addInput(idCounts.asOutput()); + opBuilder.setAttr("sample_count", sampleCount); + opBuilder.setAttr("num_replica", numReplica); + opBuilder.setAttr("max_minibatches_per_sc", maxMinibatchesPerSc); + opBuilder.setAttr("max_ids_per_chip_per_sample", maxIdsPerChipPerSample); + 
opBuilder.setAttr("table_vocab_size", tableVocabSize); + opBuilder.setAttr("feature_width", featureWidth); + opBuilder.setAttr("num_sc_per_chip", numScPerChip); + opBuilder.setAttr("table_name", tableName); + opBuilder.setAttr("mini_batch_in_csr", miniBatchInCsr); + return new GetMinibatchesInCsrWithPhysicalReplica(opBuilder.build()); + } + + /** + * Gets rowPointers. + * + * @return rowPointers. + */ + public Output rowPointers() { + return rowPointers; + } + + /** + * Gets sortedSampleIds. + * + * @return sortedSampleIds. + */ + public Output sortedSampleIds() { + return sortedSampleIds; + } + + /** + * Gets sortedTokenIds. + * + * @return sortedTokenIds. + */ + public Output sortedTokenIds() { + return sortedTokenIds; + } + + /** + * Gets sortedGains. + * + * @return sortedGains. + */ + public Output sortedGains() { + return sortedGains; + } + + /** + * Gets rowPointersUnpaddedSize. + * + * @return rowPointersUnpaddedSize. + */ + public Output rowPointersUnpaddedSize() { + return rowPointersUnpaddedSize; + } + + /** + * Gets idsUnpaddedSize. + * + * @return idsUnpaddedSize. + */ + public Output idsUnpaddedSize() { + return idsUnpaddedSize; + } + + /** + * Gets numMinibatchesPerPhysicalSparseCore. + * + * @return numMinibatchesPerPhysicalSparseCore. 
+ */ + public Output numMinibatchesPerPhysicalSparseCore() { + return numMinibatchesPerPhysicalSparseCore; + } + + @OpInputsMetadata( + outputsClass = GetMinibatchesInCsrWithPhysicalReplica.class + ) + public static class Inputs extends RawOpInputs { + /** + * The programKey input + */ + public final Operand programKey; + + /** + * The rowIds input + */ + public final Operand rowIds; + + /** + * The colIds input + */ + public final Operand colIds; + + /** + * The gains input + */ + public final Operand gains; + + /** + * The splits input + */ + public final Operand splits; + + /** + * The idCounts input + */ + public final Operand idCounts; + + /** + * The sampleCount attribute + */ + public final long sampleCount; + + /** + * The numReplica attribute + */ + public final long numReplica; + + /** + * The maxMinibatchesPerSc attribute + */ + public final long maxMinibatchesPerSc; + + /** + * The maxIdsPerChipPerSample attribute + */ + public final long maxIdsPerChipPerSample; + + /** + * The tableVocabSize attribute + */ + public final long tableVocabSize; + + /** + * The featureWidth attribute + */ + public final long featureWidth; + + /** + * The numScPerChip attribute + */ + public final long numScPerChip; + + /** + * The tableName attribute + */ + public final String tableName; + + /** + * The miniBatchInCsr attribute + */ + public final String miniBatchInCsr; + + public Inputs(GraphOperation op) { + super(new GetMinibatchesInCsrWithPhysicalReplica(op), op, Arrays.asList("sample_count", "num_replica", "max_minibatches_per_sc", "max_ids_per_chip_per_sample", "table_vocab_size", "feature_width", "num_sc_per_chip", "table_name", "mini_batch_in_csr")); + int inputIndex = 0; + programKey = (Operand) op.input(inputIndex++); + rowIds = (Operand) op.input(inputIndex++); + colIds = (Operand) op.input(inputIndex++); + gains = (Operand) op.input(inputIndex++); + splits = (Operand) op.input(inputIndex++); + idCounts = (Operand) op.input(inputIndex++); + sampleCount = 
op.attributes().getAttrInt("sample_count"); + numReplica = op.attributes().getAttrInt("num_replica"); + maxMinibatchesPerSc = op.attributes().getAttrInt("max_minibatches_per_sc"); + maxIdsPerChipPerSample = op.attributes().getAttrInt("max_ids_per_chip_per_sample"); + tableVocabSize = op.attributes().getAttrInt("table_vocab_size"); + featureWidth = op.attributes().getAttrInt("feature_width"); + numScPerChip = op.attributes().getAttrInt("num_sc_per_chip"); + tableName = op.attributes().getAttrString("table_name"); + miniBatchInCsr = op.attributes().getAttrString("mini_batch_in_csr"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalAnd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/GetTpuTaskId.java similarity index 52% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalAnd.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/GetTpuTaskId.java index 5119ddc0d74..c4eb00be8dd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalAnd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/GetTpuTaskId.java @@ -15,7 +15,7 @@ // This class has been generated, DO NOT EDIT! -package org.tensorflow.op.risc; +package org.tensorflow.op.tpu; import java.util.Arrays; import org.tensorflow.GraphOperation; @@ -29,80 +29,69 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TBool; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.TInt32; /** - * The RiscLogicalAnd operation + * An op returns the TPU task ID from TPU topology. + * This op is to return the TPU task ID from TPU topology. 
*/ @OpMetadata( - opType = RiscLogicalAnd.OP_NAME, - inputsClass = RiscLogicalAnd.Inputs.class + opType = GetTpuTaskId.OP_NAME, + inputsClass = GetTpuTaskId.Inputs.class ) -public final class RiscLogicalAnd extends RawOp implements Operand { +@Operator( + group = "tpu" +) +public final class GetTpuTaskId extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "RiscLogicalAnd"; + public static final String OP_NAME = "GetTpuTaskId"; - private Output z; + private Output tpuTaskId; - public RiscLogicalAnd(Operation operation) { + public GetTpuTaskId(Operation operation) { super(operation, OP_NAME); int outputIdx = 0; - z = operation.output(outputIdx++); + tpuTaskId = operation.output(outputIdx++); } /** - * Factory method to create a class wrapping a new RiscLogicalAnd operation. + * Factory method to create a class wrapping a new GetTpuTaskId operation. * * @param scope current scope - * @param x The x value - * @param y The y value - * @return a new instance of RiscLogicalAnd + * @return a new instance of GetTpuTaskId */ @Endpoint( describeByClass = true ) - public static RiscLogicalAnd create(Scope scope, Operand x, Operand y) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscLogicalAnd"); - opBuilder.addInput(x.asOutput()); - opBuilder.addInput(y.asOutput()); - return new RiscLogicalAnd(opBuilder.build()); + public static GetTpuTaskId create(Scope scope) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "GetTpuTaskId"); + return new GetTpuTaskId(opBuilder.build()); } /** - * Gets z. - * - * @return z. + * Gets tpuTaskId. + * The TPU task ID from TPU topology. + * @return tpuTaskId. 
*/ - public Output z() { - return z; + public Output tpuTaskId() { + return tpuTaskId; } @Override - public Output asOutput() { - return z; + public Output asOutput() { + return tpuTaskId; } @OpInputsMetadata( - outputsClass = RiscLogicalAnd.class + outputsClass = GetTpuTaskId.class ) - public static class Inputs extends RawOpInputs { - /** - * The x input - */ - public final Operand x; - - /** - * The y input - */ - public final Operand y; - + public static class Inputs extends RawOpInputs { public Inputs(GraphOperation op) { - super(new RiscLogicalAnd(op), op, Arrays.asList()); + super(new GetTpuTaskId(op), op, Arrays.asList()); int inputIndex = 0; - x = (Operand) op.input(inputIndex++); - y = (Operand) op.input(inputIndex++); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalNot.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/GlobalIterId.java similarity index 56% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalNot.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/GlobalIterId.java index 1da8a281e85..f0f71accb37 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalNot.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/GlobalIterId.java @@ -15,7 +15,7 @@ // This class has been generated, DO NOT EDIT! 
-package org.tensorflow.op.risc; +package org.tensorflow.op.tpu; import java.util.Arrays; import org.tensorflow.GraphOperation; @@ -29,72 +29,68 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TBool; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.TInt64; /** - * The RiscLogicalNot operation + * The GlobalIterId operation */ @OpMetadata( - opType = RiscLogicalNot.OP_NAME, - inputsClass = RiscLogicalNot.Inputs.class + opType = GlobalIterId.OP_NAME, + inputsClass = GlobalIterId.Inputs.class ) -public final class RiscLogicalNot extends RawOp implements Operand { +@Operator( + group = "tpu" +) +public final class GlobalIterId extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "RiscLogicalNot"; + public static final String OP_NAME = "GlobalIterId"; - private Output z; + private Output iterId; - public RiscLogicalNot(Operation operation) { + public GlobalIterId(Operation operation) { super(operation, OP_NAME); int outputIdx = 0; - z = operation.output(outputIdx++); + iterId = operation.output(outputIdx++); } /** - * Factory method to create a class wrapping a new RiscLogicalNot operation. + * Factory method to create a class wrapping a new GlobalIterId operation. 
* * @param scope current scope - * @param x The x value - * @return a new instance of RiscLogicalNot + * @return a new instance of GlobalIterId */ @Endpoint( describeByClass = true ) - public static RiscLogicalNot create(Scope scope, Operand x) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RiscLogicalNot"); - opBuilder.addInput(x.asOutput()); - return new RiscLogicalNot(opBuilder.build()); + public static GlobalIterId create(Scope scope) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "GlobalIterId"); + return new GlobalIterId(opBuilder.build()); } /** - * Gets z. + * Gets iterId. * - * @return z. + * @return iterId. */ - public Output z() { - return z; + public Output iterId() { + return iterId; } @Override - public Output asOutput() { - return z; + public Output asOutput() { + return iterId; } @OpInputsMetadata( - outputsClass = RiscLogicalNot.class + outputsClass = GlobalIterId.class ) - public static class Inputs extends RawOpInputs { - /** - * The x input - */ - public final Operand x; - + public static class Inputs extends RawOpInputs { public Inputs(GraphOperation op) { - super(new RiscLogicalNot(op), op, Arrays.asList()); + super(new GlobalIterId(op), op, Arrays.asList()); int inputIndex = 0; - x = (Operand) op.input(inputIndex++); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeue.java index 86eac21375e..2f2d689a23a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeue.java @@ -31,18 +31,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import 
org.tensorflow.types.family.TType; /** * A placeholder op for a value that will be fed into the computation. - * - * @param data type for {@code output} output */ @OpMetadata( opType = InfeedDequeue.OP_NAME, inputsClass = InfeedDequeue.Inputs.class ) +@Operator( + group = "tpu" +) public final class InfeedDequeue extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeueTuple.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeueTuple.java index 26165215fd9..1aaa438e7c6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeueTuple.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeueTuple.java @@ -33,6 +33,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -43,6 +44,9 @@ opType = InfeedDequeueTuple.OP_NAME, inputsClass = InfeedDequeueTuple.Inputs.class ) +@Operator( + group = "tpu" +) public final class InfeedDequeueTuple extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine @@ -103,12 +107,12 @@ public Iterator> iterator() { ) public static class Inputs extends RawOpInputs { /** - * The element types of each element in `outputs`. + * The element types of each element in {@code outputs}. */ public final DataType[] dtypes; /** - * The shapes of each tensor in `outputs`. + * The shapes of each tensor in {@code outputs}. 
*/ public final Shape[] shapes; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueue.java index 582a258d5a3..bc6c1e51a79 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueue.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -40,6 +41,9 @@ opType = InfeedEnqueue.OP_NAME, inputsClass = InfeedEnqueue.Inputs.class ) +@Operator( + group = "tpu" +) public final class InfeedEnqueue extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -223,7 +227,7 @@ public static class Inputs extends RawOpInputs { /** * The TPU device to use. This should be -1 when the Op - * is running on a TPU device, and >= 0 when the Op is running on the CPU + * is running on a TPU device, and >= 0 when the Op is running on the CPU * device. 
*/ public final long deviceOrdinal; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueuePrelinearizedBuffer.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueuePrelinearizedBuffer.java index 03e8ec9a5e3..0e2294decc2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueuePrelinearizedBuffer.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueuePrelinearizedBuffer.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** @@ -37,6 +38,9 @@ opType = InfeedEnqueuePrelinearizedBuffer.OP_NAME, inputsClass = InfeedEnqueuePrelinearizedBuffer.Inputs.class ) +@Operator( + group = "tpu" +) public final class InfeedEnqueuePrelinearizedBuffer extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueueTuple.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueueTuple.java index d6ca9a6a174..faae10fe8a4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueueTuple.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueueTuple.java @@ -31,6 +31,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; /** @@ -40,6 +41,9 @@ opType = InfeedEnqueueTuple.OP_NAME, inputsClass = InfeedEnqueueTuple.Inputs.class ) +@Operator( + group = "tpu" +) public final class InfeedEnqueueTuple extends RawOp { /** * The name of this 
op, as known by TensorFlow core engine @@ -189,18 +193,18 @@ public static class Inputs extends RawOpInputs { public final Iterable> inputs; /** - * The element types of each element in `inputs`. + * The element types of each element in {@code inputs}. */ public final DataType[] dtypes; /** - * The shapes of each tensor in `inputs`. + * The shapes of each tensor in {@code inputs}. */ public final Shape[] shapes; /** * A vector holding the requested layout in minor-to-major sequence for - * all the tuple shapes, in the order the shapes appear in the "shapes" input. + * all the tuple shapes, in the order the shapes appear in the "shapes" input. * The layout elements for a sub-shape can be set to -1, in which case the * corresponding layout will be computed by the infeed operation. */ @@ -208,7 +212,7 @@ public static class Inputs extends RawOpInputs { /** * The TPU device to use. This should be -1 when the Op - * is running on a TPU device, and >= 0 when the Op is running on the CPU + * is running on a TPU device, and >= 0 when the Op is running on the CPU * device. 
*/ public final long deviceOrdinal; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/IsTPUEmbeddingInitialized.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/IsTPUEmbeddingInitialized.java index e2e17414adc..a34b2fd2361 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/IsTPUEmbeddingInitialized.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/IsTPUEmbeddingInitialized.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TBool; /** @@ -38,6 +39,9 @@ opType = IsTPUEmbeddingInitialized.OP_NAME, inputsClass = IsTPUEmbeddingInitialized.Inputs.class ) +@Operator( + group = "tpu" +) public final class IsTPUEmbeddingInitialized extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadAllTPUEmbeddingParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadAllTPUEmbeddingParameters.java index 977173e1588..7729db3d126 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadAllTPUEmbeddingParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadAllTPUEmbeddingParameters.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -46,6 +47,9 @@ opType = LoadAllTPUEmbeddingParameters.OP_NAME, inputsClass = LoadAllTPUEmbeddingParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class LoadAllTPUEmbeddingParameters 
extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingADAMParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingADAMParameters.java index be276ade9ab..284da4dccac 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingADAMParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingADAMParameters.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -42,6 +43,9 @@ opType = LoadTPUEmbeddingADAMParameters.OP_NAME, inputsClass = LoadTPUEmbeddingADAMParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class LoadTPUEmbeddingADAMParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdadeltaParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdadeltaParameters.java index 1e5828f08de..f82fc61402b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdadeltaParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdadeltaParameters.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -42,6 +43,9 @@ opType = LoadTPUEmbeddingAdadeltaParameters.OP_NAME, inputsClass = 
LoadTPUEmbeddingAdadeltaParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class LoadTPUEmbeddingAdadeltaParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradMomentumParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradMomentumParameters.java index 4475f85af31..54f09315582 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradMomentumParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradMomentumParameters.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -42,6 +43,9 @@ opType = LoadTPUEmbeddingAdagradMomentumParameters.OP_NAME, inputsClass = LoadTPUEmbeddingAdagradMomentumParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class LoadTPUEmbeddingAdagradMomentumParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradParameters.java index 3568a65bd7b..fba43cc1e1e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradParameters.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; 
+import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -42,6 +43,9 @@ opType = LoadTPUEmbeddingAdagradParameters.OP_NAME, inputsClass = LoadTPUEmbeddingAdagradParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class LoadTPUEmbeddingAdagradParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingCenteredRMSPropParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingCenteredRMSPropParameters.java index 8a5eb8ac4d8..274e83ee16e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingCenteredRMSPropParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingCenteredRMSPropParameters.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -42,6 +43,9 @@ opType = LoadTPUEmbeddingCenteredRMSPropParameters.OP_NAME, inputsClass = LoadTPUEmbeddingCenteredRMSPropParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class LoadTPUEmbeddingCenteredRMSPropParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFTRLParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFTRLParameters.java index f8833e90843..d765e76ddf5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFTRLParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFTRLParameters.java @@ -28,6 
+28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -42,6 +43,9 @@ opType = LoadTPUEmbeddingFTRLParameters.OP_NAME, inputsClass = LoadTPUEmbeddingFTRLParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class LoadTPUEmbeddingFTRLParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFrequencyEstimatorParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFrequencyEstimatorParameters.java index e8d45f51acd..89988b64472 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFrequencyEstimatorParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFrequencyEstimatorParameters.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -42,6 +43,9 @@ opType = LoadTPUEmbeddingFrequencyEstimatorParameters.OP_NAME, inputsClass = LoadTPUEmbeddingFrequencyEstimatorParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class LoadTPUEmbeddingFrequencyEstimatorParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMDLAdagradLightParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMDLAdagradLightParameters.java index 3c0bbd29f4d..1bb174085a5 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMDLAdagradLightParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMDLAdagradLightParameters.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -42,6 +43,9 @@ opType = LoadTPUEmbeddingMDLAdagradLightParameters.OP_NAME, inputsClass = LoadTPUEmbeddingMDLAdagradLightParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class LoadTPUEmbeddingMDLAdagradLightParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMomentumParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMomentumParameters.java index a647db5ab54..a2588e51af9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMomentumParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMomentumParameters.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -42,6 +43,9 @@ opType = LoadTPUEmbeddingMomentumParameters.OP_NAME, inputsClass = LoadTPUEmbeddingMomentumParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class LoadTPUEmbeddingMomentumParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalAdagradParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalAdagradParameters.java index 737a5f081cd..f7f2ec6524b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalAdagradParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalAdagradParameters.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -42,6 +43,9 @@ opType = LoadTPUEmbeddingProximalAdagradParameters.OP_NAME, inputsClass = LoadTPUEmbeddingProximalAdagradParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class LoadTPUEmbeddingProximalAdagradParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalYogiParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalYogiParameters.java index 90bf2fc4cf1..670993c8cbd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalYogiParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalYogiParameters.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -37,6 +38,9 @@ opType = LoadTPUEmbeddingProximalYogiParameters.OP_NAME, inputsClass = 
LoadTPUEmbeddingProximalYogiParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class LoadTPUEmbeddingProximalYogiParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingRMSPropParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingRMSPropParameters.java index f75e94c7795..02c378df53e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingRMSPropParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingRMSPropParameters.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -42,6 +43,9 @@ opType = LoadTPUEmbeddingRMSPropParameters.OP_NAME, inputsClass = LoadTPUEmbeddingRMSPropParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class LoadTPUEmbeddingRMSPropParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingStochasticGradientDescentParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingStochasticGradientDescentParameters.java index a35574afbb4..cb0883872c2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingStochasticGradientDescentParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingStochasticGradientDescentParameters.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import 
org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -42,6 +43,9 @@ opType = LoadTPUEmbeddingStochasticGradientDescentParameters.OP_NAME, inputsClass = LoadTPUEmbeddingStochasticGradientDescentParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class LoadTPUEmbeddingStochasticGradientDescentParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/MergeDedupData.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/MergeDedupData.java index da1f35645ec..e524db43c5f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/MergeDedupData.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/MergeDedupData.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -45,6 +46,9 @@ opType = MergeDedupData.OP_NAME, inputsClass = MergeDedupData.Inputs.class ) +@Operator( + group = "tpu" +) public final class MergeDedupData extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OrdinalSelector.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OrdinalSelector.java index 1f54ab4ed7c..5df0d72c590 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OrdinalSelector.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OrdinalSelector.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; 
import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; /** @@ -41,6 +42,9 @@ opType = OrdinalSelector.OP_NAME, inputsClass = OrdinalSelector.Inputs.class ) +@Operator( + group = "tpu" +) public final class OrdinalSelector extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeue.java index 64123272bf8..f2043c5047c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeue.java @@ -31,19 +31,21 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; /** * Retrieves a single tensor from the computation outfeed. * This operation will block indefinitely until data is available. - * - * @param data type for {@code output} output */ @OpMetadata( opType = OutfeedDequeue.OP_NAME, inputsClass = OutfeedDequeue.Inputs.class ) +@Operator( + group = "tpu" +) public final class OutfeedDequeue extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -151,7 +153,7 @@ public static class Inputs extends RawOpInputs> { /** * The TPU device to use. This should be -1 when the Op - * is running on a TPU device, and >= 0 when the Op is running on the CPU + * is running on a TPU device, and >= 0 when the Op is running on the CPU * device. 
*/ public final long deviceOrdinal; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueTuple.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueTuple.java index 9ec3d005ea0..e3256963740 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueTuple.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueTuple.java @@ -33,6 +33,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -45,6 +46,9 @@ opType = OutfeedDequeueTuple.OP_NAME, inputsClass = OutfeedDequeueTuple.Inputs.class ) +@Operator( + group = "tpu" +) public final class OutfeedDequeueTuple extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine @@ -148,18 +152,18 @@ public Options deviceOrdinal(Long deviceOrdinal) { ) public static class Inputs extends RawOpInputs { /** - * The element types of each element in `outputs`. + * The element types of each element in {@code outputs}. */ public final DataType[] dtypes; /** - * The shapes of each tensor in `outputs`. + * The shapes of each tensor in {@code outputs}. */ public final Shape[] shapes; /** * The TPU device to use. This should be -1 when the Op - * is running on a TPU device, and >= 0 when the Op is running on the CPU + * is running on a TPU device, and >= 0 when the Op is running on the CPU * device. 
*/ public final long deviceOrdinal; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueTupleV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueTupleV2.java index 491a13af3e9..dc78bf09faa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueTupleV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueTupleV2.java @@ -33,6 +33,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -47,6 +48,9 @@ opType = OutfeedDequeueTupleV2.OP_NAME, inputsClass = OutfeedDequeueTupleV2.Inputs.class ) +@Operator( + group = "tpu" +) public final class OutfeedDequeueTupleV2 extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine @@ -118,12 +122,12 @@ public static class Inputs extends RawOpInputs { public final Operand deviceOrdinal; /** - * The element types of each element in `outputs`. + * The element types of each element in {@code outputs}. */ public final DataType[] dtypes; /** - * The shapes of each tensor in `outputs`. + * The shapes of each tensor in {@code outputs}. 
*/ public final Shape[] shapes; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueV2.java index 749f1811a27..dc0d6a3649a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueV2.java @@ -31,6 +31,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -39,13 +40,14 @@ * Retrieves a single tensor from the computation outfeed. Device ordinal is a * tensor allowing dynamic outfeed. * This operation will block indefinitely until data is available. - * - * @param data type for {@code output} output */ @OpMetadata( opType = OutfeedDequeueV2.OP_NAME, inputsClass = OutfeedDequeueV2.Inputs.class ) +@Operator( + group = "tpu" +) public final class OutfeedDequeueV2 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedEnqueue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedEnqueue.java index e0192d4a325..f79f949acbe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedEnqueue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedEnqueue.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import 
org.tensorflow.types.family.TType; @@ -38,6 +39,9 @@ opType = OutfeedEnqueue.OP_NAME, inputsClass = OutfeedEnqueue.Inputs.class ) +@Operator( + group = "tpu" +) public final class OutfeedEnqueue extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedEnqueueTuple.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedEnqueueTuple.java index 09e6d415fbe..df3eb3df0e1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedEnqueueTuple.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedEnqueueTuple.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; /** @@ -38,6 +39,9 @@ opType = OutfeedEnqueueTuple.OP_NAME, inputsClass = OutfeedEnqueueTuple.Inputs.class ) +@Operator( + group = "tpu" +) public final class OutfeedEnqueueTuple extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedCall.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedCall.java index b1c5e7a3fbe..f20a2de4bda 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedCall.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedCall.java @@ -33,6 +33,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ 
-44,6 +45,9 @@ opType = PartitionedCall.OP_NAME, inputsClass = PartitionedCall.Inputs.class ) +@Operator( + group = "tpu" +) public final class PartitionedCall extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedInput.java index d67f21f4430..89d11541c1b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedInput.java @@ -31,18 +31,20 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; /** * An op that groups a list of partitioned inputs together. Supports ND sharding. - * - * @param data type for {@code output} output */ @OpMetadata( opType = PartitionedInput.OP_NAME, inputsClass = PartitionedInput.Inputs.class ) +@Operator( + group = "tpu" +) public final class PartitionedInput extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedOutput.java index a49b96f066d..b69bdea9a7b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedOutput.java @@ -38,8 +38,6 @@ /** * An op that demultiplexes a tensor to be sharded by XLA to a list of partitioned * outputs outside the XLA computation. Supports ND sharding. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = PartitionedOutput.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/Prelinearize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/Prelinearize.java index 7a943b1dae0..d5417a9aa80 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/Prelinearize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/Prelinearize.java @@ -31,6 +31,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -41,6 +42,9 @@ opType = Prelinearize.OP_NAME, inputsClass = Prelinearize.Inputs.class ) +@Operator( + group = "tpu" +) public final class Prelinearize extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PrelinearizeTuple.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PrelinearizeTuple.java index ad07ce8acd3..9c0217fded0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PrelinearizeTuple.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PrelinearizeTuple.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +43,9 @@ opType = PrelinearizeTuple.OP_NAME, inputsClass = PrelinearizeTuple.Inputs.class ) +@Operator( + group = "tpu" +) public final class PrelinearizeTuple extends RawOp implements 
Operand { /** * The name of this op, as known by TensorFlow core engine @@ -181,18 +185,18 @@ public static class Inputs extends RawOpInputs { public final Iterable> inputs; /** - * The element types of each element in `inputs`. + * The element types of each element in {@code inputs}. */ public final DataType[] dtypes; /** - * The shapes of each tensor in `inputs`. + * The shapes of each tensor in {@code inputs}. */ public final Shape[] shapes; /** * A vector holding the requested layout in minor-to-major sequence for all the - * tuple shapes in the order the shapes appear in the "shapes" input. The layout + * tuple shapes in the order the shapes appear in the "shapes" input. The layout * elements for a sub-shape can be set to -1 in which case the corresponding layout * will be computed by the infeed operation. */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RecvTPUEmbeddingActivations.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RecvTPUEmbeddingActivations.java index 4b763eff19e..68dcd11b979 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RecvTPUEmbeddingActivations.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RecvTPUEmbeddingActivations.java @@ -31,6 +31,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -46,6 +47,9 @@ opType = RecvTPUEmbeddingActivations.OP_NAME, inputsClass = RecvTPUEmbeddingActivations.Inputs.class ) +@Operator( + group = "tpu" +) public final class RecvTPUEmbeddingActivations extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicateMetadata.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicateMetadata.java index cf8d3571d67..67f49871a2c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicateMetadata.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicateMetadata.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; /** * Metadata indicating how the TPU computation should be replicated. @@ -37,6 +38,9 @@ opType = ReplicateMetadata.OP_NAME, inputsClass = ReplicateMetadata.Inputs.class ) +@Operator( + group = "tpu" +) public final class ReplicateMetadata extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedInput.java index 64d8aea2f9a..5f5ae14be0e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedInput.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -45,13 +46,14 @@ * %computation = "tf.Computation"(%replicated_input) * *

    The above computation has a replicated input of two replicas. - * - * @param data type for {@code output} output */ @OpMetadata( opType = ReplicatedInput.OP_NAME, inputsClass = ReplicatedInput.Inputs.class ) +@Operator( + group = "tpu" +) public final class ReplicatedInput extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedOutput.java index ccff2349b3b..6daab9ae1a2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedOutput.java @@ -31,6 +31,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -44,13 +45,14 @@ * %replicated_output:2 = "tf.TPUReplicatedOutput"(%computation) * *

    The above computation has a replicated output of two replicas. - * - * @param data type for {@code outputs} output */ @OpMetadata( opType = ReplicatedOutput.OP_NAME, inputsClass = ReplicatedOutput.Inputs.class ) +@Operator( + group = "tpu" +) public final class ReplicatedOutput extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveAllTPUEmbeddingParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveAllTPUEmbeddingParameters.java index 9ccd84e69c1..723885b54fd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveAllTPUEmbeddingParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveAllTPUEmbeddingParameters.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -46,6 +47,9 @@ opType = RetrieveAllTPUEmbeddingParameters.OP_NAME, inputsClass = RetrieveAllTPUEmbeddingParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class RetrieveAllTPUEmbeddingParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingADAMParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingADAMParameters.java index b8242705b8e..3b2c5c54d62 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingADAMParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingADAMParameters.java @@ -28,6 +28,7 @@ import 
org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -41,6 +42,9 @@ opType = RetrieveTPUEmbeddingADAMParameters.OP_NAME, inputsClass = RetrieveTPUEmbeddingADAMParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class RetrieveTPUEmbeddingADAMParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdadeltaParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdadeltaParameters.java index b59880a136d..39fc4708c75 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdadeltaParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdadeltaParameters.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -41,6 +42,9 @@ opType = RetrieveTPUEmbeddingAdadeltaParameters.OP_NAME, inputsClass = RetrieveTPUEmbeddingAdadeltaParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class RetrieveTPUEmbeddingAdadeltaParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradMomentumParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradMomentumParameters.java index f13e816cb5a..29120ef3f9b 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradMomentumParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradMomentumParameters.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -41,6 +42,9 @@ opType = RetrieveTPUEmbeddingAdagradMomentumParameters.OP_NAME, inputsClass = RetrieveTPUEmbeddingAdagradMomentumParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class RetrieveTPUEmbeddingAdagradMomentumParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradParameters.java index 4e28c761079..9ec5823b9b0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradParameters.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -41,6 +42,9 @@ opType = RetrieveTPUEmbeddingAdagradParameters.OP_NAME, inputsClass = RetrieveTPUEmbeddingAdagradParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class RetrieveTPUEmbeddingAdagradParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingCenteredRMSPropParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingCenteredRMSPropParameters.java index 99d91c272b9..321d91d8acc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingCenteredRMSPropParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingCenteredRMSPropParameters.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -41,6 +42,9 @@ opType = RetrieveTPUEmbeddingCenteredRMSPropParameters.OP_NAME, inputsClass = RetrieveTPUEmbeddingCenteredRMSPropParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class RetrieveTPUEmbeddingCenteredRMSPropParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFTRLParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFTRLParameters.java index e5f0620bbe1..712be9b6d8f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFTRLParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFTRLParameters.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -41,6 +42,9 @@ opType = RetrieveTPUEmbeddingFTRLParameters.OP_NAME, inputsClass = 
RetrieveTPUEmbeddingFTRLParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class RetrieveTPUEmbeddingFTRLParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFrequencyEstimatorParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFrequencyEstimatorParameters.java index 728e7f38dfe..1f8d062c7d3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFrequencyEstimatorParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFrequencyEstimatorParameters.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -41,6 +42,9 @@ opType = RetrieveTPUEmbeddingFrequencyEstimatorParameters.OP_NAME, inputsClass = RetrieveTPUEmbeddingFrequencyEstimatorParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class RetrieveTPUEmbeddingFrequencyEstimatorParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMDLAdagradLightParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMDLAdagradLightParameters.java index f72bf50f310..ab167935ad5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMDLAdagradLightParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMDLAdagradLightParameters.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import 
org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -41,6 +42,9 @@ opType = RetrieveTPUEmbeddingMDLAdagradLightParameters.OP_NAME, inputsClass = RetrieveTPUEmbeddingMDLAdagradLightParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class RetrieveTPUEmbeddingMDLAdagradLightParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMomentumParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMomentumParameters.java index 53e683984ca..747f704c7a4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMomentumParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMomentumParameters.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -41,6 +42,9 @@ opType = RetrieveTPUEmbeddingMomentumParameters.OP_NAME, inputsClass = RetrieveTPUEmbeddingMomentumParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class RetrieveTPUEmbeddingMomentumParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalAdagradParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalAdagradParameters.java index 6337bdae4b4..7bcac9a6f1d 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalAdagradParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalAdagradParameters.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -41,6 +42,9 @@ opType = RetrieveTPUEmbeddingProximalAdagradParameters.OP_NAME, inputsClass = RetrieveTPUEmbeddingProximalAdagradParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class RetrieveTPUEmbeddingProximalAdagradParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalYogiParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalYogiParameters.java index 943b8002368..ab801fa3778 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalYogiParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalYogiParameters.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -37,6 +38,9 @@ opType = RetrieveTPUEmbeddingProximalYogiParameters.OP_NAME, inputsClass = RetrieveTPUEmbeddingProximalYogiParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class RetrieveTPUEmbeddingProximalYogiParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingRMSPropParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingRMSPropParameters.java index ddb134f6cc2..94904073f5b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingRMSPropParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingRMSPropParameters.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -41,6 +42,9 @@ opType = RetrieveTPUEmbeddingRMSPropParameters.OP_NAME, inputsClass = RetrieveTPUEmbeddingRMSPropParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class RetrieveTPUEmbeddingRMSPropParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingStochasticGradientDescentParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingStochasticGradientDescentParameters.java index 209a907d72b..b64685d5b06 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingStochasticGradientDescentParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingStochasticGradientDescentParameters.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -42,6 +43,9 @@ opType = 
RetrieveTPUEmbeddingStochasticGradientDescentParameters.OP_NAME, inputsClass = RetrieveTPUEmbeddingStochasticGradientDescentParameters.Inputs.class ) +@Operator( + group = "tpu" +) public final class RetrieveTPUEmbeddingStochasticGradientDescentParameters extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/SendTPUEmbeddingGradients.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/SendTPUEmbeddingGradients.java index 3fa2fcc1270..ec244c2d780 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/SendTPUEmbeddingGradients.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/SendTPUEmbeddingGradients.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -38,6 +39,9 @@ opType = SendTPUEmbeddingGradients.OP_NAME, inputsClass = SendTPUEmbeddingGradients.Inputs.class ) +@Operator( + group = "tpu" +) public final class SendTPUEmbeddingGradients extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ShutdownDistributedTPU.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ShutdownDistributedTPU.java index 39db94bffdc..d3a002317a0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ShutdownDistributedTPU.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ShutdownDistributedTPU.java @@ -27,6 +27,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import 
org.tensorflow.op.annotation.Operator; /** * Shuts down a running distributed TPU system. @@ -36,6 +37,9 @@ opType = ShutdownDistributedTPU.OP_NAME, inputsClass = ShutdownDistributedTPU.Inputs.class ) +@Operator( + group = "tpu" +) public final class ShutdownDistributedTPU extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/SplitDedupData.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/SplitDedupData.java index 4e6fa0d8097..8e8d4537dff 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/SplitDedupData.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/SplitDedupData.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -40,15 +41,14 @@ * Deduplication data is an XLA tuple, which consists of integer and floating point * values. This op is to split these values into two groups for two types, and * construct each group as one tensor to return. 
- * - * @param data type for {@code integer_tensor} output - * - * @param data type for {@code float_tensor} output */ @OpMetadata( opType = SplitDedupData.OP_NAME, inputsClass = SplitDedupData.Inputs.class ) +@Operator( + group = "tpu" +) public final class SplitDedupData extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/StoreMinibatchStatisticsInFdo.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/StoreMinibatchStatisticsInFdo.java new file mode 100644 index 00000000000..a3c05fd31fb --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/StoreMinibatchStatisticsInFdo.java @@ -0,0 +1,152 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.tpu; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.TString; + +/** + * The StoreMinibatchStatisticsInFdo operation + */ +@OpMetadata( + opType = StoreMinibatchStatisticsInFdo.OP_NAME, + inputsClass = StoreMinibatchStatisticsInFdo.Inputs.class +) +@Operator( + group = "tpu" +) +public final class StoreMinibatchStatisticsInFdo extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "StoreMinibatchStatisticsInFdo"; + + public StoreMinibatchStatisticsInFdo(Operation operation) { + super(operation, OP_NAME); + } + + /** + * Factory method to create a class wrapping a new StoreMinibatchStatisticsInFdo operation. 
+ * + * @param scope current scope + * @param programKey The programKey value + * @param maxIds The maxIds value + * @param maxUniques The maxUniques value + * @param sampleCount The value of the sampleCount attribute + * @param numReplica The value of the numReplica attribute + * @param featureWidth The value of the featureWidth attribute + * @param numScPerChip The value of the numScPerChip attribute + * @param tableName The value of the tableName attribute + * @param miniBatchSplits The value of the miniBatchSplits attribute + * @return a new instance of StoreMinibatchStatisticsInFdo + */ + @Endpoint( + describeByClass = true + ) + public static StoreMinibatchStatisticsInFdo create(Scope scope, Operand programKey, + Operand maxIds, Operand maxUniques, Long sampleCount, Long numReplica, + Long featureWidth, Long numScPerChip, String tableName, String miniBatchSplits) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "StoreMinibatchStatisticsInFdo"); + opBuilder.addInput(programKey.asOutput()); + opBuilder.addInput(maxIds.asOutput()); + opBuilder.addInput(maxUniques.asOutput()); + opBuilder.setAttr("sample_count", sampleCount); + opBuilder.setAttr("num_replica", numReplica); + opBuilder.setAttr("feature_width", featureWidth); + opBuilder.setAttr("num_sc_per_chip", numScPerChip); + opBuilder.setAttr("table_name", tableName); + opBuilder.setAttr("mini_batch_splits", miniBatchSplits); + return new StoreMinibatchStatisticsInFdo(opBuilder.build()); + } + + @OpInputsMetadata( + outputsClass = StoreMinibatchStatisticsInFdo.class + ) + public static class Inputs extends RawOpInputs { + /** + * The programKey input + */ + public final Operand programKey; + + /** + * The maxIds input + */ + public final Operand maxIds; + + /** + * The maxUniques input + */ + public final Operand maxUniques; + + /** + * The sampleCount attribute + */ + public final long sampleCount; + + /** + * The numReplica attribute + */ + public final long numReplica; + + /** + * The featureWidth 
attribute + */ + public final long featureWidth; + + /** + * The numScPerChip attribute + */ + public final long numScPerChip; + + /** + * The tableName attribute + */ + public final String tableName; + + /** + * The miniBatchSplits attribute + */ + public final String miniBatchSplits; + + public Inputs(GraphOperation op) { + super(new StoreMinibatchStatisticsInFdo(op), op, Arrays.asList("sample_count", "num_replica", "feature_width", "num_sc_per_chip", "table_name", "mini_batch_splits")); + int inputIndex = 0; + programKey = (Operand) op.input(inputIndex++); + maxIds = (Operand) op.input(inputIndex++); + maxUniques = (Operand) op.input(inputIndex++); + sampleCount = op.attributes().getAttrInt("sample_count"); + numReplica = op.attributes().getAttrInt("num_replica"); + featureWidth = op.attributes().getAttrInt("feature_width"); + numScPerChip = op.attributes().getAttrInt("num_sc_per_chip"); + tableName = op.attributes().getAttrString("table_name"); + miniBatchSplits = op.attributes().getAttrString("mini_batch_splits"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUAnnotateTensorsWithDynamicShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUAnnotateTensorsWithDynamicShape.java new file mode 100644 index 00000000000..a4dc34f7fc4 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUAnnotateTensorsWithDynamicShape.java @@ -0,0 +1,121 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.tpu; + +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.proto.DataType; +import org.tensorflow.types.family.TType; + +/** + * The TPUAnnotateTensorsWithDynamicShape operation + */ +@OpMetadata( + opType = TPUAnnotateTensorsWithDynamicShape.OP_NAME, + inputsClass = TPUAnnotateTensorsWithDynamicShape.Inputs.class +) +@Operator( + group = "tpu" +) +public final class TPUAnnotateTensorsWithDynamicShape extends RawOp implements Iterable> { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "TPUAnnotateTensorsWithDynamicShape"; + + private List> tpuTensors; + + @SuppressWarnings("unchecked") + public TPUAnnotateTensorsWithDynamicShape(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + int tpuTensorsLength = operation.outputListLength("tpu_tensors"); + tpuTensors = Arrays.asList(operation.outputList(outputIdx, tpuTensorsLength)); + outputIdx += tpuTensorsLength; + } + + /** + * Factory method to create a class wrapping a new TPUAnnotateTensorsWithDynamicShape operation. 
+ * + * @param scope current scope + * @param tensors The tensors value + * @return a new instance of TPUAnnotateTensorsWithDynamicShape + */ + @Endpoint( + describeByClass = true + ) + public static TPUAnnotateTensorsWithDynamicShape create(Scope scope, + Iterable> tensors) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "TPUAnnotateTensorsWithDynamicShape"); + opBuilder.addInputList(Operands.asOutputs(tensors)); + return new TPUAnnotateTensorsWithDynamicShape(opBuilder.build()); + } + + /** + * Gets tpuTensors. + * + * @return tpuTensors. + */ + public List> tpuTensors() { + return tpuTensors; + } + + @Override + @SuppressWarnings({"rawtypes", "unchecked"}) + public Iterator> iterator() { + return (Iterator) tpuTensors.iterator(); + } + + @OpInputsMetadata( + outputsClass = TPUAnnotateTensorsWithDynamicShape.class + ) + public static class Inputs extends RawOpInputs { + /** + * The tensors input + */ + public final Iterable> tensors; + + /** + * The T attribute + */ + public final DataType[] T; + + public Inputs(GraphOperation op) { + super(new TPUAnnotateTensorsWithDynamicShape(op), op, Arrays.asList("T")); + int inputIndex = 0; + int tensorsLength = op.inputListLength("tensors"); + tensors = Arrays.asList((Operand[]) op.inputList(inputIndex, tensorsLength)); + inputIndex += tensorsLength; + T = op.attributes().getAttrTypeList("T"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUCompilationResult.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUCompilationResult.java index 8afe4ae97e2..6e33eb5f4c0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUCompilationResult.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUCompilationResult.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; 
+import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; /** @@ -44,6 +45,9 @@ inputsClass = TPUCompilationResult.Inputs.class ) @Deprecated +@Operator( + group = "tpu" +) public final class TPUCompilationResult extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUCopyWithDynamicShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUCopyWithDynamicShape.java new file mode 100644 index 00000000000..3e79b7672b5 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUCopyWithDynamicShape.java @@ -0,0 +1,133 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.tpu; + +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.proto.DataType; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.family.TType; + +/** + * Op that copies host tensor to device with dynamic shape support. + * For internal use only. + */ +@OpMetadata( + opType = TPUCopyWithDynamicShape.OP_NAME, + inputsClass = TPUCopyWithDynamicShape.Inputs.class +) +@Operator( + group = "tpu" +) +public final class TPUCopyWithDynamicShape extends RawOp implements Iterable> { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "TPUCopyWithDynamicShape"; + + private List> tpuTensors; + + @SuppressWarnings("unchecked") + public TPUCopyWithDynamicShape(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + int tpuTensorsLength = operation.outputListLength("tpu_tensors"); + tpuTensors = Arrays.asList(operation.outputList(outputIdx, tpuTensorsLength)); + outputIdx += tpuTensorsLength; + } + + /** + * Factory method to create a class wrapping a new TPUCopyWithDynamicShape operation. 
+ * + * @param scope current scope + * @param tensors The tensors value + * @param unpaddedSizes The unpaddedSizes value + * @return a new instance of TPUCopyWithDynamicShape + */ + @Endpoint( + describeByClass = true + ) + public static TPUCopyWithDynamicShape create(Scope scope, Iterable> tensors, + Iterable> unpaddedSizes) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "TPUCopyWithDynamicShape"); + opBuilder.addInputList(Operands.asOutputs(tensors)); + opBuilder.addInputList(Operands.asOutputs(unpaddedSizes)); + return new TPUCopyWithDynamicShape(opBuilder.build()); + } + + /** + * Gets tpuTensors. + * + * @return tpuTensors. + */ + public List> tpuTensors() { + return tpuTensors; + } + + @Override + @SuppressWarnings({"rawtypes", "unchecked"}) + public Iterator> iterator() { + return (Iterator) tpuTensors.iterator(); + } + + @OpInputsMetadata( + outputsClass = TPUCopyWithDynamicShape.class + ) + public static class Inputs extends RawOpInputs { + /** + * The tensors input + */ + public final Iterable> tensors; + + /** + * The unpaddedSizes input + */ + public final Iterable> unpaddedSizes; + + /** + * The T attribute + */ + public final DataType[] T; + + public Inputs(GraphOperation op) { + super(new TPUCopyWithDynamicShape(op), op, Arrays.asList("T")); + int inputIndex = 0; + int tensorsLength = op.inputListLength("tensors"); + tensors = Arrays.asList((Operand[]) op.inputList(inputIndex, tensorsLength)); + inputIndex += tensorsLength; + int unpaddedSizesLength = op.inputListLength("unpadded_sizes"); + unpaddedSizes = Arrays.asList((Operand[]) op.inputList(inputIndex, unpaddedSizesLength)); + inputIndex += unpaddedSizesLength; + T = op.attributes().getAttrTypeList("T"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUEmbeddingActivations.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUEmbeddingActivations.java index de0a62dc4aa..29a1e429e53 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUEmbeddingActivations.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUEmbeddingActivations.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** @@ -46,6 +47,9 @@ inputsClass = TPUEmbeddingActivations.Inputs.class ) @Deprecated +@Operator( + group = "tpu" +) public final class TPUEmbeddingActivations extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicateMetadata.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicateMetadata.java index bf2065bdb0d..1e5fab82839 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicateMetadata.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicateMetadata.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; /** * Metadata indicating how the TPU computation should be replicated. 
@@ -40,6 +41,9 @@ inputsClass = TPUReplicateMetadata.Inputs.class ) @Deprecated +@Operator( + group = "tpu" +) public final class TPUReplicateMetadata extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedInput.java index 92b9ab4f117..80ac7e3ea03 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedInput.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -46,8 +47,6 @@ * *

    The above computation has a replicated input of two replicas. * - * @param data type for {@code output} output - * * @deprecated use {@link org.tensorflow.op.tpu.ReplicatedInput} instead */ @OpMetadata( @@ -55,6 +54,9 @@ inputsClass = TPUReplicatedInput.Inputs.class ) @Deprecated +@Operator( + group = "tpu" +) public final class TPUReplicatedInput extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedOutput.java index e10330b839d..dcc1b12b2b8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedOutput.java @@ -31,6 +31,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -45,8 +46,6 @@ * *

    ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) * var <- var - mom - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyRmsProp.OP_NAME, @@ -207,7 +205,7 @@ public static class Inputs extends RawOpInputs> public final DataType T; /** - * If `True`, updating of the var, ms, and mom tensors is protected + * If {@code True}, updating of the var, ms, and mom tensors is protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/BatchMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/BatchMatMul.java index f73f7c8927d..17560573705 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/BatchMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/BatchMatMul.java @@ -56,8 +56,6 @@ *

    NOTE: {@code train.BatchMatMul} supports broadcasting in the batch dimensions. More * about broadcasting * here . - * - * @param data type for {@code output} output */ @OpMetadata( opType = BatchMatMul.OP_NAME, @@ -108,6 +106,12 @@ public static BatchMatMul create(Scope scope, Operand(opBuilder.build()); @@ -133,6 +137,26 @@ public static Options adjY(Boolean adjY) { return new Options().adjY(adjY); } + /** + * Sets the gradX option. + * + * @param gradX the gradX option + * @return this Options instance. + */ + public static Options gradX(Boolean gradX) { + return new Options().gradX(gradX); + } + + /** + * Sets the gradY option. + * + * @param gradY the gradY option + * @return this Options instance. + */ + public static Options gradY(Boolean gradY) { + return new Options().gradY(gradY); + } + /** * Gets output. * 3-D or higher with shape {@code [..., r_o, c_o]} @@ -155,6 +179,10 @@ public static class Options { private Boolean adjY; + private Boolean gradX; + + private Boolean gradY; + private Options() { } @@ -179,6 +207,28 @@ public Options adjY(Boolean adjY) { this.adjY = adjY; return this; } + + /** + * Sets the gradX option. + * + * @param gradX the gradX option + * @return this Options instance. + */ + public Options gradX(Boolean gradX) { + this.gradX = gradX; + return this; + } + + /** + * Sets the gradY option. + * + * @param gradY the gradY option + * @return this Options instance. + */ + public Options gradY(Boolean gradY) { + this.gradY = gradY; + return this; + } } @OpInputsMetadata( @@ -211,17 +261,27 @@ public static class Inputs extends RawOpInputs> { public final DataType Tout; /** - * If `True`, adjoint the slices of `x`. Defaults to `False`. + * If {@code True}, adjoint the slices of {@code x}. Defaults to {@code False}. */ public final boolean adjX; /** - * If `True`, adjoint the slices of `y`. Defaults to `False`. + * If {@code True}, adjoint the slices of {@code y}. Defaults to {@code False}. 
*/ public final boolean adjY; + /** + * The gradX attribute + */ + public final boolean gradX; + + /** + * The gradY attribute + */ + public final boolean gradY; + public Inputs(GraphOperation op) { - super(new BatchMatMul<>(op), op, Arrays.asList("Ta", "Tb", "Tout", "adj_x", "adj_y")); + super(new BatchMatMul<>(op), op, Arrays.asList("Ta", "Tb", "Tout", "adj_x", "adj_y", "grad_x", "grad_y")); int inputIndex = 0; x = (Operand) op.input(inputIndex++); y = (Operand) op.input(inputIndex++); @@ -230,6 +290,8 @@ public Inputs(GraphOperation op) { Tout = op.attributes().getAttrType("Tout"); adjX = op.attributes().getAttrBool("adj_x"); adjY = op.attributes().getAttrBool("adj_y"); + gradX = op.attributes().getAttrBool("grad_x"); + gradY = op.attributes().getAttrBool("grad_y"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ComputeBatchSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ComputeBatchSize.java index 45232b68afd..1e6930d5410 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ComputeBatchSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ComputeBatchSize.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -39,6 +40,9 @@ opType = ComputeBatchSize.OP_NAME, inputsClass = ComputeBatchSize.Inputs.class ) +@Operator( + group = "train" +) public final class ComputeBatchSize extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/DistributedSave.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/DistributedSave.java index 
7046d3980a8..5572cc4a03f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/DistributedSave.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/DistributedSave.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -38,6 +39,9 @@ opType = DistributedSave.OP_NAME, inputsClass = DistributedSave.Inputs.class ) +@Operator( + group = "train" +) public final class DistributedSave extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/PreventGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/PreventGradient.java index a7181e6cb0b..c98b11d0050 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/PreventGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/PreventGradient.java @@ -41,8 +41,6 @@ * because no gradient must ever be registered for this function. This * op exists to prevent subtle bugs from silently returning unimplemented * gradients in some corner cases. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = PreventGradient.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorApplyGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorApplyGradient.java index ffaa98b7117..a13e18fe22a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorApplyGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorApplyGradient.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -40,6 +41,9 @@ opType = ResourceAccumulatorApplyGradient.OP_NAME, inputsClass = ResourceAccumulatorApplyGradient.Inputs.class ) +@Operator( + group = "train" +) public final class ResourceAccumulatorApplyGradient extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorNumAccumulated.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorNumAccumulated.java index 040494fc7be..639f54c9b2d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorNumAccumulated.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorNumAccumulated.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; 
import org.tensorflow.types.family.TType; @@ -39,6 +40,9 @@ opType = ResourceAccumulatorNumAccumulated.OP_NAME, inputsClass = ResourceAccumulatorNumAccumulated.Inputs.class ) +@Operator( + group = "train" +) public final class ResourceAccumulatorNumAccumulated extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorSetGlobalStep.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorSetGlobalStep.java index a590fb51675..9b73fa3b03d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorSetGlobalStep.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorSetGlobalStep.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -40,6 +41,9 @@ opType = ResourceAccumulatorSetGlobalStep.OP_NAME, inputsClass = ResourceAccumulatorSetGlobalStep.Inputs.class ) +@Operator( + group = "train" +) public final class ResourceAccumulatorSetGlobalStep extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorTakeGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorTakeGradient.java index f494f6c7987..843ecae89f1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorTakeGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorTakeGradient.java @@ -30,6 +30,7 @@ import 
org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -41,13 +42,14 @@ * aggregated more than num_required gradients, it returns the average of * the accumulated gradients. Also automatically increments the recorded * global_step in the accumulator by 1, and resets the aggregate to 0. - * - * @param data type for {@code average} output */ @OpMetadata( opType = ResourceAccumulatorTakeGradient.OP_NAME, inputsClass = ResourceAccumulatorTakeGradient.Inputs.class ) +@Operator( + group = "train" +) public final class ResourceAccumulatorTakeGradient extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdaMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdaMax.java index 0f32bcb8f9d..470a8f8414c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdaMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdaMax.java @@ -28,6 +28,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -41,6 +42,9 @@ opType = ResourceApplyAdaMax.OP_NAME, inputsClass = ResourceApplyAdaMax.Inputs.class ) +@Operator( + group = "train" +) public final class ResourceApplyAdaMax extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -185,7 +189,7 @@ public static class Inputs extends RawOpInputs extends RawOpInputs extends 
RawOpInputs extends RawOpInputs extends RawOpInputs extends RawOpInputs extends RawOpInputs extends RawOpInputs extends RawOpInputs extends RawOpInputs extends RawOpInputs extends RawOpInputs { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagrad.java index 681db0f553e..1db5e0b46f1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagrad.java @@ -192,7 +192,7 @@ public static class Inputs extends RawOpInputs extends RawOpInputs extends RawOpInputs extends RawOpInputs extends RawOpInputs extends RawOpInputs extends RawOpInputsThe {@code shape_and_slice} input has the same format as the * elements of the {@code shapes_and_slices} input of the {@code SaveSlices} op. - * - * @param data type for {@code tensor} output */ @OpMetadata( opType = RestoreSlice.OP_NAME, @@ -178,7 +176,7 @@ public static class Inputs extends RawOpInputs> { /** * Index of file to open first if multiple files match - * `file_pattern`. See the documentation for `Restore`. + * {@code file_pattern}. See the documentation for {@code Restore}. 
*/ public final long preferredShard; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaOptimizer.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaOptimizer.java index f03ddc35c32..e06ddf1dae1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaOptimizer.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaOptimizer.java @@ -31,6 +31,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; @@ -54,6 +55,9 @@ opType = SdcaOptimizer.OP_NAME, inputsClass = SdcaOptimizer.Inputs.class ) +@Operator( + group = "train" +) public final class SdcaOptimizer extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdadelta.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdadelta.java index c68618fecc1..8b12e83f51f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdadelta.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdadelta.java @@ -36,8 +36,6 @@ /** * var: Should be from a Variable(). 
- * - * @param data type for {@code out} output */ @OpMetadata( opType = SparseApplyAdadelta.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagrad.java index 0742d6de727..fbda4c582a0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagrad.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -38,13 +39,14 @@ * That is for rows we have grad for, we update var and accum as follows: * $$accum += grad * grad$$ * $$var -= lr * grad * (1 / sqrt(accum))$$ - * - * @param data type for {@code out} output */ @OpMetadata( opType = SparseApplyAdagrad.OP_NAME, inputsClass = SparseApplyAdagrad.Inputs.class ) +@Operator( + group = "train" +) public final class SparseApplyAdagrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -216,7 +218,7 @@ public static class Inputs extends RawOpInputs data type for {@code out} output */ @OpMetadata( opType = SparseApplyAdagradDa.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyCenteredRmsProp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyCenteredRmsProp.java index aa940b94ea0..cfbf01b8044 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyCenteredRmsProp.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyCenteredRmsProp.java @@ -49,8 +49,6 @@ *

    $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ * $$var <- var - mom$$ - * - * @param data type for {@code out} output */ @OpMetadata( opType = SparseApplyCenteredRmsProp.OP_NAME, @@ -233,7 +231,7 @@ public static class Inputs extends RawOpInputs data type for {@code out} output */ @OpMetadata( opType = SparseApplyFtrl.OP_NAME, @@ -254,7 +252,7 @@ public static class Inputs extends RawOpInputsThat is for rows we have grad for, we update var and accum as follows: *

    $$accum = accum * momentum + grad$$ * $$var -= lr * accum$$ - * - * @param data type for {@code out} output */ @OpMetadata( opType = SparseApplyMomentum.OP_NAME, @@ -225,14 +223,14 @@ public static class Inputs extends RawOpInputsl2) * max{|prox_v|-lrl1,0}$$ - * - * @param data type for {@code out} output */ @OpMetadata( opType = SparseApplyProximalAdagrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalGradientDescent.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalGradientDescent.java index 08b098f80ca..3da972089e7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalGradientDescent.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalGradientDescent.java @@ -39,8 +39,6 @@ * That is for rows we have grad for, we update var as follows: * $$prox_v = var - alpha * grad$$ * $$var = sign(prox_v)/(1+alphal2) * max{|prox_v|-alphal1,0}$$ - * - * @param data type for {@code out} output */ @OpMetadata( opType = SparseApplyProximalGradientDescent.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyRmsProp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyRmsProp.java index 3a71d3826c4..3c642ebcf81 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyRmsProp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyRmsProp.java @@ -44,8 +44,6 @@ *

    $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ * $$var <- var - mom$$ - * - * @param data type for {@code out} output */ @OpMetadata( opType = SparseApplyRmsProp.OP_NAME, @@ -220,7 +218,7 @@ public static class Inputs extends RawOpInputs data type for {@code output} output */ @OpMetadata( opType = TileGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/AssignVariableConcatND.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/AssignVariableConcatND.java index ef35e623500..c58943ff50d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/AssignVariableConcatND.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/AssignVariableConcatND.java @@ -30,6 +30,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -69,6 +70,9 @@ opType = AssignVariableConcatND.OP_NAME, inputsClass = AssignVariableConcatND.Inputs.class ) +@Operator( + group = "xla" +) public final class AssignVariableConcatND extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -84,18 +88,8 @@ public AssignVariableConcatND(Operation operation) { * * @param scope current scope * @param resource Resource variable for concatenated input tensors across all dimensions. - * } - * in_arg { - * name: "inputs" - * description: <<END - * Input tensor slices in row-major order to merge across all dimensions. All + * @param inputs Input tensor slices in row-major order to merge across all dimensions. All * inputs must have the same shape. 
- * } - * out_arg { - * name: "output" - * description: <<END - * Output tensor formed from merging input slices based on num_concats defined. - * @param inputs The inputs value * @param numConcats Number of ways to merge per dimension. * @param options carries optional attribute values * @return a new instance of AssignVariableConcatND @@ -193,22 +187,12 @@ public Options paddings(Long... paddings) { public static class Inputs extends RawOpInputs { /** * Resource variable for concatenated input tensors across all dimensions. - * } - * in_arg { - * name: "inputs" - * description: <<END - * Input tensor slices in row-major order to merge across all dimensions. All - * inputs must have the same shape. - * } - * out_arg { - * name: "output" - * description: <<END - * Output tensor formed from merging input slices based on num_concats defined. */ public final Operand resource; /** - * The inputs input + * Input tensor slices in row-major order to merge across all dimensions. All + * inputs must have the same shape. 
*/ public final Iterable> inputs; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ConcatND.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ConcatND.java index 64bcaa1effb..5749305af89 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ConcatND.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ConcatND.java @@ -31,6 +31,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -65,13 +66,14 @@ * [4, 5, 6], * [8, 9, 10]] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = ConcatND.OP_NAME, inputsClass = ConcatND.Inputs.class ) +@Operator( + group = "xla" +) public final class ConcatND extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -92,11 +94,6 @@ public ConcatND(Operation operation) { * @param scope current scope * @param inputs Input tensor slices in row-major order to merge across all dimensions. All * inputs must have the same shape. - * } - * out_arg { - * name: "output" - * description: <<END - * Output tensor formed from merging input slices based on num_concats defined. * @param numConcats Number of ways to merge per dimension. * @param options carries optional attribute values * @param data type for {@code XlaConcatND} output and operands @@ -154,7 +151,7 @@ public static Options paddings(Long... paddings) { /** * Gets output. - * + * Output tensor formed from merging input slices based on num_concats defined. * @return output. */ public Output output() { @@ -209,11 +206,6 @@ public static class Inputs extends RawOpInputs> { /** * Input tensor slices in row-major order to merge across all dimensions. 
All * inputs must have the same shape. - * } - * out_arg { - * name: "output" - * description: <<END - * Output tensor formed from merging input slices based on num_concats defined. */ public final Iterable> inputs; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReadVariableSplitND.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReadVariableSplitND.java index 2839535884d..9788f2927f0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReadVariableSplitND.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReadVariableSplitND.java @@ -32,6 +32,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -66,13 +67,14 @@ * [[8, 0], * [0, 0]] * - * - * @param data type for {@code outputs} output */ @OpMetadata( opType = ReadVariableSplitND.OP_NAME, inputsClass = ReadVariableSplitND.Inputs.class ) +@Operator( + group = "xla" +) public final class ReadVariableSplitND extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine @@ -95,11 +97,6 @@ public ReadVariableSplitND(Operation operation) { * * @param scope current scope * @param resource Resource variable of input tensor to split across all dimensions. - * } - * out_arg { - * name: "outputs" - * description: <<END - * Output slices based on input and num_splits defined, in row-major order. * @param T The value of the T attribute * @param N The value of the N attribute * @param numSplits Number of ways to split per dimension. Shape dimensions must be evenly @@ -161,7 +158,7 @@ public static Options paddings(Long... paddings) { /** * Gets outputs. - * + * Output slices based on input and num_splits defined, in row-major order. 
* @return outputs. */ public List> outputs() { @@ -214,11 +211,6 @@ public Options paddings(Long... paddings) { public static class Inputs extends RawOpInputs> { /** * Resource variable of input tensor to split across all dimensions. - * } - * out_arg { - * name: "outputs" - * description: <<END - * Output slices based on input and num_splits defined, in row-major order. */ public final Operand resource; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SplitND.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SplitND.java index ee8a3a6b29d..299b2f95437 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SplitND.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SplitND.java @@ -31,6 +31,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -65,13 +66,14 @@ * [[8, 0], * [0, 0]] * - * - * @param data type for {@code outputs} output */ @OpMetadata( opType = SplitND.OP_NAME, inputsClass = SplitND.Inputs.class ) +@Operator( + group = "xla" +) public final class SplitND extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine @@ -94,11 +96,6 @@ public SplitND(Operation operation) { * * @param scope current scope * @param input Input tensor to split across all dimensions. - * } - * out_arg { - * name: "outputs" - * description: <<END - * Output slices based on input and num_splits defined, in row-major order. * @param N The value of the N attribute * @param numSplits Number of ways to split per dimension. Shape dimensions must be evenly * divisible. @@ -157,7 +154,7 @@ public static Options paddings(Long... paddings) { /** * Gets outputs. 
- * + * Output slices based on input and num_splits defined, in row-major order. * @return outputs. */ public List> outputs() { @@ -210,11 +207,6 @@ public Options paddings(Long... paddings) { public static class Inputs extends RawOpInputs> { /** * Input tensor to split across all dimensions. - * } - * out_arg { - * name: "outputs" - * description: <<END - * Output slices based on input and num_splits defined, in row-major order. */ public final Operand input; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaHostCompute.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaHostCompute.java index 3c1fbfdd0ea..0b25f9f064a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaHostCompute.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaHostCompute.java @@ -245,12 +245,12 @@ public static class Inputs extends RawOpInputs { public final Iterable> inputs; /** - * The element types of each element in `inputs`. + * The element types of each element in {@code inputs}. */ public final DataType[] Tinputs; /** - * The element types of each element in `outputs`. + * The element types of each element in {@code outputs}. */ public final DataType[] Toutputs; @@ -261,7 +261,7 @@ public static class Inputs extends RawOpInputs { public final String[] ancestors; /** - * If shape_inference_graph is empty, a list of the shapes of `outputs`. + * If shape_inference_graph is empty, a list of the shapes of {@code outputs}. 
*/ public final Shape[] shapes; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaRecvFromHost.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaRecvFromHost.java index c8d5507a673..b05f7199f7a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaRecvFromHost.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaRecvFromHost.java @@ -41,8 +41,6 @@ * Toutput: element type for output. * shape: shape for output. * key: A unique identifier for this region used to match up host transfers. - * - * @param data type for {@code output} output */ @OpMetadata( opType = XlaRecvFromHost.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaRecvTPUEmbeddingActivations.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaRecvTPUEmbeddingActivations.java index 7aa0bedf6a4..b3499a237f0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaRecvTPUEmbeddingActivations.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaRecvTPUEmbeddingActivations.java @@ -49,7 +49,7 @@ public final class XlaRecvTPUEmbeddingActivations extends RawOp implements Itera /** * The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "XlaRecvTPUEmbeddingActivations"; + public static final String OP_NAME = "XlaRecvTPUEmbeddingActivationsV2"; private List> outputs; @@ -63,7 +63,7 @@ public XlaRecvTPUEmbeddingActivations(Operation operation) { } /** - * Factory method to create a class wrapping a new XlaRecvTPUEmbeddingActivations operation. + * Factory method to create a class wrapping a new XlaRecvTPUEmbeddingActivationsV2 operation. 
* * @param scope current scope * @param deduplicationData A Tensor with type=DT_VARIANT containing the deduplication @@ -76,17 +76,24 @@ public XlaRecvTPUEmbeddingActivations(Operation operation) { * present in the tpu embedding config, it is equal to the number of features * otherwise equal to number of embedding tables in the model. * @param config Serialized TPUEmbeddingConfiguration proto. + * @param embeddingPartitions Serialized EmbeddingPartitionsProto proto. + * @param hbmBuffersConfig Serialized HbmBuffersConfig proto. + * @param tpuTopology Serialized TpuTopologyArgsProto proto. * @return a new instance of XlaRecvTPUEmbeddingActivations */ @Endpoint( describeByClass = true ) public static XlaRecvTPUEmbeddingActivations create(Scope scope, - Operand deduplicationData, Long numTables, String config) { + Operand deduplicationData, Long numTables, String config, + String embeddingPartitions, String hbmBuffersConfig, String tpuTopology) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "XlaRecvTPUEmbeddingActivations"); opBuilder.addInput(deduplicationData.asOutput()); opBuilder.setAttr("num_tables", numTables); opBuilder.setAttr("config", config); + opBuilder.setAttr("embedding_partitions", embeddingPartitions); + opBuilder.setAttr("hbm_buffers_config", hbmBuffersConfig); + opBuilder.setAttr("tpu_topology", tpuTopology); return new XlaRecvTPUEmbeddingActivations(opBuilder.build()); } @@ -125,11 +132,29 @@ public static class Inputs extends RawOpInputs { */ public final String config; + /** + * Serialized EmbeddingPartitionsProto proto. + */ + public final String embeddingPartitions; + + /** + * Serialized HbmBuffersConfig proto. + */ + public final String hbmBuffersConfig; + + /** + * Serialized TpuTopologyArgsProto proto. 
+ */ + public final String tpuTopology; + public Inputs(GraphOperation op) { - super(new XlaRecvTPUEmbeddingActivations(op), op, Arrays.asList("config")); + super(new XlaRecvTPUEmbeddingActivations(op), op, Arrays.asList("config", "embedding_partitions", "hbm_buffers_config", "tpu_topology")); int inputIndex = 0; deduplicationData = (Operand) op.input(inputIndex++); config = op.attributes().getAttrString("config"); + embeddingPartitions = op.attributes().getAttrString("embedding_partitions"); + hbmBuffersConfig = op.attributes().getAttrString("hbm_buffers_config"); + tpuTopology = op.attributes().getAttrString("tpu_topology"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaRecvTPUEmbeddingDeduplicationData.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaRecvTPUEmbeddingDeduplicationData.java index 8c0e285d222..a0c18fb338c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaRecvTPUEmbeddingDeduplicationData.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaRecvTPUEmbeddingDeduplicationData.java @@ -48,7 +48,7 @@ public final class XlaRecvTPUEmbeddingDeduplicationData extends RawOp implements /** * The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "XlaRecvTPUEmbeddingDeduplicationData"; + public static final String OP_NAME = "XlaRecvTPUEmbeddingDeduplicationDataV2"; private Output output; @@ -60,18 +60,25 @@ public XlaRecvTPUEmbeddingDeduplicationData(Operation operation) { } /** - * Factory method to create a class wrapping a new XlaRecvTPUEmbeddingDeduplicationData operation. + * Factory method to create a class wrapping a new XlaRecvTPUEmbeddingDeduplicationDataV2 operation. * * @param scope current scope * @param config Serialized TPUEmbeddingConfiguration proto. + * @param embeddingPartitions Serialized EmbeddingPartitionsProto proto. 
+ * @param hbmBuffersConfig Serialized HbmBuffersConfig proto. + * @param tpuTopology Serialized TpuTopologyArgsProto proto. * @return a new instance of XlaRecvTPUEmbeddingDeduplicationData */ @Endpoint( describeByClass = true ) - public static XlaRecvTPUEmbeddingDeduplicationData create(Scope scope, String config) { + public static XlaRecvTPUEmbeddingDeduplicationData create(Scope scope, String config, + String embeddingPartitions, String hbmBuffersConfig, String tpuTopology) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "XlaRecvTPUEmbeddingDeduplicationData"); opBuilder.setAttr("config", config); + opBuilder.setAttr("embedding_partitions", embeddingPartitions); + opBuilder.setAttr("hbm_buffers_config", hbmBuffersConfig); + opBuilder.setAttr("tpu_topology", tpuTopology); return new XlaRecvTPUEmbeddingDeduplicationData(opBuilder.build()); } @@ -99,10 +106,28 @@ public static class Inputs extends RawOpInputs> gradients, Iterable> learningRates, - Operand deduplicationData, String config, Options... options) { + Operand deduplicationData, String config, String embeddingPartitions, + String hbmBuffersConfig, String tpuTopology, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "XlaSendTPUEmbeddingGradients"); opBuilder.addInputList(Operands.asOutputs(gradients)); opBuilder.addInputList(Operands.asOutputs(learningRates)); opBuilder.addInput(deduplicationData.asOutput()); opBuilder.setAttr("config", config); + opBuilder.setAttr("embedding_partitions", embeddingPartitions); + opBuilder.setAttr("hbm_buffers_config", hbmBuffersConfig); + opBuilder.setAttr("tpu_topology", tpuTopology); if (options != null) { for (Options opts : options) { if (opts.NumLearningRateTags != null) { @@ -157,8 +164,23 @@ public static class Inputs extends RawOpInputs { */ public final String config; + /** + * Serialized EmbeddingPartitionsProto proto. + */ + public final String embeddingPartitions; + + /** + * Serialized HbmBuffersConfig proto. 
+ */ + public final String hbmBuffersConfig; + + /** + * Serialized TpuTopologyArgsProto proto. + */ + public final String tpuTopology; + public Inputs(GraphOperation op) { - super(new XlaSendTPUEmbeddingGradients(op), op, Arrays.asList("config")); + super(new XlaSendTPUEmbeddingGradients(op), op, Arrays.asList("config", "embedding_partitions", "hbm_buffers_config", "tpu_topology")); int inputIndex = 0; int gradientsLength = op.inputListLength("gradients"); gradients = Arrays.asList((Operand[]) op.inputList(inputIndex, gradientsLength)); @@ -168,6 +190,9 @@ public Inputs(GraphOperation op) { inputIndex += learningRatesLength; deduplicationData = (Operand) op.input(inputIndex++); config = op.attributes().getAttrString("config"); + embeddingPartitions = op.attributes().getAttrString("embedding_partitions"); + hbmBuffersConfig = op.attributes().getAttrString("hbm_buffers_config"); + tpuTopology = op.attributes().getAttrString("tpu_topology"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseCoreAdagrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseCoreAdagrad.java new file mode 100644 index 00000000000..f4e7db1771e --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseCoreAdagrad.java @@ -0,0 +1,154 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.xla; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; + +/** + * The XlaSparseCoreAdagrad operation + */ +@OpMetadata( + opType = XlaSparseCoreAdagrad.OP_NAME, + inputsClass = XlaSparseCoreAdagrad.Inputs.class +) +@Operator( + group = "xla" +) +public final class XlaSparseCoreAdagrad extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "XlaSparseCoreAdagrad"; + + private Output updatedEmbeddingTable; + + private Output updatedAccumulator; + + public XlaSparseCoreAdagrad(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + updatedEmbeddingTable = operation.output(outputIdx++); + updatedAccumulator = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new XlaSparseCoreAdagrad operation. 
+ * + * @param scope current scope + * @param indices The indices value + * @param gradient The gradient value + * @param learningRate The learningRate value + * @param accumulator The accumulator value + * @param embeddingTable The embeddingTable value + * @param featureWidth The value of the featureWidth attribute + * @return a new instance of XlaSparseCoreAdagrad + */ + @Endpoint( + describeByClass = true + ) + public static XlaSparseCoreAdagrad create(Scope scope, Operand indices, + Operand gradient, Operand learningRate, Operand accumulator, + Operand embeddingTable, Long featureWidth) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "XlaSparseCoreAdagrad"); + opBuilder.addInput(indices.asOutput()); + opBuilder.addInput(gradient.asOutput()); + opBuilder.addInput(learningRate.asOutput()); + opBuilder.addInput(accumulator.asOutput()); + opBuilder.addInput(embeddingTable.asOutput()); + opBuilder.setAttr("feature_width", featureWidth); + return new XlaSparseCoreAdagrad(opBuilder.build()); + } + + /** + * Gets updatedEmbeddingTable. + * + * @return updatedEmbeddingTable. + */ + public Output updatedEmbeddingTable() { + return updatedEmbeddingTable; + } + + /** + * Gets updatedAccumulator. + * + * @return updatedAccumulator. 
+ */ + public Output updatedAccumulator() { + return updatedAccumulator; + } + + @OpInputsMetadata( + outputsClass = XlaSparseCoreAdagrad.class + ) + public static class Inputs extends RawOpInputs { + /** + * The indices input + */ + public final Operand indices; + + /** + * The gradient input + */ + public final Operand gradient; + + /** + * The learningRate input + */ + public final Operand learningRate; + + /** + * The accumulator input + */ + public final Operand accumulator; + + /** + * The embeddingTable input + */ + public final Operand embeddingTable; + + /** + * The featureWidth attribute + */ + public final long featureWidth; + + public Inputs(GraphOperation op) { + super(new XlaSparseCoreAdagrad(op), op, Arrays.asList("feature_width")); + int inputIndex = 0; + indices = (Operand) op.input(inputIndex++); + gradient = (Operand) op.input(inputIndex++); + learningRate = (Operand) op.input(inputIndex++); + accumulator = (Operand) op.input(inputIndex++); + embeddingTable = (Operand) op.input(inputIndex++); + featureWidth = op.attributes().getAttrInt("feature_width"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseCoreAdagradMomentum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseCoreAdagradMomentum.java new file mode 100644 index 00000000000..d65be317989 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseCoreAdagradMomentum.java @@ -0,0 +1,216 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.xla; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; + +/** + * The XlaSparseCoreAdagradMomentum operation + */ +@OpMetadata( + opType = XlaSparseCoreAdagradMomentum.OP_NAME, + inputsClass = XlaSparseCoreAdagradMomentum.Inputs.class +) +@Operator( + group = "xla" +) +public final class XlaSparseCoreAdagradMomentum extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "XlaSparseCoreAdagradMomentum"; + + private Output updatedEmbeddingTable; + + private Output updatedAccumulator; + + private Output updatedMomentum; + + public XlaSparseCoreAdagradMomentum(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + updatedEmbeddingTable = operation.output(outputIdx++); + updatedAccumulator = operation.output(outputIdx++); + updatedMomentum = operation.output(outputIdx++); + } + + /** + * Factory method to create a 
class wrapping a new XlaSparseCoreAdagradMomentum operation. + * + * @param scope current scope + * @param indices The indices value + * @param gradient The gradient value + * @param learningRate The learningRate value + * @param beta1 The beta1 value + * @param epsilon The epsilon value + * @param accumulator The accumulator value + * @param momentum The momentum value + * @param embeddingTable The embeddingTable value + * @param featureWidth The value of the featureWidth attribute + * @param useNesterov The value of the useNesterov attribute + * @param beta2 The value of the beta2 attribute + * @param exponent The value of the exponent attribute + * @return a new instance of XlaSparseCoreAdagradMomentum + */ + @Endpoint( + describeByClass = true + ) + public static XlaSparseCoreAdagradMomentum create(Scope scope, Operand indices, + Operand gradient, Operand learningRate, Operand beta1, + Operand epsilon, Operand accumulator, Operand momentum, + Operand embeddingTable, Long featureWidth, Boolean useNesterov, Float beta2, + Float exponent) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "XlaSparseCoreAdagradMomentum"); + opBuilder.addInput(indices.asOutput()); + opBuilder.addInput(gradient.asOutput()); + opBuilder.addInput(learningRate.asOutput()); + opBuilder.addInput(beta1.asOutput()); + opBuilder.addInput(epsilon.asOutput()); + opBuilder.addInput(accumulator.asOutput()); + opBuilder.addInput(momentum.asOutput()); + opBuilder.addInput(embeddingTable.asOutput()); + opBuilder.setAttr("feature_width", featureWidth); + opBuilder.setAttr("use_nesterov", useNesterov); + opBuilder.setAttr("beta_2", beta2); + opBuilder.setAttr("exponent", exponent); + return new XlaSparseCoreAdagradMomentum(opBuilder.build()); + } + + /** + * Gets updatedEmbeddingTable. + * + * @return updatedEmbeddingTable. + */ + public Output updatedEmbeddingTable() { + return updatedEmbeddingTable; + } + + /** + * Gets updatedAccumulator. + * + * @return updatedAccumulator. 
+ */ + public Output updatedAccumulator() { + return updatedAccumulator; + } + + /** + * Gets updatedMomentum. + * + * @return updatedMomentum. + */ + public Output updatedMomentum() { + return updatedMomentum; + } + + @OpInputsMetadata( + outputsClass = XlaSparseCoreAdagradMomentum.class + ) + public static class Inputs extends RawOpInputs { + /** + * The indices input + */ + public final Operand indices; + + /** + * The gradient input + */ + public final Operand gradient; + + /** + * The learningRate input + */ + public final Operand learningRate; + + /** + * The beta1 input + */ + public final Operand beta1; + + /** + * The epsilon input + */ + public final Operand epsilon; + + /** + * The accumulator input + */ + public final Operand accumulator; + + /** + * The momentum input + */ + public final Operand momentum; + + /** + * The embeddingTable input + */ + public final Operand embeddingTable; + + /** + * The featureWidth attribute + */ + public final long featureWidth; + + /** + * The useNesterov attribute + */ + public final boolean useNesterov; + + /** + * The beta2 attribute + */ + public final float beta2; + + /** + * The exponent attribute + */ + public final float exponent; + + public Inputs(GraphOperation op) { + super(new XlaSparseCoreAdagradMomentum(op), op, Arrays.asList("feature_width", "use_nesterov", "beta_2", "exponent")); + int inputIndex = 0; + indices = (Operand) op.input(inputIndex++); + gradient = (Operand) op.input(inputIndex++); + learningRate = (Operand) op.input(inputIndex++); + beta1 = (Operand) op.input(inputIndex++); + epsilon = (Operand) op.input(inputIndex++); + accumulator = (Operand) op.input(inputIndex++); + momentum = (Operand) op.input(inputIndex++); + embeddingTable = (Operand) op.input(inputIndex++); + featureWidth = op.attributes().getAttrInt("feature_width"); + useNesterov = op.attributes().getAttrBool("use_nesterov"); + beta2 = op.attributes().getAttrFloat("beta_2"); + exponent = op.attributes().getAttrFloat("exponent"); + 
} + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseCoreAdam.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseCoreAdam.java new file mode 100644 index 00000000000..8ec32200fad --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseCoreAdam.java @@ -0,0 +1,208 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.xla; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; + +/** + * The XlaSparseCoreAdam operation + */ +@OpMetadata( + opType = XlaSparseCoreAdam.OP_NAME, + inputsClass = XlaSparseCoreAdam.Inputs.class +) +@Operator( + group = "xla" +) +public final class XlaSparseCoreAdam extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "XlaSparseCoreAdam"; + + private Output updatedEmbeddingTable; + + private Output updatedVelocity; + + private Output updatedMomentum; + + public XlaSparseCoreAdam(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + updatedEmbeddingTable = operation.output(outputIdx++); + updatedVelocity = operation.output(outputIdx++); + updatedMomentum = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new XlaSparseCoreAdam operation. 
+ * + * @param scope current scope + * @param embeddingTable The embeddingTable value + * @param indices The indices value + * @param gradient The gradient value + * @param learningRate The learningRate value + * @param momentum The momentum value + * @param velocity The velocity value + * @param beta1 The beta1 value + * @param beta2 The beta2 value + * @param epsilon The epsilon value + * @param featureWidth The value of the featureWidth attribute + * @param useSumInsideSqrt The value of the useSumInsideSqrt attribute + * @return a new instance of XlaSparseCoreAdam + */ + @Endpoint( + describeByClass = true + ) + public static XlaSparseCoreAdam create(Scope scope, Operand embeddingTable, + Operand indices, Operand gradient, Operand learningRate, + Operand momentum, Operand velocity, Operand beta1, + Operand beta2, Operand epsilon, Long featureWidth, + Boolean useSumInsideSqrt) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "XlaSparseCoreAdam"); + opBuilder.addInput(embeddingTable.asOutput()); + opBuilder.addInput(indices.asOutput()); + opBuilder.addInput(gradient.asOutput()); + opBuilder.addInput(learningRate.asOutput()); + opBuilder.addInput(momentum.asOutput()); + opBuilder.addInput(velocity.asOutput()); + opBuilder.addInput(beta1.asOutput()); + opBuilder.addInput(beta2.asOutput()); + opBuilder.addInput(epsilon.asOutput()); + opBuilder.setAttr("feature_width", featureWidth); + opBuilder.setAttr("use_sum_inside_sqrt", useSumInsideSqrt); + return new XlaSparseCoreAdam(opBuilder.build()); + } + + /** + * Gets updatedEmbeddingTable. + * + * @return updatedEmbeddingTable. + */ + public Output updatedEmbeddingTable() { + return updatedEmbeddingTable; + } + + /** + * Gets updatedVelocity. + * + * @return updatedVelocity. + */ + public Output updatedVelocity() { + return updatedVelocity; + } + + /** + * Gets updatedMomentum. + * + * @return updatedMomentum. 
+ */ + public Output updatedMomentum() { + return updatedMomentum; + } + + @OpInputsMetadata( + outputsClass = XlaSparseCoreAdam.class + ) + public static class Inputs extends RawOpInputs { + /** + * The embeddingTable input + */ + public final Operand embeddingTable; + + /** + * The indices input + */ + public final Operand indices; + + /** + * The gradient input + */ + public final Operand gradient; + + /** + * The learningRate input + */ + public final Operand learningRate; + + /** + * The momentum input + */ + public final Operand momentum; + + /** + * The velocity input + */ + public final Operand velocity; + + /** + * The beta1 input + */ + public final Operand beta1; + + /** + * The beta2 input + */ + public final Operand beta2; + + /** + * The epsilon input + */ + public final Operand epsilon; + + /** + * The featureWidth attribute + */ + public final long featureWidth; + + /** + * The useSumInsideSqrt attribute + */ + public final boolean useSumInsideSqrt; + + public Inputs(GraphOperation op) { + super(new XlaSparseCoreAdam(op), op, Arrays.asList("feature_width", "use_sum_inside_sqrt")); + int inputIndex = 0; + embeddingTable = (Operand) op.input(inputIndex++); + indices = (Operand) op.input(inputIndex++); + gradient = (Operand) op.input(inputIndex++); + learningRate = (Operand) op.input(inputIndex++); + momentum = (Operand) op.input(inputIndex++); + velocity = (Operand) op.input(inputIndex++); + beta1 = (Operand) op.input(inputIndex++); + beta2 = (Operand) op.input(inputIndex++); + epsilon = (Operand) op.input(inputIndex++); + featureWidth = op.attributes().getAttrInt("feature_width"); + useSumInsideSqrt = op.attributes().getAttrBool("use_sum_inside_sqrt"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseCoreFtrl.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseCoreFtrl.java new file mode 100644 index 00000000000..5ab961123b4 --- /dev/null +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseCoreFtrl.java @@ -0,0 +1,216 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.xla; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; + +/** + * The XlaSparseCoreFtrl operation + */ +@OpMetadata( + opType = XlaSparseCoreFtrl.OP_NAME, + inputsClass = XlaSparseCoreFtrl.Inputs.class +) +@Operator( + group = "xla" +) +public final class XlaSparseCoreFtrl extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "XlaSparseCoreFtrl"; + + private Output updatedEmbeddingTable; + + private Output updatedAccumulator; + + private Output updatedLinear; + + public XlaSparseCoreFtrl(Operation operation) { + 
super(operation, OP_NAME); + int outputIdx = 0; + updatedEmbeddingTable = operation.output(outputIdx++); + updatedAccumulator = operation.output(outputIdx++); + updatedLinear = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new XlaSparseCoreFtrl operation. + * + * @param scope current scope + * @param embeddingTable The embeddingTable value + * @param accumulator The accumulator value + * @param linear The linear value + * @param learningRate The learningRate value + * @param indices The indices value + * @param gradient The gradient value + * @param beta The beta value + * @param learningRatePower The learningRatePower value + * @param l2RegularizationStrength The l2RegularizationStrength value + * @param featureWidth The value of the featureWidth attribute + * @param multiplyLinearByLearningRate The value of the multiplyLinearByLearningRate attribute + * @param l1RegularizationStrength The value of the l1RegularizationStrength attribute + * @return a new instance of XlaSparseCoreFtrl + */ + @Endpoint( + describeByClass = true + ) + public static XlaSparseCoreFtrl create(Scope scope, Operand embeddingTable, + Operand accumulator, Operand linear, Operand learningRate, + Operand indices, Operand gradient, Operand beta, + Operand learningRatePower, Operand l2RegularizationStrength, + Long featureWidth, Boolean multiplyLinearByLearningRate, Float l1RegularizationStrength) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "XlaSparseCoreFtrl"); + opBuilder.addInput(embeddingTable.asOutput()); + opBuilder.addInput(accumulator.asOutput()); + opBuilder.addInput(linear.asOutput()); + opBuilder.addInput(learningRate.asOutput()); + opBuilder.addInput(indices.asOutput()); + opBuilder.addInput(gradient.asOutput()); + opBuilder.addInput(beta.asOutput()); + opBuilder.addInput(learningRatePower.asOutput()); + opBuilder.addInput(l2RegularizationStrength.asOutput()); + opBuilder.setAttr("feature_width", featureWidth); + 
opBuilder.setAttr("multiply_linear_by_learning_rate", multiplyLinearByLearningRate); + opBuilder.setAttr("l1_regularization_strength", l1RegularizationStrength); + return new XlaSparseCoreFtrl(opBuilder.build()); + } + + /** + * Gets updatedEmbeddingTable. + * + * @return updatedEmbeddingTable. + */ + public Output updatedEmbeddingTable() { + return updatedEmbeddingTable; + } + + /** + * Gets updatedAccumulator. + * + * @return updatedAccumulator. + */ + public Output updatedAccumulator() { + return updatedAccumulator; + } + + /** + * Gets updatedLinear. + * + * @return updatedLinear. + */ + public Output updatedLinear() { + return updatedLinear; + } + + @OpInputsMetadata( + outputsClass = XlaSparseCoreFtrl.class + ) + public static class Inputs extends RawOpInputs { + /** + * The embeddingTable input + */ + public final Operand embeddingTable; + + /** + * The accumulator input + */ + public final Operand accumulator; + + /** + * The linear input + */ + public final Operand linear; + + /** + * The learningRate input + */ + public final Operand learningRate; + + /** + * The indices input + */ + public final Operand indices; + + /** + * The gradient input + */ + public final Operand gradient; + + /** + * The beta input + */ + public final Operand beta; + + /** + * The learningRatePower input + */ + public final Operand learningRatePower; + + /** + * The l2RegularizationStrength input + */ + public final Operand l2RegularizationStrength; + + /** + * The featureWidth attribute + */ + public final long featureWidth; + + /** + * The multiplyLinearByLearningRate attribute + */ + public final boolean multiplyLinearByLearningRate; + + /** + * The l1RegularizationStrength attribute + */ + public final float l1RegularizationStrength; + + public Inputs(GraphOperation op) { + super(new XlaSparseCoreFtrl(op), op, Arrays.asList("feature_width", "multiply_linear_by_learning_rate", "l1_regularization_strength")); + int inputIndex = 0; + embeddingTable = (Operand) 
op.input(inputIndex++); + accumulator = (Operand) op.input(inputIndex++); + linear = (Operand) op.input(inputIndex++); + learningRate = (Operand) op.input(inputIndex++); + indices = (Operand) op.input(inputIndex++); + gradient = (Operand) op.input(inputIndex++); + beta = (Operand) op.input(inputIndex++); + learningRatePower = (Operand) op.input(inputIndex++); + l2RegularizationStrength = (Operand) op.input(inputIndex++); + featureWidth = op.attributes().getAttrInt("feature_width"); + multiplyLinearByLearningRate = op.attributes().getAttrBool("multiply_linear_by_learning_rate"); + l1RegularizationStrength = op.attributes().getAttrFloat("l1_regularization_strength"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseCoreSgd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseCoreSgd.java new file mode 100644 index 00000000000..70830868226 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseCoreSgd.java @@ -0,0 +1,139 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.xla; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; + +/** + * The XlaSparseCoreSgd operation + */ +@OpMetadata( + opType = XlaSparseCoreSgd.OP_NAME, + inputsClass = XlaSparseCoreSgd.Inputs.class +) +@Operator( + group = "xla" +) +public final class XlaSparseCoreSgd extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "XlaSparseCoreSgd"; + + private Output updatedEmbeddingTable; + + public XlaSparseCoreSgd(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + updatedEmbeddingTable = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new XlaSparseCoreSgd operation. 
+ * + * @param scope current scope + * @param indices The indices value + * @param gradient The gradient value + * @param learningRate The learningRate value + * @param embeddingTable The embeddingTable value + * @param featureWidth The value of the featureWidth attribute + * @return a new instance of XlaSparseCoreSgd + */ + @Endpoint( + describeByClass = true + ) + public static XlaSparseCoreSgd create(Scope scope, Operand indices, + Operand gradient, Operand learningRate, Operand embeddingTable, + Long featureWidth) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "XlaSparseCoreSgd"); + opBuilder.addInput(indices.asOutput()); + opBuilder.addInput(gradient.asOutput()); + opBuilder.addInput(learningRate.asOutput()); + opBuilder.addInput(embeddingTable.asOutput()); + opBuilder.setAttr("feature_width", featureWidth); + return new XlaSparseCoreSgd(opBuilder.build()); + } + + /** + * Gets updatedEmbeddingTable. + * + * @return updatedEmbeddingTable. + */ + public Output updatedEmbeddingTable() { + return updatedEmbeddingTable; + } + + @Override + public Output asOutput() { + return updatedEmbeddingTable; + } + + @OpInputsMetadata( + outputsClass = XlaSparseCoreSgd.class + ) + public static class Inputs extends RawOpInputs { + /** + * The indices input + */ + public final Operand indices; + + /** + * The gradient input + */ + public final Operand gradient; + + /** + * The learningRate input + */ + public final Operand learningRate; + + /** + * The embeddingTable input + */ + public final Operand embeddingTable; + + /** + * The featureWidth attribute + */ + public final long featureWidth; + + public Inputs(GraphOperation op) { + super(new XlaSparseCoreSgd(op), op, Arrays.asList("feature_width")); + int inputIndex = 0; + indices = (Operand) op.input(inputIndex++); + gradient = (Operand) op.input(inputIndex++); + learningRate = (Operand) op.input(inputIndex++); + embeddingTable = (Operand) op.input(inputIndex++); + featureWidth = 
op.attributes().getAttrInt("feature_width"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmul.java new file mode 100644 index 00000000000..0d3fab06b13 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmul.java @@ -0,0 +1,208 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.xla; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.family.TType; + +/** + * The XlaSparseDenseMatmul operation + */ +@OpMetadata( + opType = XlaSparseDenseMatmul.OP_NAME, + inputsClass = XlaSparseDenseMatmul.Inputs.class +) +@Operator( + group = "xla" +) +public final class XlaSparseDenseMatmul extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "XlaSparseDenseMatmul"; + + private Output activations; + + private Output rowPointers; + + private Output sortedEmbeddingIds; + + private Output sortedSampleIds; + + private Output sortedGains; + + public XlaSparseDenseMatmul(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + activations = operation.output(outputIdx++); + rowPointers = operation.output(outputIdx++); + sortedEmbeddingIds = operation.output(outputIdx++); + sortedSampleIds = operation.output(outputIdx++); + sortedGains = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new XlaSparseDenseMatmul operation. 
+ * + * @param scope current scope + * @param rowIds The rowIds value + * @param colIds The colIds value + * @param values The values value + * @param offsets The offsets value + * @param embeddingTable The embeddingTable value + * @param maxIdsPerPartition The value of the maxIdsPerPartition attribute + * @param maxUniqueIdsPerPartition The value of the maxUniqueIdsPerPartition attribute + * @param inputSize The value of the inputSize attribute + * @return a new instance of XlaSparseDenseMatmul + */ + @Endpoint( + describeByClass = true + ) + public static XlaSparseDenseMatmul create(Scope scope, Operand rowIds, + Operand colIds, Operand values, Operand offsets, + Operand embeddingTable, Long maxIdsPerPartition, Long maxUniqueIdsPerPartition, + Long inputSize) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "XlaSparseDenseMatmul"); + opBuilder.addInput(rowIds.asOutput()); + opBuilder.addInput(colIds.asOutput()); + opBuilder.addInput(values.asOutput()); + opBuilder.addInput(offsets.asOutput()); + opBuilder.addInput(embeddingTable.asOutput()); + opBuilder.setAttr("max_ids_per_partition", maxIdsPerPartition); + opBuilder.setAttr("max_unique_ids_per_partition", maxUniqueIdsPerPartition); + opBuilder.setAttr("input_size", inputSize); + return new XlaSparseDenseMatmul(opBuilder.build()); + } + + /** + * Gets activations. + * + * @return activations. + */ + public Output activations() { + return activations; + } + + /** + * Gets rowPointers. + * + * @return rowPointers. + */ + public Output rowPointers() { + return rowPointers; + } + + /** + * Gets sortedEmbeddingIds. + * + * @return sortedEmbeddingIds. + */ + public Output sortedEmbeddingIds() { + return sortedEmbeddingIds; + } + + /** + * Gets sortedSampleIds. + * + * @return sortedSampleIds. + */ + public Output sortedSampleIds() { + return sortedSampleIds; + } + + /** + * Gets sortedGains. + * + * @return sortedGains. 
+ */ + public Output sortedGains() { + return sortedGains; + } + + @OpInputsMetadata( + outputsClass = XlaSparseDenseMatmul.class + ) + public static class Inputs extends RawOpInputs { + /** + * The rowIds input + */ + public final Operand rowIds; + + /** + * The colIds input + */ + public final Operand colIds; + + /** + * The values input + */ + public final Operand values; + + /** + * The offsets input + */ + public final Operand offsets; + + /** + * The embeddingTable input + */ + public final Operand embeddingTable; + + /** + * The maxIdsPerPartition attribute + */ + public final long maxIdsPerPartition; + + /** + * The maxUniqueIdsPerPartition attribute + */ + public final long maxUniqueIdsPerPartition; + + /** + * The inputSize attribute + */ + public final long inputSize; + + public Inputs(GraphOperation op) { + super(new XlaSparseDenseMatmul(op), op, Arrays.asList("max_ids_per_partition", "max_unique_ids_per_partition", "input_size")); + int inputIndex = 0; + rowIds = (Operand) op.input(inputIndex++); + colIds = (Operand) op.input(inputIndex++); + values = (Operand) op.input(inputIndex++); + offsets = (Operand) op.input(inputIndex++); + embeddingTable = (Operand) op.input(inputIndex++); + maxIdsPerPartition = op.attributes().getAttrInt("max_ids_per_partition"); + maxUniqueIdsPerPartition = op.attributes().getAttrInt("max_unique_ids_per_partition"); + inputSize = op.attributes().getAttrInt("input_size"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithAdagradAndCsrInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithAdagradAndCsrInput.java new file mode 100644 index 00000000000..b63cba97719 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithAdagradAndCsrInput.java @@ -0,0 +1,266 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.xla; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; + +/** + * The XlaSparseDenseMatmulGradWithAdagradAndCsrInput operation + */ +@OpMetadata( + opType = XlaSparseDenseMatmulGradWithAdagradAndCsrInput.OP_NAME, + inputsClass = XlaSparseDenseMatmulGradWithAdagradAndCsrInput.Inputs.class +) +@Operator( + group = "xla" +) +public final class XlaSparseDenseMatmulGradWithAdagradAndCsrInput extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "XlaSparseDenseMatmulGradWithAdagradAndCsrInput"; + + private Output updatedEmbeddingTable; + + private Output updatedAccumulator; + + public XlaSparseDenseMatmulGradWithAdagradAndCsrInput(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; 
+ updatedEmbeddingTable = operation.output(outputIdx++); + updatedAccumulator = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new XlaSparseDenseMatmulGradWithAdagradAndCsrInput operation. + * + * @param scope current scope + * @param rowPointers The rowPointers value + * @param sortedSampleIds The sortedSampleIds value + * @param sortedTokenIds The sortedTokenIds value + * @param sortedGains The sortedGains value + * @param activationGradients The activationGradients value + * @param learningRate The learningRate value + * @param embeddingTable The embeddingTable value + * @param accumulator The accumulator value + * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value + * @param tableName The value of the tableName attribute + * @param options carries optional attribute values + * @return a new instance of XlaSparseDenseMatmulGradWithAdagradAndCsrInput + */ + @Endpoint( + describeByClass = true + ) + public static XlaSparseDenseMatmulGradWithAdagradAndCsrInput create(Scope scope, + Operand rowPointers, Operand sortedSampleIds, Operand sortedTokenIds, + Operand sortedGains, Operand activationGradients, + Operand learningRate, Operand embeddingTable, + Operand accumulator, Operand numMinibatchesPerPhysicalSparseCore, + String tableName, Options... 
options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "XlaSparseDenseMatmulGradWithAdagradAndCsrInput"); + opBuilder.addInput(rowPointers.asOutput()); + opBuilder.addInput(sortedSampleIds.asOutput()); + opBuilder.addInput(sortedTokenIds.asOutput()); + opBuilder.addInput(sortedGains.asOutput()); + opBuilder.addInput(activationGradients.asOutput()); + opBuilder.addInput(learningRate.asOutput()); + opBuilder.addInput(embeddingTable.asOutput()); + opBuilder.addInput(accumulator.asOutput()); + opBuilder.addInput(numMinibatchesPerPhysicalSparseCore.asOutput()); + opBuilder.setAttr("table_name", tableName); + if (options != null) { + for (Options opts : options) { + if (opts.clipWeightMin != null) { + opBuilder.setAttr("clip_weight_min", opts.clipWeightMin); + } + if (opts.clipWeightMax != null) { + opBuilder.setAttr("clip_weight_max", opts.clipWeightMax); + } + } + } + return new XlaSparseDenseMatmulGradWithAdagradAndCsrInput(opBuilder.build()); + } + + /** + * Sets the clipWeightMin option. + * + * @param clipWeightMin the clipWeightMin option + * @return this Options instance. + */ + public static Options clipWeightMin(Float clipWeightMin) { + return new Options().clipWeightMin(clipWeightMin); + } + + /** + * Sets the clipWeightMax option. + * + * @param clipWeightMax the clipWeightMax option + * @return this Options instance. + */ + public static Options clipWeightMax(Float clipWeightMax) { + return new Options().clipWeightMax(clipWeightMax); + } + + /** + * Gets updatedEmbeddingTable. + * + * @return updatedEmbeddingTable. + */ + public Output updatedEmbeddingTable() { + return updatedEmbeddingTable; + } + + /** + * Gets updatedAccumulator. + * + * @return updatedAccumulator. 
+ */ + public Output updatedAccumulator() { + return updatedAccumulator; + } + + /** + * Optional attributes for {@link org.tensorflow.op.xla.XlaSparseDenseMatmulGradWithAdagradAndCsrInput} + */ + public static class Options { + private Float clipWeightMin; + + private Float clipWeightMax; + + private Options() { + } + + /** + * Sets the clipWeightMin option. + * + * @param clipWeightMin the clipWeightMin option + * @return this Options instance. + */ + public Options clipWeightMin(Float clipWeightMin) { + this.clipWeightMin = clipWeightMin; + return this; + } + + /** + * Sets the clipWeightMax option. + * + * @param clipWeightMax the clipWeightMax option + * @return this Options instance. + */ + public Options clipWeightMax(Float clipWeightMax) { + this.clipWeightMax = clipWeightMax; + return this; + } + } + + @OpInputsMetadata( + outputsClass = XlaSparseDenseMatmulGradWithAdagradAndCsrInput.class + ) + public static class Inputs extends RawOpInputs { + /** + * The rowPointers input + */ + public final Operand rowPointers; + + /** + * The sortedSampleIds input + */ + public final Operand sortedSampleIds; + + /** + * The sortedTokenIds input + */ + public final Operand sortedTokenIds; + + /** + * The sortedGains input + */ + public final Operand sortedGains; + + /** + * The activationGradients input + */ + public final Operand activationGradients; + + /** + * The learningRate input + */ + public final Operand learningRate; + + /** + * The embeddingTable input + */ + public final Operand embeddingTable; + + /** + * The accumulator input + */ + public final Operand accumulator; + + /** + * The numMinibatchesPerPhysicalSparseCore input + */ + public final Operand numMinibatchesPerPhysicalSparseCore; + + /** + * The clipWeightMin attribute + */ + public final float clipWeightMin; + + /** + * The clipWeightMax attribute + */ + public final float clipWeightMax; + + /** + * The tableName attribute + */ + public final String tableName; + + public Inputs(GraphOperation op) 
{ + super(new XlaSparseDenseMatmulGradWithAdagradAndCsrInput(op), op, Arrays.asList("clip_weight_min", "clip_weight_max", "table_name")); + int inputIndex = 0; + rowPointers = (Operand) op.input(inputIndex++); + sortedSampleIds = (Operand) op.input(inputIndex++); + sortedTokenIds = (Operand) op.input(inputIndex++); + sortedGains = (Operand) op.input(inputIndex++); + activationGradients = (Operand) op.input(inputIndex++); + learningRate = (Operand) op.input(inputIndex++); + embeddingTable = (Operand) op.input(inputIndex++); + accumulator = (Operand) op.input(inputIndex++); + numMinibatchesPerPhysicalSparseCore = (Operand) op.input(inputIndex++); + clipWeightMin = op.attributes().getAttrFloat("clip_weight_min"); + clipWeightMax = op.attributes().getAttrFloat("clip_weight_max"); + tableName = op.attributes().getAttrString("table_name"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize.java new file mode 100644 index 00000000000..f8ab65a6ee1 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize.java @@ -0,0 +1,279 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.xla; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; + +/** + * The XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize operation + */ +@OpMetadata( + opType = XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize.OP_NAME, + inputsClass = XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize.Inputs.class +) +public final class XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize"; + + private Output updatedEmbeddingTable; + + private Output updatedAccumulator; + + public XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + updatedEmbeddingTable = operation.output(outputIdx++); + updatedAccumulator = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize operation. 
+ * + * @param scope current scope + * @param rowPointers The rowPointers value + * @param sortedSampleIds The sortedSampleIds value + * @param sortedTokenIds The sortedTokenIds value + * @param sortedGains The sortedGains value + * @param activationGradients The activationGradients value + * @param learningRate The learningRate value + * @param embeddingTable The embeddingTable value + * @param accumulator The accumulator value + * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value + * @param maxIdsPerSparseCore The value of the maxIdsPerSparseCore attribute + * @param maxUniqueIdsPerSparseCore The value of the maxUniqueIdsPerSparseCore attribute + * @param tableName The value of the tableName attribute + * @param options carries optional attribute values + * @return a new instance of XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize + */ + @Endpoint( + describeByClass = true + ) + public static XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize create(Scope scope, + Operand rowPointers, Operand sortedSampleIds, Operand sortedTokenIds, + Operand sortedGains, Operand activationGradients, + Operand learningRate, Operand embeddingTable, + Operand accumulator, Operand numMinibatchesPerPhysicalSparseCore, + Long maxIdsPerSparseCore, Long maxUniqueIdsPerSparseCore, String tableName, + Options... 
options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize"); + opBuilder.addInput(rowPointers.asOutput()); + opBuilder.addInput(sortedSampleIds.asOutput()); + opBuilder.addInput(sortedTokenIds.asOutput()); + opBuilder.addInput(sortedGains.asOutput()); + opBuilder.addInput(activationGradients.asOutput()); + opBuilder.addInput(learningRate.asOutput()); + opBuilder.addInput(embeddingTable.asOutput()); + opBuilder.addInput(accumulator.asOutput()); + opBuilder.addInput(numMinibatchesPerPhysicalSparseCore.asOutput()); + opBuilder.setAttr("max_ids_per_sparse_core", maxIdsPerSparseCore); + opBuilder.setAttr("max_unique_ids_per_sparse_core", maxUniqueIdsPerSparseCore); + opBuilder.setAttr("table_name", tableName); + if (options != null) { + for (Options opts : options) { + if (opts.clipWeightMin != null) { + opBuilder.setAttr("clip_weight_min", opts.clipWeightMin); + } + if (opts.clipWeightMax != null) { + opBuilder.setAttr("clip_weight_max", opts.clipWeightMax); + } + } + } + return new XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize(opBuilder.build()); + } + + /** + * Sets the clipWeightMin option. + * + * @param clipWeightMin the clipWeightMin option + * @return this Options instance. + */ + public static Options clipWeightMin(Float clipWeightMin) { + return new Options().clipWeightMin(clipWeightMin); + } + + /** + * Sets the clipWeightMax option. + * + * @param clipWeightMax the clipWeightMax option + * @return this Options instance. + */ + public static Options clipWeightMax(Float clipWeightMax) { + return new Options().clipWeightMax(clipWeightMax); + } + + /** + * Gets updatedEmbeddingTable. + * + * @return updatedEmbeddingTable. + */ + public Output updatedEmbeddingTable() { + return updatedEmbeddingTable; + } + + /** + * Gets updatedAccumulator. + * + * @return updatedAccumulator. 
+ */ + public Output updatedAccumulator() { + return updatedAccumulator; + } + + /** + * Optional attributes for {@link org.tensorflow.op.xla.XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize} + */ + public static class Options { + private Float clipWeightMin; + + private Float clipWeightMax; + + private Options() { + } + + /** + * Sets the clipWeightMin option. + * + * @param clipWeightMin the clipWeightMin option + * @return this Options instance. + */ + public Options clipWeightMin(Float clipWeightMin) { + this.clipWeightMin = clipWeightMin; + return this; + } + + /** + * Sets the clipWeightMax option. + * + * @param clipWeightMax the clipWeightMax option + * @return this Options instance. + */ + public Options clipWeightMax(Float clipWeightMax) { + this.clipWeightMax = clipWeightMax; + return this; + } + } + + @OpInputsMetadata( + outputsClass = XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize.class + ) + public static class Inputs extends RawOpInputs { + /** + * The rowPointers input + */ + public final Operand rowPointers; + + /** + * The sortedSampleIds input + */ + public final Operand sortedSampleIds; + + /** + * The sortedTokenIds input + */ + public final Operand sortedTokenIds; + + /** + * The sortedGains input + */ + public final Operand sortedGains; + + /** + * The activationGradients input + */ + public final Operand activationGradients; + + /** + * The learningRate input + */ + public final Operand learningRate; + + /** + * The embeddingTable input + */ + public final Operand embeddingTable; + + /** + * The accumulator input + */ + public final Operand accumulator; + + /** + * The numMinibatchesPerPhysicalSparseCore input + */ + public final Operand numMinibatchesPerPhysicalSparseCore; + + /** + * The clipWeightMin attribute + */ + public final float clipWeightMin; + + /** + * The clipWeightMax attribute + */ + public final float clipWeightMax; + + /** + * The maxIdsPerSparseCore attribute + */ + public final long maxIdsPerSparseCore; + + 
/** + * The maxUniqueIdsPerSparseCore attribute + */ + public final long maxUniqueIdsPerSparseCore; + + /** + * The tableName attribute + */ + public final String tableName; + + public Inputs(GraphOperation op) { + super(new XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize(op), op, Arrays.asList("clip_weight_min", "clip_weight_max", "max_ids_per_sparse_core", "max_unique_ids_per_sparse_core", "table_name")); + int inputIndex = 0; + rowPointers = (Operand) op.input(inputIndex++); + sortedSampleIds = (Operand) op.input(inputIndex++); + sortedTokenIds = (Operand) op.input(inputIndex++); + sortedGains = (Operand) op.input(inputIndex++); + activationGradients = (Operand) op.input(inputIndex++); + learningRate = (Operand) op.input(inputIndex++); + embeddingTable = (Operand) op.input(inputIndex++); + accumulator = (Operand) op.input(inputIndex++); + numMinibatchesPerPhysicalSparseCore = (Operand) op.input(inputIndex++); + clipWeightMin = op.attributes().getAttrFloat("clip_weight_min"); + clipWeightMax = op.attributes().getAttrFloat("clip_weight_max"); + maxIdsPerSparseCore = op.attributes().getAttrInt("max_ids_per_sparse_core"); + maxUniqueIdsPerSparseCore = op.attributes().getAttrInt("max_unique_ids_per_sparse_core"); + tableName = op.attributes().getAttrString("table_name"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput.java new file mode 100644 index 00000000000..faa117af196 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput.java @@ -0,0 +1,327 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.xla; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; + +/** + * The XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput operation + */ +@OpMetadata( + opType = XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput.OP_NAME, + inputsClass = XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput.Inputs.class +) +@Operator( + group = "xla" +) +public final class XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput"; + + private Output updatedEmbeddingTable; + + private Output updatedAccumulator; + + private Output updatedMomenta; + + public 
XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + updatedEmbeddingTable = operation.output(outputIdx++); + updatedAccumulator = operation.output(outputIdx++); + updatedMomenta = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput operation. + * + * @param scope current scope + * @param rowPointers The rowPointers value + * @param sortedSampleIds The sortedSampleIds value + * @param sortedTokenIds The sortedTokenIds value + * @param sortedGains The sortedGains value + * @param activationGradients The activationGradients value + * @param learningRate The learningRate value + * @param embeddingTable The embeddingTable value + * @param accumulator The accumulator value + * @param momenta The momenta value + * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value + * @param useNesterov The value of the useNesterov attribute + * @param exponent The value of the exponent attribute + * @param beta1 The value of the beta1 attribute + * @param beta2 The value of the beta2 attribute + * @param epsilon The value of the epsilon attribute + * @param tableName The value of the tableName attribute + * @param options carries optional attribute values + * @return a new instance of XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput + */ + @Endpoint( + describeByClass = true + ) + public static XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput create(Scope scope, + Operand rowPointers, Operand sortedSampleIds, Operand sortedTokenIds, + Operand sortedGains, Operand activationGradients, + Operand learningRate, Operand embeddingTable, + Operand accumulator, Operand momenta, + Operand numMinibatchesPerPhysicalSparseCore, Boolean useNesterov, Float exponent, + Float beta1, Float beta2, Float epsilon, String tableName, Options... 
options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput"); + opBuilder.addInput(rowPointers.asOutput()); + opBuilder.addInput(sortedSampleIds.asOutput()); + opBuilder.addInput(sortedTokenIds.asOutput()); + opBuilder.addInput(sortedGains.asOutput()); + opBuilder.addInput(activationGradients.asOutput()); + opBuilder.addInput(learningRate.asOutput()); + opBuilder.addInput(embeddingTable.asOutput()); + opBuilder.addInput(accumulator.asOutput()); + opBuilder.addInput(momenta.asOutput()); + opBuilder.addInput(numMinibatchesPerPhysicalSparseCore.asOutput()); + opBuilder.setAttr("use_nesterov", useNesterov); + opBuilder.setAttr("exponent", exponent); + opBuilder.setAttr("beta1", beta1); + opBuilder.setAttr("beta2", beta2); + opBuilder.setAttr("epsilon", epsilon); + opBuilder.setAttr("table_name", tableName); + if (options != null) { + for (Options opts : options) { + if (opts.clipWeightMin != null) { + opBuilder.setAttr("clip_weight_min", opts.clipWeightMin); + } + if (opts.clipWeightMax != null) { + opBuilder.setAttr("clip_weight_max", opts.clipWeightMax); + } + } + } + return new XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput(opBuilder.build()); + } + + /** + * Sets the clipWeightMin option. + * + * @param clipWeightMin the clipWeightMin option + * @return this Options instance. + */ + public static Options clipWeightMin(Float clipWeightMin) { + return new Options().clipWeightMin(clipWeightMin); + } + + /** + * Sets the clipWeightMax option. + * + * @param clipWeightMax the clipWeightMax option + * @return this Options instance. + */ + public static Options clipWeightMax(Float clipWeightMax) { + return new Options().clipWeightMax(clipWeightMax); + } + + /** + * Gets updatedEmbeddingTable. + * + * @return updatedEmbeddingTable. + */ + public Output updatedEmbeddingTable() { + return updatedEmbeddingTable; + } + + /** + * Gets updatedAccumulator. + * + * @return updatedAccumulator. 
+ */ + public Output updatedAccumulator() { + return updatedAccumulator; + } + + /** + * Gets updatedMomenta. + * + * @return updatedMomenta. + */ + public Output updatedMomenta() { + return updatedMomenta; + } + + /** + * Optional attributes for {@link org.tensorflow.op.xla.XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput} + */ + public static class Options { + private Float clipWeightMin; + + private Float clipWeightMax; + + private Options() { + } + + /** + * Sets the clipWeightMin option. + * + * @param clipWeightMin the clipWeightMin option + * @return this Options instance. + */ + public Options clipWeightMin(Float clipWeightMin) { + this.clipWeightMin = clipWeightMin; + return this; + } + + /** + * Sets the clipWeightMax option. + * + * @param clipWeightMax the clipWeightMax option + * @return this Options instance. + */ + public Options clipWeightMax(Float clipWeightMax) { + this.clipWeightMax = clipWeightMax; + return this; + } + } + + @OpInputsMetadata( + outputsClass = XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput.class + ) + public static class Inputs extends RawOpInputs { + /** + * The rowPointers input + */ + public final Operand rowPointers; + + /** + * The sortedSampleIds input + */ + public final Operand sortedSampleIds; + + /** + * The sortedTokenIds input + */ + public final Operand sortedTokenIds; + + /** + * The sortedGains input + */ + public final Operand sortedGains; + + /** + * The activationGradients input + */ + public final Operand activationGradients; + + /** + * The learningRate input + */ + public final Operand learningRate; + + /** + * The embeddingTable input + */ + public final Operand embeddingTable; + + /** + * The accumulator input + */ + public final Operand accumulator; + + /** + * The momenta input + */ + public final Operand momenta; + + /** + * The numMinibatchesPerPhysicalSparseCore input + */ + public final Operand numMinibatchesPerPhysicalSparseCore; + + /** + * The useNesterov attribute + */ + public 
final boolean useNesterov; + + /** + * The exponent attribute + */ + public final float exponent; + + /** + * The beta1 attribute + */ + public final float beta1; + + /** + * The beta2 attribute + */ + public final float beta2; + + /** + * The epsilon attribute + */ + public final float epsilon; + + /** + * The clipWeightMin attribute + */ + public final float clipWeightMin; + + /** + * The clipWeightMax attribute + */ + public final float clipWeightMax; + + /** + * The tableName attribute + */ + public final String tableName; + + public Inputs(GraphOperation op) { + super(new XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput(op), op, Arrays.asList("use_nesterov", "exponent", "beta1", "beta2", "epsilon", "clip_weight_min", "clip_weight_max", "table_name")); + int inputIndex = 0; + rowPointers = (Operand) op.input(inputIndex++); + sortedSampleIds = (Operand) op.input(inputIndex++); + sortedTokenIds = (Operand) op.input(inputIndex++); + sortedGains = (Operand) op.input(inputIndex++); + activationGradients = (Operand) op.input(inputIndex++); + learningRate = (Operand) op.input(inputIndex++); + embeddingTable = (Operand) op.input(inputIndex++); + accumulator = (Operand) op.input(inputIndex++); + momenta = (Operand) op.input(inputIndex++); + numMinibatchesPerPhysicalSparseCore = (Operand) op.input(inputIndex++); + useNesterov = op.attributes().getAttrBool("use_nesterov"); + exponent = op.attributes().getAttrFloat("exponent"); + beta1 = op.attributes().getAttrFloat("beta1"); + beta2 = op.attributes().getAttrFloat("beta2"); + epsilon = op.attributes().getAttrFloat("epsilon"); + clipWeightMin = op.attributes().getAttrFloat("clip_weight_min"); + clipWeightMax = op.attributes().getAttrFloat("clip_weight_max"); + tableName = op.attributes().getAttrString("table_name"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize.java new file mode 100644 index 00000000000..e007641d72a --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize.java @@ -0,0 +1,340 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.xla; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; + +/** + * The XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize operation + */ +@OpMetadata( + opType = XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize.OP_NAME, + inputsClass = XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize.Inputs.class +) +public final class XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize"; + + private Output updatedEmbeddingTable; + + private Output updatedAccumulator; + + private Output updatedMomenta; + + public XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + updatedEmbeddingTable = operation.output(outputIdx++); + updatedAccumulator = operation.output(outputIdx++); + updatedMomenta = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize operation. 
+ * + * @param scope current scope + * @param rowPointers The rowPointers value + * @param sortedSampleIds The sortedSampleIds value + * @param sortedTokenIds The sortedTokenIds value + * @param sortedGains The sortedGains value + * @param activationGradients The activationGradients value + * @param learningRate The learningRate value + * @param embeddingTable The embeddingTable value + * @param accumulator The accumulator value + * @param momenta The momenta value + * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value + * @param useNesterov The value of the useNesterov attribute + * @param exponent The value of the exponent attribute + * @param beta1 The value of the beta1 attribute + * @param beta2 The value of the beta2 attribute + * @param epsilon The value of the epsilon attribute + * @param maxIdsPerSparseCore The value of the maxIdsPerSparseCore attribute + * @param maxUniqueIdsPerSparseCore The value of the maxUniqueIdsPerSparseCore attribute + * @param tableName The value of the tableName attribute + * @param options carries optional attribute values + * @return a new instance of XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize + */ + @Endpoint( + describeByClass = true + ) + public static XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize create(Scope scope, + Operand rowPointers, Operand sortedSampleIds, Operand sortedTokenIds, + Operand sortedGains, Operand activationGradients, + Operand learningRate, Operand embeddingTable, + Operand accumulator, Operand momenta, + Operand numMinibatchesPerPhysicalSparseCore, Boolean useNesterov, Float exponent, + Float beta1, Float beta2, Float epsilon, Long maxIdsPerSparseCore, + Long maxUniqueIdsPerSparseCore, String tableName, Options... 
options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize"); + opBuilder.addInput(rowPointers.asOutput()); + opBuilder.addInput(sortedSampleIds.asOutput()); + opBuilder.addInput(sortedTokenIds.asOutput()); + opBuilder.addInput(sortedGains.asOutput()); + opBuilder.addInput(activationGradients.asOutput()); + opBuilder.addInput(learningRate.asOutput()); + opBuilder.addInput(embeddingTable.asOutput()); + opBuilder.addInput(accumulator.asOutput()); + opBuilder.addInput(momenta.asOutput()); + opBuilder.addInput(numMinibatchesPerPhysicalSparseCore.asOutput()); + opBuilder.setAttr("use_nesterov", useNesterov); + opBuilder.setAttr("exponent", exponent); + opBuilder.setAttr("beta1", beta1); + opBuilder.setAttr("beta2", beta2); + opBuilder.setAttr("epsilon", epsilon); + opBuilder.setAttr("max_ids_per_sparse_core", maxIdsPerSparseCore); + opBuilder.setAttr("max_unique_ids_per_sparse_core", maxUniqueIdsPerSparseCore); + opBuilder.setAttr("table_name", tableName); + if (options != null) { + for (Options opts : options) { + if (opts.clipWeightMin != null) { + opBuilder.setAttr("clip_weight_min", opts.clipWeightMin); + } + if (opts.clipWeightMax != null) { + opBuilder.setAttr("clip_weight_max", opts.clipWeightMax); + } + } + } + return new XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize(opBuilder.build()); + } + + /** + * Sets the clipWeightMin option. + * + * @param clipWeightMin the clipWeightMin option + * @return this Options instance. + */ + public static Options clipWeightMin(Float clipWeightMin) { + return new Options().clipWeightMin(clipWeightMin); + } + + /** + * Sets the clipWeightMax option. + * + * @param clipWeightMax the clipWeightMax option + * @return this Options instance. + */ + public static Options clipWeightMax(Float clipWeightMax) { + return new Options().clipWeightMax(clipWeightMax); + } + + /** + * Gets updatedEmbeddingTable. + * + * @return updatedEmbeddingTable. 
+ */ + public Output updatedEmbeddingTable() { + return updatedEmbeddingTable; + } + + /** + * Gets updatedAccumulator. + * + * @return updatedAccumulator. + */ + public Output updatedAccumulator() { + return updatedAccumulator; + } + + /** + * Gets updatedMomenta. + * + * @return updatedMomenta. + */ + public Output updatedMomenta() { + return updatedMomenta; + } + + /** + * Optional attributes for {@link org.tensorflow.op.xla.XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize} + */ + public static class Options { + private Float clipWeightMin; + + private Float clipWeightMax; + + private Options() { + } + + /** + * Sets the clipWeightMin option. + * + * @param clipWeightMin the clipWeightMin option + * @return this Options instance. + */ + public Options clipWeightMin(Float clipWeightMin) { + this.clipWeightMin = clipWeightMin; + return this; + } + + /** + * Sets the clipWeightMax option. + * + * @param clipWeightMax the clipWeightMax option + * @return this Options instance. 
+ */ + public Options clipWeightMax(Float clipWeightMax) { + this.clipWeightMax = clipWeightMax; + return this; + } + } + + @OpInputsMetadata( + outputsClass = XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize.class + ) + public static class Inputs extends RawOpInputs { + /** + * The rowPointers input + */ + public final Operand rowPointers; + + /** + * The sortedSampleIds input + */ + public final Operand sortedSampleIds; + + /** + * The sortedTokenIds input + */ + public final Operand sortedTokenIds; + + /** + * The sortedGains input + */ + public final Operand sortedGains; + + /** + * The activationGradients input + */ + public final Operand activationGradients; + + /** + * The learningRate input + */ + public final Operand learningRate; + + /** + * The embeddingTable input + */ + public final Operand embeddingTable; + + /** + * The accumulator input + */ + public final Operand accumulator; + + /** + * The momenta input + */ + public final Operand momenta; + + /** + * The numMinibatchesPerPhysicalSparseCore input + */ + public final Operand numMinibatchesPerPhysicalSparseCore; + + /** + * The useNesterov attribute + */ + public final boolean useNesterov; + + /** + * The exponent attribute + */ + public final float exponent; + + /** + * The beta1 attribute + */ + public final float beta1; + + /** + * The beta2 attribute + */ + public final float beta2; + + /** + * The epsilon attribute + */ + public final float epsilon; + + /** + * The clipWeightMin attribute + */ + public final float clipWeightMin; + + /** + * The clipWeightMax attribute + */ + public final float clipWeightMax; + + /** + * The maxIdsPerSparseCore attribute + */ + public final long maxIdsPerSparseCore; + + /** + * The maxUniqueIdsPerSparseCore attribute + */ + public final long maxUniqueIdsPerSparseCore; + + /** + * The tableName attribute + */ + public final String tableName; + + public Inputs(GraphOperation op) { + super(new 
XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize(op), op, Arrays.asList("use_nesterov", "exponent", "beta1", "beta2", "epsilon", "clip_weight_min", "clip_weight_max", "max_ids_per_sparse_core", "max_unique_ids_per_sparse_core", "table_name")); + int inputIndex = 0; + rowPointers = (Operand) op.input(inputIndex++); + sortedSampleIds = (Operand) op.input(inputIndex++); + sortedTokenIds = (Operand) op.input(inputIndex++); + sortedGains = (Operand) op.input(inputIndex++); + activationGradients = (Operand) op.input(inputIndex++); + learningRate = (Operand) op.input(inputIndex++); + embeddingTable = (Operand) op.input(inputIndex++); + accumulator = (Operand) op.input(inputIndex++); + momenta = (Operand) op.input(inputIndex++); + numMinibatchesPerPhysicalSparseCore = (Operand) op.input(inputIndex++); + useNesterov = op.attributes().getAttrBool("use_nesterov"); + exponent = op.attributes().getAttrFloat("exponent"); + beta1 = op.attributes().getAttrFloat("beta1"); + beta2 = op.attributes().getAttrFloat("beta2"); + epsilon = op.attributes().getAttrFloat("epsilon"); + clipWeightMin = op.attributes().getAttrFloat("clip_weight_min"); + clipWeightMax = op.attributes().getAttrFloat("clip_weight_max"); + maxIdsPerSparseCore = op.attributes().getAttrInt("max_ids_per_sparse_core"); + maxUniqueIdsPerSparseCore = op.attributes().getAttrInt("max_unique_ids_per_sparse_core"); + tableName = op.attributes().getAttrString("table_name"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithAdamAndCsrInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithAdamAndCsrInput.java new file mode 100644 index 00000000000..a39190054b3 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithAdamAndCsrInput.java @@ -0,0 +1,319 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.xla; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; + +/** + * The XlaSparseDenseMatmulGradWithAdamAndCsrInput operation + */ +@OpMetadata( + opType = XlaSparseDenseMatmulGradWithAdamAndCsrInput.OP_NAME, + inputsClass = XlaSparseDenseMatmulGradWithAdamAndCsrInput.Inputs.class +) +@Operator( + group = "xla" +) +public final class XlaSparseDenseMatmulGradWithAdamAndCsrInput extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "XlaSparseDenseMatmulGradWithAdamAndCsrInput"; + + private Output updatedEmbeddingTable; + + private Output updatedMomenta; + + private Output updatedVelocity; + + public XlaSparseDenseMatmulGradWithAdamAndCsrInput(Operation operation) { + super(operation, OP_NAME); + int 
outputIdx = 0; + updatedEmbeddingTable = operation.output(outputIdx++); + updatedMomenta = operation.output(outputIdx++); + updatedVelocity = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new XlaSparseDenseMatmulGradWithAdamAndCsrInput operation. + * + * @param scope current scope + * @param rowPointers The rowPointers value + * @param sortedSampleIds The sortedSampleIds value + * @param sortedTokenIds The sortedTokenIds value + * @param sortedGains The sortedGains value + * @param activationGradients The activationGradients value + * @param learningRate The learningRate value + * @param embeddingTable The embeddingTable value + * @param momenta The momenta value + * @param velocity The velocity value + * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value + * @param useSumInsideSqrt The value of the useSumInsideSqrt attribute + * @param beta1 The value of the beta1 attribute + * @param beta2 The value of the beta2 attribute + * @param epsilon The value of the epsilon attribute + * @param tableName The value of the tableName attribute + * @param options carries optional attribute values + * @return a new instance of XlaSparseDenseMatmulGradWithAdamAndCsrInput + */ + @Endpoint( + describeByClass = true + ) + public static XlaSparseDenseMatmulGradWithAdamAndCsrInput create(Scope scope, + Operand rowPointers, Operand sortedSampleIds, Operand sortedTokenIds, + Operand sortedGains, Operand activationGradients, + Operand learningRate, Operand embeddingTable, Operand momenta, + Operand velocity, Operand numMinibatchesPerPhysicalSparseCore, + Boolean useSumInsideSqrt, Float beta1, Float beta2, Float epsilon, String tableName, + Options... 
options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "XlaSparseDenseMatmulGradWithAdamAndCsrInput"); + opBuilder.addInput(rowPointers.asOutput()); + opBuilder.addInput(sortedSampleIds.asOutput()); + opBuilder.addInput(sortedTokenIds.asOutput()); + opBuilder.addInput(sortedGains.asOutput()); + opBuilder.addInput(activationGradients.asOutput()); + opBuilder.addInput(learningRate.asOutput()); + opBuilder.addInput(embeddingTable.asOutput()); + opBuilder.addInput(momenta.asOutput()); + opBuilder.addInput(velocity.asOutput()); + opBuilder.addInput(numMinibatchesPerPhysicalSparseCore.asOutput()); + opBuilder.setAttr("use_sum_inside_sqrt", useSumInsideSqrt); + opBuilder.setAttr("beta1", beta1); + opBuilder.setAttr("beta2", beta2); + opBuilder.setAttr("epsilon", epsilon); + opBuilder.setAttr("table_name", tableName); + if (options != null) { + for (Options opts : options) { + if (opts.clipWeightMin != null) { + opBuilder.setAttr("clip_weight_min", opts.clipWeightMin); + } + if (opts.clipWeightMax != null) { + opBuilder.setAttr("clip_weight_max", opts.clipWeightMax); + } + } + } + return new XlaSparseDenseMatmulGradWithAdamAndCsrInput(opBuilder.build()); + } + + /** + * Sets the clipWeightMin option. + * + * @param clipWeightMin the clipWeightMin option + * @return this Options instance. + */ + public static Options clipWeightMin(Float clipWeightMin) { + return new Options().clipWeightMin(clipWeightMin); + } + + /** + * Sets the clipWeightMax option. + * + * @param clipWeightMax the clipWeightMax option + * @return this Options instance. + */ + public static Options clipWeightMax(Float clipWeightMax) { + return new Options().clipWeightMax(clipWeightMax); + } + + /** + * Gets updatedEmbeddingTable. + * + * @return updatedEmbeddingTable. + */ + public Output updatedEmbeddingTable() { + return updatedEmbeddingTable; + } + + /** + * Gets updatedMomenta. + * + * @return updatedMomenta. 
+ */ + public Output updatedMomenta() { + return updatedMomenta; + } + + /** + * Gets updatedVelocity. + * + * @return updatedVelocity. + */ + public Output updatedVelocity() { + return updatedVelocity; + } + + /** + * Optional attributes for {@link org.tensorflow.op.xla.XlaSparseDenseMatmulGradWithAdamAndCsrInput} + */ + public static class Options { + private Float clipWeightMin; + + private Float clipWeightMax; + + private Options() { + } + + /** + * Sets the clipWeightMin option. + * + * @param clipWeightMin the clipWeightMin option + * @return this Options instance. + */ + public Options clipWeightMin(Float clipWeightMin) { + this.clipWeightMin = clipWeightMin; + return this; + } + + /** + * Sets the clipWeightMax option. + * + * @param clipWeightMax the clipWeightMax option + * @return this Options instance. + */ + public Options clipWeightMax(Float clipWeightMax) { + this.clipWeightMax = clipWeightMax; + return this; + } + } + + @OpInputsMetadata( + outputsClass = XlaSparseDenseMatmulGradWithAdamAndCsrInput.class + ) + public static class Inputs extends RawOpInputs { + /** + * The rowPointers input + */ + public final Operand rowPointers; + + /** + * The sortedSampleIds input + */ + public final Operand sortedSampleIds; + + /** + * The sortedTokenIds input + */ + public final Operand sortedTokenIds; + + /** + * The sortedGains input + */ + public final Operand sortedGains; + + /** + * The activationGradients input + */ + public final Operand activationGradients; + + /** + * The learningRate input + */ + public final Operand learningRate; + + /** + * The embeddingTable input + */ + public final Operand embeddingTable; + + /** + * The momenta input + */ + public final Operand momenta; + + /** + * The velocity input + */ + public final Operand velocity; + + /** + * The numMinibatchesPerPhysicalSparseCore input + */ + public final Operand numMinibatchesPerPhysicalSparseCore; + + /** + * The useSumInsideSqrt attribute + */ + public final boolean useSumInsideSqrt; 
+ + /** + * The beta1 attribute + */ + public final float beta1; + + /** + * The beta2 attribute + */ + public final float beta2; + + /** + * The epsilon attribute + */ + public final float epsilon; + + /** + * The clipWeightMin attribute + */ + public final float clipWeightMin; + + /** + * The clipWeightMax attribute + */ + public final float clipWeightMax; + + /** + * The tableName attribute + */ + public final String tableName; + + public Inputs(GraphOperation op) { + super(new XlaSparseDenseMatmulGradWithAdamAndCsrInput(op), op, Arrays.asList("use_sum_inside_sqrt", "beta1", "beta2", "epsilon", "clip_weight_min", "clip_weight_max", "table_name")); + int inputIndex = 0; + rowPointers = (Operand) op.input(inputIndex++); + sortedSampleIds = (Operand) op.input(inputIndex++); + sortedTokenIds = (Operand) op.input(inputIndex++); + sortedGains = (Operand) op.input(inputIndex++); + activationGradients = (Operand) op.input(inputIndex++); + learningRate = (Operand) op.input(inputIndex++); + embeddingTable = (Operand) op.input(inputIndex++); + momenta = (Operand) op.input(inputIndex++); + velocity = (Operand) op.input(inputIndex++); + numMinibatchesPerPhysicalSparseCore = (Operand) op.input(inputIndex++); + useSumInsideSqrt = op.attributes().getAttrBool("use_sum_inside_sqrt"); + beta1 = op.attributes().getAttrFloat("beta1"); + beta2 = op.attributes().getAttrFloat("beta2"); + epsilon = op.attributes().getAttrFloat("epsilon"); + clipWeightMin = op.attributes().getAttrFloat("clip_weight_min"); + clipWeightMax = op.attributes().getAttrFloat("clip_weight_max"); + tableName = op.attributes().getAttrString("table_name"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize.java new file mode 100644 index 00000000000..fe875c67f69 --- /dev/null +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize.java @@ -0,0 +1,331 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.xla; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; + +/** + * The XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize operation + */ +@OpMetadata( + opType = XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize.OP_NAME, + inputsClass = XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize.Inputs.class +) +public final class XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize"; + + private Output updatedEmbeddingTable; + + 
private Output updatedMomenta; + + private Output updatedVelocity; + + public XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + updatedEmbeddingTable = operation.output(outputIdx++); + updatedMomenta = operation.output(outputIdx++); + updatedVelocity = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize operation. + * + * @param scope current scope + * @param rowPointers The rowPointers value + * @param sortedSampleIds The sortedSampleIds value + * @param sortedTokenIds The sortedTokenIds value + * @param sortedGains The sortedGains value + * @param activationGradients The activationGradients value + * @param learningRate The learningRate value + * @param embeddingTable The embeddingTable value + * @param momenta The momenta value + * @param velocity The velocity value + * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value + * @param useSumInsideSqrt The value of the useSumInsideSqrt attribute + * @param beta1 The value of the beta1 attribute + * @param beta2 The value of the beta2 attribute + * @param epsilon The value of the epsilon attribute + * @param maxIdsPerSparseCore The value of the maxIdsPerSparseCore attribute + * @param maxUniqueIdsPerSparseCore The value of the maxUniqueIdsPerSparseCore attribute + * @param tableName The value of the tableName attribute + * @param options carries optional attribute values + * @return a new instance of XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize + */ + @Endpoint( + describeByClass = true + ) + public static XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize create(Scope scope, + Operand rowPointers, Operand sortedSampleIds, Operand sortedTokenIds, + Operand sortedGains, Operand activationGradients, + Operand learningRate, Operand embeddingTable, Operand momenta, + Operand velocity, Operand 
numMinibatchesPerPhysicalSparseCore, + Boolean useSumInsideSqrt, Float beta1, Float beta2, Float epsilon, Long maxIdsPerSparseCore, + Long maxUniqueIdsPerSparseCore, String tableName, Options... options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize"); + opBuilder.addInput(rowPointers.asOutput()); + opBuilder.addInput(sortedSampleIds.asOutput()); + opBuilder.addInput(sortedTokenIds.asOutput()); + opBuilder.addInput(sortedGains.asOutput()); + opBuilder.addInput(activationGradients.asOutput()); + opBuilder.addInput(learningRate.asOutput()); + opBuilder.addInput(embeddingTable.asOutput()); + opBuilder.addInput(momenta.asOutput()); + opBuilder.addInput(velocity.asOutput()); + opBuilder.addInput(numMinibatchesPerPhysicalSparseCore.asOutput()); + opBuilder.setAttr("use_sum_inside_sqrt", useSumInsideSqrt); + opBuilder.setAttr("beta1", beta1); + opBuilder.setAttr("beta2", beta2); + opBuilder.setAttr("epsilon", epsilon); + opBuilder.setAttr("max_ids_per_sparse_core", maxIdsPerSparseCore); + opBuilder.setAttr("max_unique_ids_per_sparse_core", maxUniqueIdsPerSparseCore); + opBuilder.setAttr("table_name", tableName); + if (options != null) { + for (Options opts : options) { + if (opts.clipWeightMin != null) { + opBuilder.setAttr("clip_weight_min", opts.clipWeightMin); + } + if (opts.clipWeightMax != null) { + opBuilder.setAttr("clip_weight_max", opts.clipWeightMax); + } + } + } + return new XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize(opBuilder.build()); + } + + /** + * Sets the clipWeightMin option. + * + * @param clipWeightMin the clipWeightMin option + * @return this Options instance. + */ + public static Options clipWeightMin(Float clipWeightMin) { + return new Options().clipWeightMin(clipWeightMin); + } + + /** + * Sets the clipWeightMax option. + * + * @param clipWeightMax the clipWeightMax option + * @return this Options instance. 
+ */ + public static Options clipWeightMax(Float clipWeightMax) { + return new Options().clipWeightMax(clipWeightMax); + } + + /** + * Gets updatedEmbeddingTable. + * + * @return updatedEmbeddingTable. + */ + public Output updatedEmbeddingTable() { + return updatedEmbeddingTable; + } + + /** + * Gets updatedMomenta. + * + * @return updatedMomenta. + */ + public Output updatedMomenta() { + return updatedMomenta; + } + + /** + * Gets updatedVelocity. + * + * @return updatedVelocity. + */ + public Output updatedVelocity() { + return updatedVelocity; + } + + /** + * Optional attributes for {@link org.tensorflow.op.xla.XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize} + */ + public static class Options { + private Float clipWeightMin; + + private Float clipWeightMax; + + private Options() { + } + + /** + * Sets the clipWeightMin option. + * + * @param clipWeightMin the clipWeightMin option + * @return this Options instance. + */ + public Options clipWeightMin(Float clipWeightMin) { + this.clipWeightMin = clipWeightMin; + return this; + } + + /** + * Sets the clipWeightMax option. + * + * @param clipWeightMax the clipWeightMax option + * @return this Options instance. 
+ */ + public Options clipWeightMax(Float clipWeightMax) { + this.clipWeightMax = clipWeightMax; + return this; + } + } + + @OpInputsMetadata( + outputsClass = XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize.class + ) + public static class Inputs extends RawOpInputs { + /** + * The rowPointers input + */ + public final Operand rowPointers; + + /** + * The sortedSampleIds input + */ + public final Operand sortedSampleIds; + + /** + * The sortedTokenIds input + */ + public final Operand sortedTokenIds; + + /** + * The sortedGains input + */ + public final Operand sortedGains; + + /** + * The activationGradients input + */ + public final Operand activationGradients; + + /** + * The learningRate input + */ + public final Operand learningRate; + + /** + * The embeddingTable input + */ + public final Operand embeddingTable; + + /** + * The momenta input + */ + public final Operand momenta; + + /** + * The velocity input + */ + public final Operand velocity; + + /** + * The numMinibatchesPerPhysicalSparseCore input + */ + public final Operand numMinibatchesPerPhysicalSparseCore; + + /** + * The useSumInsideSqrt attribute + */ + public final boolean useSumInsideSqrt; + + /** + * The beta1 attribute + */ + public final float beta1; + + /** + * The beta2 attribute + */ + public final float beta2; + + /** + * The epsilon attribute + */ + public final float epsilon; + + /** + * The clipWeightMin attribute + */ + public final float clipWeightMin; + + /** + * The clipWeightMax attribute + */ + public final float clipWeightMax; + + /** + * The maxIdsPerSparseCore attribute + */ + public final long maxIdsPerSparseCore; + + /** + * The maxUniqueIdsPerSparseCore attribute + */ + public final long maxUniqueIdsPerSparseCore; + + /** + * The tableName attribute + */ + public final String tableName; + + public Inputs(GraphOperation op) { + super(new XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize(op), op, Arrays.asList("use_sum_inside_sqrt", "beta1", "beta2", "epsilon", 
"clip_weight_min", "clip_weight_max", "max_ids_per_sparse_core", "max_unique_ids_per_sparse_core", "table_name")); + int inputIndex = 0; + rowPointers = (Operand) op.input(inputIndex++); + sortedSampleIds = (Operand) op.input(inputIndex++); + sortedTokenIds = (Operand) op.input(inputIndex++); + sortedGains = (Operand) op.input(inputIndex++); + activationGradients = (Operand) op.input(inputIndex++); + learningRate = (Operand) op.input(inputIndex++); + embeddingTable = (Operand) op.input(inputIndex++); + momenta = (Operand) op.input(inputIndex++); + velocity = (Operand) op.input(inputIndex++); + numMinibatchesPerPhysicalSparseCore = (Operand) op.input(inputIndex++); + useSumInsideSqrt = op.attributes().getAttrBool("use_sum_inside_sqrt"); + beta1 = op.attributes().getAttrFloat("beta1"); + beta2 = op.attributes().getAttrFloat("beta2"); + epsilon = op.attributes().getAttrFloat("epsilon"); + clipWeightMin = op.attributes().getAttrFloat("clip_weight_min"); + clipWeightMax = op.attributes().getAttrFloat("clip_weight_max"); + maxIdsPerSparseCore = op.attributes().getAttrInt("max_ids_per_sparse_core"); + maxUniqueIdsPerSparseCore = op.attributes().getAttrInt("max_unique_ids_per_sparse_core"); + tableName = op.attributes().getAttrString("table_name"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithCsrInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithCsrInput.java new file mode 100644 index 00000000000..7ac92263e93 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithCsrInput.java @@ -0,0 +1,184 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.xla; + +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import org.tensorflow.ConcreteFunction; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; + +/** + * The XlaSparseDenseMatmulGradWithCsrInput operation + */ +@OpMetadata( + opType = XlaSparseDenseMatmulGradWithCsrInput.OP_NAME, + inputsClass = XlaSparseDenseMatmulGradWithCsrInput.Inputs.class +) +public final class XlaSparseDenseMatmulGradWithCsrInput extends RawOp implements Iterable> { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "XlaSparseDenseMatmulGradWithCsrInput"; + + private List> updatedTables; + + @SuppressWarnings("unchecked") + public XlaSparseDenseMatmulGradWithCsrInput(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + int updatedTablesLength = operation.outputListLength("updated_tables"); + updatedTables = Arrays.asList((Output[]) 
operation.outputList(outputIdx, updatedTablesLength)); + outputIdx += updatedTablesLength; + } + + /** + * Factory method to create a class wrapping a new XlaSparseDenseMatmulGradWithCsrInput operation. + * + * @param scope current scope + * @param rowPointers The rowPointers value + * @param sortedSampleIds The sortedSampleIds value + * @param sortedTokenIds The sortedTokenIds value + * @param sortedGains The sortedGains value + * @param activationGradients The activationGradients value + * @param tables The tables value + * @param hyperparameters The hyperparameters value + * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value + * @param customComputation The value of the customComputation attribute + * @param tableName The value of the tableName attribute + * @return a new instance of XlaSparseDenseMatmulGradWithCsrInput + */ + @Endpoint( + describeByClass = true + ) + public static XlaSparseDenseMatmulGradWithCsrInput create(Scope scope, + Operand rowPointers, Operand sortedSampleIds, Operand sortedTokenIds, + Operand sortedGains, Operand activationGradients, + Iterable> tables, Iterable> hyperparameters, + Operand numMinibatchesPerPhysicalSparseCore, ConcreteFunction customComputation, + String tableName) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "XlaSparseDenseMatmulGradWithCsrInput"); + opBuilder.addInput(rowPointers.asOutput()); + opBuilder.addInput(sortedSampleIds.asOutput()); + opBuilder.addInput(sortedTokenIds.asOutput()); + opBuilder.addInput(sortedGains.asOutput()); + opBuilder.addInput(activationGradients.asOutput()); + opBuilder.addInputList(Operands.asOutputs(tables)); + opBuilder.addInputList(Operands.asOutputs(hyperparameters)); + opBuilder.addInput(numMinibatchesPerPhysicalSparseCore.asOutput()); + opBuilder.setAttr("custom_computation", customComputation); + opBuilder.setAttr("table_name", tableName); + return new XlaSparseDenseMatmulGradWithCsrInput(opBuilder.build()); + } + + /** + * Gets 
updatedTables. + * + * @return updatedTables. + */ + public List> updatedTables() { + return updatedTables; + } + + @Override + @SuppressWarnings({"rawtypes", "unchecked"}) + public Iterator> iterator() { + return (Iterator) updatedTables.iterator(); + } + + @OpInputsMetadata( + outputsClass = XlaSparseDenseMatmulGradWithCsrInput.class + ) + public static class Inputs extends RawOpInputs { + /** + * The rowPointers input + */ + public final Operand rowPointers; + + /** + * The sortedSampleIds input + */ + public final Operand sortedSampleIds; + + /** + * The sortedTokenIds input + */ + public final Operand sortedTokenIds; + + /** + * The sortedGains input + */ + public final Operand sortedGains; + + /** + * The activationGradients input + */ + public final Operand activationGradients; + + /** + * The tables input + */ + public final Iterable> tables; + + /** + * The hyperparameters input + */ + public final Iterable> hyperparameters; + + /** + * The numMinibatchesPerPhysicalSparseCore input + */ + public final Operand numMinibatchesPerPhysicalSparseCore; + + /** + * The tableName attribute + */ + public final String tableName; + + public Inputs(GraphOperation op) { + super(new XlaSparseDenseMatmulGradWithCsrInput(op), op, Arrays.asList("table_name")); + int inputIndex = 0; + rowPointers = (Operand) op.input(inputIndex++); + sortedSampleIds = (Operand) op.input(inputIndex++); + sortedTokenIds = (Operand) op.input(inputIndex++); + sortedGains = (Operand) op.input(inputIndex++); + activationGradients = (Operand) op.input(inputIndex++); + int tablesLength = op.inputListLength("tables"); + tables = Arrays.asList((Operand[]) op.inputList(inputIndex, tablesLength)); + inputIndex += tablesLength; + int hyperparametersLength = op.inputListLength("hyperparameters"); + hyperparameters = Arrays.asList((Operand[]) op.inputList(inputIndex, hyperparametersLength)); + inputIndex += hyperparametersLength; + numMinibatchesPerPhysicalSparseCore = (Operand) op.input(inputIndex++); + 
tableName = op.attributes().getAttrString("table_name"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithFtrlAndCsrInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithFtrlAndCsrInput.java new file mode 100644 index 00000000000..b8d005b7059 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithFtrlAndCsrInput.java @@ -0,0 +1,328 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.xla; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; + +/** + * The XlaSparseDenseMatmulGradWithFtrlAndCsrInput operation + */ +@OpMetadata( + opType = XlaSparseDenseMatmulGradWithFtrlAndCsrInput.OP_NAME, + inputsClass = XlaSparseDenseMatmulGradWithFtrlAndCsrInput.Inputs.class +) +@Operator( + group = "xla" +) +public final class XlaSparseDenseMatmulGradWithFtrlAndCsrInput extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "XlaSparseDenseMatmulGradWithFtrlAndCsrInput"; + + private Output updatedEmbeddingTable; + + private Output updatedAccumulator; + + private Output updatedLinear; + + public XlaSparseDenseMatmulGradWithFtrlAndCsrInput(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + updatedEmbeddingTable = operation.output(outputIdx++); + updatedAccumulator = operation.output(outputIdx++); + updatedLinear = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new XlaSparseDenseMatmulGradWithFtrlAndCsrInput operation. 
+ * + * @param scope current scope + * @param rowPointers The rowPointers value + * @param sortedSampleIds The sortedSampleIds value + * @param sortedTokenIds The sortedTokenIds value + * @param sortedGains The sortedGains value + * @param activationGradients The activationGradients value + * @param learningRate The learningRate value + * @param embeddingTable The embeddingTable value + * @param accumulator The accumulator value + * @param linear The linear value + * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value + * @param multiplyLinearByLearningRate The value of the multiplyLinearByLearningRate attribute + * @param beta The value of the beta attribute + * @param learningRatePower The value of the learningRatePower attribute + * @param l1RegularizationStrength The value of the l1RegularizationStrength attribute + * @param l2RegularizationStrength The value of the l2RegularizationStrength attribute + * @param tableName The value of the tableName attribute + * @param options carries optional attribute values + * @return a new instance of XlaSparseDenseMatmulGradWithFtrlAndCsrInput + */ + @Endpoint( + describeByClass = true + ) + public static XlaSparseDenseMatmulGradWithFtrlAndCsrInput create(Scope scope, + Operand rowPointers, Operand sortedSampleIds, Operand sortedTokenIds, + Operand sortedGains, Operand activationGradients, + Operand learningRate, Operand embeddingTable, + Operand accumulator, Operand linear, + Operand numMinibatchesPerPhysicalSparseCore, Boolean multiplyLinearByLearningRate, + Float beta, Float learningRatePower, Float l1RegularizationStrength, + Float l2RegularizationStrength, String tableName, Options... 
options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "XlaSparseDenseMatmulGradWithFtrlAndCsrInput"); + opBuilder.addInput(rowPointers.asOutput()); + opBuilder.addInput(sortedSampleIds.asOutput()); + opBuilder.addInput(sortedTokenIds.asOutput()); + opBuilder.addInput(sortedGains.asOutput()); + opBuilder.addInput(activationGradients.asOutput()); + opBuilder.addInput(learningRate.asOutput()); + opBuilder.addInput(embeddingTable.asOutput()); + opBuilder.addInput(accumulator.asOutput()); + opBuilder.addInput(linear.asOutput()); + opBuilder.addInput(numMinibatchesPerPhysicalSparseCore.asOutput()); + opBuilder.setAttr("multiply_linear_by_learning_rate", multiplyLinearByLearningRate); + opBuilder.setAttr("beta", beta); + opBuilder.setAttr("learning_rate_power", learningRatePower); + opBuilder.setAttr("l1_regularization_strength", l1RegularizationStrength); + opBuilder.setAttr("l2_regularization_strength", l2RegularizationStrength); + opBuilder.setAttr("table_name", tableName); + if (options != null) { + for (Options opts : options) { + if (opts.clipWeightMin != null) { + opBuilder.setAttr("clip_weight_min", opts.clipWeightMin); + } + if (opts.clipWeightMax != null) { + opBuilder.setAttr("clip_weight_max", opts.clipWeightMax); + } + } + } + return new XlaSparseDenseMatmulGradWithFtrlAndCsrInput(opBuilder.build()); + } + + /** + * Sets the clipWeightMin option. + * + * @param clipWeightMin the clipWeightMin option + * @return this Options instance. + */ + public static Options clipWeightMin(Float clipWeightMin) { + return new Options().clipWeightMin(clipWeightMin); + } + + /** + * Sets the clipWeightMax option. + * + * @param clipWeightMax the clipWeightMax option + * @return this Options instance. + */ + public static Options clipWeightMax(Float clipWeightMax) { + return new Options().clipWeightMax(clipWeightMax); + } + + /** + * Gets updatedEmbeddingTable. + * + * @return updatedEmbeddingTable. 
+ */ + public Output updatedEmbeddingTable() { + return updatedEmbeddingTable; + } + + /** + * Gets updatedAccumulator. + * + * @return updatedAccumulator. + */ + public Output updatedAccumulator() { + return updatedAccumulator; + } + + /** + * Gets updatedLinear. + * + * @return updatedLinear. + */ + public Output updatedLinear() { + return updatedLinear; + } + + /** + * Optional attributes for {@link org.tensorflow.op.xla.XlaSparseDenseMatmulGradWithFtrlAndCsrInput} + */ + public static class Options { + private Float clipWeightMin; + + private Float clipWeightMax; + + private Options() { + } + + /** + * Sets the clipWeightMin option. + * + * @param clipWeightMin the clipWeightMin option + * @return this Options instance. + */ + public Options clipWeightMin(Float clipWeightMin) { + this.clipWeightMin = clipWeightMin; + return this; + } + + /** + * Sets the clipWeightMax option. + * + * @param clipWeightMax the clipWeightMax option + * @return this Options instance. + */ + public Options clipWeightMax(Float clipWeightMax) { + this.clipWeightMax = clipWeightMax; + return this; + } + } + + @OpInputsMetadata( + outputsClass = XlaSparseDenseMatmulGradWithFtrlAndCsrInput.class + ) + public static class Inputs extends RawOpInputs { + /** + * The rowPointers input + */ + public final Operand rowPointers; + + /** + * The sortedSampleIds input + */ + public final Operand sortedSampleIds; + + /** + * The sortedTokenIds input + */ + public final Operand sortedTokenIds; + + /** + * The sortedGains input + */ + public final Operand sortedGains; + + /** + * The activationGradients input + */ + public final Operand activationGradients; + + /** + * The learningRate input + */ + public final Operand learningRate; + + /** + * The embeddingTable input + */ + public final Operand embeddingTable; + + /** + * The accumulator input + */ + public final Operand accumulator; + + /** + * The linear input + */ + public final Operand linear; + + /** + * The numMinibatchesPerPhysicalSparseCore 
input + */ + public final Operand numMinibatchesPerPhysicalSparseCore; + + /** + * The multiplyLinearByLearningRate attribute + */ + public final boolean multiplyLinearByLearningRate; + + /** + * The beta attribute + */ + public final float beta; + + /** + * The learningRatePower attribute + */ + public final float learningRatePower; + + /** + * The l1RegularizationStrength attribute + */ + public final float l1RegularizationStrength; + + /** + * The l2RegularizationStrength attribute + */ + public final float l2RegularizationStrength; + + /** + * The clipWeightMin attribute + */ + public final float clipWeightMin; + + /** + * The clipWeightMax attribute + */ + public final float clipWeightMax; + + /** + * The tableName attribute + */ + public final String tableName; + + public Inputs(GraphOperation op) { + super(new XlaSparseDenseMatmulGradWithFtrlAndCsrInput(op), op, Arrays.asList("multiply_linear_by_learning_rate", "beta", "learning_rate_power", "l1_regularization_strength", "l2_regularization_strength", "clip_weight_min", "clip_weight_max", "table_name")); + int inputIndex = 0; + rowPointers = (Operand) op.input(inputIndex++); + sortedSampleIds = (Operand) op.input(inputIndex++); + sortedTokenIds = (Operand) op.input(inputIndex++); + sortedGains = (Operand) op.input(inputIndex++); + activationGradients = (Operand) op.input(inputIndex++); + learningRate = (Operand) op.input(inputIndex++); + embeddingTable = (Operand) op.input(inputIndex++); + accumulator = (Operand) op.input(inputIndex++); + linear = (Operand) op.input(inputIndex++); + numMinibatchesPerPhysicalSparseCore = (Operand) op.input(inputIndex++); + multiplyLinearByLearningRate = op.attributes().getAttrBool("multiply_linear_by_learning_rate"); + beta = op.attributes().getAttrFloat("beta"); + learningRatePower = op.attributes().getAttrFloat("learning_rate_power"); + l1RegularizationStrength = op.attributes().getAttrFloat("l1_regularization_strength"); + l2RegularizationStrength = 
op.attributes().getAttrFloat("l2_regularization_strength"); + clipWeightMin = op.attributes().getAttrFloat("clip_weight_min"); + clipWeightMax = op.attributes().getAttrFloat("clip_weight_max"); + tableName = op.attributes().getAttrString("table_name"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize.java new file mode 100644 index 00000000000..7bfa0c2cc45 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize.java @@ -0,0 +1,341 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.xla; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; + +/** + * The XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize operation + */ +@OpMetadata( + opType = XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize.OP_NAME, + inputsClass = XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize.Inputs.class +) +public final class XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize"; + + private Output updatedEmbeddingTable; + + private Output updatedAccumulator; + + private Output updatedLinear; + + public XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + updatedEmbeddingTable = operation.output(outputIdx++); + updatedAccumulator = operation.output(outputIdx++); + updatedLinear = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize operation. 
+ * + * @param scope current scope + * @param rowPointers The rowPointers value + * @param sortedSampleIds The sortedSampleIds value + * @param sortedTokenIds The sortedTokenIds value + * @param sortedGains The sortedGains value + * @param activationGradients The activationGradients value + * @param learningRate The learningRate value + * @param embeddingTable The embeddingTable value + * @param accumulator The accumulator value + * @param linear The linear value + * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value + * @param multiplyLinearByLearningRate The value of the multiplyLinearByLearningRate attribute + * @param beta The value of the beta attribute + * @param learningRatePower The value of the learningRatePower attribute + * @param l1RegularizationStrength The value of the l1RegularizationStrength attribute + * @param l2RegularizationStrength The value of the l2RegularizationStrength attribute + * @param maxIdsPerSparseCore The value of the maxIdsPerSparseCore attribute + * @param maxUniqueIdsPerSparseCore The value of the maxUniqueIdsPerSparseCore attribute + * @param tableName The value of the tableName attribute + * @param options carries optional attribute values + * @return a new instance of XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize + */ + @Endpoint( + describeByClass = true + ) + public static XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize create(Scope scope, + Operand rowPointers, Operand sortedSampleIds, Operand sortedTokenIds, + Operand sortedGains, Operand activationGradients, + Operand learningRate, Operand embeddingTable, + Operand accumulator, Operand linear, + Operand numMinibatchesPerPhysicalSparseCore, Boolean multiplyLinearByLearningRate, + Float beta, Float learningRatePower, Float l1RegularizationStrength, + Float l2RegularizationStrength, Long maxIdsPerSparseCore, Long maxUniqueIdsPerSparseCore, + String tableName, Options... 
options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize"); + opBuilder.addInput(rowPointers.asOutput()); + opBuilder.addInput(sortedSampleIds.asOutput()); + opBuilder.addInput(sortedTokenIds.asOutput()); + opBuilder.addInput(sortedGains.asOutput()); + opBuilder.addInput(activationGradients.asOutput()); + opBuilder.addInput(learningRate.asOutput()); + opBuilder.addInput(embeddingTable.asOutput()); + opBuilder.addInput(accumulator.asOutput()); + opBuilder.addInput(linear.asOutput()); + opBuilder.addInput(numMinibatchesPerPhysicalSparseCore.asOutput()); + opBuilder.setAttr("multiply_linear_by_learning_rate", multiplyLinearByLearningRate); + opBuilder.setAttr("beta", beta); + opBuilder.setAttr("learning_rate_power", learningRatePower); + opBuilder.setAttr("l1_regularization_strength", l1RegularizationStrength); + opBuilder.setAttr("l2_regularization_strength", l2RegularizationStrength); + opBuilder.setAttr("max_ids_per_sparse_core", maxIdsPerSparseCore); + opBuilder.setAttr("max_unique_ids_per_sparse_core", maxUniqueIdsPerSparseCore); + opBuilder.setAttr("table_name", tableName); + if (options != null) { + for (Options opts : options) { + if (opts.clipWeightMin != null) { + opBuilder.setAttr("clip_weight_min", opts.clipWeightMin); + } + if (opts.clipWeightMax != null) { + opBuilder.setAttr("clip_weight_max", opts.clipWeightMax); + } + } + } + return new XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize(opBuilder.build()); + } + + /** + * Sets the clipWeightMin option. + * + * @param clipWeightMin the clipWeightMin option + * @return this Options instance. + */ + public static Options clipWeightMin(Float clipWeightMin) { + return new Options().clipWeightMin(clipWeightMin); + } + + /** + * Sets the clipWeightMax option. + * + * @param clipWeightMax the clipWeightMax option + * @return this Options instance. 
+ */ + public static Options clipWeightMax(Float clipWeightMax) { + return new Options().clipWeightMax(clipWeightMax); + } + + /** + * Gets updatedEmbeddingTable. + * + * @return updatedEmbeddingTable. + */ + public Output updatedEmbeddingTable() { + return updatedEmbeddingTable; + } + + /** + * Gets updatedAccumulator. + * + * @return updatedAccumulator. + */ + public Output updatedAccumulator() { + return updatedAccumulator; + } + + /** + * Gets updatedLinear. + * + * @return updatedLinear. + */ + public Output updatedLinear() { + return updatedLinear; + } + + /** + * Optional attributes for {@link org.tensorflow.op.xla.XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize} + */ + public static class Options { + private Float clipWeightMin; + + private Float clipWeightMax; + + private Options() { + } + + /** + * Sets the clipWeightMin option. + * + * @param clipWeightMin the clipWeightMin option + * @return this Options instance. + */ + public Options clipWeightMin(Float clipWeightMin) { + this.clipWeightMin = clipWeightMin; + return this; + } + + /** + * Sets the clipWeightMax option. + * + * @param clipWeightMax the clipWeightMax option + * @return this Options instance. 
+ */ + public Options clipWeightMax(Float clipWeightMax) { + this.clipWeightMax = clipWeightMax; + return this; + } + } + + @OpInputsMetadata( + outputsClass = XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize.class + ) + public static class Inputs extends RawOpInputs { + /** + * The rowPointers input + */ + public final Operand rowPointers; + + /** + * The sortedSampleIds input + */ + public final Operand sortedSampleIds; + + /** + * The sortedTokenIds input + */ + public final Operand sortedTokenIds; + + /** + * The sortedGains input + */ + public final Operand sortedGains; + + /** + * The activationGradients input + */ + public final Operand activationGradients; + + /** + * The learningRate input + */ + public final Operand learningRate; + + /** + * The embeddingTable input + */ + public final Operand embeddingTable; + + /** + * The accumulator input + */ + public final Operand accumulator; + + /** + * The linear input + */ + public final Operand linear; + + /** + * The numMinibatchesPerPhysicalSparseCore input + */ + public final Operand numMinibatchesPerPhysicalSparseCore; + + /** + * The multiplyLinearByLearningRate attribute + */ + public final boolean multiplyLinearByLearningRate; + + /** + * The beta attribute + */ + public final float beta; + + /** + * The learningRatePower attribute + */ + public final float learningRatePower; + + /** + * The l1RegularizationStrength attribute + */ + public final float l1RegularizationStrength; + + /** + * The l2RegularizationStrength attribute + */ + public final float l2RegularizationStrength; + + /** + * The clipWeightMin attribute + */ + public final float clipWeightMin; + + /** + * The clipWeightMax attribute + */ + public final float clipWeightMax; + + /** + * The maxIdsPerSparseCore attribute + */ + public final long maxIdsPerSparseCore; + + /** + * The maxUniqueIdsPerSparseCore attribute + */ + public final long maxUniqueIdsPerSparseCore; + + /** + * The tableName attribute + */ + public final String tableName; 
+ + public Inputs(GraphOperation op) { + super(new XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize(op), op, Arrays.asList("multiply_linear_by_learning_rate", "beta", "learning_rate_power", "l1_regularization_strength", "l2_regularization_strength", "clip_weight_min", "clip_weight_max", "max_ids_per_sparse_core", "max_unique_ids_per_sparse_core", "table_name")); + int inputIndex = 0; + rowPointers = (Operand) op.input(inputIndex++); + sortedSampleIds = (Operand) op.input(inputIndex++); + sortedTokenIds = (Operand) op.input(inputIndex++); + sortedGains = (Operand) op.input(inputIndex++); + activationGradients = (Operand) op.input(inputIndex++); + learningRate = (Operand) op.input(inputIndex++); + embeddingTable = (Operand) op.input(inputIndex++); + accumulator = (Operand) op.input(inputIndex++); + linear = (Operand) op.input(inputIndex++); + numMinibatchesPerPhysicalSparseCore = (Operand) op.input(inputIndex++); + multiplyLinearByLearningRate = op.attributes().getAttrBool("multiply_linear_by_learning_rate"); + beta = op.attributes().getAttrFloat("beta"); + learningRatePower = op.attributes().getAttrFloat("learning_rate_power"); + l1RegularizationStrength = op.attributes().getAttrFloat("l1_regularization_strength"); + l2RegularizationStrength = op.attributes().getAttrFloat("l2_regularization_strength"); + clipWeightMin = op.attributes().getAttrFloat("clip_weight_min"); + clipWeightMax = op.attributes().getAttrFloat("clip_weight_max"); + maxIdsPerSparseCore = op.attributes().getAttrInt("max_ids_per_sparse_core"); + maxUniqueIdsPerSparseCore = op.attributes().getAttrInt("max_unique_ids_per_sparse_core"); + tableName = op.attributes().getAttrString("table_name"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithSgdAndCsrInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithSgdAndCsrInput.java new file mode 100644 index 
00000000000..bfb8a3a127f --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithSgdAndCsrInput.java @@ -0,0 +1,250 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.xla; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; + +/** + * The XlaSparseDenseMatmulGradWithSgdAndCsrInput operation + */ +@OpMetadata( + opType = XlaSparseDenseMatmulGradWithSgdAndCsrInput.OP_NAME, + inputsClass = XlaSparseDenseMatmulGradWithSgdAndCsrInput.Inputs.class +) +@Operator( + group = "xla" +) +public final class XlaSparseDenseMatmulGradWithSgdAndCsrInput extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = 
"XlaSparseDenseMatmulGradWithSgdAndCsrInput"; + + private Output updatedEmbeddingTable; + + public XlaSparseDenseMatmulGradWithSgdAndCsrInput(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + updatedEmbeddingTable = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new XlaSparseDenseMatmulGradWithSgdAndCsrInput operation. + * + * @param scope current scope + * @param rowPointers The rowPointers value + * @param sortedSampleIds The sortedSampleIds value + * @param sortedTokenIds The sortedTokenIds value + * @param sortedGains The sortedGains value + * @param activationGradients The activationGradients value + * @param learningRate The learningRate value + * @param embeddingTable The embeddingTable value + * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value + * @param tableName The value of the tableName attribute + * @param options carries optional attribute values + * @return a new instance of XlaSparseDenseMatmulGradWithSgdAndCsrInput + */ + @Endpoint( + describeByClass = true + ) + public static XlaSparseDenseMatmulGradWithSgdAndCsrInput create(Scope scope, + Operand rowPointers, Operand sortedSampleIds, Operand sortedTokenIds, + Operand sortedGains, Operand activationGradients, + Operand learningRate, Operand embeddingTable, + Operand numMinibatchesPerPhysicalSparseCore, String tableName, Options... 
options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "XlaSparseDenseMatmulGradWithSgdAndCsrInput"); + opBuilder.addInput(rowPointers.asOutput()); + opBuilder.addInput(sortedSampleIds.asOutput()); + opBuilder.addInput(sortedTokenIds.asOutput()); + opBuilder.addInput(sortedGains.asOutput()); + opBuilder.addInput(activationGradients.asOutput()); + opBuilder.addInput(learningRate.asOutput()); + opBuilder.addInput(embeddingTable.asOutput()); + opBuilder.addInput(numMinibatchesPerPhysicalSparseCore.asOutput()); + opBuilder.setAttr("table_name", tableName); + if (options != null) { + for (Options opts : options) { + if (opts.clipWeightMin != null) { + opBuilder.setAttr("clip_weight_min", opts.clipWeightMin); + } + if (opts.clipWeightMax != null) { + opBuilder.setAttr("clip_weight_max", opts.clipWeightMax); + } + } + } + return new XlaSparseDenseMatmulGradWithSgdAndCsrInput(opBuilder.build()); + } + + /** + * Sets the clipWeightMin option. + * + * @param clipWeightMin the clipWeightMin option + * @return this Options instance. + */ + public static Options clipWeightMin(Float clipWeightMin) { + return new Options().clipWeightMin(clipWeightMin); + } + + /** + * Sets the clipWeightMax option. + * + * @param clipWeightMax the clipWeightMax option + * @return this Options instance. + */ + public static Options clipWeightMax(Float clipWeightMax) { + return new Options().clipWeightMax(clipWeightMax); + } + + /** + * Gets updatedEmbeddingTable. + * + * @return updatedEmbeddingTable. + */ + public Output updatedEmbeddingTable() { + return updatedEmbeddingTable; + } + + @Override + public Output asOutput() { + return updatedEmbeddingTable; + } + + /** + * Optional attributes for {@link org.tensorflow.op.xla.XlaSparseDenseMatmulGradWithSgdAndCsrInput} + */ + public static class Options { + private Float clipWeightMin; + + private Float clipWeightMax; + + private Options() { + } + + /** + * Sets the clipWeightMin option. 
+ * + * @param clipWeightMin the clipWeightMin option + * @return this Options instance. + */ + public Options clipWeightMin(Float clipWeightMin) { + this.clipWeightMin = clipWeightMin; + return this; + } + + /** + * Sets the clipWeightMax option. + * + * @param clipWeightMax the clipWeightMax option + * @return this Options instance. + */ + public Options clipWeightMax(Float clipWeightMax) { + this.clipWeightMax = clipWeightMax; + return this; + } + } + + @OpInputsMetadata( + outputsClass = XlaSparseDenseMatmulGradWithSgdAndCsrInput.class + ) + public static class Inputs extends RawOpInputs { + /** + * The rowPointers input + */ + public final Operand rowPointers; + + /** + * The sortedSampleIds input + */ + public final Operand sortedSampleIds; + + /** + * The sortedTokenIds input + */ + public final Operand sortedTokenIds; + + /** + * The sortedGains input + */ + public final Operand sortedGains; + + /** + * The activationGradients input + */ + public final Operand activationGradients; + + /** + * The learningRate input + */ + public final Operand learningRate; + + /** + * The embeddingTable input + */ + public final Operand embeddingTable; + + /** + * The numMinibatchesPerPhysicalSparseCore input + */ + public final Operand numMinibatchesPerPhysicalSparseCore; + + /** + * The clipWeightMin attribute + */ + public final float clipWeightMin; + + /** + * The clipWeightMax attribute + */ + public final float clipWeightMax; + + /** + * The tableName attribute + */ + public final String tableName; + + public Inputs(GraphOperation op) { + super(new XlaSparseDenseMatmulGradWithSgdAndCsrInput(op), op, Arrays.asList("clip_weight_min", "clip_weight_max", "table_name")); + int inputIndex = 0; + rowPointers = (Operand) op.input(inputIndex++); + sortedSampleIds = (Operand) op.input(inputIndex++); + sortedTokenIds = (Operand) op.input(inputIndex++); + sortedGains = (Operand) op.input(inputIndex++); + activationGradients = (Operand) op.input(inputIndex++); + learningRate = 
(Operand) op.input(inputIndex++); + embeddingTable = (Operand) op.input(inputIndex++); + numMinibatchesPerPhysicalSparseCore = (Operand) op.input(inputIndex++); + clipWeightMin = op.attributes().getAttrFloat("clip_weight_min"); + clipWeightMax = op.attributes().getAttrFloat("clip_weight_max"); + tableName = op.attributes().getAttrString("table_name"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize.java new file mode 100644 index 00000000000..65c059d2821 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize.java @@ -0,0 +1,263 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.xla; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; + +/** + * The XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize operation + */ +@OpMetadata( + opType = XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize.OP_NAME, + inputsClass = XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize.Inputs.class +) +public final class XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize"; + + private Output updatedEmbeddingTable; + + public XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + updatedEmbeddingTable = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize operation. 
+ * + * @param scope current scope + * @param rowPointers The rowPointers value + * @param sortedSampleIds The sortedSampleIds value + * @param sortedTokenIds The sortedTokenIds value + * @param sortedGains The sortedGains value + * @param activationGradients The activationGradients value + * @param learningRate The learningRate value + * @param embeddingTable The embeddingTable value + * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value + * @param maxIdsPerSparseCore The value of the maxIdsPerSparseCore attribute + * @param maxUniqueIdsPerSparseCore The value of the maxUniqueIdsPerSparseCore attribute + * @param tableName The value of the tableName attribute + * @param options carries optional attribute values + * @return a new instance of XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize + */ + @Endpoint( + describeByClass = true + ) + public static XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize create(Scope scope, + Operand rowPointers, Operand sortedSampleIds, Operand sortedTokenIds, + Operand sortedGains, Operand activationGradients, + Operand learningRate, Operand embeddingTable, + Operand numMinibatchesPerPhysicalSparseCore, Long maxIdsPerSparseCore, + Long maxUniqueIdsPerSparseCore, String tableName, Options... 
options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize"); + opBuilder.addInput(rowPointers.asOutput()); + opBuilder.addInput(sortedSampleIds.asOutput()); + opBuilder.addInput(sortedTokenIds.asOutput()); + opBuilder.addInput(sortedGains.asOutput()); + opBuilder.addInput(activationGradients.asOutput()); + opBuilder.addInput(learningRate.asOutput()); + opBuilder.addInput(embeddingTable.asOutput()); + opBuilder.addInput(numMinibatchesPerPhysicalSparseCore.asOutput()); + opBuilder.setAttr("max_ids_per_sparse_core", maxIdsPerSparseCore); + opBuilder.setAttr("max_unique_ids_per_sparse_core", maxUniqueIdsPerSparseCore); + opBuilder.setAttr("table_name", tableName); + if (options != null) { + for (Options opts : options) { + if (opts.clipWeightMin != null) { + opBuilder.setAttr("clip_weight_min", opts.clipWeightMin); + } + if (opts.clipWeightMax != null) { + opBuilder.setAttr("clip_weight_max", opts.clipWeightMax); + } + } + } + return new XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize(opBuilder.build()); + } + + /** + * Sets the clipWeightMin option. + * + * @param clipWeightMin the clipWeightMin option + * @return this Options instance. + */ + public static Options clipWeightMin(Float clipWeightMin) { + return new Options().clipWeightMin(clipWeightMin); + } + + /** + * Sets the clipWeightMax option. + * + * @param clipWeightMax the clipWeightMax option + * @return this Options instance. + */ + public static Options clipWeightMax(Float clipWeightMax) { + return new Options().clipWeightMax(clipWeightMax); + } + + /** + * Gets updatedEmbeddingTable. + * + * @return updatedEmbeddingTable. 
+ */ + public Output updatedEmbeddingTable() { + return updatedEmbeddingTable; + } + + @Override + public Output asOutput() { + return updatedEmbeddingTable; + } + + /** + * Optional attributes for {@link org.tensorflow.op.xla.XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize} + */ + public static class Options { + private Float clipWeightMin; + + private Float clipWeightMax; + + private Options() { + } + + /** + * Sets the clipWeightMin option. + * + * @param clipWeightMin the clipWeightMin option + * @return this Options instance. + */ + public Options clipWeightMin(Float clipWeightMin) { + this.clipWeightMin = clipWeightMin; + return this; + } + + /** + * Sets the clipWeightMax option. + * + * @param clipWeightMax the clipWeightMax option + * @return this Options instance. + */ + public Options clipWeightMax(Float clipWeightMax) { + this.clipWeightMax = clipWeightMax; + return this; + } + } + + @OpInputsMetadata( + outputsClass = XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize.class + ) + public static class Inputs extends RawOpInputs { + /** + * The rowPointers input + */ + public final Operand rowPointers; + + /** + * The sortedSampleIds input + */ + public final Operand sortedSampleIds; + + /** + * The sortedTokenIds input + */ + public final Operand sortedTokenIds; + + /** + * The sortedGains input + */ + public final Operand sortedGains; + + /** + * The activationGradients input + */ + public final Operand activationGradients; + + /** + * The learningRate input + */ + public final Operand learningRate; + + /** + * The embeddingTable input + */ + public final Operand embeddingTable; + + /** + * The numMinibatchesPerPhysicalSparseCore input + */ + public final Operand numMinibatchesPerPhysicalSparseCore; + + /** + * The clipWeightMin attribute + */ + public final float clipWeightMin; + + /** + * The clipWeightMax attribute + */ + public final float clipWeightMax; + + /** + * The maxIdsPerSparseCore attribute + */ + public final long maxIdsPerSparseCore; + 
+ /** + * The maxUniqueIdsPerSparseCore attribute + */ + public final long maxUniqueIdsPerSparseCore; + + /** + * The tableName attribute + */ + public final String tableName; + + public Inputs(GraphOperation op) { + super(new XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize(op), op, Arrays.asList("clip_weight_min", "clip_weight_max", "max_ids_per_sparse_core", "max_unique_ids_per_sparse_core", "table_name")); + int inputIndex = 0; + rowPointers = (Operand) op.input(inputIndex++); + sortedSampleIds = (Operand) op.input(inputIndex++); + sortedTokenIds = (Operand) op.input(inputIndex++); + sortedGains = (Operand) op.input(inputIndex++); + activationGradients = (Operand) op.input(inputIndex++); + learningRate = (Operand) op.input(inputIndex++); + embeddingTable = (Operand) op.input(inputIndex++); + numMinibatchesPerPhysicalSparseCore = (Operand) op.input(inputIndex++); + clipWeightMin = op.attributes().getAttrFloat("clip_weight_min"); + clipWeightMax = op.attributes().getAttrFloat("clip_weight_max"); + maxIdsPerSparseCore = op.attributes().getAttrInt("max_ids_per_sparse_core"); + maxUniqueIdsPerSparseCore = op.attributes().getAttrInt("max_unique_ids_per_sparse_core"); + tableName = op.attributes().getAttrString("table_name"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulWithCsrInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulWithCsrInput.java new file mode 100644 index 00000000000..793c460676f --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulWithCsrInput.java @@ -0,0 +1,190 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.xla; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; + +/** + * The XlaSparseDenseMatmulWithCsrInput operation + */ +@OpMetadata( + opType = XlaSparseDenseMatmulWithCsrInput.OP_NAME, + inputsClass = XlaSparseDenseMatmulWithCsrInput.Inputs.class +) +@Operator( + group = "xla" +) +public final class XlaSparseDenseMatmulWithCsrInput extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "XlaSparseDenseMatmulWithCsrInput"; + + private Output activations; + + public XlaSparseDenseMatmulWithCsrInput(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + activations = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new XlaSparseDenseMatmulWithCsrInput operation. 
+ * + * @param scope current scope + * @param rowPointers The rowPointers value + * @param sortedSampleIds The sortedSampleIds value + * @param sortedTokenIds The sortedTokenIds value + * @param sortedGains The sortedGains value + * @param embeddingTable The embeddingTable value + * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value + * @param inputSize The value of the inputSize attribute + * @param quantizationConfigLow The value of the quantizationConfigLow attribute + * @param quantizationConfigHigh The value of the quantizationConfigHigh attribute + * @param quantizationConfigNumBuckets The value of the quantizationConfigNumBuckets attribute + * @param tableName The value of the tableName attribute + * @return a new instance of XlaSparseDenseMatmulWithCsrInput + */ + @Endpoint( + describeByClass = true + ) + public static XlaSparseDenseMatmulWithCsrInput create(Scope scope, Operand rowPointers, + Operand sortedSampleIds, Operand sortedTokenIds, + Operand sortedGains, Operand embeddingTable, + Operand numMinibatchesPerPhysicalSparseCore, Long inputSize, + Float quantizationConfigLow, Float quantizationConfigHigh, Long quantizationConfigNumBuckets, + String tableName) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "XlaSparseDenseMatmulWithCsrInput"); + opBuilder.addInput(rowPointers.asOutput()); + opBuilder.addInput(sortedSampleIds.asOutput()); + opBuilder.addInput(sortedTokenIds.asOutput()); + opBuilder.addInput(sortedGains.asOutput()); + opBuilder.addInput(embeddingTable.asOutput()); + opBuilder.addInput(numMinibatchesPerPhysicalSparseCore.asOutput()); + opBuilder.setAttr("input_size", inputSize); + opBuilder.setAttr("quantization_config_low", quantizationConfigLow); + opBuilder.setAttr("quantization_config_high", quantizationConfigHigh); + opBuilder.setAttr("quantization_config_num_buckets", quantizationConfigNumBuckets); + opBuilder.setAttr("table_name", tableName); + return new 
XlaSparseDenseMatmulWithCsrInput(opBuilder.build()); + } + + /** + * Gets activations. + * + * @return activations. + */ + public Output activations() { + return activations; + } + + @Override + public Output asOutput() { + return activations; + } + + @OpInputsMetadata( + outputsClass = XlaSparseDenseMatmulWithCsrInput.class + ) + public static class Inputs extends RawOpInputs { + /** + * The rowPointers input + */ + public final Operand rowPointers; + + /** + * The sortedSampleIds input + */ + public final Operand sortedSampleIds; + + /** + * The sortedTokenIds input + */ + public final Operand sortedTokenIds; + + /** + * The sortedGains input + */ + public final Operand sortedGains; + + /** + * The embeddingTable input + */ + public final Operand embeddingTable; + + /** + * The numMinibatchesPerPhysicalSparseCore input + */ + public final Operand numMinibatchesPerPhysicalSparseCore; + + /** + * The inputSize attribute + */ + public final long inputSize; + + /** + * The quantizationConfigLow attribute + */ + public final float quantizationConfigLow; + + /** + * The quantizationConfigHigh attribute + */ + public final float quantizationConfigHigh; + + /** + * The quantizationConfigNumBuckets attribute + */ + public final long quantizationConfigNumBuckets; + + /** + * The tableName attribute + */ + public final String tableName; + + public Inputs(GraphOperation op) { + super(new XlaSparseDenseMatmulWithCsrInput(op), op, Arrays.asList("input_size", "quantization_config_low", "quantization_config_high", "quantization_config_num_buckets", "table_name")); + int inputIndex = 0; + rowPointers = (Operand) op.input(inputIndex++); + sortedSampleIds = (Operand) op.input(inputIndex++); + sortedTokenIds = (Operand) op.input(inputIndex++); + sortedGains = (Operand) op.input(inputIndex++); + embeddingTable = (Operand) op.input(inputIndex++); + numMinibatchesPerPhysicalSparseCore = (Operand) op.input(inputIndex++); + inputSize = op.attributes().getAttrInt("input_size"); + 
quantizationConfigLow = op.attributes().getAttrFloat("quantization_config_low"); + quantizationConfigHigh = op.attributes().getAttrFloat("quantization_config_high"); + quantizationConfigNumBuckets = op.attributes().getAttrInt("quantization_config_num_buckets"); + tableName = op.attributes().getAttrString("table_name"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulWithStaticBufferSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulWithStaticBufferSize.java new file mode 100644 index 00000000000..268a9b0fc4b --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSparseDenseMatmulWithStaticBufferSize.java @@ -0,0 +1,202 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.xla; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt32; + +/** + * The XlaSparseDenseMatmulWithStaticBufferSize operation + */ +@OpMetadata( + opType = XlaSparseDenseMatmulWithStaticBufferSize.OP_NAME, + inputsClass = XlaSparseDenseMatmulWithStaticBufferSize.Inputs.class +) +public final class XlaSparseDenseMatmulWithStaticBufferSize extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "XlaSparseDenseMatmulWithStaticBufferSize"; + + private Output activations; + + public XlaSparseDenseMatmulWithStaticBufferSize(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + activations = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new XlaSparseDenseMatmulWithStaticBufferSize operation. 
+ * + * @param scope current scope + * @param rowPointers The rowPointers value + * @param sortedSampleIds The sortedSampleIds value + * @param sortedTokenIds The sortedTokenIds value + * @param sortedGains The sortedGains value + * @param embeddingTable The embeddingTable value + * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value + * @param inputSize The value of the inputSize attribute + * @param quantizationConfigLow The value of the quantizationConfigLow attribute + * @param quantizationConfigHigh The value of the quantizationConfigHigh attribute + * @param quantizationConfigNumBuckets The value of the quantizationConfigNumBuckets attribute + * @param maxIdsPerSparseCore The value of the maxIdsPerSparseCore attribute + * @param maxUniqueIdsPerSparseCore The value of the maxUniqueIdsPerSparseCore attribute + * @param tableName The value of the tableName attribute + * @return a new instance of XlaSparseDenseMatmulWithStaticBufferSize + */ + @Endpoint( + describeByClass = true + ) + public static XlaSparseDenseMatmulWithStaticBufferSize create(Scope scope, + Operand rowPointers, Operand sortedSampleIds, Operand sortedTokenIds, + Operand sortedGains, Operand embeddingTable, + Operand numMinibatchesPerPhysicalSparseCore, Long inputSize, + Float quantizationConfigLow, Float quantizationConfigHigh, Long quantizationConfigNumBuckets, + Long maxIdsPerSparseCore, Long maxUniqueIdsPerSparseCore, String tableName) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "XlaSparseDenseMatmulWithStaticBufferSize"); + opBuilder.addInput(rowPointers.asOutput()); + opBuilder.addInput(sortedSampleIds.asOutput()); + opBuilder.addInput(sortedTokenIds.asOutput()); + opBuilder.addInput(sortedGains.asOutput()); + opBuilder.addInput(embeddingTable.asOutput()); + opBuilder.addInput(numMinibatchesPerPhysicalSparseCore.asOutput()); + opBuilder.setAttr("input_size", inputSize); + opBuilder.setAttr("quantization_config_low", quantizationConfigLow); 
+ opBuilder.setAttr("quantization_config_high", quantizationConfigHigh); + opBuilder.setAttr("quantization_config_num_buckets", quantizationConfigNumBuckets); + opBuilder.setAttr("max_ids_per_sparse_core", maxIdsPerSparseCore); + opBuilder.setAttr("max_unique_ids_per_sparse_core", maxUniqueIdsPerSparseCore); + opBuilder.setAttr("table_name", tableName); + return new XlaSparseDenseMatmulWithStaticBufferSize(opBuilder.build()); + } + + /** + * Gets activations. + * + * @return activations. + */ + public Output activations() { + return activations; + } + + @Override + public Output asOutput() { + return activations; + } + + @OpInputsMetadata( + outputsClass = XlaSparseDenseMatmulWithStaticBufferSize.class + ) + public static class Inputs extends RawOpInputs { + /** + * The rowPointers input + */ + public final Operand rowPointers; + + /** + * The sortedSampleIds input + */ + public final Operand sortedSampleIds; + + /** + * The sortedTokenIds input + */ + public final Operand sortedTokenIds; + + /** + * The sortedGains input + */ + public final Operand sortedGains; + + /** + * The embeddingTable input + */ + public final Operand embeddingTable; + + /** + * The numMinibatchesPerPhysicalSparseCore input + */ + public final Operand numMinibatchesPerPhysicalSparseCore; + + /** + * The inputSize attribute + */ + public final long inputSize; + + /** + * The quantizationConfigLow attribute + */ + public final float quantizationConfigLow; + + /** + * The quantizationConfigHigh attribute + */ + public final float quantizationConfigHigh; + + /** + * The quantizationConfigNumBuckets attribute + */ + public final long quantizationConfigNumBuckets; + + /** + * The maxIdsPerSparseCore attribute + */ + public final long maxIdsPerSparseCore; + + /** + * The maxUniqueIdsPerSparseCore attribute + */ + public final long maxUniqueIdsPerSparseCore; + + /** + * The tableName attribute + */ + public final String tableName; + + public Inputs(GraphOperation op) { + super(new 
XlaSparseDenseMatmulWithStaticBufferSize(op), op, Arrays.asList("input_size", "quantization_config_low", "quantization_config_high", "quantization_config_num_buckets", "max_ids_per_sparse_core", "max_unique_ids_per_sparse_core", "table_name")); + int inputIndex = 0; + rowPointers = (Operand) op.input(inputIndex++); + sortedSampleIds = (Operand) op.input(inputIndex++); + sortedTokenIds = (Operand) op.input(inputIndex++); + sortedGains = (Operand) op.input(inputIndex++); + embeddingTable = (Operand) op.input(inputIndex++); + numMinibatchesPerPhysicalSparseCore = (Operand) op.input(inputIndex++); + inputSize = op.attributes().getAttrInt("input_size"); + quantizationConfigLow = op.attributes().getAttrFloat("quantization_config_low"); + quantizationConfigHigh = op.attributes().getAttrFloat("quantization_config_high"); + quantizationConfigNumBuckets = op.attributes().getAttrInt("quantization_config_num_buckets"); + maxIdsPerSparseCore = op.attributes().getAttrInt("max_ids_per_sparse_core"); + maxUniqueIdsPerSparseCore = op.attributes().getAttrInt("max_unique_ids_per_sparse_core"); + tableName = op.attributes().getAttrString("table_name"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/module-info.java b/tensorflow-core/tensorflow-core-api/src/main/java/module-info.java index 9bd62485a8e..b12e7042b48 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/module-info.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/module-info.java @@ -14,6 +14,8 @@ limitations under the License. ======================================================================= */ + +/** Core module implementing the TensorFlow Java API and operator definitions. 
*/ module tensorflow { requires transitive org.tensorflow.ndarray; requires transitive tensorflow.nativelib; @@ -34,7 +36,6 @@ exports org.tensorflow.op.data.experimental; exports org.tensorflow.op.debugging; exports org.tensorflow.op.dtypes; - exports org.tensorflow.op.estimator; exports org.tensorflow.op.image; exports org.tensorflow.op.io; exports org.tensorflow.op.linalg; @@ -45,7 +46,6 @@ exports org.tensorflow.op.quantization; exports org.tensorflow.op.ragged; exports org.tensorflow.op.random; - exports org.tensorflow.op.risc; exports org.tensorflow.op.signal; exports org.tensorflow.op.sparse; exports org.tensorflow.op.strings; diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerSession.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerSession.java index 62b93bcf219..84fc4d7fe9c 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerSession.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerSession.java @@ -1,4 +1,4 @@ -/* Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2019-2024 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -115,7 +115,7 @@ public Options devicePlacementPolicy(DevicePlacementPolicy value) { * * @param config a config protocol buffer * @see config.proto + * href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/protobuf/config.proto">config.proto */ public Options config(ConfigProto config) { this.config = config; diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/NativeFunction.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/NativeFunction.java index 245fff70d1a..4dbde8f2473 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/NativeFunction.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/NativeFunction.java @@ -20,9 +20,7 @@ import com.google.protobuf.InvalidProtocolBufferException; import java.util.ArrayDeque; -import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; @@ -92,7 +90,7 @@ public synchronized List getDependencies() { } }); } - dependencies = Collections.unmodifiableList(new ArrayList<>(deps)); + dependencies = List.copyOf(deps); } return dependencies; diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SavedModelBundle.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SavedModelBundle.java index 9f63f3e7fea..dbbb1ab759d 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SavedModelBundle.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SavedModelBundle.java @@ -1,4 +1,4 @@ -/* Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2019-2024 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -250,9 +250,8 @@ public Exporter withSignature(Signature signature) { /** * Add multiple signatures to the model. Wraps {@link #withSignature(Signature)} * - *

    Either {@link #withSession(Session)} or {@link * #withFunction(SessionFunction)} must - * be called before this method, and the session set there will be used for these - * signatures. + *

    Either {@link #withSession(Session)} or {@link #withFunction(SessionFunction)} must be + * called before this method, and the session set there will be used for these signatures. * * @throws IllegalStateException if no session has been set * @return this @@ -313,6 +312,25 @@ public void export() throws IOException { private final Map functions = new LinkedHashMap<>(); } + /** + * Load a saved model from an export directory. The model that is being loaded should be created + * using the Saved Model + * API. + * + *

    This method is a shorthand for: + * + *

    {@code
    +   * SavedModelBundle.loader().load();
    +   * }
    + * + * @param exportDir the directory path containing a saved model. + * @return a bundle containing the graph and associated session. + */ + public static SavedModelBundle load(String exportDir) { + Loader loader = loader(exportDir); + return loader.load(); + } + /** * Load a saved model from an export directory. The model that is being loaded should be created * using the Saved Model @@ -330,9 +348,7 @@ public void export() throws IOException { */ public static SavedModelBundle load(String exportDir, String... tags) { Loader loader = loader(exportDir); - if (tags != null && tags.length > 0) { - loader.withTags(tags); - } + loader.withTags(tags); return loader.load(); } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Session.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Session.java index 5b9f2f1985e..80d99f227ec 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Session.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Session.java @@ -221,8 +221,8 @@ public final class Runner { * Avoid evaluating {@code operation} and substitute {@code t} for the value it produces. * * @param operation Is either the string name of the operation, in which case this method is a - * shorthand for {@code feed(operation, 0)}, or it is a string of the form - * operation_name:output_index , in which case this method acts like {@code + * shorthand for {@code feed(operation, 0)}, or it is a string of the form {@code + * operation_name:output_index}, in which case this method acts like {@code * feed(operation_name, output_index)}. These colon-separated names are commonly used in the * {@code SignatureDef} protocol buffer messages that are included in {@link * SavedModelBundle#metaGraphDef()}. @@ -283,8 +283,8 @@ public Runner feed(Operand operand, Tensor t) { *

    If the output is a resource variable, will fetch the value. * * @param operation Is either the string name of the operation, in which case this method is a - * shorthand for {@code fetch(operation, 0)}, or it is a string of the form - * operation_name:output_index , in which case this method acts like {@code + * shorthand for {@code fetch(operation, 0)}, or it is a string of the form {@code + * operation_name:output_index}, in which case this method acts like {@code * fetch(operation_name, output_index)}. These colon-separated names are commonly used in * the {@code SignatureDef} protocol buffer messages that are included in {@link * SavedModelBundle#metaGraphDef()}. @@ -403,7 +403,7 @@ public Runner fetch(Operand operand) { * Tensors}. * * @param operation Is either the string name of the operation or it is a string of the form - * operation_name:output_index, where output_index will simply be ignored. + * {@code operation_name:output_index}, where {@code output_index} will simply be ignored. * @return this session runner * @throws IllegalArgumentException if no operation exists with the provided name */ @@ -467,16 +467,7 @@ public boolean isEmpty() { * Execute the graph fragments necessary to compute all requested fetches. * *

    WARNING: The caller assumes ownership of all returned {@link Tensor Tensors}, i.e., - * the caller must call {@link Tensor#close} on all elements of the returned list to free up - * resources. - * - *

    TODO(ashankar): Reconsider the return type here. Two things in particular: (a) Make it - * easier for the caller to cleanup (perhaps returning something like AutoCloseableList in - * SessionTest.java), and (b) Evaluate whether the return value should be a list, or maybe a - * {@code Map}? - * - *

    TODO(andrewmyers): It would also be good if whatever is returned here made it easier to - * extract output tensors in a type-safe way. + * the caller must call {@link Result#close} to free up resources. * * @return list of resulting tensors fetched by this session runner */ @@ -703,7 +694,7 @@ public synchronized void restore(String prefix) { markAllInitializersAsRan(); } - Graph graph() { + public Graph graph() { return graph; } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Signature.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Signature.java index 9f524ef2544..0d6866c4e70 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Signature.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Signature.java @@ -158,11 +158,15 @@ public Signature build() { return new Signature(key, signatureBuilder.build()); } - private static TensorInfo toTensorInfo(Output operand) { + static TensorInfo toTensorInfo(Output operand) { Shape shape = operand.shape(); TensorShapeProto.Builder tensorShapeBuilder = TensorShapeProto.newBuilder(); - for (int i = 0; i < shape.numDimensions(); ++i) { - tensorShapeBuilder.addDim(Dim.newBuilder().setSize(shape.size(i))); + if (shape.isUnknown()) { + tensorShapeBuilder.setUnknownRank(true); + } else { + for (int i = 0; i < shape.numDimensions(); ++i) { + tensorShapeBuilder.addDim(Dim.newBuilder().setSize(shape.get(i))); + } } return TensorInfo.newBuilder() .setDtype(operand.dataType()) diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Tensor.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Tensor.java index 4d9f2a453b7..0c243f4a2ab 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Tensor.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Tensor.java @@ -1,4 +1,4 @@ -/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. 
+/* Copyright 2016-2024 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -100,7 +100,7 @@ static T of(Class type, Shape shape, long size) { * *

    The amount of memory to allocate is derived from the datatype and the shape of the tensor. * Tensor data is initialized by calling the {@code dataInitializer}, which receives in argument - * the value returned by {@link #data()} on the allocated tensor. For example: + * the value returned by {@code data()} on the allocated tensor. For example: * *

    {@code
        * FloatNdArray data = ...
    @@ -207,10 +207,10 @@ static  T of(Class type, Shape shape, ByteDataBuffer rawData
       /**
        * Check if this tensor is sparse or not.
        *
    -   * 

    When this methods retuns {@code true}, the tensor could be cast to a {@link SparseTensor - * SparseTensor} to access its indices, values and denseShape tensors. + *

    When this method returns {@code true}, the tensor could be cast to a {@link SparseTensor} to + * access its indices, values and denseShape tensors. * - * @return true if this tensor is a sparse + * @return true if this tensor is a sparse tensor. */ default boolean isSparse() { return false; diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorMapper.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorMapper.java index 8660395b702..1abf8c115cf 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorMapper.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorMapper.java @@ -49,8 +49,7 @@ public abstract class TensorMapper { * space. * @param tensorScope scope to extend to keep a reference on the sub-tensors composing this sparse * tensor - * @return an instance of {@code T}, that could also be casted to a {@link SparseTensor - * SparseTensor} + * @return an instance of {@code T}. 
*/ protected abstract SparseTensor mapSparse( TInt64 indices, T values, TInt64 denseShape, PointerScope tensorScope); diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/buffer/ByteSequenceTensorBuffer.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/buffer/ByteSequenceTensorBuffer.java index bd886d776b7..3bf262bec14 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/buffer/ByteSequenceTensorBuffer.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/buffer/ByteSequenceTensorBuffer.java @@ -23,13 +23,11 @@ import static org.tensorflow.internal.c_api.global.tensorflow.TF_TString_GetSize; import java.nio.ReadOnlyBufferException; -import java.util.function.Function; import org.bytedeco.javacpp.BytePointer; import org.bytedeco.javacpp.Loader; import org.bytedeco.javacpp.Pointer; import org.bytedeco.javacpp.PointerScope; import org.tensorflow.internal.c_api.TF_TString; -import org.tensorflow.ndarray.NdArray; import org.tensorflow.ndarray.buffer.DataBuffer; import org.tensorflow.ndarray.impl.buffer.AbstractDataBuffer; import org.tensorflow.ndarray.impl.buffer.Validator; @@ -40,10 +38,9 @@ *

    The values are stored as an array of {@link TF_TString}, internally wrapped with {@code * tensorflow::tstring}, which is essentially a portable version of {@code std::string}. * - *

    The data of the buffer must be initialized only once, by calling {@link #init(NdArray, - * Function)}, and the buffer must have been allocated with enough space (use {@link - * #computeSize(NdArray, Function)} priory to know exactly how many bytes are required to store the - * data). + *

    The data of the buffer must be initialized only once, by calling {@link #init}, and the buffer + * must have been allocated with enough space (use {@link #computeSize} priory to know exactly how + * many bytes are required to store the data). * *

    After its data has been initialized, the buffer is read-only as it is not possible to change * safely a value without reinitializing the whole data. @@ -66,8 +63,8 @@ public static long computeSize(ByteSequenceProvider byteSequenceProvider) * *

    While it is not enforced programmatically, it is mandatory that this method is called only * once after the creation of the buffer. The buffer must have been allocated according to the - * same set of data, calling {@link #computeSize(NdArray, Function)} priory to make sure there is - * enough space to store it. + * same set of data, calling {@link #computeSize} priory to make sure there is enough space to + * store it. * * @param byteSequenceProvider produces sequences of bytes to use as the tensor data */ diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TUint16Mapper.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TUint16Mapper.java index d563302319a..43faa1199ed 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TUint16Mapper.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TUint16Mapper.java @@ -29,7 +29,7 @@ import org.tensorflow.types.TUint16; /** - * Maps memory of {@link org.tensorflow.proto.DataType#DT_Uint16} tensors to a n-dimensional data + * Maps memory of {@link org.tensorflow.proto.DataType#DT_UINT16} tensors to a n-dimensional data * space. 
*/ public final class TUint16Mapper extends TensorMapper { diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/BooleanMask.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/BooleanMask.java index 5c20bc7c9e4..f83cf577889 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/BooleanMask.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/BooleanMask.java @@ -78,7 +78,7 @@ public static Operand create( if (maskShape.numDimensions() == 0) { throw new IllegalArgumentException("Mask cannot be a scalar."); } - if (maskShape.hasUnknownDimension()) { + if (maskShape.isUnknown()) { throw new IllegalArgumentException("Mask cannot have unknown number of dimensions"); } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/BooleanMaskUpdate.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/BooleanMaskUpdate.java index 81eb5c507ea..d402bda432a 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/BooleanMaskUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/BooleanMaskUpdate.java @@ -86,7 +86,7 @@ public static Operand create( if (maskShape.numDimensions() == 0) { throw new IllegalArgumentException("Mask cannot be a scalar."); } - if (maskShape.hasUnknownDimension()) { + if (maskShape.isUnknown()) { throw new IllegalArgumentException("Mask cannot have unknown number of dimensions"); } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Constant.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Constant.java index 246d222f303..b68bdbcd289 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Constant.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Constant.java @@ -1329,11 +1329,7 @@ public 
static Constant tensorOfSameType( /** * Create a constant by making an immutable copy of {@code tensor}. {@code tensor} may be closed - * afterwards without issue. - * - *

    Note: this endpoint cannot be simply called {@code constant} since it will conflict with - * other endpoints accepting an NdArray in parameter {e.g. {@link #tensorOf(Scope, - * FloatNdArray)}}. + * afterward without issue. * * @param scope is a scope used to add the underlying operation. * @param tensor a Tensor holding the constant value diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Function.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Function.java index 04d107baa75..94d279b52fa 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Function.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Function.java @@ -1,4 +1,4 @@ -/* Copyright 2020-2021 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2020-2024 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -27,6 +27,9 @@ @Operator(name = "call") public abstract class Function { + /** Constructor. */ + public Function() {} + /** * Calls the function in an execution environment, adding its graph as a function if it isn't * already present. The inputs and outputs are keyed by the names set in the {@code Signature}. diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Shapes.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Shapes.java index a8cc8409785..309fd81f1e5 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Shapes.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Shapes.java @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2020-2024 The TensorFlow Authors. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -389,7 +389,8 @@ public static Operand head(Scope scope, Shape shape, C * * @param scope current scope * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() + * @param n the number of leading dimensions to get, must be less than or equal to the shape's + * numDimensions() * @return a 1-dimensional operand with the dimensions matching the first n dimensions of the * shape */ @@ -404,7 +405,8 @@ public static Operand take(Scope scope, Shape shape, Operand the shape datatype. * @return a 1-dimensional operand with the dimensions matching * the first n dimensions of the @@ -456,7 +458,8 @@ public static Operand tail(Scope scope, Shape shape, C * * @param scope current scope * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() + * @param n the number of leading dimensions to get, must be less than or equal to the shape's + * numDimensions() * @return a 1-dimensional operand containing the dimensions matching the last n dimensions of the * shape */ @@ -472,7 +475,8 @@ public static Operand takeLast( * * @param scope current scope * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() + * @param n the number of leading dimensions to get, must be less than or equal to the shape's + * numDimensions() * @param type the shape datatype. * @param the shape datatype. 
* @return a 1-dimensional operand containing the dimensions matching the last n dimensions of the diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/StridedSliceHelper.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/StridedSliceHelper.java index c3eccac6931..109c97c0247 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/StridedSliceHelper.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/StridedSliceHelper.java @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2020-2024 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -137,16 +137,17 @@ static StridedSliceArgs mergeIndexes(Index[] indices) { * equal to `n`, but this need not be the case. Each range specification entry can be one of the * following: * - *

    - An ellipsis (...) using {@link Indices#ellipsis()}. Ellipses are used to imply zero or - * more dimensions of full-dimension selection. For example, {@code stridedSlice(foo, - * Indices.ellipsis()} is the identity slice. + *

    - An ellipsis (...) using {@link org.tensorflow.ndarray.index.Indices#ellipsis()}. Ellipses + * are used to imply zero or more dimensions of full-dimension selection. For example, {@code + * stridedSlice(foo, Indices.ellipsis()} is the identity slice. * - *

    - A new axis using {@link Indices#newAxis()}. This is used to insert a new shape=1 - * dimension. For example, `{@code stridedSlice(foo, Indices.newAxis())} where {@code foo} is - * shape {@code (3, 4)} produces a {@code (1, 3, 4)} tensor. + *

    - A new axis using {@link org.tensorflow.ndarray.index.Indices#newAxis()}. This is used to + * insert a new shape=1 dimension. For example, `{@code stridedSlice(foo, Indices.newAxis())} + * where {@code foo} is shape {@code (3, 4)} produces a {@code (1, 3, 4)} tensor. * - *

    - A range {@code begin:end:stride} using {@link Indices#slice(Long, Long, long)} - * Index.slice()} or {@link Indices#all()}. This is used to specify how much to choose from a + *

    - A range {@code begin:end:stride} using {@link + * org.tensorflow.ndarray.index.Indices#slice(Long, Long, long)} Index.slice()} or {@link + * org.tensorflow.ndarray.index.Indices#all()}. This is used to specify how much to choose from a * given dimension. {@code stride} can be any integer but 0. {@code begin} is an integer which * represents the index of the first value to select while {@code end} represents the index of the * last value to select (exclusive). Begin and end can be null, in which case the index begins or @@ -163,10 +164,11 @@ static StridedSliceArgs mergeIndexes(Index[] indices) { * elements). For example {@code foo = [1,2,3,4]; stridedSlice(foo, Indices.slice(-2, null, -1)} * is {@code [4,3]}. * - *

    - A single index using {@link Indices#at(long)}. This is used to keep only elements that - * have a given index. For example ({@code stridedSlice(foo, Indices.at(2))} on a shape {@code - * (5,6)} tensor produces a shape {@code (6,)} tensor. The dimension can be kept with size one - * using {@link Indices#at(long, boolean)}. + *

    - A single index using {@link org.tensorflow.ndarray.index.Indices#at(long)}. This is used + * to keep only elements that have a given index. For example ({@code stridedSlice(foo, + * Indices.at(2))} on a shape {@code (5,6)} tensor produces a shape {@code (6,)} tensor. The + * dimension can be kept with size one using {@link org.tensorflow.ndarray.index.Indices#at(long, + * boolean)}. * *

    These semantics generally follow NumPy's indexing semantics, which can be found here: https://numpy.org/doc/stable/reference/arrays.indexing.html @@ -175,9 +177,9 @@ static StridedSliceArgs mergeIndexes(Index[] indices) { * * @param scope current scope * @param data type for {@code output()} output - * @param indices The indices to slice. See {@link Indices}. + * @param indices The indices to slice. See {@link org.tensorflow.ndarray.index.Indices}. * @return a new instance of StridedSlice - * @see Indices + * @see org.tensorflow.ndarray.index.Indices */ @Endpoint(name = "stridedSlice") public static StridedSlice stridedSlice( @@ -210,9 +212,10 @@ public static StridedSlice stridedSlice( * @param scope current scope * @param ref the tensor to assign to. * @param value the value to assign. - * @param indices The indices to slice. See {@link Indices}. + * @param indices The indices to slice. See {@link org.tensorflow.ndarray.index.Indices}. * @return a new instance of StridedSliceAssign - * @see org.tensorflow.op.Ops#stridedSlice(Operand, Index...) + * @see org.tensorflow.op.Ops#stridedSlice(org.tensorflow.Operand, + * org.tensorflow.ndarray.index.Index...) */ @Endpoint(name = "stridedSliceAssign") public static StridedSliceAssign stridedSliceAssign( diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/package-info.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/package-info.java index 983cda5260c..49cdef2a624 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/package-info.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/package-info.java @@ -16,10 +16,12 @@ /** * Defines classes to build, save, load and execute TensorFlow models. * - *

    WARNING: The API is currently experimental and is not covered by TensorFlow API stability guarantees. See README.md - * for installation instructions. + *

    API Stability: Since version 1.0.0, the TensorFlow Java API is covered by TensorFlow API stability guarantees. + * Please note that as this library is a wrapper for the TensorFlow C API, its stability is subject + * to the stability of the underlying upstream TensorFlow project. See the README.md for installation + * instructions. * *

    The LabelImage diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TBfloat16.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TBfloat16.java index f7f29de8424..c511262e339 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TBfloat16.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TBfloat16.java @@ -120,11 +120,11 @@ static TBfloat16 tensorOf(Shape shape, Consumer dataInit) { } /** - * Create a sparse tensors from {@code indices}, {@code values} and {@code denseShape} dense + * Create a sparse tensor from {@code indices}, {@code values} and {@code denseShape} dense * tensors, with a default value of zero. * - *

    The returned instance also implements the {@link SparseTensor SparseTensor} - * interface, allowing a user to access directly the dense tensors when needed. + *

    The returned instance also implements the {@link SparseTensor} interface, allowing a user to + * access directly the dense tensors when needed. * * @param indices A 2-D tensor of shape {@code [N, ndims]}, that specifies the indices of the * elements in the sparse tensor that contain non-default values (elements are zero-indexed). diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TBool.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TBool.java index ef218e6d24c..eee01cd0892 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TBool.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TBool.java @@ -1,5 +1,5 @@ /* - * Copyright 2019 The TensorFlow Authors. All Rights Reserved. + * Copyright 2019-2024 The TensorFlow Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -111,11 +111,11 @@ static TBool tensorOf(Shape shape, Consumer dataInit) { } /** - * Create a sparse tensors from {@code indices}, {@code values} and {@code denseShape} dense + * Create a sparse tensor from {@code indices}, {@code values} and {@code denseShape} dense * tensors, with a default value of {@code false}. * - *

    The returned instance also implements the {@link SparseTensor SparseTensor} - * interface, allowing a user to access directly the dense tensors when needed. + *

    The returned instance also implements the {@link SparseTensor} interface, allowing a user to + * access directly the dense tensors when needed. * * @param indices A 2-D tensor of shape {@code [N, ndims]}, that specifies the indices of the * elements in the sparse tensor that contain non-default values (elements are zero-indexed). diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat16.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat16.java index 10f995f88a5..8b590b58339 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat16.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat16.java @@ -117,11 +117,11 @@ static TFloat16 tensorOf(Shape shape, Consumer dataInit) { } /** - * Create a sparse tensors from {@code indices}, {@code values} and {@code denseShape} dense + * Create a sparse tensor from {@code indices}, {@code values} and {@code denseShape} dense * tensors, with a default value of zero. * - *

    The returned instance also implements the {@link SparseTensor SparseTensor} - * interface, allowing a user to access directly the dense tensors when needed. + *

    The returned instance also implements the {@link SparseTensor} interface, allowing a user to + * access directly the dense tensors when needed. * * @param indices A 2-D tensor of shape {@code [N, ndims]}, that specifies the indices of the * elements in the sparse tensor that contain non-default values (elements are zero-indexed). diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat32.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat32.java index 7bbeab3701b..8b1bdf13639 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat32.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat32.java @@ -105,11 +105,11 @@ static TFloat32 tensorOf(Shape shape, Consumer dataInit) { } /** - * Create a sparse tensors from {@code indices}, {@code values} and {@code denseShape} dense + * Create a sparse tensor from {@code indices}, {@code values} and {@code denseShape} dense * tensors, with a default value of zero. * - *

    The returned instance also implements the {@link SparseTensor SparseTensor} - * interface, allowing a user to access directly the dense tensors when needed. + *

    The returned instance also implements the {@link SparseTensor} interface, allowing a user to + * access directly the dense tensors when needed. * * @param indices A 2-D tensor of shape {@code [N, ndims]}, that specifies the indices of the * elements in the sparse tensor that contain non-default values (elements are zero-indexed). diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat64.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat64.java index 5c616232454..f76323d06d4 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat64.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat64.java @@ -105,11 +105,11 @@ static TFloat64 tensorOf(Shape shape, Consumer dataInit) { } /** - * Create a sparse tensors from {@code indices}, {@code values} and {@code denseShape} dense + * Create a sparse tensor from {@code indices}, {@code values} and {@code denseShape} dense * tensors, with a default value of zero. * - *

    The returned instance also implements the {@link SparseTensor SparseTensor} - * interface, allowing a user to access directly the dense tensors when needed. + *

    The returned instance also implements the {@link SparseTensor} interface, allowing a user to + * access directly the dense tensors when needed. * * @param indices A 2-D tensor of shape {@code [N, ndims]}, that specifies the indices of the * elements in the sparse tensor that contain non-default values (elements are zero-indexed). diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TInt32.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TInt32.java index c3a54d59d36..d2fb4814a24 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TInt32.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TInt32.java @@ -103,11 +103,11 @@ static TInt32 tensorOf(Shape shape, Consumer dataInit) { } /** - * Create a sparse tensors from {@code indices}, {@code values} and {@code denseShape} dense + * Create a sparse tensor from {@code indices}, {@code values} and {@code denseShape} dense * tensors, with a default value of zero. * - *

    The returned instance also implements the {@link SparseTensor SparseTensor} - * interface, allowing a user to access directly the dense tensors when needed. + *

    The returned instance also implements the {@link SparseTensor} interface, allowing a user to + * access directly the dense tensors when needed. * * @param indices A 2-D tensor of shape {@code [N, ndims]}, that specifies the indices of the * elements in the sparse tensor that contain non-default values (elements are zero-indexed). diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TInt64.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TInt64.java index 175188278a5..57741e22641 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TInt64.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TInt64.java @@ -1,5 +1,5 @@ /* - * Copyright 2019 The TensorFlow Authors. All Rights Reserved. + * Copyright 2019-2024 The TensorFlow Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -104,11 +104,11 @@ static TInt64 tensorOf(Shape shape, Consumer dataInit) { } /** - * Create a sparse tensors from {@code indices}, {@code values} and {@code denseShape} dense + * Create a sparse tensor from {@code indices}, {@code values} and {@code denseShape} dense * tensors, with a default value of zero. * - *

    The returned instance also implements the {@link SparseTensor SparseTensor} - * interface, allowing a user to access directly the dense tensors when needed. + *

    The returned instance also implements the {@link SparseTensor} interface, allowing a user to + * access directly the dense tensors when needed. * * @param indices A 2-D tensor of shape {@code [N, ndims]}, that specifies the indices of the * elements in the sparse tensor that contain non-default values (elements are zero-indexed). diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TString.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TString.java index eee64d53a50..14b438fbe76 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TString.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TString.java @@ -1,5 +1,5 @@ /* - * Copyright 2019 The TensorFlow Authors. All Rights Reserved. + * Copyright 2019-2024 The TensorFlow Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -131,8 +131,8 @@ static TString tensorOf(Shape shape, DataBuffer data) { *

    The data will be copied from the provided buffer to the tensor after it is allocated. The * strings are encoded into bytes using the charset passed in parameter. * - *

    If charset is different than default UTF-8, then it must also be provided explicitly when - * reading data from the tensor, using {@link #using(Charset)}: + *

    If charset is different from the default UTF-8, then it must also be provided explicitly + * when reading data from the tensor, using {@link #using(Charset)}: * *

    {@code
        * // Given `originalStrings` an initialized buffer of strings
    @@ -193,11 +193,11 @@ static TString tensorOfBytes(Shape shape, DataBuffer data) {
       }
     
       /**
    -   * Create a sparse tensors from {@code indices}, {@code values} and {@code denseShape} dense
    +   * Create a sparse tensor from {@code indices}, {@code values} and {@code denseShape} dense
        * tensors, with an empty string as the default value.
        *
    -   * 

    The returned instance also implements the {@link SparseTensor SparseTensor} - * interface, allowing a user to access directly the dense tensors when needed. + *

    The returned instance also implements the {@link SparseTensor} interface, allowing a user to + * access directly the dense tensors when needed. * * @param indices A 2-D tensor of shape {@code [N, ndims]}, that specifies the indices of the * elements in the sparse tensor that contain non-default values (elements are zero-indexed). diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TUint16.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TUint16.java index 24263a48767..313df5f6ea1 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TUint16.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TUint16.java @@ -1,5 +1,5 @@ /* - * Copyright 2022 The TensorFlow Authors. All Rights Reserved. + * Copyright 2022-2024 The TensorFlow Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -105,11 +105,11 @@ static TUint16 tensorOf(Shape shape, Consumer dataInit) { } /** - * Create a sparse tensors from {@code indices}, {@code values} and {@code denseShape} dense + * Create a sparse tensor from {@code indices}, {@code values} and {@code denseShape} dense * tensors, with a default value of zero. * - *

    The returned instance also implements the {@link SparseTensor SparseTensor} - * interface, allowing a user to access directly the dense tensors when needed. + *

    The returned instance also implements the {@link SparseTensor} interface, allowing a user to + * access directly the dense tensors when needed. * * @param indices A 2-D tensor of shape {@code [N, ndims]}, that specifies the indices of the * elements in the sparse tensor that contain non-default values (elements are zero-indexed). diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TUint8.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TUint8.java index 4507ae034eb..ff9d31a6fd2 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TUint8.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TUint8.java @@ -1,5 +1,5 @@ /* - * Copyright 2019 The TensorFlow Authors. All Rights Reserved. + * Copyright 2019-2024 The TensorFlow Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -104,11 +104,11 @@ static TUint8 tensorOf(Shape shape, Consumer dataInit) { } /** - * Create a sparse tensors from {@code indices}, {@code values} and {@code denseShape} dense + * Create a sparse tensor from {@code indices}, {@code values} and {@code denseShape} dense * tensors, with a default value of zero. * - *

    The returned instance also implements the {@link SparseTensor SparseTensor} - * interface, allowing a user to access directly the dense tensors when needed. + *

    The returned instance also implements the {@link SparseTensor} interface, allowing a user to + * access directly the dense tensors when needed. * * @param indices A 2-D tensor of shape {@code [N, ndims]}, that specifies the indices of the * elements in the sparse tensor that contain non-default values (elements are zero-indexed). diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SavedModelBundleTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SavedModelBundleTest.java index 4b452984574..5eb3bf71660 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SavedModelBundleTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SavedModelBundleTest.java @@ -68,6 +68,11 @@ public class SavedModelBundleTest { @Test public void load() { + try (SavedModelBundle bundle = SavedModelBundle.load(SAVED_MODEL_PATH)) { + assertNotNull(bundle.session()); + assertNotNull(bundle.graph()); + assertNotNull(bundle.metaGraphDef()); + } try (SavedModelBundle bundle = SavedModelBundle.load(SAVED_MODEL_PATH, "serve")) { assertNotNull(bundle.session()); assertNotNull(bundle.graph()); diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SignatureTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SignatureTest.java index 8f636fb7459..28e97939aa2 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SignatureTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SignatureTest.java @@ -19,8 +19,11 @@ import java.util.Map; import org.junit.jupiter.api.Test; import org.tensorflow.Signature.TensorDescription; +import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Ops; +import org.tensorflow.op.core.Placeholder; import org.tensorflow.proto.DataType; +import org.tensorflow.types.TInt32; public class SignatureTest { @@ -80,4 +83,29 @@ public void emptyMethodNameConvertedToNull() { 
signature = Signature.builder().key("f").methodName(null).build(); assertNull(signature.methodName()); } + + @Test + public void createTensorInfoFromOperandWithUnknownShape() { + try (Graph g = new Graph()) { + var tf = Ops.create(g); + var placeholder = tf.placeholder(TInt32.class); + var tensorInfo = Signature.Builder.toTensorInfo(placeholder.asOutput()); + assertTrue(tensorInfo.getTensorShape().getUnknownRank()); + assertEquals(0, tensorInfo.getTensorShape().getDimCount()); + } + } + + @Test + public void createTensorInfoFromOperandWithPartiallyUnknownShape() { + try (Graph g = new Graph()) { + var tf = Ops.create(g); + var shape = Shape.of(Shape.UNKNOWN_SIZE, 10); + var placeholder = tf.placeholder(TInt32.class, Placeholder.shape(shape)); + var tensorInfo = Signature.Builder.toTensorInfo(placeholder.asOutput()); + assertFalse(tensorInfo.getTensorShape().getUnknownRank()); + assertEquals(2, tensorInfo.getTensorShape().getDimCount()); + assertEquals(-1, tensorInfo.getTensorShape().getDim(0).getSize()); + assertEquals(10, tensorInfo.getTensorShape().getDim(1).getSize()); + } + } } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskTest.java index 246b44b8077..53af60d44bf 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskTest.java @@ -17,6 +17,7 @@ package org.tensorflow.op.core; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; import org.junit.jupiter.api.Test; import org.tensorflow.Graph; @@ -66,4 +67,39 @@ public void testBooleanMask() { } } } + + @Test + public void testBooleanMaskWithPartiallyUnknownShape() { + try (Graph g = new Graph(); + Session sess = new Session(g)) { + Scope scope = new OpScope(g); 
+ + Operand input = Constant.arrayOf(scope, 1, 2, 3, 4); + Placeholder inputMask = + Placeholder.create(scope, TBool.class, Placeholder.shape(Shape.of(Shape.UNKNOWN_SIZE))); + + Operand output = BooleanMask.create(scope, input, inputMask); + + try (TBool mask = TBool.vectorOf(true, false, false, true); + TInt32 result = (TInt32) sess.runner().feed(inputMask, mask).fetch(output).run().get(0)) { + // expected shape from Python tensorflow + assertEquals(Shape.of(2), result.shape()); + assertEquals(1, result.getInt(0)); + assertEquals(4, result.getInt(1)); + } + } + } + + @Test + public void testBooleanMaskWithUnknownShape() { + try (Graph g = new Graph()) { + Scope scope = new OpScope(g); + + Operand input = Constant.arrayOf(scope, 1, 2, 3, 4); + Placeholder inputMask = Placeholder.create(scope, TBool.class); + + assertThrows( + IllegalArgumentException.class, () -> BooleanMask.create(scope, input, inputMask)); + } + } } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskUpdateTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskUpdateTest.java index 4edbea33b0d..84f4229144b 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskUpdateTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskUpdateTest.java @@ -17,6 +17,7 @@ package org.tensorflow.op.core; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; import org.junit.jupiter.api.Test; import org.tensorflow.Graph; @@ -151,4 +152,44 @@ public void testBooleanMaskUpdateAxis() { } } } + + @Test + public void testBooleanMaskUpdateWithPartiallyUnknownShape() { + try (Graph g = new Graph(); + Session sess = new Session(g)) { + Scope scope = new OpScope(g); + + Operand input = Constant.arrayOf(scope, 1, 2, 3, 4); + Operand updates = Constant.arrayOf(scope, -1, 2); + Placeholder 
inputMask = + Placeholder.create(scope, TBool.class, Placeholder.shape(Shape.of(Shape.UNKNOWN_SIZE))); + + Operand output = BooleanMaskUpdate.create(scope, input, inputMask, updates); + + try (TBool mask = TBool.vectorOf(false, true, false, true); + TInt32 result = (TInt32) sess.runner().feed(inputMask, mask).fetch(output).run().get(0)) { + // expected shape from Python tensorflow + assertEquals(Shape.of(4), result.shape()); + assertEquals(1, result.getInt(0)); + assertEquals(-1, result.getInt(1)); + assertEquals(3, result.getInt(2)); + assertEquals(2, result.getInt(3)); + } + } + } + + @Test + public void testBooleanMaskUpdateWithUnknownShape() { + try (Graph g = new Graph()) { + Scope scope = new OpScope(g); + + Operand input = Constant.arrayOf(scope, 1, 2, 3, 4); + Operand updates = Constant.arrayOf(scope, -1, 2); + Placeholder inputMask = Placeholder.create(scope, TBool.class); + + assertThrows( + IllegalArgumentException.class, + () -> BooleanMaskUpdate.create(scope, input, inputMask, updates)); + } + } } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/IfTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/IfTest.java index 57bc0bc9ffb..16cd17cab8e 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/IfTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/IfTest.java @@ -27,6 +27,7 @@ import org.tensorflow.Session; import org.tensorflow.Signature; import org.tensorflow.op.Ops; +import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; public class IfTest { @@ -37,7 +38,8 @@ private static Operand basicIf(Ops tf, Operand a, Operand { Operand a1 = ops.placeholder(TInt32.class); Operand b1 = ops.placeholder(TInt32.class); - return Signature.builder().input("a", a1).input("b", b1).output("y", a1).build(); + Operand y = ops.identity(a1); + return Signature.builder().input("a", a1).input("b", b1).output("y", 
y).build(); }); ConcreteFunction elseBranch = @@ -45,7 +47,10 @@ private static Operand basicIf(Ops tf, Operand a, Operand { Operand a1 = ops.placeholder(TInt32.class); Operand b1 = ops.placeholder(TInt32.class); - Operand y = ops.math.neg(b1); + // Casts around the math.neg operator as it's not implemented correctly for int32 in + // GPUs at some point between TF 2.10 and TF 2.15. + Operand y = + ops.dtypes.cast(ops.math.neg(ops.dtypes.cast(b1, TFloat32.class)), TInt32.class); return Signature.builder().input("a", a1).input("b", b1).output("y", y).build(); }); diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TBoolTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TBoolTest.java new file mode 100644 index 00000000000..df5e1333b00 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TBoolTest.java @@ -0,0 +1,156 @@ +/* + * Copyright 2020 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ======================================================================= + */ + +package org.tensorflow.types; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; + +import org.junit.jupiter.api.Test; +import org.tensorflow.EagerSession; +import org.tensorflow.ndarray.NdArray; +import org.tensorflow.ndarray.NdArrays; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.ndarray.index.Indices; +import org.tensorflow.op.Ops; +import org.tensorflow.op.core.Constant; +import org.tensorflow.op.math.LogicalAnd; +import org.tensorflow.op.math.LogicalNot; +import org.tensorflow.op.math.LogicalOr; + +public class TBoolTest { + + @Test + public void createScalar() { + TBool tensorT = TBool.scalarOf(true); + assertNotNull(tensorT); + assertEquals(Shape.scalar(), tensorT.shape()); + assertEquals(true, tensorT.getObject()); + + TBool tensorF = TBool.scalarOf(false); + assertNotNull(tensorF); + assertEquals(Shape.scalar(), tensorF.shape()); + assertEquals(false, tensorF.getObject()); + } + + @Test + public void createVector() { + TBool tensor = TBool.vectorOf(true, false); + assertNotNull(tensor); + assertEquals(Shape.of(2), tensor.shape()); + assertEquals(true, tensor.getObject(0)); + assertEquals(false, tensor.getObject(1)); + } + + @Test + public void createCopy() { + NdArray bools = + NdArrays.ofObjects(Boolean.class, Shape.of(2, 2)) + .setObject(true, 0, 0) + .setObject(false, 0, 1) + .setObject(false, 1, 0) + .setObject(true, 1, 1); + + TBool tensor = TBool.tensorOf(bools); + assertNotNull(tensor); + bools.scalars().forEachIndexed((idx, s) -> assertEquals(s.getObject(), tensor.getObject(idx))); + } + + @Test + public void initializeTensorsWithBools() { + // Allocate a tensor of booleans of the shape (2, 3, 2) + TBool tensor = TBool.tensorOf(Shape.of(2, 3, 2)); + + assertEquals(3, tensor.rank()); + assertEquals(12, tensor.size()); + NdArray data = (NdArray) tensor; + + try (EagerSession 
session = EagerSession.create()) { + Ops tf = Ops.create(session); + + // Initialize tensor memory with falses and take a snapshot + data.scalars().forEach(scalar -> ((NdArray) scalar).setObject(false)); + Constant x = tf.constantOf(tensor); + + // Initialize the same tensor memory with trues and take a snapshot + data.scalars().forEach(scalar -> ((NdArray) scalar).setObject(true)); + Constant y = tf.constantOf(tensor); + + // Calculate x AND y and validate the result + LogicalAnd xAndY = tf.math.logicalAnd(x, y); + ((NdArray) xAndY.asTensor()) + .scalars() + .forEach(scalar -> assertEquals(false, scalar.getObject())); + + // Calculate x OR y and validate the result + LogicalOr xOrY = tf.math.logicalOr(x, y); + ((NdArray) xOrY.asTensor()) + .scalars() + .forEach(scalar -> assertEquals(true, scalar.getObject())); + + // Calculate !x and validate the result against y + LogicalNot notX = tf.math.logicalNot(x); + assertEquals(y.asTensor(), notX.asTensor()); + } + } + + @Test + public void setAndCompute() { + NdArray heapData = + NdArrays.ofBooleans(Shape.of(4)) + .setObject(true, 0) + .setObject(false, 1) + .setObject(true, 2) + .setObject(false, 3); + + // Creates a 2x2 matrix + try (TBool tensor = TBool.tensorOf(Shape.of(2, 2))) { + NdArray data = (NdArray) tensor; + + // Copy first 2 values of the vector to the first row of the matrix + data.set(heapData.slice(Indices.range(0, 2)), 0); + + // Copy values at an odd position in the vector as the second row of the matrix + data.set(heapData.slice(Indices.odd()), 1); + + assertEquals(true, data.getObject(0, 0)); + assertEquals(false, data.getObject(0, 1)); + assertEquals(false, data.getObject(1, 0)); + assertEquals(false, data.getObject(1, 1)); + + // Read rows of the tensor in reverse order + NdArray flippedData = data.slice(Indices.flip(), Indices.flip()); + + assertEquals(false, flippedData.getObject(0, 0)); + assertEquals(false, flippedData.getObject(0, 1)); + assertEquals(false, flippedData.getObject(1, 0)); + 
assertEquals(true, flippedData.getObject(1, 1)); + + try (EagerSession session = EagerSession.create()) { + Ops tf = Ops.create(session); + + LogicalNot sub = tf.math.logicalNot(tf.constantOf(tensor)); + NdArray result = (NdArray) sub.asTensor(); + + assertEquals(false, result.getObject(0, 0)); + assertEquals(true, result.getObject(0, 1)); + assertEquals(true, result.getObject(1, 0)); + assertEquals(true, result.getObject(1, 1)); + } + } + } +} diff --git a/tensorflow-core/tensorflow-core-generator/pom.xml b/tensorflow-core/tensorflow-core-generator/pom.xml index 1e19903ff9c..bb532f5deab 100644 --- a/tensorflow-core/tensorflow-core-generator/pom.xml +++ b/tensorflow-core/tensorflow-core-generator/pom.xml @@ -5,7 +5,7 @@ org.tensorflow tensorflow-core - 1.0.0-SNAPSHOT + 1.2.0-SNAPSHOT tensorflow-core-generator jar @@ -44,7 +44,7 @@ com.squareup javapoet - 1.12.1 + 1.13.0 com.google.protobuf @@ -62,6 +62,16 @@ commonmark 0.17.1 + + org.junit.jupiter + junit-jupiter-api + test + + + org.junit.jupiter + junit-jupiter-engine + test + diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/module-info.java b/tensorflow-core/tensorflow-core-generator/src/main/java/module-info.java index 1b155bc3af1..a6efd2561a3 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/module-info.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/module-info.java @@ -14,13 +14,18 @@ limitations under the License. ======================================================================= */ + +/** + * Code to generate the Java side implementations of TensorFlow's ops based on the TensorFlow op + * definition files. 
+ */ module tensorflow.generator { requires tensorflow.nativelib; - requires java.compiler; + requires transitive java.compiler; requires com.github.javaparser.core; requires com.google.protobuf; requires com.google.common; - requires com.squareup.javapoet; + requires transitive com.squareup.javapoet; requires org.commonmark; requires spring.core; diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/generator/op/ClassGenerator.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/generator/op/ClassGenerator.java index 51151992194..91d66134880 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/generator/op/ClassGenerator.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/generator/op/ClassGenerator.java @@ -105,7 +105,7 @@ enum RenderMode { /** * The generated options class, or null if it doesn't have one or {@link #buildOptionsClass()} has - * not been ran. + * not been run. 
*/ private TypeSpec optionsClass = null; @@ -297,8 +297,6 @@ void buildClass() { if (seenGenerics.add(typeVar.name)) { typeParams.add(typeVar); builder.addTypeVariable(typeVar); - builder.addJavadoc( - "\n@param <$L> data type for {@code $L} output\n", typeVar.name, output.getName()); } } } @@ -750,6 +748,10 @@ private void buildSecondaryFactory( body.add("$T.class", defaultTypes.get(attr)); } else { factoryBuilder.addParameter(param); + // Checking if the parameter being added is the variadic options or not + if (param.name.equals("options")) { + factoryBuilder.varargs(); + } factoryBuilder.addJavadoc("\n@param $L $L", param.name, paramTags.get(param.name)); typeVars.addAll(new ResolvedType(param.type).findGenerics()); body.add("$L", param.name); @@ -1008,7 +1010,7 @@ private Set buildInputsClass() { attrNames.add(CodeBlock.of("$S", attr.getName()).toString()); inputsBuilder.addField( FieldSpec.builder(javaType, name, Modifier.PUBLIC, Modifier.FINAL) - .addJavadoc("$L", description) + .addJavadoc("$L", parseDocumentation(description)) .build()); fieldInits.addStatement( "$L = op.attributes().getAttr$L($S)", diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/generator/op/OpGenerator.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/generator/op/OpGenerator.java index 17607b2e937..2f3c27b28f2 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/generator/op/OpGenerator.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/generator/op/OpGenerator.java @@ -37,6 +37,7 @@ import java.util.Map; import java.util.Scanner; import java.util.stream.Collectors; +import java.util.stream.Stream; import org.bytedeco.javacpp.BytePointer; import org.springframework.core.io.support.PathMatchingResourcePatternResolver; import org.tensorflow.internal.c_api.TF_ApiDefMap; @@ -72,7 +73,7 @@ public final class OpGenerator { private static final String 
DEFAULT_OP_DEF_FILE = "org/tensorflow/ops.pbtxt"; - private static final Scanner USER_PROMPT = new Scanner(System.in); + private static final Scanner USER_PROMPT = new Scanner(System.in, StandardCharsets.UTF_8); /** * Args should be {@code [base_package]}. @@ -195,7 +196,8 @@ private static String arg(String[] args, int idx) { private static OpList readOpList(String filename, InputStream protoInput) { try { if (filename.endsWith(".pbtxt")) { - return TextFormat.parse(new String(protoInput.readAllBytes()), OpList.class); + return TextFormat.parse( + new String(protoInput.readAllBytes(), StandardCharsets.UTF_8), OpList.class); } return OpList.parseFrom(protoInput); @@ -286,9 +288,8 @@ private void mergeBaseApiDefs(TF_ApiDefMap apiDefMap, TF_Status status) { } private void mergeApiDefs(TF_ApiDefMap apiDefMap, TF_Status status) { - try { - Files.walk(apiDefsPath) - .filter(p -> p.toString().endsWith(".pbtxt")) + try (Stream s = Files.walk(apiDefsPath)) { + s.filter(p -> p.toString().endsWith(".pbtxt")) .forEach( p -> { try { @@ -316,7 +317,7 @@ private void createApiDef(OpDef opDef, File apiDefFile) throws IOException { ApiDef.Visibility visibility = null; do { System.out.print( - " Choose visibility of this op [v]isible/[h]idden/[s]kip/[d]efault (default=d): "); + " Choose visibility of this op [v]isible/[h]idden/[s]kip/[d]efault (default=v): "); var value = USER_PROMPT.nextLine().trim(); if (!value.isEmpty()) { switch (value) { @@ -343,7 +344,7 @@ private void createApiDef(OpDef opDef, File apiDefFile) throws IOException { break; } } else { - visibility = ApiDef.Visibility.DEFAULT_VISIBILITY; + visibility = ApiDef.Visibility.VISIBLE; } } while (visibility == null); @@ -366,7 +367,7 @@ private void createApiDef(OpDef opDef, File apiDefFile) throws IOException { if (!apiDefFile.exists() && !apiDefFile.createNewFile()) { System.err.println("Cannot create API definition file \"" + apiDefFile.getPath() + "\""); } - try (var apiDefWriter = new FileWriter(apiDefFile)) { + 
try (var apiDefWriter = new FileWriter(apiDefFile, StandardCharsets.UTF_8)) { var apiDefs = ApiDefs.newBuilder(); apiDefs.addOp(apiDef.build()); apiDefWriter.write(TextFormat.printer().printToString(apiDefs.build())); diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/generator/op/javadoc/CoreJavaDocNodeRenderer.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/generator/op/javadoc/CoreJavaDocNodeRenderer.java index 82be0136b2b..37c26c2a292 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/generator/op/javadoc/CoreJavaDocNodeRenderer.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/generator/op/javadoc/CoreJavaDocNodeRenderer.java @@ -3,7 +3,6 @@ import com.google.common.base.CaseFormat; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.Map; @@ -152,14 +151,11 @@ public class CoreJavaDocNodeRenderer extends AbstractVisitor implements NodeRend }; private static final Set allowedHtml5Tags = new HashSet<>(Arrays.asList(html5Tags)); private static final Map urlLinkConversion = - new HashMap() { - { - put("../../../api_docs/python/math_ops", "org.tensorflow.op.MathOps"); - put( - "https://www.tensorflow.org/api_docs/python/tf/tensor_scatter_nd_update", + Map.of( + "../../../api_docs/python/math_ops", "org.tensorflow.op.MathOps", + "https://www.tensorflow.org/api_docs/python/tf/tensor_scatter_nd_update", "org.tensorflow.op.Ops#tensorScatterNdUpdate"); - } - }; + protected final JavaDocNodeRendererContext context; private final JavaDocWriter writer; private boolean firstParagraph; @@ -431,7 +427,7 @@ public void visit(Code code) { public void visit(HtmlInline htmlInline) { String text = htmlInline.getLiteral(); // handle non- JavaDoc html, e.g. 
- String tag = text.replace("\\", ""); + String tag = text.replace("/", ""); if (!allowedHtml5Tags.contains(tag.toLowerCase())) { text = text.replace("<", "<").replace(">", ">"); writer.raw(text); diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/generator/op/javadoc/JavaDocNodeRendererContext.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/generator/op/javadoc/JavaDocNodeRendererContext.java index afd43edb191..80767db57ac 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/generator/op/javadoc/JavaDocNodeRendererContext.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/generator/op/javadoc/JavaDocNodeRendererContext.java @@ -9,6 +9,8 @@ public interface JavaDocNodeRendererContext { /** + * Encode a URL into a String. + * * @param url to be encoded * @return an encoded URL (depending on the configuration) */ @@ -26,11 +28,15 @@ public interface JavaDocNodeRendererContext { Map extendAttributes(Node node, String tagName, Map attributes); /** + * Gets the HTML writer. + * * @return the HTML writer to use */ JavaDocWriter getWriter(); /** + * The HTML for a line break. + * * @return HTML that should be rendered for a soft line break */ String getSoftbreak(); @@ -45,17 +51,23 @@ public interface JavaDocNodeRendererContext { void render(Node node); /** + * Should HTML be escaped? + * * @return whether HTML blocks and tags should be escaped or not */ boolean shouldEscapeHtml(); /** + * Should URLs be sanitized? + * * @return true if the {@link UrlSanitizer} should be used. * @since 0.14.0 */ boolean shouldSanitizeUrls(); /** + * Gets the URL sanitizer. + * * @return Sanitizer to use for securing {@link Link} href and {@link Image} src if {@link * #shouldSanitizeUrls()} is true. 
* @since 0.14.0 diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/generator/op/javadoc/JavaDocRenderer.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/generator/op/javadoc/JavaDocRenderer.java index 66ea531b674..33b0aaf87b3 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/generator/op/javadoc/JavaDocRenderer.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/generator/op/javadoc/JavaDocRenderer.java @@ -65,13 +65,7 @@ private JavaDocRenderer(Builder builder) { this.nodeRendererFactories = new ArrayList<>(builder.nodeRendererFactories.size() + 1); this.nodeRendererFactories.addAll(builder.nodeRendererFactories); // Add as last. This means clients can override the rendering of core nodes if they want. - this.nodeRendererFactories.add( - new JavaDocNodeRendererFactory() { - @Override - public NodeRenderer create(JavaDocNodeRendererContext context) { - return new CoreJavaDocNodeRenderer(context); - } - }); + this.nodeRendererFactories.add(CoreJavaDocNodeRenderer::new); } /** diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/generator/op/processor/OperatorProcessor.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/generator/op/processor/OperatorProcessor.java index 57d18cb4c7b..5376d45027b 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/generator/op/processor/OperatorProcessor.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/generator/op/processor/OperatorProcessor.java @@ -459,8 +459,7 @@ private static TypeSpec buildGroupClass(OpsSpec spec) { TypeSpec.classBuilder(spec.className) .addModifiers(Modifier.PUBLIC, Modifier.FINAL) .addJavadoc( - "An API for building {@code $L} operations as {@link $T Op}s\n\n" - + "@see {@link $T}\n", + "An API for building {@code $L} operations as {@link $T Op}s\n\n" + "@see $T\n", 
spec.groupName, Names.Op, Names.Ops) @@ -546,7 +545,7 @@ private static TypeSpec buildTopClass(OpsSpec spec) { .addStatement("return new $T(scope.withSubScope(childScopeName))", Names.Ops) .addJavadoc( "Returns an API that builds operations with the provided name prefix.\n" - + "\n@see {@link $T#withSubScope(String)}\n", + + "\n@see $T#withSubScope(String)\n", Names.Scope) .build()); @@ -572,7 +571,7 @@ private static TypeSpec buildTopClass(OpsSpec spec) { .addStatement("return new Ops(scope.withName(opName))") .addJavadoc( "Returns an API that uses the provided name for an op.\n\n" - + "@see {@link $T#withName(String)}\n", + + "@see $T#withName(String)\n", Names.Scope) .build()); @@ -584,7 +583,7 @@ private static TypeSpec buildTopClass(OpsSpec spec) { .addStatement("return new Ops(scope.withDevice(deviceSpec))") .addJavadoc( "Returns an API that places the created operations on the device(s) matching the provided spec.\n\n" - + "@see {@link $T#withDevice(DeviceSpec)}\n", + + "@see $T#withDevice(DeviceSpec)\n", Names.Scope) .build()); @@ -596,7 +595,7 @@ private static TypeSpec buildTopClass(OpsSpec spec) { .addStatement("return new Ops(scope.withControlDependencies(controls))") .addJavadoc( "Returns an API that adds operations to the graph with the provided control dependencies.\n\n" - + "@see {@link $T#withControlDependencies(Iterable>)}\n", + + "@see $T#withControlDependencies(Iterable)\n", Names.Scope) .build()); @@ -609,7 +608,7 @@ private static TypeSpec buildTopClass(OpsSpec spec) { .addStatement("return withControlDependencies($T.asList(controls))", Names.Arrays) .addJavadoc( "Returns an API that adds operations to the graph with the provided control dependencies.\n\n" - + "@see {@link $T#withControlDependencies(Iterable>)}\n", + + "@see $T#withControlDependencies(Iterable)\n", Names.Scope) .build()); @@ -621,7 +620,7 @@ private static TypeSpec buildTopClass(OpsSpec spec) { .addStatement("return new Ops(scope.withControlDependencyOps(controls))") 
.addJavadoc( "Returns an API that adds operations to the graph with the provided control dependencies.\n\n" - + "@see {@link $T#withControlDependencyOps(Iterable)}\n", + + "@see $T#withControlDependencyOps(Iterable)\n", Names.Scope) .build()); @@ -634,7 +633,7 @@ private static TypeSpec buildTopClass(OpsSpec spec) { .addStatement("return withControlDependencyOps($T.asList(controls))", Names.Arrays) .addJavadoc( "Returns an API that adds operations to the graph with the provided control dependencies.\n\n" - + "@see {@link $T#withControlDependencyOps(Iterable)}\n", + + "@see $T#withControlDependencyOps(Iterable)\n", Names.Scope) .build()); diff --git a/tensorflow-core/tensorflow-core-native/.bazelrc b/tensorflow-core/tensorflow-core-native/.bazelrc index 8c72fa1398b..7437d982e91 100644 --- a/tensorflow-core/tensorflow-core-native/.bazelrc +++ b/tensorflow-core/tensorflow-core-native/.bazelrc @@ -1,3 +1,5 @@ -#build --remote_cache=https://storage.googleapis.com/tensorflow-sigs-jvm -#build --remote_upload_local_results=false build --experimental_ui_max_stdouterr_bytes=-1 + +# We don't need this cache, since we are not building TF from GitHub Actions anymore. 
Just keeping this here for reference +# build --remote_cache=https://storage.googleapis.com/tensorflow-sigs-jvm +# build --remote_upload_local_results=false diff --git a/tensorflow-core/tensorflow-core-native/.bazelversion b/tensorflow-core/tensorflow-core-native/.bazelversion index 358e78e6074..f3c238740e5 100644 --- a/tensorflow-core/tensorflow-core-native/.bazelversion +++ b/tensorflow-core/tensorflow-core-native/.bazelversion @@ -1 +1,2 @@ -6.1.0 \ No newline at end of file +6.5.0 +# NOTE: Update Bazel version in tensorflow/tools/ci_build/release/common.sh.oss \ No newline at end of file diff --git a/tensorflow-core/tensorflow-core-native/BUILD b/tensorflow-core/tensorflow-core-native/BUILD index e8d01ef0a72..b3b1a2cfcd7 100644 --- a/tensorflow-core/tensorflow-core-native/BUILD +++ b/tensorflow-core/tensorflow-core-native/BUILD @@ -4,6 +4,8 @@ java_proto_library( name = "java_proto_gen_sources", deps = [ "@org_tensorflow//tensorflow/core:protos_all", + "@local_xla//xla/tsl/protobuf:bfc_memory_map_proto", + "@local_xla//xla/tsl/protobuf:test_log_proto", "@local_tsl//tsl/protobuf:protos_all" ] ) diff --git a/tensorflow-core/tensorflow-core-native/WORKSPACE b/tensorflow-core/tensorflow-core-native/WORKSPACE index ad2c878d529..ad2c74508ad 100644 --- a/tensorflow-core/tensorflow-core-native/WORKSPACE +++ b/tensorflow-core/tensorflow-core-native/WORKSPACE @@ -14,96 +14,74 @@ http_archive( patch_tool = "patch", patch_args = ["-p1"], patch_cmds = [ - "find tensorflow third_party/xla/third_party/tsl -name \\*.proto | xargs sed -i.bak '/^option java_package/d'", - "find tensorflow third_party/xla/third_party/tsl -name \\*.proto | xargs sed -i.bak 's/^package tensorflow\\([^;]*\\).*$/package tensorflow\\1;\\noption java_package = \"org.tensorflow.proto\\1\";/'", + "find tensorflow third_party/xla/third_party/tsl third_party/xla/xla/tsl -name \\*.proto | xargs sed -i.bak '/^option java_package/d'", + "find tensorflow third_party/xla/third_party/tsl third_party/xla/xla/tsl 
-name \\*.proto | xargs sed -i.bak 's/^package tensorflow\\([^;]*\\).*$/package tensorflow\\1;\\noption java_package = \"org.tensorflow.proto\\1\";/'", ], urls = [ - "https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.15.0.tar.gz", + "https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.18.0.tar.gz", ], - sha256 = "9cec5acb0ecf2d47b16891f8bc5bc6fbfdffe1700bdadc0d9ebe27ea34f0c220", - strip_prefix = "tensorflow-2.15.0" + sha256 = "d7876f4bb0235cac60eb6316392a7c48676729860da1ab659fb440379ad5186d", + strip_prefix = "tensorflow-2.18.0" ) ##### Copy content of tensorflow/WORKSPACE here (make sure to change references of default package "//" to "@org_tensorflow//") +# buildifier: disable=load-on-top + # We must initialize hermetic python first. load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") http_archive( - name = "bazel_skylib", - sha256 = "74d544d96f4a5bb630d465ca8bbcfe231e3594e5aae57e1edbf17a6eb3ca2506", - urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/bazel-skylib/releases/download/1.3.0/bazel-skylib-1.3.0.tar.gz", - "https://github.com/bazelbuild/bazel-skylib/releases/download/1.3.0/bazel-skylib-1.3.0.tar.gz", - ], + name = "rules_java", + sha256 = "c73336802d0b4882e40770666ad055212df4ea62cfa6edf9cb0f9d29828a0934", + url = "https://github.com/bazelbuild/rules_java/releases/download/5.3.5/rules_java-5.3.5.tar.gz", ) -http_archive( - name = "rules_python", - sha256 = "9d04041ac92a0985e344235f5d946f71ac543f1b1565f2cdbc9a2aaee8adf55b", - strip_prefix = "rules_python-0.26.0", - url = "https://github.com/bazelbuild/rules_python/releases/download/0.26.0/rules_python-0.26.0.tar.gz", -) +# Initialize the TensorFlow repository and all dependencies. +# +# The cascade of load() statements and tf_workspace?() calls works around the +# restriction that load() statements need to be at the top of .bzl files. +# E.g. 
we can not retrieve a new repository with http_archive and then load() +# a macro from that repository in the same file. +load("@org_tensorflow//tensorflow:workspace3.bzl", "tf_workspace3") -load("@rules_python//python:repositories.bzl", "py_repositories") +tf_workspace3() -py_repositories() +# Initialize hermetic Python +load("@local_xla//third_party/py:python_init_rules.bzl", "python_init_rules") -load("@rules_python//python:repositories.bzl", "python_register_toolchains") -load( - "@org_tensorflow//tensorflow/tools/toolchains/python:python_repo.bzl", - "python_repository", -) - -python_repository(name = "python_version_repo") +python_init_rules() -load("@python_version_repo//:py_version.bzl", "HERMETIC_PYTHON_VERSION") +load("@local_xla//third_party/py:python_init_repositories.bzl", "python_init_repositories") -python_register_toolchains( - name = "python", - ignore_root_user_error = True, - python_version = HERMETIC_PYTHON_VERSION, +python_init_repositories( + default_python_version = "system", + local_wheel_dist_folder = "dist", + local_wheel_inclusion_list = [ + "tensorflow*", + "tf_nightly*", + ], + local_wheel_workspaces = ["//:WORKSPACE"], + requirements = { + "3.9": "@org_tensorflow//:requirements_lock_3_9.txt", + "3.10": "@org_tensorflow//:requirements_lock_3_10.txt", + "3.11": "@org_tensorflow//:requirements_lock_3_11.txt", + "3.12": "@org_tensorflow//:requirements_lock_3_12.txt", + }, ) -load("@python//:defs.bzl", "interpreter") -load("@rules_python//python:pip.bzl", "package_annotation", "pip_parse") +load("@local_xla//third_party/py:python_init_toolchains.bzl", "python_init_toolchains") -NUMPY_ANNOTATIONS = { - "numpy": package_annotation( - additive_build_content = """\ -filegroup( - name = "includes", - srcs = glob(["site-packages/numpy/core/include/**/*.h"]), -) -cc_library( - name = "numpy_headers", - hdrs = [":includes"], - strip_include_prefix="site-packages/numpy/core/include/", -) -""", - ), -} +python_init_toolchains() -#pip_parse( -# name = 
"pypi", -# annotations = NUMPY_ANNOTATIONS, -# python_interpreter_target = interpreter, -# requirements = "//:requirements_lock_" + HERMETIC_PYTHON_VERSION.replace(".", "_") + ".txt", -#) +load("@local_xla//third_party/py:python_init_pip.bzl", "python_init_pip") -#load("@pypi//:requirements.bzl", "install_deps") +python_init_pip() -#install_deps() +load("@pypi//:requirements.bzl", "install_deps") -# Initialize the TensorFlow repository and all dependencies. -# -# The cascade of load() statements and tf_workspace?() calls works around the -# restriction that load() statements need to be at the top of .bzl files. -# E.g. we can not retrieve a new repository with http_archive and then load() -# a macro from that repository in the same file. -load("@org_tensorflow//tensorflow:workspace3.bzl", "tf_workspace3") - -tf_workspace3() +install_deps() +# End hermetic Python initialization load("@org_tensorflow//tensorflow:workspace2.bzl", "tf_workspace2") @@ -116,3 +94,50 @@ tf_workspace1() load("@org_tensorflow//tensorflow:workspace0.bzl", "tf_workspace0") tf_workspace0() + +load( + "@local_tsl//third_party/gpus/cuda/hermetic:cuda_json_init_repository.bzl", + "cuda_json_init_repository", +) + +cuda_json_init_repository() + +load( + "@cuda_redist_json//:distributions.bzl", + "CUDA_REDISTRIBUTIONS", + "CUDNN_REDISTRIBUTIONS", +) +load( + "@local_tsl//third_party/gpus/cuda/hermetic:cuda_redist_init_repositories.bzl", + "cuda_redist_init_repositories", + "cudnn_redist_init_repository", +) + +cuda_redist_init_repositories( + cuda_redistributions = CUDA_REDISTRIBUTIONS, +) + +cudnn_redist_init_repository( + cudnn_redistributions = CUDNN_REDISTRIBUTIONS, +) + +load( + "@local_tsl//third_party/gpus/cuda/hermetic:cuda_configure.bzl", + "cuda_configure", +) + +cuda_configure(name = "local_config_cuda") + +load( + "@local_tsl//third_party/nccl/hermetic:nccl_redist_init_repository.bzl", + "nccl_redist_init_repository", +) + +nccl_redist_init_repository() + +load( + 
"@local_tsl//third_party/nccl/hermetic:nccl_configure.bzl", + "nccl_configure", +) + +nccl_configure(name = "local_config_nccl") \ No newline at end of file diff --git a/tensorflow-core/tensorflow-core-native/pom.xml b/tensorflow-core/tensorflow-core-native/pom.xml index 00c67371547..bb9eb053c33 100644 --- a/tensorflow-core/tensorflow-core-native/pom.xml +++ b/tensorflow-core/tensorflow-core-native/pom.xml @@ -6,7 +6,7 @@ org.tensorflow tensorflow-core - 1.0.0-SNAPSHOT + 1.2.0-SNAPSHOT tensorflow-core-native jar @@ -69,7 +69,9 @@ false false + false false + false true @@ -115,19 +117,19 @@ ${project.groupId} ${project.artifactId} ${project.version} - ${javacpp.platform.macosx-x86_64} + ${javacpp.platform.macosx-arm64} ${project.groupId} ${project.artifactId} ${project.version} - ${javacpp.platform.macosx-arm64} + ${javacpp.platform.windows-x86_64} ${project.groupId} ${project.artifactId} ${project.version} - ${javacpp.platform.windows-x86_64} + ${javacpp.platform.linux-arm64} @@ -148,24 +150,24 @@ - ${project.build.directory}/${artifactId}-${project.version}-${javacpp.platform.linux-x86_64}.jar + ${project.build.directory}/${project.artifactId}-${project.version}-${javacpp.platform.linux-x86_64}.jar ${javacpp.platform.linux-x86_64} - ${project.build.directory}/${artifactId}-${project.version}-${javacpp.platform.linux-x86_64}-gpu.jar + ${project.build.directory}/${project.artifactId}-${project.version}-${javacpp.platform.linux-x86_64}-gpu.jar ${javacpp.platform.linux-x86_64}-gpu - ${project.build.directory}/${artifactId}-${project.version}-${javacpp.platform.macosx-arm64}.jar + ${project.build.directory}/${project.artifactId}-${project.version}-${javacpp.platform.macosx-arm64}.jar ${javacpp.platform.macosx-arm64} - ${project.build.directory}/${artifactId}-${project.version}-${javacpp.platform.macosx-x86_64}.jar - ${javacpp.platform.macosx-x86_64} + ${project.build.directory}/${project.artifactId}-${project.version}-${javacpp.platform.windows-x86_64}.jar + 
${javacpp.platform.windows-x86_64} - ${project.build.directory}/${artifactId}-${project.version}-${javacpp.platform.windows-x86_64}.jar - ${javacpp.platform.windows-x86_64} + ${project.build.directory}/${project.artifactId}-${project.version}-${javacpp.platform.linux-arm64}.jar + ${javacpp.platform.linux-arm64} @@ -316,6 +318,8 @@ ${native.source.directory}/org/tensorflow/internal/c_api/ ${project.basedir}/bazel-${project.artifactId}/external/org_tensorflow/ + ${project.basedir}/bazel-${project.artifactId}/external/local_tsl/ + ${project.basedir}/bazel-${project.artifactId}/external/local_xla/ ${project.basedir}/bazel-bin/external/org_tensorflow/ ${project.basedir}/bazel-${project.artifactId}/external/com_google_absl/ ${project.basedir}/bazel-${project.artifactId}/external/eigen_archive/ @@ -528,6 +532,11 @@ jar + + + tensorflow.nativelib.${os.name}.${os.arch} + + ${native.classifier} true @@ -630,27 +639,6 @@ - - - maven-javadoc-plugin - 3.6.0 - - - attach-javadocs - - jar - - - false - 256m - 2048m - - http://bytedeco.org/javacpp/apidocs - - - - - diff --git a/tensorflow-core/tensorflow-core-native/scripts/bazel_generate.sh b/tensorflow-core/tensorflow-core-native/scripts/bazel_generate.sh index 9d9941d1cf8..ab0fd0ec6c1 100755 --- a/tensorflow-core/tensorflow-core-native/scripts/bazel_generate.sh +++ b/tensorflow-core/tensorflow-core-native/scripts/bazel_generate.sh @@ -24,7 +24,7 @@ cp -f $TENSORFLOW_SRCS/core/ops/ops.pbtxt $GEN_RESOURCE_DIR/org/tensorflow cp -rf $TENSORFLOW_SRCS/core/api_def/base_api $GEN_RESOURCE_DIR/org/tensorflow/ # Copy generated Java protos from source jars -echo "Extracting TF/TSL proto Java sources" +echo "Extracting TF/TSL/XLA proto Java sources" cd $GEN_SRCS_DIR -find $TENSORFLOW_BIN $BAZEL_BIN/external/local_tsl/tsl -name \*-speed-src.jar -exec jar xf {} \; +find $TENSORFLOW_BIN $BAZEL_BIN/external/local_tsl/tsl $BAZEL_BIN/external/local_xla/xla -name \*-speed-src.jar -exec jar xf {} \; rm -rf META-INF diff --git 
a/tensorflow-core/tensorflow-core-native/scripts/dist_download.sh b/tensorflow-core/tensorflow-core-native/scripts/dist_download.sh index a97220538c3..acf28b9391d 100755 --- a/tensorflow-core/tensorflow-core-native/scripts/dist_download.sh +++ b/tensorflow-core/tensorflow-core-native/scripts/dist_download.sh @@ -5,20 +5,20 @@ DOWNLOAD_FOLDER="$1" case ${PLATFORM:-} in 'linux-x86_64') - WHEEL_URL='https://files.pythonhosted.org/packages/fa/44/a1698c62942d20cab378ba201a6cbfcce579418351a0c6e4ea9d66c9adf2/tensorflow_cpu-2.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl' + WHEEL_URL='https://files.pythonhosted.org/packages/aa/1d/032a9d40762895e51cad06f382135c14d16487a0ad9dcc65aae5bd89c968/tensorflow_cpu-2.18.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl' ;; 'linux-x86_64-gpu') - WHEEL_URL='https://files.pythonhosted.org/packages/93/c0/a774286d0383419f558deb27096e5de9f9facd6c27df8e9f9af6fba2f77e/tensorflow-2.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl' + WHEEL_URL='https://files.pythonhosted.org/packages/84/76/c55967ac9968ddaede25a4dce37aba37e9030656f02c12676151ce1b6f22/tensorflow-2.18.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl' ;; - 'macosx-x86_64') - WHEEL_URL='https://files.pythonhosted.org/packages/92/2d/880fcd65e4414b05088193e6f2cfb86fdf90003dd2dd0f4d1bc465348f0e/tensorflow-2.15.0-cp311-cp311-macosx_10_15_x86_64.whl' + 'linux-arm64') + WHEEL_URL='https://files.pythonhosted.org/packages/56/e4/55aaac2b15af4dad079e5af329a79d961e5206589d0e02b1e8da221472ed/tensorflow-2.18.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl' ;; 'macosx-arm64') - WHEEL_URL='https://files.pythonhosted.org/packages/eb/9f/0759e2fea4a3c48f070b64811c2c57036b46353ba87263afc810b8f4188a/tensorflow_macos-2.15.0-cp311-cp311-macosx_12_0_arm64.whl' + WHEEL_URL='https://files.pythonhosted.org/packages/26/08/556c4159675c1a30e077ec2a942eeeb81b457cc35c247a5b4a59a1274f05/tensorflow-2.18.0-cp311-cp311-macosx_12_0_arm64.whl' ;; 
'windows-x86_64') - WHEEL_URL='https://files.pythonhosted.org/packages/4c/48/1a5a15517f18eaa4ff8d598b1c000300b20c1bb0e624539d702117a0c369/tensorflow_intel-2.15.0-cp311-cp311-win_amd64.whl' - CLIB_URL='https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-windows-x86_64-2.15.0.zip' + WHEEL_URL='https://files.pythonhosted.org/packages/76/ad/fa6c508a15ff79cb5409294c293388e0999b7d480f84b65e4287277434fe/tensorflow_intel-2.18.0-cp311-cp311-win_amd64.whl' + CLIB_URL='https://storage.googleapis.com/tensorflow/versions/2.18.0/libtensorflow-cpu-windows-x86_64.zip' ;; *) echo "TensorFlow distribution for ${PLATFORM} is not supported for download" @@ -34,6 +34,9 @@ if [[ -n "$WHEEL_URL" ]]; then curl -L $WHEEL_URL --output 'tensorflow.whl' fi yes | unzip -q -u 'tensorflow.whl' # use 'yes' because for some reasons -u does not work on Windows + if [[ "$PLATFORM" == "linux-arm64" ]]; then + cp $DOWNLOAD_FOLDER/tensorflow.libs/* $DOWNLOAD_FOLDER/tensorflow/ + fi fi if [[ -n "$CLIB_URL" ]]; then @@ -48,6 +51,10 @@ cd tensorflow if [[ "$PLATFORM" =~ "linux" ]]; then ln -fs libtensorflow_cc.so.2 libtensorflow_cc.so ln -fs libtensorflow_framework.so.2 libtensorflow_framework.so + if [[ "$PLATFORM" == "linux-arm64" ]]; then + cp ../tensorflow.libs/libomp-6196b3b5.so.5 libomp-6196b3b5.so.5 + ln -fs libomp-6196b3b5.so.5 libomp-6196b3b5.so + fi elif [[ "$PLATFORM" =~ "macosx" ]]; then ln -fs libtensorflow_cc.2.dylib libtensorflow_cc.dylib ln -fs libtensorflow_framework.2.dylib libtensorflow_framework.dylib @@ -56,4 +63,4 @@ elif [[ "$PLATFORM" =~ "windows" ]]; then # (while it is also available at the root of the include folder for other platforms) cd include && ln -fs tensorflow/tsl tsl && cd - fi -ls -l . \ No newline at end of file +ls -l . 
diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/Deallocator_Pointer_long_Pointer.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/Deallocator_Pointer_long_Pointer.java index cadc5930dc9..6db414f7382 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/Deallocator_Pointer_long_Pointer.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/Deallocator_Pointer_long_Pointer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/Listener_BytePointer.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/Listener_BytePointer.java index 3f5ef587ab1..abd96e95392 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/Listener_BytePointer.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/Listener_BytePointer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/Listener_String.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/Listener_String.java index 3e62d7d0acb..af6030e6503 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/Listener_String.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/Listener_String.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO 
NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_CancelCallback.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_CancelCallback.java index 037bdbfaf2b..a2ab0621623 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_CancelCallback.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_CancelCallback.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_CancellationManager.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_CancellationManager.java index be8d6ba91d5..1a1524e6593 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_CancellationManager.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_CancellationManager.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_Context.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_Context.java index 88339d3a463..88322c6d243 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_Context.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_Context.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT 
EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_ContextOptions.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_ContextOptions.java index 4697c57a675..8a9768dbaec 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_ContextOptions.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_ContextOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_CustomDevice.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_CustomDevice.java index 4b51f0e83ea..1b9ddc78173 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_CustomDevice.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_CustomDevice.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_CustomDeviceTensorHandle.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_CustomDeviceTensorHandle.java index a4b0939adb7..b6943e9f386 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_CustomDeviceTensorHandle.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_CustomDeviceTensorHandle.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// 
Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_Executor.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_Executor.java index 29bdf274105..53a66d08755 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_Executor.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_Executor.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringBoolGauge0.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringBoolGauge0.java index 0d1a29dbe09..2ee4a7020c8 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringBoolGauge0.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringBoolGauge0.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringBoolGauge1.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringBoolGauge1.java index 23ed2050658..004c852fcd7 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringBoolGauge1.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringBoolGauge1.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 
1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringBoolGauge2.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringBoolGauge2.java index 50478f15ea9..e46a3f06e0d 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringBoolGauge2.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringBoolGauge2.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringBoolGaugeCell.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringBoolGaugeCell.java index 3f3ba2f7abb..6b9d73cb6f2 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringBoolGaugeCell.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringBoolGaugeCell.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringBuckets.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringBuckets.java index 53f0870fcb8..8fa952614cf 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringBuckets.java +++ 
b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringBuckets.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringCounter0.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringCounter0.java index d6a1741d04c..03185b12ac1 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringCounter0.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringCounter0.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringCounter1.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringCounter1.java index 154ea614520..3fc374c963d 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringCounter1.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringCounter1.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringCounter2.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringCounter2.java index 12230296e90..3f479145a55 100644 --- 
a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringCounter2.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringCounter2.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringCounterCell.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringCounterCell.java index 3ba3cffa6c5..c4ff6a322cd 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringCounterCell.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringCounterCell.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringIntGauge0.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringIntGauge0.java index 8e397c42218..b01c212d04e 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringIntGauge0.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringIntGauge0.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringIntGauge1.java 
b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringIntGauge1.java index 17e85beebd9..46ca45846ce 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringIntGauge1.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringIntGauge1.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringIntGauge2.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringIntGauge2.java index 952667a192a..a48f8ea3d8e 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringIntGauge2.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringIntGauge2.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringIntGaugeCell.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringIntGaugeCell.java index 0a5b4ffb2d0..34d7da5854e 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringIntGaugeCell.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringIntGaugeCell.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git 
a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringSampler0.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringSampler0.java index 52696ccd933..d0235201962 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringSampler0.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringSampler0.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringSampler1.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringSampler1.java index 7ee23fd6044..f71d9821bae 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringSampler1.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringSampler1.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringSampler2.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringSampler2.java index 03ceb795203..eaa959122f5 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringSampler2.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringSampler2.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS 
FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringSamplerCell.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringSamplerCell.java index 7450bc22100..7d2b50cd5e5 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringSamplerCell.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringSamplerCell.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringStringGauge0.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringStringGauge0.java index eee853733c2..978e30fca4a 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringStringGauge0.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringStringGauge0.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringStringGauge1.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringStringGauge1.java index 9b6a251d2fb..d0a0aca659c 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringStringGauge1.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringStringGauge1.java @@ -1,4 +1,4 @@ -// Targeted by 
JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringStringGauge2.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringStringGauge2.java index ddbe1a5e0cb..85d097da12f 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringStringGauge2.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringStringGauge2.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringStringGauge3.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringStringGauge3.java index cc304e409ae..aa07f5ee144 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringStringGauge3.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringStringGauge3.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringStringGauge4.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringStringGauge4.java index c2a2d811c8d..46732e2bfcd 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringStringGauge4.java +++ 
b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringStringGauge4.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringStringGaugeCell.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringStringGaugeCell.java index 62ac9ab0f33..cfee1b9991d 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringStringGaugeCell.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_MonitoringStringGaugeCell.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_Op.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_Op.java index a7566da0df2..b4b9fba825e 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_Op.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_Op.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_OpAttrs.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_OpAttrs.java index be1e0e5fc9d..c72023d587f 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_OpAttrs.java +++ 
b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_OpAttrs.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_TensorDebugInfo.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_TensorDebugInfo.java index 157630bc750..7f5cc8b30f1 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_TensorDebugInfo.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_TensorDebugInfo.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_TensorHandle.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_TensorHandle.java index e984edc1329..39ebd0e0984 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_TensorHandle.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFE_TensorHandle.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFJ_GradFuncAdapter.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFJ_GradFuncAdapter.java index da652c6b068..7938de58a8e 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFJ_GradFuncAdapter.java +++ 
b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFJ_GradFuncAdapter.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFJ_GraphId.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFJ_GraphId.java index 5f3ec15585c..f503ed65a91 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFJ_GraphId.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFJ_GraphId.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFJ_Scope.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFJ_Scope.java index 048f1a23706..8c3c0746c5c 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFJ_Scope.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TFJ_Scope.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_AllocatorAttributes.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_AllocatorAttributes.java index abe89fdca60..5862c60b31d 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_AllocatorAttributes.java +++ 
b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_AllocatorAttributes.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_ApiDefMap.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_ApiDefMap.java index 2274e428250..57f7d3095bf 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_ApiDefMap.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_ApiDefMap.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_AttrBuilder.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_AttrBuilder.java new file mode 100644 index 00000000000..8e5a084ccce --- /dev/null +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_AttrBuilder.java @@ -0,0 +1,21 @@ +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE + +package org.tensorflow.internal.c_api; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.tensorflow.internal.c_api.global.tensorflow.*; + + +// TF_NewAttrBuilder() returns an object that you can set attributes on as +// though it were an op. This allows querying properties of that op for +// type-checking purposes like if the op will run on a particular device type. 
+@Opaque @Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) +public class TF_AttrBuilder extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public TF_AttrBuilder() { super((Pointer)null); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TF_AttrBuilder(Pointer p) { super(p); } +} diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_AttrMetadata.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_AttrMetadata.java index 14d7e44817d..6a99a4d1b8e 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_AttrMetadata.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_AttrMetadata.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Buffer.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Buffer.java index 676dd7417f2..a4bf64d3f5f 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Buffer.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Buffer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_CheckpointReader.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_CheckpointReader.java new file mode 100644 index 00000000000..232cb85fd04 --- /dev/null +++ 
b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_CheckpointReader.java @@ -0,0 +1,20 @@ +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE + +package org.tensorflow.internal.c_api; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.tensorflow.internal.c_api.global.tensorflow.*; + + +// TF_NewCheckpointReader() return the CheckpointReader that can be use to +// investigate or load the variable from the checkpoint file +@Opaque @Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) +public class TF_CheckpointReader extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public TF_CheckpointReader() { super((Pointer)null); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TF_CheckpointReader(Pointer p) { super(p); } +} diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_DeprecatedSession.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_DeprecatedSession.java index d2382eaca52..d816ab6f832 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_DeprecatedSession.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_DeprecatedSession.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_DeviceList.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_DeviceList.java index a24fc795c92..6d095f0b8c7 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_DeviceList.java +++ 
b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_DeviceList.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Function.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Function.java index 017a9e653bd..acf54133d12 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Function.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Function.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_FunctionOptions.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_FunctionOptions.java index beeda28ce31..381a690e1e6 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_FunctionOptions.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_FunctionOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Graph.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Graph.java index 1d81f725050..779c5030842 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Graph.java +++ 
b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Graph.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_ImportGraphDefOptions.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_ImportGraphDefOptions.java index 755b4aeaa42..527b6f1109e 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_ImportGraphDefOptions.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_ImportGraphDefOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_ImportGraphDefResults.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_ImportGraphDefResults.java index a202d65ea5f..7d3bf3597f0 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_ImportGraphDefResults.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_ImportGraphDefResults.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Input.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Input.java index eb169346beb..05bb181a86d 100644 --- 
a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Input.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Input.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Library.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Library.java index 9ba55f9db0b..22a67f6cd2c 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Library.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Library.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Operation.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Operation.java index 715f0f67ca4..084fef91bda 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Operation.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Operation.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_OperationDescription.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_OperationDescription.java index bf17e0d1f5f..9387c172354 100644 --- 
a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_OperationDescription.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_OperationDescription.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Output.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Output.java index f8b7103c57a..a52cff3c905 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Output.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Output.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Server.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Server.java index a2495132444..5753e37ae97 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Server.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Server.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Session.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Session.java index dd5b1259279..8ab407637f5 100644 --- 
a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Session.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Session.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_SessionOptions.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_SessionOptions.java index 72d9ea3911f..a85ba6f5c62 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_SessionOptions.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_SessionOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_ShapeAndType.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_ShapeAndType.java new file mode 100644 index 00000000000..315bfa3e01c --- /dev/null +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_ShapeAndType.java @@ -0,0 +1,37 @@ +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE + +package org.tensorflow.internal.c_api; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.tensorflow.internal.c_api.global.tensorflow.*; + + +// Information about the shape of a Tensor and its type. +@Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) +public class TF_ShapeAndType extends Pointer { + static { Loader.load(); } + /** Default native constructor. 
*/ + public TF_ShapeAndType() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public TF_ShapeAndType(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TF_ShapeAndType(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public TF_ShapeAndType position(long position) { + return (TF_ShapeAndType)super.position(position); + } + @Override public TF_ShapeAndType getPointer(long i) { + return new TF_ShapeAndType((Pointer)this).offsetAddress(i); + } + + // Number of dimensions. -1 indicates unknown rank. + public native int num_dims(); public native TF_ShapeAndType num_dims(int setter); + // Array of dimensions. -1 indicates unknown dim. + public native @Cast("int64_t*") LongPointer dims(); public native TF_ShapeAndType dims(LongPointer setter); + // The data type. May be 0 to denote unknown type. + public native @Cast("TF_DataType") int dtype(); public native TF_ShapeAndType dtype(int setter); +} diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_ShapeAndTypeList.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_ShapeAndTypeList.java new file mode 100644 index 00000000000..ac959f2acf7 --- /dev/null +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_ShapeAndTypeList.java @@ -0,0 +1,33 @@ +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE + +package org.tensorflow.internal.c_api; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.tensorflow.internal.c_api.global.tensorflow.*; + + +// A list of TF_ShapeAndType elements.. 
+@Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) +public class TF_ShapeAndTypeList extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public TF_ShapeAndTypeList() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public TF_ShapeAndTypeList(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TF_ShapeAndTypeList(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public TF_ShapeAndTypeList position(long position) { + return (TF_ShapeAndTypeList)super.position(position); + } + @Override public TF_ShapeAndTypeList getPointer(long i) { + return new TF_ShapeAndTypeList((Pointer)this).offsetAddress(i); + } + + public native int num_items(); public native TF_ShapeAndTypeList num_items(int setter); + public native TF_ShapeAndType items(); public native TF_ShapeAndTypeList items(TF_ShapeAndType setter); +} diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Status.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Status.java index c2d2c329aa8..3aa5e8156d5 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Status.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Status.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_StringView.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_StringView.java index af639c7647c..cd9b9928069 100644 --- 
a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_StringView.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_StringView.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString.java index 6ecc8d18dd5..66092be629f 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Large.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Large.java index 240b0d83e36..2cab61e7d73 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Large.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Large.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Offset.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Offset.java index 280e4e403a8..3dbdcd468f1 100644 --- 
a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Offset.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Offset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Raw.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Raw.java index 22712b73a55..f2916801c5f 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Raw.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Raw.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Small.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Small.java index 6440c91e627..57702a8b716 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Small.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Small.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Union.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Union.java index 51b009cf79a..5ebe9ebfac8 100644 --- 
a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Union.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Union.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString_View.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString_View.java index 907bf5d0389..5951c1b1238 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString_View.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_TString_View.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Tensor.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Tensor.java index 4b35bbf637e..4ee1ae89418 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Tensor.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_Tensor.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_WhileParams.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_WhileParams.java index e4ca40d66a3..d3607e4a274 100644 --- 
a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_WhileParams.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/TF_WhileParams.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/Tensor.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/Tensor.java index fbbeaec6bea..d534b2caa87 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/Tensor.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/Tensor.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/global/tensorflow.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/global/tensorflow.java index c5be5971ff1..f867058d6bb 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/global/tensorflow.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/internal/c_api/global/tensorflow.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api.global; @@ -11,7 +11,7 @@ public class tensorflow extends org.tensorflow.internal.c_api.presets.tensorflow { static { Loader.load(); } -// Parsed from tensorflow/tsl/platform/ctstring_internal.h +// Parsed from tsl/platform/ctstring_internal.h /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
@@ -165,7 +165,7 @@ public static native void TF_TString_Copy(TF_TString dst, String src, // #endif // TENSORFLOW_TSL_PLATFORM_CTSTRING_INTERNAL_H_ -// Parsed from tensorflow/tsl/platform/ctstring.h +// Parsed from tsl/platform/ctstring.h /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. @@ -274,7 +274,7 @@ public static native void TF_TString_Copy(TF_TString dst, String src, // #endif // TENSORFLOW_TSL_PLATFORM_CTSTRING_H_ -// Parsed from tensorflow/tsl/c/tsl_status.h +// Parsed from xla/tsl/c/tsl_status.h /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. @@ -291,8 +291,8 @@ public static native void TF_TString_Copy(TF_TString dst, String src, limitations under the License. ==============================================================================*/ -// #ifndef TENSORFLOW_TSL_C_TSL_STATUS_H_ -// #define TENSORFLOW_TSL_C_TSL_STATUS_H_ +// #ifndef XLA_TSL_C_TSL_STATUS_H_ +// #define XLA_TSL_C_TSL_STATUS_H_ // #ifdef __cplusplus // #endif @@ -351,7 +351,7 @@ public static native void TF_TString_Copy(TF_TString dst, String src, // #ifdef __cplusplus /* end extern "C" */ // #endif -// #endif // TENSORFLOW_TSL_C_TSL_STATUS_H_ +// #endif // XLA_TSL_C_TSL_STATUS_H_ // Parsed from tensorflow/c/c_api_macros.h @@ -507,7 +507,7 @@ public static native void TF_TString_Copy(TF_TString dst, String src, // #define TENSORFLOW_C_TF_STATUS_H_ // #include "tensorflow/c/c_api_macros.h" -// #include "tsl/c/tsl_status.h" +// #include "xla/tsl/c/tsl_status.h" // #ifdef __cplusplus // Targeting ../TF_Status.java @@ -517,6 +517,7 @@ public static native void TF_TString_Copy(TF_TString dst, String src, // -------------------------------------------------------------------------- // TF_Code holds an error code. The enum values here are identical to // corresponding values in error_codes.proto. 
+// LINT.IfChange public static final int TF_OK = TSL_OK; public static final int TF_CANCELLED = TSL_CANCELLED; public static final int TF_UNKNOWN = TSL_UNKNOWN; @@ -534,6 +535,7 @@ public static native void TF_TString_Copy(TF_TString dst, String src, public static final int TF_INTERNAL = TSL_INTERNAL; public static final int TF_UNAVAILABLE = TSL_UNAVAILABLE; public static final int TF_DATA_LOSS = TSL_DATA_LOSS; +// LINT.ThenChange(//tensorflow/python/py_exception_registry_wrapper.cc) // -------------------------------------------------------------------------- @@ -687,6 +689,13 @@ public static native TF_Tensor TF_NewTensor( Deallocator_Pointer_long_Pointer deallocator, Pointer deallocator_arg); +// Returns the alignment, in bytes, required for allocating aligned tensors. +// +// This can be used in combination with TF_NewTensor to manually manage +// memory while ensuring the resulting tensors satisfy TensorFlow's +// memory alignment preferences. +public static native @Cast("size_t") long TF_TensorDefaultAlignment(); + // Allocate and return a new Tensor. // // This function is an alternative to TF_NewTensor and should be used when @@ -2143,6 +2152,13 @@ public static native TF_ImportGraphDefResults TF_GraphImportGraphDefWithResults( @Const TF_ImportGraphDefOptions options, TF_Status status); +// Has the same behavior as TF_GraphImportGraphDefWithResults, but instead of +// taking in a serialized tensorflow::GraphDef, it takes in a *pointer* to the +// C++ *in memory representation* of the GraphDef, stored in `graph_def->data` +public static native TF_ImportGraphDefResults TF_GraphImportGraphDefWithResultsNoSerialization( + TF_Graph graph, @Const TF_Buffer graph_def, + @Const TF_ImportGraphDefOptions options, TF_Status status); + // Import the graph serialized in `graph_def` into `graph`. // Convenience function for when only return outputs are needed. 
// @@ -4358,7 +4374,8 @@ public static native void TFE_ContextUpdateServerDefWithTimeout( // This API is for experimental usage and may be subject to change. public static native void TFE_ContextSetServerDefWithTimeout( TFE_Context ctx, int keep_alive_secs, @Const Pointer proto, @Cast("size_t") long proto_len, - @Cast("int64_t") long init_timeout_in_ms, TF_Status status); + @Cast("int64_t") long init_timeout_in_ms, TF_Status status, + @Cast("bool") boolean clear_existing_contexts); // Set server def with retries and timeout. This is helpful for fault-tolerant // initial connection in high-preemption environments, such as @@ -4366,7 +4383,8 @@ public static native void TFE_ContextSetServerDefWithTimeout( // This API is for experimental usage and may be subject to change. public static native void TFE_ContextSetServerDefWithTimeoutAndRetries( TFE_Context ctx, int keep_alive_secs, @Const Pointer proto, @Cast("size_t") long proto_len, - @Cast("int64_t") long init_timeout_in_ms, int retries, TF_Status status); + @Cast("int64_t") long init_timeout_in_ms, int retries, TF_Status status, + @Cast("bool") boolean clear_existing_contexts); // Checks whether a remote worker is alive or not. This will return true even if // the context doesn't exist on the remote worker. @@ -4750,6 +4768,434 @@ public static native void TFE_InitializeLocalOnlyContext(TFE_Context ctx, // #endif // TENSORFLOW_C_EAGER_C_API_EXPERIMENTAL_H_ +// Parsed from tensorflow/c/c_api_experimental.h + +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// #ifndef TENSORFLOW_C_C_API_EXPERIMENTAL_H_ +// #define TENSORFLOW_C_C_API_EXPERIMENTAL_H_ + +// #include +// #include + +// #include "tensorflow/c/c_api.h" +// #include "tensorflow/c/c_api_macros.h" +// #include "tensorflow/c/eager/c_api.h" + +// -------------------------------------------------------------------------- +// Experimental C API for TensorFlow. +// +// The API here is subject to changes in the future. +// -------------------------------------------------------------------------- + +// #ifdef __cplusplus +// #endif + +// When `enable` is true, set +// tensorflow.ConfigProto.OptimizerOptions.global_jit_level to ON_1, and also +// set XLA flag values to prepare for XLA compilation. Otherwise set +// global_jit_level to OFF. +// +// This and the next API are syntax sugar over TF_SetConfig(), and is used by +// clients that cannot read/write the tensorflow.ConfigProto proto. +// TODO: Migrate to TF_CreateConfig() below. +public static native void TF_EnableXLACompilation(TF_SessionOptions options, + @Cast("unsigned char") byte enable); + +// Set XLA's internal BuildXlaOpsPassFlags.tf_xla_enable_lazy_compilation to the +// value of 'enabled'. Also returns the original value of that flag. +// +// Use in tests to allow XLA to fallback to TF classic. This has global effect. +public static native @Cast("unsigned char") byte TF_SetXlaEnableLazyCompilation( + @Cast("unsigned char") byte enable); +public static native @Cast("unsigned char") byte TF_SetTfXlaCpuGlobalJit(@Cast("unsigned char") byte enable); + +// Sets XLA's auto jit mode according to the specified string, which is parsed +// as if passed in XLA_FLAGS. This has global effect. 
+public static native void TF_SetXlaAutoJitMode(@Cast("const char*") BytePointer mode); +public static native void TF_SetXlaAutoJitMode(String mode); + +// Returns whether the single GPU or general XLA auto jit optimizations are +// enabled through MarkForCompilationPassFlags. +public static native @Cast("unsigned char") byte TF_GetXlaAutoJitEnabled(); + +// Sets XLA's minimum cluster size. This has global effect. +public static native void TF_SetXlaMinClusterSize(int size); + +// Gets/Sets TF/XLA flag for whether(true) or not(false) to disable constant +// folding. This is for testing to ensure that XLA is being tested rather than +// Tensorflow's CPU implementation through constant folding. +public static native @Cast("unsigned char") byte TF_GetXlaConstantFoldingDisabled(); +public static native void TF_SetXlaConstantFoldingDisabled( + @Cast("unsigned char") byte should_enable); + +// Create a serialized tensorflow.ConfigProto proto, where: +// +// a) ConfigProto.optimizer_options.global_jit_level is set to ON_1 if +// `enable_xla_compilation` is non-zero, and OFF otherwise. +// b) ConfigProto.gpu_options.allow_growth is set to `gpu_memory_allow_growth`. +// c) ConfigProto.device_count is set to `num_cpu_devices`. +public static native TF_Buffer TF_CreateConfig( + @Cast("unsigned char") byte enable_xla_compilation, @Cast("unsigned char") byte gpu_memory_allow_growth, + @Cast("unsigned int") int num_cpu_devices); + +// Create a serialized tensorflow.RunOptions proto, where RunOptions.trace_level +// is set to FULL_TRACE if `enable_full_trace` is non-zero, and NO_TRACE +// otherwise. +public static native TF_Buffer TF_CreateRunOptions( + @Cast("unsigned char") byte enable_full_trace); + +// Returns the graph content in a human-readable format, with length set in +// `len`. The format is subject to change in the future. +// The returned string is heap-allocated, and caller should call free() on it. 
+public static native @Cast("const char*") BytePointer TF_GraphDebugString(TF_Graph graph, + @Cast("size_t*") SizeTPointer len); + +// Returns the function content in a human-readable format, with length set in +// `len`. The format is subject to change in the future. +// The returned string is heap-allocated, and caller should call free() on it. +// +// Do not return const char*, because some foreign language binding +// (e.g. swift) cannot then call free() on the returned pointer. +public static native @Cast("char*") BytePointer TF_FunctionDebugString(TF_Function func, + @Cast("size_t*") SizeTPointer len); + +// On success, dequeues a tensor from a TF-managed FifoQueue given by +// `tensor_id`, associated with `session`. There must be a graph node named +// "fifo_queue_dequeue_", to be executed by this API call. + +// Caller must call TF_DeleteTensor() over the returned tensor. If the queue is +// empty, this call is blocked. +// +// Tensors are enqueued via the corresponding TF enqueue op. +// TODO(hongm): Add support for `timeout_ms`. +public static native TF_Tensor TF_DequeueNamedTensor(TF_Session session, + int tensor_id, + TF_Status status); + +// On success, enqueues `tensor` into a TF-managed FifoQueue given by +// `tensor_id`, associated with `session`. There must be a graph node named +// "fifo_queue_enqueue_", to be executed by this API call. It reads +// from a placeholder node "arg_tensor_enqueue_". +// +// `tensor` is still owned by the caller. This call will be blocked if the queue +// has reached its capacity, and will be unblocked when the queued tensors again +// drop below the capacity due to dequeuing. +// +// Tensors are dequeued via the corresponding TF dequeue op. +// TODO(hongm): Add support for `timeout_ms`. +public static native void TF_EnqueueNamedTensor(TF_Session session, + int tensor_id, + TF_Tensor tensor, + TF_Status status); +// Create a serialized tensorflow.ServerDef proto. 
+public static native TF_Buffer TFE_GetServerDef(@Cast("const char*") BytePointer text_proto, TF_Status status); +public static native TF_Buffer TFE_GetServerDef(String text_proto, TF_Status status); + +public static native void TF_MakeInternalErrorStatus(TF_Status status, + @Cast("const char*") BytePointer errMsg); +public static native void TF_MakeInternalErrorStatus(TF_Status status, + String errMsg); +// Targeting ../TF_CheckpointReader.java + + +public static native TF_CheckpointReader TF_NewCheckpointReader( + @Cast("const char*") BytePointer filename, TF_Status status); +public static native TF_CheckpointReader TF_NewCheckpointReader( + String filename, TF_Status status); +public static native void TF_DeleteCheckpointReader( + TF_CheckpointReader reader); +public static native int TF_CheckpointReaderHasTensor( + TF_CheckpointReader reader, @Cast("const char*") BytePointer name); +public static native int TF_CheckpointReaderHasTensor( + TF_CheckpointReader reader, String name); +// Get the variable name at the given index +public static native @Cast("const char*") BytePointer TF_CheckpointReaderGetVariable( + TF_CheckpointReader reader, int index); +// Get the number of variable in the checkpoint +public static native int TF_CheckpointReaderSize(TF_CheckpointReader reader); +// Get the DataType of a variable +public static native @Cast("TF_DataType") int TF_CheckpointReaderGetVariableDataType( + TF_CheckpointReader reader, @Cast("const char*") BytePointer name); +public static native @Cast("TF_DataType") int TF_CheckpointReaderGetVariableDataType( + TF_CheckpointReader reader, String name); +// Read the shape of a variable and write to `dims` +public static native void TF_CheckpointReaderGetVariableShape( + TF_CheckpointReader reader, @Cast("const char*") BytePointer name, @Cast("int64_t*") LongPointer dims, int num_dims, + TF_Status status); +public static native void TF_CheckpointReaderGetVariableShape( + TF_CheckpointReader reader, String name, 
@Cast("int64_t*") LongBuffer dims, int num_dims, + TF_Status status); +public static native void TF_CheckpointReaderGetVariableShape( + TF_CheckpointReader reader, @Cast("const char*") BytePointer name, @Cast("int64_t*") long[] dims, int num_dims, + TF_Status status); +public static native void TF_CheckpointReaderGetVariableShape( + TF_CheckpointReader reader, String name, @Cast("int64_t*") LongPointer dims, int num_dims, + TF_Status status); +public static native void TF_CheckpointReaderGetVariableShape( + TF_CheckpointReader reader, @Cast("const char*") BytePointer name, @Cast("int64_t*") LongBuffer dims, int num_dims, + TF_Status status); +public static native void TF_CheckpointReaderGetVariableShape( + TF_CheckpointReader reader, String name, @Cast("int64_t*") long[] dims, int num_dims, + TF_Status status); +// Get the number of dimension of a variable +public static native int TF_CheckpointReaderGetVariableNumDims( + TF_CheckpointReader reader, @Cast("const char*") BytePointer name); +public static native int TF_CheckpointReaderGetVariableNumDims( + TF_CheckpointReader reader, String name); +// Load the weight of a variable +public static native TF_Tensor TF_CheckpointReaderGetTensor( + TF_CheckpointReader reader, @Cast("const char*") BytePointer name, TF_Status status); +public static native TF_Tensor TF_CheckpointReaderGetTensor( + TF_CheckpointReader reader, String name, TF_Status status); +// Targeting ../TF_AttrBuilder.java + + +public static native TF_AttrBuilder TF_NewAttrBuilder(@Cast("const char*") BytePointer op_name); +public static native TF_AttrBuilder TF_NewAttrBuilder(String op_name); +public static native void TF_DeleteAttrBuilder(TF_AttrBuilder builder); +public static native void TF_AttrBuilderSetType(TF_AttrBuilder builder, + @Cast("const char*") BytePointer attr_name, + @Cast("TF_DataType") int value); +public static native void TF_AttrBuilderSetType(TF_AttrBuilder builder, + String attr_name, + @Cast("TF_DataType") int value); +public 
static native void TF_AttrBuilderSetTypeList(TF_AttrBuilder builder, + @Cast("const char*") BytePointer attr_name, + @Cast("const TF_DataType*") IntPointer values, + int num_values); +public static native void TF_AttrBuilderSetTypeList(TF_AttrBuilder builder, + String attr_name, + @Cast("const TF_DataType*") IntBuffer values, + int num_values); +public static native void TF_AttrBuilderSetTypeList(TF_AttrBuilder builder, + @Cast("const char*") BytePointer attr_name, + @Cast("const TF_DataType*") int[] values, + int num_values); +public static native void TF_AttrBuilderSetTypeList(TF_AttrBuilder builder, + String attr_name, + @Cast("const TF_DataType*") IntPointer values, + int num_values); +public static native void TF_AttrBuilderSetTypeList(TF_AttrBuilder builder, + @Cast("const char*") BytePointer attr_name, + @Cast("const TF_DataType*") IntBuffer values, + int num_values); +public static native void TF_AttrBuilderSetTypeList(TF_AttrBuilder builder, + String attr_name, + @Cast("const TF_DataType*") int[] values, + int num_values); + +// Checks the tensorflow::NodeDef built via the methods above to see if it can +// run on device_type. +public static native void TF_AttrBuilderCheckCanRunOnDevice( + TF_AttrBuilder builder, @Cast("const char*") BytePointer device_type, TF_Status status); +public static native void TF_AttrBuilderCheckCanRunOnDevice( + TF_AttrBuilder builder, String device_type, TF_Status status); + +// For argument number input_index, fetch the corresponding number_attr that +// needs to be updated with the argument length of the input list. +// Returns nullptr if there is any problem like op_name is not found, or the +// argument does not support this attribute type. 
+public static native @Cast("const char*") BytePointer TF_GetNumberAttrForOpListInput( + @Cast("const char*") BytePointer op_name, int input_index, TF_Status status); +public static native String TF_GetNumberAttrForOpListInput( + String op_name, int input_index, TF_Status status); + +// Returns 1 if the op is stateful, 0 otherwise. The return value is undefined +// if the status is not ok. +public static native int TF_OpIsStateful(@Cast("const char*") BytePointer op_type, + TF_Status status); +public static native int TF_OpIsStateful(String op_type, + TF_Status status); + +// Platform specific initialization routine. Very few platforms actually require +// this to be called. +public static native void TF_InitMain(@Cast("const char*") BytePointer usage, IntPointer argc, @Cast("char***") @ByPtrPtr PointerPointer argv); +public static native void TF_InitMain(String usage, IntBuffer argc, @Cast("char***") @ByPtrPtr PointerPointer argv); +public static native void TF_InitMain(@Cast("const char*") BytePointer usage, int[] argc, @Cast("char***") @ByPtrPtr PointerPointer argv); +public static native void TF_InitMain(String usage, IntPointer argc, @Cast("char***") @ByPtrPtr PointerPointer argv); +public static native void TF_InitMain(@Cast("const char*") BytePointer usage, IntBuffer argc, @Cast("char***") @ByPtrPtr PointerPointer argv); +public static native void TF_InitMain(String usage, int[] argc, @Cast("char***") @ByPtrPtr PointerPointer argv); + +// Platform-specific implementation to return an unused port. (This should used +// in tests only.) +public static native int TF_PickUnusedPortOrDie(); + +// Fast path method that makes constructing a single scalar tensor require less +// overhead and copies. +public static native TFE_TensorHandle TFE_NewTensorHandleFromScalar( + @Cast("TF_DataType") int data_type, Pointer data, @Cast("size_t") long len, TF_Status status); + +// Specify the server_def that enables collective ops. 
+// This is different to the above function in that it doesn't create remote +// contexts, and remotely executing ops is not possible. It just enables +// communication for collective ops. +public static native void TFE_EnableCollectiveOps(TFE_Context ctx, + @Const Pointer proto, + @Cast("size_t") long proto_len, + TF_Status status); + +// Aborts all ongoing collectives with the specified status. After abortion, +// subsequent collectives will error with this status immediately. To reset the +// collectives, create a new EagerContext. +// +// This is intended to be used when a peer failure is detected. +public static native void TFE_AbortCollectiveOps(TFE_Context ctx, + TF_Status status); + +// Checks the health of collective ops peers. Explicit health check is needed in +// multi worker collective ops to detect failures in the cluster. If a peer is +// down, collective ops may hang. +public static native void TFE_CollectiveOpsCheckPeerHealth( + TFE_Context ctx, @Cast("const char*") BytePointer task, @Cast("int64_t") long timeout_in_ms, + TF_Status status); +public static native void TFE_CollectiveOpsCheckPeerHealth( + TFE_Context ctx, String task, @Cast("int64_t") long timeout_in_ms, + TF_Status status); +// Targeting ../TF_ShapeAndType.java + + +// Targeting ../TF_ShapeAndTypeList.java + + + +// API for manipulating TF_ShapeAndTypeList objects. 
+// +public static native TF_ShapeAndTypeList TF_NewShapeAndTypeList( + int num_shapes); +public static native void TF_ShapeAndTypeListSetShape( + TF_ShapeAndTypeList shape_list, int index, @Cast("const int64_t*") LongPointer dims, + int num_dims); +public static native void TF_ShapeAndTypeListSetShape( + TF_ShapeAndTypeList shape_list, int index, @Cast("const int64_t*") LongBuffer dims, + int num_dims); +public static native void TF_ShapeAndTypeListSetShape( + TF_ShapeAndTypeList shape_list, int index, @Cast("const int64_t*") long[] dims, + int num_dims); +public static native void TF_ShapeAndTypeListSetUnknownShape( + TF_ShapeAndTypeList shape_list, int index); +public static native void TF_ShapeAndTypeListSetDtype( + TF_ShapeAndTypeList shape_list, int index, @Cast("TF_DataType") int dtype); +public static native void TF_DeleteShapeAndTypeList( + TF_ShapeAndTypeList shape_list); +public static native void TF_DeleteShapeAndTypeListArray( + @Cast("TF_ShapeAndTypeList**") PointerPointer shape_list_array, int num_items); +public static native void TF_DeleteShapeAndTypeListArray( + @ByPtrPtr TF_ShapeAndTypeList shape_list_array, int num_items); + +// Infer shapes for the given `op`. The arguments mimic the arguments of the +// `shape_inference::InferenceContext` constructor. Note the following: +// - The inputs of the `op` are not used for shape inference. So, it is +// OK to not have the inputs properly set in `op`. See `input_tensors` +// if you want shape inference to consider the input tensors of the +// op for shape inference. +// - The types need not be set in `input_shapes` as it is not used. +// - The number of `input_tensors` should be the same as the number of items +// in `input_shapes`. +// +// The results are returned in `output_shapes` and +// `output_resource_shapes_and_types`. The caller is responsible for freeing the +// memory in these buffers by calling `TF_DeleteShapeAndTypeList`. 
+public static native void TFE_InferShapes( + TFE_Op op, TF_ShapeAndTypeList input_shapes, @Cast("TF_Tensor**") PointerPointer input_tensors, + TF_ShapeAndTypeList input_tensor_as_shapes, + @Cast("TF_ShapeAndTypeList**") PointerPointer input_resource_shapes_and_types, + @Cast("TF_ShapeAndTypeList**") PointerPointer output_shapes, + @Cast("TF_ShapeAndTypeList***") @ByPtrPtr PointerPointer output_resource_shapes_and_types, TF_Status status); +public static native void TFE_InferShapes( + TFE_Op op, TF_ShapeAndTypeList input_shapes, @ByPtrPtr TF_Tensor input_tensors, + TF_ShapeAndTypeList input_tensor_as_shapes, + @ByPtrPtr TF_ShapeAndTypeList input_resource_shapes_and_types, + @ByPtrPtr TF_ShapeAndTypeList output_shapes, + @Cast("TF_ShapeAndTypeList***") @ByPtrPtr PointerPointer output_resource_shapes_and_types, TF_Status status); + +public static native void TF_ImportGraphDefOptionsSetValidateColocationConstraints( + TF_ImportGraphDefOptions opts, @Cast("unsigned char") byte enable); + +// Load the library specified by library_filename and register the pluggable +// device and related kernels present in that library. This function is not +// supported on embedded on mobile and embedded platforms and will fail if +// called. +// +// Pass "library_filename" to a platform-specific mechanism for dynamically +// loading a library. The rules for determining the exact location of the +// library are platform-specific and are not documented here. +// +// On success, returns the newly created library handle and places OK in status. +// The caller owns the library handle. +// +// On failure, returns nullptr and places an error status in status. +public static native TF_Library TF_LoadPluggableDeviceLibrary( + @Cast("const char*") BytePointer library_filename, TF_Status status); +public static native TF_Library TF_LoadPluggableDeviceLibrary( + String library_filename, TF_Status status); + +// Frees the memory associated with the library handle. +// Does NOT unload the library. 
+public static native void TF_DeletePluggableDeviceLibraryHandle( + TF_Library lib_handle); + +// Removes `func_name` from `g`. If `func_name` is not in `g`, an error will be +// returned. +public static native void TF_GraphRemoveFunction(TF_Graph g, + @Cast("const char*") BytePointer func_name, + TF_Status status); +public static native void TF_GraphRemoveFunction(TF_Graph g, + String func_name, + TF_Status status); + +// #ifdef __cplusplus /* end extern "C" */ +// #endif + +// #endif // TENSORFLOW_C_C_API_EXPERIMENTAL_H_ + + +// Parsed from tfe_serverdef_stub.h + +/* Copyright 2025 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +provided under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// #ifndef TENSORFLOW_JAVA_TFE_SERVERDEF_STUB_H_ +// #define TENSORFLOW_JAVA_TFE_SERVERDEF_STUB_H_ + +// #ifdef _WIN32 + +// #include "tensorflow/c/c_api.h" +// #include "tensorflow/c/c_api_experimental.h" + +// Include the implementation so that a local definition is always available +// on Windows. +// #include "tfe_serverdef_stub.cc" + +// #endif // _WIN32 + +// #endif // TENSORFLOW_JAVA_TFE_SERVERDEF_STUB_H_ + // Parsed from tfj_graph.h /* Copyright 2024 The TensorFlow Authors. All Rights Reserved. 
diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/AvailableDeviceInfo.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/AvailableDeviceInfo.java index e71192c47b2..50aa7d93009 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/AvailableDeviceInfo.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/AvailableDeviceInfo.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/AvailableDeviceInfoOrBuilder.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/AvailableDeviceInfoOrBuilder.java index ef9f13504d3..c35a7c6a745 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/AvailableDeviceInfoOrBuilder.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/AvailableDeviceInfoOrBuilder.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BenchmarkEntries.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BenchmarkEntries.java index b3ed52d11c0..73be037bfe8 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BenchmarkEntries.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BenchmarkEntries.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
-// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BenchmarkEntriesOrBuilder.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BenchmarkEntriesOrBuilder.java index b99b30bf045..de029d1d399 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BenchmarkEntriesOrBuilder.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BenchmarkEntriesOrBuilder.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BenchmarkEntry.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BenchmarkEntry.java index 0c470285827..efe111640d5 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BenchmarkEntry.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BenchmarkEntry.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BenchmarkEntryOrBuilder.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BenchmarkEntryOrBuilder.java index 476aae9ca10..fba00ccb7f1 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BenchmarkEntryOrBuilder.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BenchmarkEntryOrBuilder.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
-// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BfcMemoryMap.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BfcMemoryMap.java index fad0c98b837..e894298881d 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BfcMemoryMap.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BfcMemoryMap.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tsl/protobuf/bfc_memory_map.proto +// source: xla/tsl/protobuf/bfc_memory_map.proto package org.tensorflow.proto; @@ -5091,28 +5091,28 @@ public org.tensorflow.proto.BfcMemoryMap.MemoryDump getDefaultInstanceForType() descriptor; static { java.lang.String[] descriptorData = { - "\n!tsl/protobuf/bfc_memory_map.proto\022\nten" + - "sorflow\"\222\001\n\021MemAllocatorStats\022\022\n\nnum_all" + - "ocs\030\001 \001(\003\022\024\n\014bytes_in_use\030\002 \001(\003\022\031\n\021peak_" + - "bytes_in_use\030\003 \001(\003\022\032\n\022largest_alloc_size" + - "\030\004 \001(\003\022\034\n\024fragmentation_metric\030\005 \001(\002\"\256\001\n" + - "\010MemChunk\022\017\n\007address\030\001 \001(\004\022\014\n\004size\030\002 \001(\003" + - "\022\026\n\016requested_size\030\003 \001(\003\022\013\n\003bin\030\004 \001(\005\022\017\n" + - "\007op_name\030\005 \001(\t\022\026\n\016freed_at_count\030\006 \001(\004\022\024" + - "\n\014action_count\030\007 \001(\004\022\016\n\006in_use\030\010 \001(\010\022\017\n\007" + - "step_id\030\t \001(\004\"\213\001\n\nBinSummary\022\013\n\003bin\030\001 \001(" + - "\005\022\032\n\022total_bytes_in_use\030\002 \001(\003\022\032\n\022total_b" + - "ytes_in_bin\030\003 \001(\003\022\033\n\023total_chunks_in_use" + - "\030\004 \001(\003\022\033\n\023total_chunks_in_bin\030\005 \001(\003\".\n\010S" + - 
"napShot\022\024\n\014action_count\030\001 \001(\004\022\014\n\004size\030\002 " + - "\001(\003\"\315\001\n\nMemoryDump\022\026\n\016allocator_name\030\001 \001" + - "(\t\022+\n\013bin_summary\030\002 \003(\0132\026.tensorflow.Bin" + - "Summary\022#\n\005chunk\030\003 \003(\0132\024.tensorflow.MemC" + - "hunk\022\'\n\tsnap_shot\030\004 \003(\0132\024.tensorflow.Sna" + - "pShot\022,\n\005stats\030\005 \001(\0132\035.tensorflow.MemAll" + - "ocatorStatsBV\n\024org.tensorflow.protoZ>git" + - "hub.com/google/tsl/tsl/go/protobuf/for_c" + - "ore_protos_go_protob\006proto3" + "\n%xla/tsl/protobuf/bfc_memory_map.proto\022" + + "\ntensorflow\"\222\001\n\021MemAllocatorStats\022\022\n\nnum" + + "_allocs\030\001 \001(\003\022\024\n\014bytes_in_use\030\002 \001(\003\022\031\n\021p" + + "eak_bytes_in_use\030\003 \001(\003\022\032\n\022largest_alloc_" + + "size\030\004 \001(\003\022\034\n\024fragmentation_metric\030\005 \001(\002" + + "\"\256\001\n\010MemChunk\022\017\n\007address\030\001 \001(\004\022\014\n\004size\030\002" + + " \001(\003\022\026\n\016requested_size\030\003 \001(\003\022\013\n\003bin\030\004 \001(" + + "\005\022\017\n\007op_name\030\005 \001(\t\022\026\n\016freed_at_count\030\006 \001" + + "(\004\022\024\n\014action_count\030\007 \001(\004\022\016\n\006in_use\030\010 \001(\010" + + "\022\017\n\007step_id\030\t \001(\004\"\213\001\n\nBinSummary\022\013\n\003bin\030" + + "\001 \001(\005\022\032\n\022total_bytes_in_use\030\002 \001(\003\022\032\n\022tot" + + "al_bytes_in_bin\030\003 \001(\003\022\033\n\023total_chunks_in" + + "_use\030\004 \001(\003\022\033\n\023total_chunks_in_bin\030\005 \001(\003\"" + + ".\n\010SnapShot\022\024\n\014action_count\030\001 \001(\004\022\014\n\004siz" + + "e\030\002 \001(\003\"\315\001\n\nMemoryDump\022\026\n\016allocator_name" + + "\030\001 \001(\t\022+\n\013bin_summary\030\002 \003(\0132\026.tensorflow" + + ".BinSummary\022#\n\005chunk\030\003 \003(\0132\024.tensorflow." 
+ + "MemChunk\022\'\n\tsnap_shot\030\004 \003(\0132\024.tensorflow" + + ".SnapShot\022,\n\005stats\030\005 \001(\0132\035.tensorflow.Me" + + "mAllocatorStatsBV\n\024org.tensorflow.protoZ" + + ">github.com/google/tsl/tsl/go/protobuf/f" + + "or_core_protos_go_protob\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BuildConfiguration.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BuildConfiguration.java index 8e3f0c9e7b5..19b464ffb52 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BuildConfiguration.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BuildConfiguration.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BuildConfigurationOrBuilder.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BuildConfigurationOrBuilder.java index 0f4bc0c0740..112534dc95a 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BuildConfigurationOrBuilder.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/BuildConfigurationOrBuilder.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
-// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/CPUInfo.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/CPUInfo.java index 906c5e01a83..3816e55e459 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/CPUInfo.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/CPUInfo.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/CPUInfoOrBuilder.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/CPUInfoOrBuilder.java index de66bb23d57..9ede760853d 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/CPUInfoOrBuilder.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/CPUInfoOrBuilder.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/CommitId.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/CommitId.java index 3fdd1c804b6..9f6ad5f08bc 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/CommitId.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/CommitId.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
-// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/CommitIdOrBuilder.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/CommitIdOrBuilder.java index 1b124825e66..cb78f3bd9d2 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/CommitIdOrBuilder.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/CommitIdOrBuilder.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/ConfigProto.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/ConfigProto.java index 889a6aa39aa..5dcca1ed5f7 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/ConfigProto.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/ConfigProto.java @@ -344,6 +344,71 @@ public interface ExperimentalOrBuilder extends */ boolean getUseTfrt(); + /** + *

    +     * If true, use Pathways with TFRT API for multi host support.
    +     * 
    + * + * bool enable_multi_host = 27; + * @return The enableMultiHost. + */ + boolean getEnableMultiHost(); + + /** + *
    +     * If true, use ifrt as the backend for TFRT. This is only used when
    +     * `use_tfrt` is true.
    +     * 
    + * + * bool tfrt_use_ifrt = 32; + * @return The tfrtUseIfrt. + */ + boolean getTfrtUseIfrt(); + + /** + *
    +     * Port for the Pathways server. Ignored if enable_multi_host=false.
    +     * 
    + * + * int32 backend_server_port = 28; + * @return The backendServerPort. + */ + int getBackendServerPort(); + + /** + *
    +     * If true, TFRT will use TPU specific compiler passes and perform TPU
    +     * specific initialization.
    +     * 
    + * + * bool target_tpu = 29; + * @return The targetTpu. + */ + boolean getTargetTpu(); + + /** + *
    +     * If true, TFRT will use GPU specific compiler passes and perform GPU
    +     * specific initialization.
    +     * 
    + * + * bool target_gpu = 30; + * @return The targetGpu. + */ + boolean getTargetGpu(); + + /** + *
    +     * The threshold to merge small streams in TFRT. The stream with cost
    +     * smaller than the threshold will be merged. Setting it to value 1
    +     * disables all merges.
    +     * 
    + * + * int32 stream_merge_threshold = 31; + * @return The streamMergeThreshold. + */ + int getStreamMergeThreshold(); + /** *
          * Whether functional control flow op lowering should be disabled. This is
    @@ -1032,6 +1097,101 @@ public boolean getUseTfrt() {
           return useTfrt_;
         }
     
    +    public static final int ENABLE_MULTI_HOST_FIELD_NUMBER = 27;
    +    private boolean enableMultiHost_;
    +    /**
    +     * 
    +     * If true, use Pathways with TFRT API for multi host support.
    +     * 
    + * + * bool enable_multi_host = 27; + * @return The enableMultiHost. + */ + @java.lang.Override + public boolean getEnableMultiHost() { + return enableMultiHost_; + } + + public static final int TFRT_USE_IFRT_FIELD_NUMBER = 32; + private boolean tfrtUseIfrt_; + /** + *
    +     * If true, use ifrt as the backend for TFRT. This is only used when
    +     * `use_tfrt` is true.
    +     * 
    + * + * bool tfrt_use_ifrt = 32; + * @return The tfrtUseIfrt. + */ + @java.lang.Override + public boolean getTfrtUseIfrt() { + return tfrtUseIfrt_; + } + + public static final int BACKEND_SERVER_PORT_FIELD_NUMBER = 28; + private int backendServerPort_; + /** + *
    +     * Port for the Pathways server. Ignored if enable_multi_host=false.
    +     * 
    + * + * int32 backend_server_port = 28; + * @return The backendServerPort. + */ + @java.lang.Override + public int getBackendServerPort() { + return backendServerPort_; + } + + public static final int TARGET_TPU_FIELD_NUMBER = 29; + private boolean targetTpu_; + /** + *
    +     * If true, TFRT will use TPU specific compiler passes and perform TPU
    +     * specific initialization.
    +     * 
    + * + * bool target_tpu = 29; + * @return The targetTpu. + */ + @java.lang.Override + public boolean getTargetTpu() { + return targetTpu_; + } + + public static final int TARGET_GPU_FIELD_NUMBER = 30; + private boolean targetGpu_; + /** + *
    +     * If true, TFRT will use GPU specific compiler passes and perform GPU
    +     * specific initialization.
    +     * 
    + * + * bool target_gpu = 30; + * @return The targetGpu. + */ + @java.lang.Override + public boolean getTargetGpu() { + return targetGpu_; + } + + public static final int STREAM_MERGE_THRESHOLD_FIELD_NUMBER = 31; + private int streamMergeThreshold_; + /** + *
    +     * The threshold to merge small streams in TFRT. The stream with cost
    +     * smaller than the threshold will be merged. Setting it to value 1
    +     * disables all merges.
    +     * 
    + * + * int32 stream_merge_threshold = 31; + * @return The streamMergeThreshold. + */ + @java.lang.Override + public int getStreamMergeThreshold() { + return streamMergeThreshold_; + } + public static final int DISABLE_FUNCTIONAL_OPS_LOWERING_FIELD_NUMBER = 21; private boolean disableFunctionalOpsLowering_; /** @@ -1221,6 +1381,24 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (disableEagerExecutorStreamingEnqueue_ != false) { output.writeBool(26, disableEagerExecutorStreamingEnqueue_); } + if (enableMultiHost_ != false) { + output.writeBool(27, enableMultiHost_); + } + if (backendServerPort_ != 0) { + output.writeInt32(28, backendServerPort_); + } + if (targetTpu_ != false) { + output.writeBool(29, targetTpu_); + } + if (targetGpu_ != false) { + output.writeBool(30, targetGpu_); + } + if (streamMergeThreshold_ != 0) { + output.writeInt32(31, streamMergeThreshold_); + } + if (tfrtUseIfrt_ != false) { + output.writeBool(32, tfrtUseIfrt_); + } getUnknownFields().writeTo(output); } @@ -1316,6 +1494,30 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeBoolSize(26, disableEagerExecutorStreamingEnqueue_); } + if (enableMultiHost_ != false) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(27, enableMultiHost_); + } + if (backendServerPort_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(28, backendServerPort_); + } + if (targetTpu_ != false) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(29, targetTpu_); + } + if (targetGpu_ != false) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(30, targetGpu_); + } + if (streamMergeThreshold_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(31, streamMergeThreshold_); + } + if (tfrtUseIfrt_ != false) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(32, tfrtUseIfrt_); + } size += getUnknownFields().getSerializedSize(); memoizedSize 
= size; return size; @@ -1367,6 +1569,18 @@ public boolean equals(final java.lang.Object obj) { != other.getXlaFusionAutotunerThresh()) return false; if (getUseTfrt() != other.getUseTfrt()) return false; + if (getEnableMultiHost() + != other.getEnableMultiHost()) return false; + if (getTfrtUseIfrt() + != other.getTfrtUseIfrt()) return false; + if (getBackendServerPort() + != other.getBackendServerPort()) return false; + if (getTargetTpu() + != other.getTargetTpu()) return false; + if (getTargetGpu() + != other.getTargetGpu()) return false; + if (getStreamMergeThreshold() + != other.getStreamMergeThreshold()) return false; if (getDisableFunctionalOpsLowering() != other.getDisableFunctionalOpsLowering()) return false; if (getXlaPreferSingleGraphCluster() @@ -1439,6 +1653,22 @@ public int hashCode() { hash = (37 * hash) + USE_TFRT_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( getUseTfrt()); + hash = (37 * hash) + ENABLE_MULTI_HOST_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getEnableMultiHost()); + hash = (37 * hash) + TFRT_USE_IFRT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getTfrtUseIfrt()); + hash = (37 * hash) + BACKEND_SERVER_PORT_FIELD_NUMBER; + hash = (53 * hash) + getBackendServerPort(); + hash = (37 * hash) + TARGET_TPU_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getTargetTpu()); + hash = (37 * hash) + TARGET_GPU_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getTargetGpu()); + hash = (37 * hash) + STREAM_MERGE_THRESHOLD_FIELD_NUMBER; + hash = (53 * hash) + getStreamMergeThreshold(); hash = (37 * hash) + DISABLE_FUNCTIONAL_OPS_LOWERING_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( getDisableFunctionalOpsLowering()); @@ -1627,6 +1857,18 @@ public Builder clear() { useTfrt_ = false; + enableMultiHost_ = false; + + tfrtUseIfrt_ = false; + + 
backendServerPort_ = 0; + + targetTpu_ = false; + + targetGpu_ = false; + + streamMergeThreshold_ = 0; + disableFunctionalOpsLowering_ = false; xlaPreferSingleGraphCluster_ = false; @@ -1688,6 +1930,12 @@ public org.tensorflow.proto.ConfigProto.Experimental buildPartial() { result.disableOutputPartitionGraphs_ = disableOutputPartitionGraphs_; result.xlaFusionAutotunerThresh_ = xlaFusionAutotunerThresh_; result.useTfrt_ = useTfrt_; + result.enableMultiHost_ = enableMultiHost_; + result.tfrtUseIfrt_ = tfrtUseIfrt_; + result.backendServerPort_ = backendServerPort_; + result.targetTpu_ = targetTpu_; + result.targetGpu_ = targetGpu_; + result.streamMergeThreshold_ = streamMergeThreshold_; result.disableFunctionalOpsLowering_ = disableFunctionalOpsLowering_; result.xlaPreferSingleGraphCluster_ = xlaPreferSingleGraphCluster_; if (coordinationConfigBuilder_ == null) { @@ -1798,6 +2046,24 @@ public Builder mergeFrom(org.tensorflow.proto.ConfigProto.Experimental other) { if (other.getUseTfrt() != false) { setUseTfrt(other.getUseTfrt()); } + if (other.getEnableMultiHost() != false) { + setEnableMultiHost(other.getEnableMultiHost()); + } + if (other.getTfrtUseIfrt() != false) { + setTfrtUseIfrt(other.getTfrtUseIfrt()); + } + if (other.getBackendServerPort() != 0) { + setBackendServerPort(other.getBackendServerPort()); + } + if (other.getTargetTpu() != false) { + setTargetTpu(other.getTargetTpu()); + } + if (other.getTargetGpu() != false) { + setTargetGpu(other.getTargetGpu()); + } + if (other.getStreamMergeThreshold() != 0) { + setStreamMergeThreshold(other.getStreamMergeThreshold()); + } if (other.getDisableFunctionalOpsLowering() != false) { setDisableFunctionalOpsLowering(other.getDisableFunctionalOpsLowering()); } @@ -1953,6 +2219,36 @@ public Builder mergeFrom( break; } // case 208 + case 216: { + enableMultiHost_ = input.readBool(); + + break; + } // case 216 + case 224: { + backendServerPort_ = input.readInt32(); + + break; + } // case 224 + case 232: { + targetTpu_ = 
input.readBool(); + + break; + } // case 232 + case 240: { + targetGpu_ = input.readBool(); + + break; + } // case 240 + case 248: { + streamMergeThreshold_ = input.readInt32(); + + break; + } // case 248 + case 256: { + tfrtUseIfrt_ = input.readBool(); + + break; + } // case 256 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag @@ -3134,6 +3430,279 @@ public Builder clearUseTfrt() { return this; } + private boolean enableMultiHost_ ; + /** + *
    +       * If true, use Pathways with TFRT API for multi host support.
    +       * 
    + * + * bool enable_multi_host = 27; + * @return The enableMultiHost. + */ + @java.lang.Override + public boolean getEnableMultiHost() { + return enableMultiHost_; + } + /** + *
    +       * If true, use Pathways with TFRT API for multi host support.
    +       * 
    + * + * bool enable_multi_host = 27; + * @param value The enableMultiHost to set. + * @return This builder for chaining. + */ + public Builder setEnableMultiHost(boolean value) { + + enableMultiHost_ = value; + onChanged(); + return this; + } + /** + *
    +       * If true, use Pathways with TFRT API for multi host support.
    +       * 
    + * + * bool enable_multi_host = 27; + * @return This builder for chaining. + */ + public Builder clearEnableMultiHost() { + + enableMultiHost_ = false; + onChanged(); + return this; + } + + private boolean tfrtUseIfrt_ ; + /** + *
    +       * If true, use ifrt as the backend for TFRT. This is only used when
    +       * `use_tfrt` is true.
    +       * 
    + * + * bool tfrt_use_ifrt = 32; + * @return The tfrtUseIfrt. + */ + @java.lang.Override + public boolean getTfrtUseIfrt() { + return tfrtUseIfrt_; + } + /** + *
    +       * If true, use ifrt as the backend for TFRT. This is only used when
    +       * `use_tfrt` is true.
    +       * 
    + * + * bool tfrt_use_ifrt = 32; + * @param value The tfrtUseIfrt to set. + * @return This builder for chaining. + */ + public Builder setTfrtUseIfrt(boolean value) { + + tfrtUseIfrt_ = value; + onChanged(); + return this; + } + /** + *
    +       * If true, use ifrt as the backend for TFRT. This is only used when
    +       * `use_tfrt` is true.
    +       * 
    + * + * bool tfrt_use_ifrt = 32; + * @return This builder for chaining. + */ + public Builder clearTfrtUseIfrt() { + + tfrtUseIfrt_ = false; + onChanged(); + return this; + } + + private int backendServerPort_ ; + /** + *
    +       * Port for the Pathways server. Ignored if enable_multi_host=false.
    +       * 
    + * + * int32 backend_server_port = 28; + * @return The backendServerPort. + */ + @java.lang.Override + public int getBackendServerPort() { + return backendServerPort_; + } + /** + *
    +       * Port for the Pathways server. Ignored if enable_multi_host=false.
    +       * 
    + * + * int32 backend_server_port = 28; + * @param value The backendServerPort to set. + * @return This builder for chaining. + */ + public Builder setBackendServerPort(int value) { + + backendServerPort_ = value; + onChanged(); + return this; + } + /** + *
    +       * Port for the Pathways server. Ignored if enable_multi_host=false.
    +       * 
    + * + * int32 backend_server_port = 28; + * @return This builder for chaining. + */ + public Builder clearBackendServerPort() { + + backendServerPort_ = 0; + onChanged(); + return this; + } + + private boolean targetTpu_ ; + /** + *
    +       * If true, TFRT will use TPU specific compiler passes and perform TPU
    +       * specific initialization.
    +       * 
    + * + * bool target_tpu = 29; + * @return The targetTpu. + */ + @java.lang.Override + public boolean getTargetTpu() { + return targetTpu_; + } + /** + *
    +       * If true, TFRT will use TPU specific compiler passes and perform TPU
    +       * specific initialization.
    +       * 
    + * + * bool target_tpu = 29; + * @param value The targetTpu to set. + * @return This builder for chaining. + */ + public Builder setTargetTpu(boolean value) { + + targetTpu_ = value; + onChanged(); + return this; + } + /** + *
    +       * If true, TFRT will use TPU specific compiler passes and perform TPU
    +       * specific initialization.
    +       * 
    + * + * bool target_tpu = 29; + * @return This builder for chaining. + */ + public Builder clearTargetTpu() { + + targetTpu_ = false; + onChanged(); + return this; + } + + private boolean targetGpu_ ; + /** + *
    +       * If true, TFRT will use GPU specific compiler passes and perform GPU
    +       * specific initialization.
    +       * 
    + * + * bool target_gpu = 30; + * @return The targetGpu. + */ + @java.lang.Override + public boolean getTargetGpu() { + return targetGpu_; + } + /** + *
    +       * If true, TFRT will use GPU specific compiler passes and perform GPU
    +       * specific initialization.
    +       * 
    + * + * bool target_gpu = 30; + * @param value The targetGpu to set. + * @return This builder for chaining. + */ + public Builder setTargetGpu(boolean value) { + + targetGpu_ = value; + onChanged(); + return this; + } + /** + *
    +       * If true, TFRT will use GPU specific compiler passes and perform GPU
    +       * specific initialization.
    +       * 
    + * + * bool target_gpu = 30; + * @return This builder for chaining. + */ + public Builder clearTargetGpu() { + + targetGpu_ = false; + onChanged(); + return this; + } + + private int streamMergeThreshold_ ; + /** + *
    +       * The threshold to merge small streams in TFRT. The stream with cost
    +       * smaller than the threshold will be merged. Setting it to value 1
    +       * disables all merges.
    +       * 
    + * + * int32 stream_merge_threshold = 31; + * @return The streamMergeThreshold. + */ + @java.lang.Override + public int getStreamMergeThreshold() { + return streamMergeThreshold_; + } + /** + *
    +       * The threshold to merge small streams in TFRT. The stream with cost
    +       * smaller than the threshold will be merged. Setting it to value 1
    +       * disables all merges.
    +       * 
    + * + * int32 stream_merge_threshold = 31; + * @param value The streamMergeThreshold to set. + * @return This builder for chaining. + */ + public Builder setStreamMergeThreshold(int value) { + + streamMergeThreshold_ = value; + onChanged(); + return this; + } + /** + *
    +       * The threshold to merge small streams in TFRT. The stream with cost
    +       * smaller than the threshold will be merged. Setting it to value 1
    +       * disables all merges.
    +       * 
    + * + * int32 stream_merge_threshold = 31; + * @return This builder for chaining. + */ + public Builder clearStreamMergeThreshold() { + + streamMergeThreshold_ = 0; + onChanged(); + return this; + } + private boolean disableFunctionalOpsLowering_ ; /** *
    @@ -3988,6 +4557,44 @@ public org.tensorflow.proto.GPUOptionsOrBuilder getGpuOptionsOrBuilder() {
         return getGpuOptions();
       }
     
    +  public static final int PLUGGABLE_DEVICE_OPTIONS_FIELD_NUMBER = 18;
    +  private org.tensorflow.proto.GPUOptions pluggableDeviceOptions_;
    +  /**
    +   * 
    +   * Options that apply to pluggable devices.
    +   * 
    + * + * .tensorflow.GPUOptions pluggable_device_options = 18; + * @return Whether the pluggableDeviceOptions field is set. + */ + @java.lang.Override + public boolean hasPluggableDeviceOptions() { + return pluggableDeviceOptions_ != null; + } + /** + *
    +   * Options that apply to pluggable devices.
    +   * 
    + * + * .tensorflow.GPUOptions pluggable_device_options = 18; + * @return The pluggableDeviceOptions. + */ + @java.lang.Override + public org.tensorflow.proto.GPUOptions getPluggableDeviceOptions() { + return pluggableDeviceOptions_ == null ? org.tensorflow.proto.GPUOptions.getDefaultInstance() : pluggableDeviceOptions_; + } + /** + *
    +   * Options that apply to pluggable devices.
    +   * 
    + * + * .tensorflow.GPUOptions pluggable_device_options = 18; + */ + @java.lang.Override + public org.tensorflow.proto.GPUOptionsOrBuilder getPluggableDeviceOptionsOrBuilder() { + return getPluggableDeviceOptions(); + } + public static final int ALLOW_SOFT_PLACEMENT_FIELD_NUMBER = 7; private boolean allowSoftPlacement_; /** @@ -4284,6 +4891,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (shareClusterDevicesInSession_ != false) { output.writeBool(17, shareClusterDevicesInSession_); } + if (pluggableDeviceOptions_ != null) { + output.writeMessage(18, getPluggableDeviceOptions()); + } getUnknownFields().writeTo(output); } @@ -4371,6 +4981,10 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeBoolSize(17, shareClusterDevicesInSession_); } + if (pluggableDeviceOptions_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(18, getPluggableDeviceOptions()); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -4405,6 +5019,11 @@ public boolean equals(final java.lang.Object obj) { if (!getGpuOptions() .equals(other.getGpuOptions())) return false; } + if (hasPluggableDeviceOptions() != other.hasPluggableDeviceOptions()) return false; + if (hasPluggableDeviceOptions()) { + if (!getPluggableDeviceOptions() + .equals(other.getPluggableDeviceOptions())) return false; + } if (getAllowSoftPlacement() != other.getAllowSoftPlacement()) return false; if (getLogDevicePlacement() @@ -4471,6 +5090,10 @@ public int hashCode() { hash = (37 * hash) + GPU_OPTIONS_FIELD_NUMBER; hash = (53 * hash) + getGpuOptions().hashCode(); } + if (hasPluggableDeviceOptions()) { + hash = (37 * hash) + PLUGGABLE_DEVICE_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getPluggableDeviceOptions().hashCode(); + } hash = (37 * hash) + ALLOW_SOFT_PLACEMENT_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( getAllowSoftPlacement()); @@ -4681,6 +5304,12 @@ 
public Builder clear() { gpuOptions_ = null; gpuOptionsBuilder_ = null; } + if (pluggableDeviceOptionsBuilder_ == null) { + pluggableDeviceOptions_ = null; + } else { + pluggableDeviceOptions_ = null; + pluggableDeviceOptionsBuilder_ = null; + } allowSoftPlacement_ = false; logDevicePlacement_ = false; @@ -4767,6 +5396,11 @@ public org.tensorflow.proto.ConfigProto buildPartial() { } else { result.gpuOptions_ = gpuOptionsBuilder_.build(); } + if (pluggableDeviceOptionsBuilder_ == null) { + result.pluggableDeviceOptions_ = pluggableDeviceOptions_; + } else { + result.pluggableDeviceOptions_ = pluggableDeviceOptionsBuilder_.build(); + } result.allowSoftPlacement_ = allowSoftPlacement_; result.logDevicePlacement_ = logDevicePlacement_; if (graphOptionsBuilder_ == null) { @@ -4893,6 +5527,9 @@ public Builder mergeFrom(org.tensorflow.proto.ConfigProto other) { if (other.hasGpuOptions()) { mergeGpuOptions(other.getGpuOptions()); } + if (other.hasPluggableDeviceOptions()) { + mergePluggableDeviceOptions(other.getPluggableDeviceOptions()); + } if (other.getAllowSoftPlacement() != false) { setAllowSoftPlacement(other.getAllowSoftPlacement()); } @@ -5053,6 +5690,13 @@ public Builder mergeFrom( break; } // case 136 + case 146: { + input.readMessage( + getPluggableDeviceOptionsFieldBuilder().getBuilder(), + extensionRegistry); + + break; + } // case 146 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag @@ -6413,6 +7057,161 @@ public org.tensorflow.proto.GPUOptionsOrBuilder getGpuOptionsOrBuilder() { return gpuOptionsBuilder_; } + private org.tensorflow.proto.GPUOptions pluggableDeviceOptions_; + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.GPUOptions, org.tensorflow.proto.GPUOptions.Builder, org.tensorflow.proto.GPUOptionsOrBuilder> pluggableDeviceOptionsBuilder_; + /** + *
    +     * Options that apply to pluggable devices.
    +     * 
    + * + * .tensorflow.GPUOptions pluggable_device_options = 18; + * @return Whether the pluggableDeviceOptions field is set. + */ + public boolean hasPluggableDeviceOptions() { + return pluggableDeviceOptionsBuilder_ != null || pluggableDeviceOptions_ != null; + } + /** + *
    +     * Options that apply to pluggable devices.
    +     * 
    + * + * .tensorflow.GPUOptions pluggable_device_options = 18; + * @return The pluggableDeviceOptions. + */ + public org.tensorflow.proto.GPUOptions getPluggableDeviceOptions() { + if (pluggableDeviceOptionsBuilder_ == null) { + return pluggableDeviceOptions_ == null ? org.tensorflow.proto.GPUOptions.getDefaultInstance() : pluggableDeviceOptions_; + } else { + return pluggableDeviceOptionsBuilder_.getMessage(); + } + } + /** + *
    +     * Options that apply to pluggable devices.
    +     * 
    + * + * .tensorflow.GPUOptions pluggable_device_options = 18; + */ + public Builder setPluggableDeviceOptions(org.tensorflow.proto.GPUOptions value) { + if (pluggableDeviceOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + pluggableDeviceOptions_ = value; + onChanged(); + } else { + pluggableDeviceOptionsBuilder_.setMessage(value); + } + + return this; + } + /** + *
    +     * Options that apply to pluggable devices.
    +     * 
    + * + * .tensorflow.GPUOptions pluggable_device_options = 18; + */ + public Builder setPluggableDeviceOptions( + org.tensorflow.proto.GPUOptions.Builder builderForValue) { + if (pluggableDeviceOptionsBuilder_ == null) { + pluggableDeviceOptions_ = builderForValue.build(); + onChanged(); + } else { + pluggableDeviceOptionsBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
    +     * Options that apply to pluggable devices.
    +     * 
    + * + * .tensorflow.GPUOptions pluggable_device_options = 18; + */ + public Builder mergePluggableDeviceOptions(org.tensorflow.proto.GPUOptions value) { + if (pluggableDeviceOptionsBuilder_ == null) { + if (pluggableDeviceOptions_ != null) { + pluggableDeviceOptions_ = + org.tensorflow.proto.GPUOptions.newBuilder(pluggableDeviceOptions_).mergeFrom(value).buildPartial(); + } else { + pluggableDeviceOptions_ = value; + } + onChanged(); + } else { + pluggableDeviceOptionsBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
    +     * Options that apply to pluggable devices.
    +     * 
    + * + * .tensorflow.GPUOptions pluggable_device_options = 18; + */ + public Builder clearPluggableDeviceOptions() { + if (pluggableDeviceOptionsBuilder_ == null) { + pluggableDeviceOptions_ = null; + onChanged(); + } else { + pluggableDeviceOptions_ = null; + pluggableDeviceOptionsBuilder_ = null; + } + + return this; + } + /** + *
    +     * Options that apply to pluggable devices.
    +     * 
    + * + * .tensorflow.GPUOptions pluggable_device_options = 18; + */ + public org.tensorflow.proto.GPUOptions.Builder getPluggableDeviceOptionsBuilder() { + + onChanged(); + return getPluggableDeviceOptionsFieldBuilder().getBuilder(); + } + /** + *
    +     * Options that apply to pluggable devices.
    +     * 
    + * + * .tensorflow.GPUOptions pluggable_device_options = 18; + */ + public org.tensorflow.proto.GPUOptionsOrBuilder getPluggableDeviceOptionsOrBuilder() { + if (pluggableDeviceOptionsBuilder_ != null) { + return pluggableDeviceOptionsBuilder_.getMessageOrBuilder(); + } else { + return pluggableDeviceOptions_ == null ? + org.tensorflow.proto.GPUOptions.getDefaultInstance() : pluggableDeviceOptions_; + } + } + /** + *
    +     * Options that apply to pluggable devices.
    +     * 
    + * + * .tensorflow.GPUOptions pluggable_device_options = 18; + */ + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.GPUOptions, org.tensorflow.proto.GPUOptions.Builder, org.tensorflow.proto.GPUOptionsOrBuilder> + getPluggableDeviceOptionsFieldBuilder() { + if (pluggableDeviceOptionsBuilder_ == null) { + pluggableDeviceOptionsBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.GPUOptions, org.tensorflow.proto.GPUOptions.Builder, org.tensorflow.proto.GPUOptionsOrBuilder>( + getPluggableDeviceOptions(), + getParentForChildren(), + isClean()); + pluggableDeviceOptions_ = null; + } + return pluggableDeviceOptionsBuilder_; + } + private boolean allowSoftPlacement_ ; /** *
    diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/ConfigProtoOrBuilder.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/ConfigProtoOrBuilder.java
    index d158b44e08f..29a052555c6 100644
    --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/ConfigProtoOrBuilder.java
    +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/ConfigProtoOrBuilder.java
    @@ -341,6 +341,33 @@ org.tensorflow.proto.ThreadPoolOptionProtoOrBuilder getSessionInterOpThreadPoolO
        */
       org.tensorflow.proto.GPUOptionsOrBuilder getGpuOptionsOrBuilder();
     
    +  /**
    +   * 
    +   * Options that apply to pluggable devices.
    +   * 
    + * + * .tensorflow.GPUOptions pluggable_device_options = 18; + * @return Whether the pluggableDeviceOptions field is set. + */ + boolean hasPluggableDeviceOptions(); + /** + *
    +   * Options that apply to pluggable devices.
    +   * 
    + * + * .tensorflow.GPUOptions pluggable_device_options = 18; + * @return The pluggableDeviceOptions. + */ + org.tensorflow.proto.GPUOptions getPluggableDeviceOptions(); + /** + *
    +   * Options that apply to pluggable devices.
    +   * 
    + * + * .tensorflow.GPUOptions pluggable_device_options = 18; + */ + org.tensorflow.proto.GPUOptionsOrBuilder getPluggableDeviceOptionsOrBuilder(); + /** *
        * Whether soft placement is allowed. If allow_soft_placement is true,
    diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/ConfigProtos.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/ConfigProtos.java
    index 9f8c8190e60..ee8eb70f710 100644
    --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/ConfigProtos.java
    +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/ConfigProtos.java
    @@ -29,6 +29,11 @@ public static void registerAllExtensions(
       static final 
         com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
           internal_static_tensorflow_GPUOptions_Experimental_VirtualDevices_fieldAccessorTable;
    +  static final com.google.protobuf.Descriptors.Descriptor
    +    internal_static_tensorflow_GPUOptions_Experimental_StreamMergeOptions_descriptor;
    +  static final 
    +    com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
    +      internal_static_tensorflow_GPUOptions_Experimental_StreamMergeOptions_fieldAccessorTable;
       static final com.google.protobuf.Descriptors.Descriptor
         internal_static_tensorflow_OptimizerOptions_descriptor;
       static final 
    @@ -127,7 +132,7 @@ public static void registerAllExtensions(
           "obuf/debug.proto\032.tensorflow/core/protob" +
           "uf/rewriter_config.proto\032*tensorflow/cor" +
           "e/protobuf/rpc_options.proto\032&tsl/protob" +
    -      "uf/coordination_config.proto\"\352\007\n\nGPUOpti" +
    +      "uf/coordination_config.proto\"\211\n\n\nGPUOpti" +
           "ons\022\'\n\037per_process_gpu_memory_fraction\030\001" +
           " \001(\001\022\024\n\014allow_growth\030\004 \001(\010\022\026\n\016allocator_" +
           "type\030\002 \001(\t\022\037\n\027deferred_deletion_bytes\030\003 " +
    @@ -135,7 +140,7 @@ public static void registerAllExtensions(
           "ing_active_delay_usecs\030\006 \001(\005\022$\n\034polling_" +
           "inactive_delay_msecs\030\007 \001(\005\022\034\n\024force_gpu_" +
           "compatible\030\010 \001(\010\0229\n\014experimental\030\t \001(\0132#" +
    -      ".tensorflow.GPUOptions.Experimental\032\243\005\n\014" +
    +      ".tensorflow.GPUOptions.Experimental\032\302\007\n\014" +
           "Experimental\022K\n\017virtual_devices\030\001 \003(\01322." +
           "tensorflow.GPUOptions.Experimental.Virtu" +
           "alDevices\022#\n\033num_virtual_devices_per_gpu" +
    @@ -150,123 +155,135 @@ public static void registerAllExtensions(
           "llow_retry_on_allocation_failure\030\014 \001(\010\022 " +
           "\n\030gpu_host_mem_limit_in_mb\030\r \001(\002\022$\n\034gpu_" +
           "host_mem_disallow_growth\030\016 \001(\010\022$\n\034gpu_sy" +
    -      "stem_memory_size_in_mb\030\020 \001(\005\032S\n\016VirtualD" +
    -      "evices\022\027\n\017memory_limit_mb\030\001 \003(\002\022\020\n\010prior" +
    -      "ity\030\002 \003(\005\022\026\n\016device_ordinal\030\003 \003(\005\"\235\003\n\020Op" +
    -      "timizerOptions\022+\n#do_common_subexpressio" +
    -      "n_elimination\030\001 \001(\010\022\033\n\023do_constant_foldi" +
    -      "ng\030\002 \001(\010\022$\n\034max_folded_constant_in_bytes" +
    -      "\030\006 \001(\003\022\034\n\024do_function_inlining\030\004 \001(\010\0225\n\t" +
    -      "opt_level\030\003 \001(\0162\".tensorflow.OptimizerOp" +
    -      "tions.Level\022E\n\020global_jit_level\030\005 \001(\0162+." +
    -      "tensorflow.OptimizerOptions.GlobalJitLev" +
    -      "el\022\026\n\016cpu_global_jit\030\007 \001(\010\" \n\005Level\022\006\n\002L" +
    -      "1\020\000\022\017\n\002L0\020\377\377\377\377\377\377\377\377\377\001\"C\n\016GlobalJitLevel\022\013" +
    -      "\n\007DEFAULT\020\000\022\020\n\003OFF\020\377\377\377\377\377\377\377\377\377\001\022\010\n\004ON_1\020\001\022" +
    -      "\010\n\004ON_2\020\002\"\356\002\n\014GraphOptions\022\036\n\026enable_rec" +
    -      "v_scheduling\030\002 \001(\010\0227\n\021optimizer_options\030" +
    -      "\003 \001(\0132\034.tensorflow.OptimizerOptions\022\030\n\020b" +
    -      "uild_cost_model\030\004 \001(\003\022\036\n\026build_cost_mode" +
    -      "l_after\030\t \001(\003\022\024\n\014infer_shapes\030\005 \001(\010\022\032\n\022p" +
    -      "lace_pruned_graph\030\006 \001(\010\022 \n\030enable_bfloat" +
    -      "16_sendrecv\030\007 \001(\010\022\025\n\rtimeline_step\030\010 \001(\005" +
    -      "\0223\n\017rewrite_options\030\n \001(\0132\032.tensorflow.R" +
    -      "ewriterConfigJ\004\010\001\020\002R%skip_common_subexpr" +
    -      "ession_elimination\"A\n\025ThreadPoolOptionPr" +
    -      "oto\022\023\n\013num_threads\030\001 \001(\005\022\023\n\013global_name\030" +
    -      "\002 \001(\t\"0\n\017SessionMetadata\022\014\n\004name\030\001 \001(\t\022\017" +
    -      "\n\007version\030\002 \001(\003\"\225\017\n\013ConfigProto\022>\n\014devic" +
    -      "e_count\030\001 \003(\0132(.tensorflow.ConfigProto.D" +
    -      "eviceCountEntry\022$\n\034intra_op_parallelism_" +
    -      "threads\030\002 \001(\005\022$\n\034inter_op_parallelism_th" +
    -      "reads\030\005 \001(\005\022\037\n\027use_per_session_threads\030\t" +
    -      " \001(\010\022G\n\034session_inter_op_thread_pool\030\014 \003" +
    -      "(\0132!.tensorflow.ThreadPoolOptionProto\022\030\n" +
    -      "\020placement_period\030\003 \001(\005\022\026\n\016device_filter" +
    -      "s\030\004 \003(\t\022+\n\013gpu_options\030\006 \001(\0132\026.tensorflo" +
    -      "w.GPUOptions\022\034\n\024allow_soft_placement\030\007 \001" +
    -      "(\010\022\034\n\024log_device_placement\030\010 \001(\010\022/\n\rgrap" +
    -      "h_options\030\n \001(\0132\030.tensorflow.GraphOption" +
    -      "s\022\037\n\027operation_timeout_in_ms\030\013 \001(\003\022+\n\013rp" +
    -      "c_options\030\r \001(\0132\026.tensorflow.RPCOptions\022" +
    -      "+\n\013cluster_def\030\016 \001(\0132\026.tensorflow.Cluste" +
    -      "rDef\022\035\n\025isolate_session_state\030\017 \001(\010\022(\n s" +
    -      "hare_cluster_devices_in_session\030\021 \001(\010\022:\n" +
    -      "\014experimental\030\020 \001(\0132$.tensorflow.ConfigP" +
    -      "roto.Experimental\0322\n\020DeviceCountEntry\022\013\n" +
    -      "\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\005:\0028\001\032\217\t\n\014Experi" +
    -      "mental\022\037\n\027collective_group_leader\030\001 \001(\t\022" +
    -      "\025\n\rexecutor_type\030\003 \001(\t\022\032\n\022recv_buf_max_c" +
    -      "hunk\030\004 \001(\005\022\031\n\021use_numa_affinity\030\005 \001(\010\0225\n" +
    -      "-collective_deterministic_sequential_exe" +
    -      "cution\030\006 \001(\010\022\027\n\017collective_nccl\030\007 \001(\010\0226\n" +
    -      ".share_session_state_in_clusterspec_prop" +
    -      "agation\030\010 \001(\010\022\037\n\027disable_thread_spinning" +
    -      "\030\t \001(\010\022(\n share_cluster_devices_in_sessi" +
    -      "on\030\n \001(\010\0225\n\020session_metadata\030\013 \001(\0132\033.ten" +
    -      "sorflow.SessionMetadata\022!\n\031optimize_for_" +
    -      "static_graph\030\014 \001(\010\022\032\n\022enable_mlir_bridge" +
    -      "\030\r \001(\010\022S\n\023mlir_bridge_rollout\030\021 \001(\01626.te" +
    -      "nsorflow.ConfigProto.Experimental.MlirBr" +
    -      "idgeRollout\022&\n\036enable_mlir_graph_optimiz" +
    -      "ation\030\020 \001(\010\022\'\n\037disable_output_partition_" +
    -      "graphs\030\016 \001(\010\022#\n\033xla_fusion_autotuner_thr" +
    -      "esh\030\017 \001(\003\022\020\n\010use_tfrt\030\022 \001(\010\022\'\n\037disable_f" +
    -      "unctional_ops_lowering\030\025 \001(\010\022\'\n\037xla_pref" +
    -      "er_single_graph_cluster\030\026 \001(\010\022B\n\023coordin" +
    -      "ation_config\030\027 \001(\0132%.tensorflow.Coordina" +
    -      "tionServiceConfig\022)\n!disable_optimize_fo" +
    -      "r_static_graph\030\030 \001(\010\0220\n(disable_eager_ex" +
    -      "ecutor_streaming_enqueue\030\032 \001(\010\"\336\001\n\021MlirB" +
    -      "ridgeRollout\022#\n\037MLIR_BRIDGE_ROLLOUT_UNSP" +
    -      "ECIFIED\020\000\022\037\n\033MLIR_BRIDGE_ROLLOUT_ENABLED" +
    -      "\020\001\022 \n\034MLIR_BRIDGE_ROLLOUT_DISABLED\020\002\"\004\010\003" +
    -      "\020\003\"\004\010\004\020\004*%MLIR_BRIDGE_ROLLOUT_SAFE_MODE_" +
    -      "ENABLED*.MLIR_BRIDGE_ROLLOUT_SAFE_MODE_F" +
    -      "ALLBACK_ENABLEDJ\004\010\002\020\003J\004\010\023\020\024J\004\010\024\020\025J\004\010\031\020\032\"" +
    -      "\341\004\n\nRunOptions\0226\n\013trace_level\030\001 \001(\0162!.te" +
    -      "nsorflow.RunOptions.TraceLevel\022\025\n\rtimeou" +
    -      "t_in_ms\030\002 \001(\003\022\034\n\024inter_op_thread_pool\030\003 " +
    -      "\001(\005\022\037\n\027output_partition_graphs\030\005 \001(\010\022/\n\r" +
    -      "debug_options\030\006 \001(\0132\030.tensorflow.DebugOp" +
    -      "tions\022*\n\"report_tensor_allocations_upon_" +
    -      "oom\030\007 \001(\010\0229\n\014experimental\030\010 \001(\0132#.tensor" +
    -      "flow.RunOptions.Experimental\032\322\001\n\014Experim" +
    -      "ental\022\034\n\024collective_graph_key\030\001 \001(\003\022\034\n\024u" +
    -      "se_run_handler_pool\030\002 \001(\010\022[\n\030run_handler" +
    -      "_pool_options\030\003 \001(\01329.tensorflow.RunOpti" +
    -      "ons.Experimental.RunHandlerPoolOptions\032)" +
    -      "\n\025RunHandlerPoolOptions\022\020\n\010priority\030\001 \001(" +
    -      "\003\"R\n\nTraceLevel\022\014\n\010NO_TRACE\020\000\022\022\n\016SOFTWAR" +
    -      "E_TRACE\020\001\022\022\n\016HARDWARE_TRACE\020\002\022\016\n\nFULL_TR" +
    -      "ACE\020\003J\004\010\004\020\005\"\276\003\n\013RunMetadata\022)\n\nstep_stat" +
    -      "s\030\001 \001(\0132\025.tensorflow.StepStats\022,\n\ncost_g" +
    -      "raph\030\002 \001(\0132\030.tensorflow.CostGraphDef\022.\n\020" +
    -      "partition_graphs\030\003 \003(\0132\024.tensorflow.Grap" +
    -      "hDef\022?\n\017function_graphs\030\004 \003(\0132&.tensorfl" +
    -      "ow.RunMetadata.FunctionGraphs\0225\n\020session" +
    -      "_metadata\030\005 \001(\0132\033.tensorflow.SessionMeta" +
    -      "data\032\255\001\n\016FunctionGraphs\022.\n\020partition_gra" +
    -      "phs\030\001 \003(\0132\024.tensorflow.GraphDef\0224\n\026pre_o" +
    -      "ptimization_graph\030\002 \001(\0132\024.tensorflow.Gra" +
    -      "phDef\0225\n\027post_optimization_graph\030\003 \001(\0132\024" +
    -      ".tensorflow.GraphDef\":\n\020TensorConnection" +
    -      "\022\023\n\013from_tensor\030\001 \001(\t\022\021\n\tto_tensor\030\002 \001(\t" +
    -      "\"\260\003\n\017CallableOptions\022\014\n\004feed\030\001 \003(\t\022\r\n\005fe" +
    -      "tch\030\002 \003(\t\022\016\n\006target\030\003 \003(\t\022+\n\013run_options" +
    -      "\030\004 \001(\0132\026.tensorflow.RunOptions\0227\n\021tensor" +
    -      "_connection\030\005 \003(\0132\034.tensorflow.TensorCon" +
    -      "nection\022B\n\014feed_devices\030\006 \003(\0132,.tensorfl" +
    -      "ow.CallableOptions.FeedDevicesEntry\022D\n\rf" +
    -      "etch_devices\030\007 \003(\0132-.tensorflow.Callable" +
    -      "Options.FetchDevicesEntry\022\027\n\017fetch_skip_" +
    -      "sync\030\010 \001(\010\0322\n\020FeedDevicesEntry\022\013\n\003key\030\001 " +
    -      "\001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\0323\n\021FetchDevicesEn" +
    -      "try\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001B\200\001\n\024" +
    -      "org.tensorflow.protoB\014ConfigProtosP\001ZUgi" +
    -      "thub.com/tensorflow/tensorflow/tensorflo" +
    -      "w/go/core/protobuf/for_core_protos_go_pr" +
    -      "oto\370\001\001b\006proto3"
    +      "stem_memory_size_in_mb\030\020 \001(\005\022.\n&populate" +
    +      "_pjrt_gpu_client_creation_info\030\021 \001(\010\022\017\n\007" +
    +      "node_id\030\022 \001(\005\022T\n\024stream_merge_options\030\023 " +
    +      "\001(\01326.tensorflow.GPUOptions.Experimental" +
    +      ".StreamMergeOptions\032S\n\016VirtualDevices\022\027\n" +
    +      "\017memory_limit_mb\030\001 \003(\002\022\020\n\010priority\030\002 \003(\005" +
    +      "\022\026\n\016device_ordinal\030\003 \003(\005\032\205\001\n\022StreamMerge" +
    +      "Options\022#\n\033merge_host_to_device_stream\030\001" +
    +      " \001(\010\022#\n\033merge_device_to_host_stream\030\002 \001(" +
    +      "\010\022%\n\035merge_device_to_device_stream\030\003 \001(\010" +
    +      "\"\235\003\n\020OptimizerOptions\022+\n#do_common_subex" +
    +      "pression_elimination\030\001 \001(\010\022\033\n\023do_constan" +
    +      "t_folding\030\002 \001(\010\022$\n\034max_folded_constant_i" +
    +      "n_bytes\030\006 \001(\003\022\034\n\024do_function_inlining\030\004 " +
    +      "\001(\010\0225\n\topt_level\030\003 \001(\0162\".tensorflow.Opti" +
    +      "mizerOptions.Level\022E\n\020global_jit_level\030\005" +
    +      " \001(\0162+.tensorflow.OptimizerOptions.Globa" +
    +      "lJitLevel\022\026\n\016cpu_global_jit\030\007 \001(\010\" \n\005Lev" +
    +      "el\022\006\n\002L1\020\000\022\017\n\002L0\020\377\377\377\377\377\377\377\377\377\001\"C\n\016GlobalJit" +
    +      "Level\022\013\n\007DEFAULT\020\000\022\020\n\003OFF\020\377\377\377\377\377\377\377\377\377\001\022\010\n\004" +
    +      "ON_1\020\001\022\010\n\004ON_2\020\002\"\356\002\n\014GraphOptions\022\036\n\026ena" +
    +      "ble_recv_scheduling\030\002 \001(\010\0227\n\021optimizer_o" +
    +      "ptions\030\003 \001(\0132\034.tensorflow.OptimizerOptio" +
    +      "ns\022\030\n\020build_cost_model\030\004 \001(\003\022\036\n\026build_co" +
    +      "st_model_after\030\t \001(\003\022\024\n\014infer_shapes\030\005 \001" +
    +      "(\010\022\032\n\022place_pruned_graph\030\006 \001(\010\022 \n\030enable" +
    +      "_bfloat16_sendrecv\030\007 \001(\010\022\025\n\rtimeline_ste" +
    +      "p\030\010 \001(\005\0223\n\017rewrite_options\030\n \001(\0132\032.tenso" +
    +      "rflow.RewriterConfigJ\004\010\001\020\002R%skip_common_" +
    +      "subexpression_elimination\"A\n\025ThreadPoolO" +
    +      "ptionProto\022\023\n\013num_threads\030\001 \001(\005\022\023\n\013globa" +
    +      "l_name\030\002 \001(\t\"0\n\017SessionMetadata\022\014\n\004name\030" +
    +      "\001 \001(\t\022\017\n\007version\030\002 \001(\003\"\346\020\n\013ConfigProto\022>" +
    +      "\n\014device_count\030\001 \003(\0132(.tensorflow.Config" +
    +      "Proto.DeviceCountEntry\022$\n\034intra_op_paral" +
    +      "lelism_threads\030\002 \001(\005\022$\n\034inter_op_paralle" +
    +      "lism_threads\030\005 \001(\005\022\037\n\027use_per_session_th" +
    +      "reads\030\t \001(\010\022G\n\034session_inter_op_thread_p" +
    +      "ool\030\014 \003(\0132!.tensorflow.ThreadPoolOptionP" +
    +      "roto\022\030\n\020placement_period\030\003 \001(\005\022\026\n\016device" +
    +      "_filters\030\004 \003(\t\022+\n\013gpu_options\030\006 \001(\0132\026.te" +
    +      "nsorflow.GPUOptions\0228\n\030pluggable_device_" +
    +      "options\030\022 \001(\0132\026.tensorflow.GPUOptions\022\034\n" +
    +      "\024allow_soft_placement\030\007 \001(\010\022\034\n\024log_devic" +
    +      "e_placement\030\010 \001(\010\022/\n\rgraph_options\030\n \001(\013" +
    +      "2\030.tensorflow.GraphOptions\022\037\n\027operation_" +
    +      "timeout_in_ms\030\013 \001(\003\022+\n\013rpc_options\030\r \001(\013" +
    +      "2\026.tensorflow.RPCOptions\022+\n\013cluster_def\030" +
    +      "\016 \001(\0132\026.tensorflow.ClusterDef\022\035\n\025isolate" +
    +      "_session_state\030\017 \001(\010\022(\n share_cluster_de" +
    +      "vices_in_session\030\021 \001(\010\022:\n\014experimental\030\020" +
    +      " \001(\0132$.tensorflow.ConfigProto.Experiment" +
    +      "al\0322\n\020DeviceCountEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005v" +
    +      "alue\030\002 \001(\005:\0028\001\032\246\n\n\014Experimental\022\037\n\027colle" +
    +      "ctive_group_leader\030\001 \001(\t\022\025\n\rexecutor_typ" +
    +      "e\030\003 \001(\t\022\032\n\022recv_buf_max_chunk\030\004 \001(\005\022\031\n\021u" +
    +      "se_numa_affinity\030\005 \001(\010\0225\n-collective_det" +
    +      "erministic_sequential_execution\030\006 \001(\010\022\027\n" +
    +      "\017collective_nccl\030\007 \001(\010\0226\n.share_session_" +
    +      "state_in_clusterspec_propagation\030\010 \001(\010\022\037" +
    +      "\n\027disable_thread_spinning\030\t \001(\010\022(\n share" +
    +      "_cluster_devices_in_session\030\n \001(\010\0225\n\020ses" +
    +      "sion_metadata\030\013 \001(\0132\033.tensorflow.Session" +
    +      "Metadata\022!\n\031optimize_for_static_graph\030\014 " +
    +      "\001(\010\022\032\n\022enable_mlir_bridge\030\r \001(\010\022S\n\023mlir_" +
    +      "bridge_rollout\030\021 \001(\01626.tensorflow.Config" +
    +      "Proto.Experimental.MlirBridgeRollout\022&\n\036" +
    +      "enable_mlir_graph_optimization\030\020 \001(\010\022\'\n\037" +
    +      "disable_output_partition_graphs\030\016 \001(\010\022#\n" +
    +      "\033xla_fusion_autotuner_thresh\030\017 \001(\003\022\020\n\010us" +
    +      "e_tfrt\030\022 \001(\010\022\031\n\021enable_multi_host\030\033 \001(\010\022" +
    +      "\025\n\rtfrt_use_ifrt\030  \001(\010\022\033\n\023backend_server" +
    +      "_port\030\034 \001(\005\022\022\n\ntarget_tpu\030\035 \001(\010\022\022\n\ntarge" +
    +      "t_gpu\030\036 \001(\010\022\036\n\026stream_merge_threshold\030\037 " +
    +      "\001(\005\022\'\n\037disable_functional_ops_lowering\030\025" +
    +      " \001(\010\022\'\n\037xla_prefer_single_graph_cluster\030" +
    +      "\026 \001(\010\022B\n\023coordination_config\030\027 \001(\0132%.ten" +
    +      "sorflow.CoordinationServiceConfig\022)\n!dis" +
    +      "able_optimize_for_static_graph\030\030 \001(\010\0220\n(" +
    +      "disable_eager_executor_streaming_enqueue" +
    +      "\030\032 \001(\010\"\336\001\n\021MlirBridgeRollout\022#\n\037MLIR_BRI" +
    +      "DGE_ROLLOUT_UNSPECIFIED\020\000\022\037\n\033MLIR_BRIDGE" +
    +      "_ROLLOUT_ENABLED\020\001\022 \n\034MLIR_BRIDGE_ROLLOU" +
    +      "T_DISABLED\020\002\"\004\010\003\020\003\"\004\010\004\020\004*%MLIR_BRIDGE_RO" +
    +      "LLOUT_SAFE_MODE_ENABLED*.MLIR_BRIDGE_ROL" +
    +      "LOUT_SAFE_MODE_FALLBACK_ENABLEDJ\004\010\002\020\003J\004\010" +
    +      "\023\020\024J\004\010\024\020\025J\004\010\031\020\032\"\341\004\n\nRunOptions\0226\n\013trace_" +
    +      "level\030\001 \001(\0162!.tensorflow.RunOptions.Trac" +
    +      "eLevel\022\025\n\rtimeout_in_ms\030\002 \001(\003\022\034\n\024inter_o" +
    +      "p_thread_pool\030\003 \001(\005\022\037\n\027output_partition_" +
    +      "graphs\030\005 \001(\010\022/\n\rdebug_options\030\006 \001(\0132\030.te" +
    +      "nsorflow.DebugOptions\022*\n\"report_tensor_a" +
    +      "llocations_upon_oom\030\007 \001(\010\0229\n\014experimenta" +
    +      "l\030\010 \001(\0132#.tensorflow.RunOptions.Experime" +
    +      "ntal\032\322\001\n\014Experimental\022\034\n\024collective_grap" +
    +      "h_key\030\001 \001(\003\022\034\n\024use_run_handler_pool\030\002 \001(" +
    +      "\010\022[\n\030run_handler_pool_options\030\003 \001(\01329.te" +
    +      "nsorflow.RunOptions.Experimental.RunHand" +
    +      "lerPoolOptions\032)\n\025RunHandlerPoolOptions\022" +
    +      "\020\n\010priority\030\001 \001(\003\"R\n\nTraceLevel\022\014\n\010NO_TR" +
    +      "ACE\020\000\022\022\n\016SOFTWARE_TRACE\020\001\022\022\n\016HARDWARE_TR" +
    +      "ACE\020\002\022\016\n\nFULL_TRACE\020\003J\004\010\004\020\005\"\276\003\n\013RunMetad" +
    +      "ata\022)\n\nstep_stats\030\001 \001(\0132\025.tensorflow.Ste" +
    +      "pStats\022,\n\ncost_graph\030\002 \001(\0132\030.tensorflow." +
    +      "CostGraphDef\022.\n\020partition_graphs\030\003 \003(\0132\024" +
    +      ".tensorflow.GraphDef\022?\n\017function_graphs\030" +
    +      "\004 \003(\0132&.tensorflow.RunMetadata.FunctionG" +
    +      "raphs\0225\n\020session_metadata\030\005 \001(\0132\033.tensor" +
    +      "flow.SessionMetadata\032\255\001\n\016FunctionGraphs\022" +
    +      ".\n\020partition_graphs\030\001 \003(\0132\024.tensorflow.G" +
    +      "raphDef\0224\n\026pre_optimization_graph\030\002 \001(\0132" +
    +      "\024.tensorflow.GraphDef\0225\n\027post_optimizati" +
    +      "on_graph\030\003 \001(\0132\024.tensorflow.GraphDef\":\n\020" +
    +      "TensorConnection\022\023\n\013from_tensor\030\001 \001(\t\022\021\n" +
    +      "\tto_tensor\030\002 \001(\t\"\260\003\n\017CallableOptions\022\014\n\004" +
    +      "feed\030\001 \003(\t\022\r\n\005fetch\030\002 \003(\t\022\016\n\006target\030\003 \003(" +
    +      "\t\022+\n\013run_options\030\004 \001(\0132\026.tensorflow.RunO" +
    +      "ptions\0227\n\021tensor_connection\030\005 \003(\0132\034.tens" +
    +      "orflow.TensorConnection\022B\n\014feed_devices\030" +
    +      "\006 \003(\0132,.tensorflow.CallableOptions.FeedD" +
    +      "evicesEntry\022D\n\rfetch_devices\030\007 \003(\0132-.ten" +
    +      "sorflow.CallableOptions.FetchDevicesEntr" +
    +      "y\022\027\n\017fetch_skip_sync\030\010 \001(\010\0322\n\020FeedDevice" +
    +      "sEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\0323" +
    +      "\n\021FetchDevicesEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005valu" +
    +      "e\030\002 \001(\t:\0028\001B\200\001\n\024org.tensorflow.protoB\014Co" +
    +      "nfigProtosP\001ZUgithub.com/tensorflow/tens" +
    +      "orflow/tensorflow/go/core/protobuf/for_c" +
    +      "ore_protos_go_proto\370\001\001b\006proto3"
         };
         descriptor = com.google.protobuf.Descriptors.FileDescriptor
           .internalBuildGeneratedFileFrom(descriptorData,
    @@ -291,13 +308,19 @@ public static void registerAllExtensions(
         internal_static_tensorflow_GPUOptions_Experimental_fieldAccessorTable = new
           com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
             internal_static_tensorflow_GPUOptions_Experimental_descriptor,
    -        new java.lang.String[] { "VirtualDevices", "NumVirtualDevicesPerGpu", "UseUnifiedMemory", "NumDevToDevCopyStreams", "CollectiveRingOrder", "TimestampedAllocator", "KernelTrackerMaxInterval", "KernelTrackerMaxBytes", "KernelTrackerMaxPending", "InternalFragmentationFraction", "UseCudaMallocAsync", "DisallowRetryOnAllocationFailure", "GpuHostMemLimitInMb", "GpuHostMemDisallowGrowth", "GpuSystemMemorySizeInMb", });
    +        new java.lang.String[] { "VirtualDevices", "NumVirtualDevicesPerGpu", "UseUnifiedMemory", "NumDevToDevCopyStreams", "CollectiveRingOrder", "TimestampedAllocator", "KernelTrackerMaxInterval", "KernelTrackerMaxBytes", "KernelTrackerMaxPending", "InternalFragmentationFraction", "UseCudaMallocAsync", "DisallowRetryOnAllocationFailure", "GpuHostMemLimitInMb", "GpuHostMemDisallowGrowth", "GpuSystemMemorySizeInMb", "PopulatePjrtGpuClientCreationInfo", "NodeId", "StreamMergeOptions", });
         internal_static_tensorflow_GPUOptions_Experimental_VirtualDevices_descriptor =
           internal_static_tensorflow_GPUOptions_Experimental_descriptor.getNestedTypes().get(0);
         internal_static_tensorflow_GPUOptions_Experimental_VirtualDevices_fieldAccessorTable = new
           com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
             internal_static_tensorflow_GPUOptions_Experimental_VirtualDevices_descriptor,
             new java.lang.String[] { "MemoryLimitMb", "Priority", "DeviceOrdinal", });
    +    internal_static_tensorflow_GPUOptions_Experimental_StreamMergeOptions_descriptor =
    +      internal_static_tensorflow_GPUOptions_Experimental_descriptor.getNestedTypes().get(1);
    +    internal_static_tensorflow_GPUOptions_Experimental_StreamMergeOptions_fieldAccessorTable = new
    +      com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
    +        internal_static_tensorflow_GPUOptions_Experimental_StreamMergeOptions_descriptor,
    +        new java.lang.String[] { "MergeHostToDeviceStream", "MergeDeviceToHostStream", "MergeDeviceToDeviceStream", });
         internal_static_tensorflow_OptimizerOptions_descriptor =
           getDescriptor().getMessageTypes().get(1);
         internal_static_tensorflow_OptimizerOptions_fieldAccessorTable = new
    @@ -327,7 +350,7 @@ public static void registerAllExtensions(
         internal_static_tensorflow_ConfigProto_fieldAccessorTable = new
           com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
             internal_static_tensorflow_ConfigProto_descriptor,
    -        new java.lang.String[] { "DeviceCount", "IntraOpParallelismThreads", "InterOpParallelismThreads", "UsePerSessionThreads", "SessionInterOpThreadPool", "PlacementPeriod", "DeviceFilters", "GpuOptions", "AllowSoftPlacement", "LogDevicePlacement", "GraphOptions", "OperationTimeoutInMs", "RpcOptions", "ClusterDef", "IsolateSessionState", "ShareClusterDevicesInSession", "Experimental", });
    +        new java.lang.String[] { "DeviceCount", "IntraOpParallelismThreads", "InterOpParallelismThreads", "UsePerSessionThreads", "SessionInterOpThreadPool", "PlacementPeriod", "DeviceFilters", "GpuOptions", "PluggableDeviceOptions", "AllowSoftPlacement", "LogDevicePlacement", "GraphOptions", "OperationTimeoutInMs", "RpcOptions", "ClusterDef", "IsolateSessionState", "ShareClusterDevicesInSession", "Experimental", });
         internal_static_tensorflow_ConfigProto_DeviceCountEntry_descriptor =
           internal_static_tensorflow_ConfigProto_descriptor.getNestedTypes().get(0);
         internal_static_tensorflow_ConfigProto_DeviceCountEntry_fieldAccessorTable = new
    @@ -339,7 +362,7 @@ public static void registerAllExtensions(
         internal_static_tensorflow_ConfigProto_Experimental_fieldAccessorTable = new
           com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
             internal_static_tensorflow_ConfigProto_Experimental_descriptor,
    -        new java.lang.String[] { "CollectiveGroupLeader", "ExecutorType", "RecvBufMaxChunk", "UseNumaAffinity", "CollectiveDeterministicSequentialExecution", "CollectiveNccl", "ShareSessionStateInClusterspecPropagation", "DisableThreadSpinning", "ShareClusterDevicesInSession", "SessionMetadata", "OptimizeForStaticGraph", "EnableMlirBridge", "MlirBridgeRollout", "EnableMlirGraphOptimization", "DisableOutputPartitionGraphs", "XlaFusionAutotunerThresh", "UseTfrt", "DisableFunctionalOpsLowering", "XlaPreferSingleGraphCluster", "CoordinationConfig", "DisableOptimizeForStaticGraph", "DisableEagerExecutorStreamingEnqueue", });
    +        new java.lang.String[] { "CollectiveGroupLeader", "ExecutorType", "RecvBufMaxChunk", "UseNumaAffinity", "CollectiveDeterministicSequentialExecution", "CollectiveNccl", "ShareSessionStateInClusterspecPropagation", "DisableThreadSpinning", "ShareClusterDevicesInSession", "SessionMetadata", "OptimizeForStaticGraph", "EnableMlirBridge", "MlirBridgeRollout", "EnableMlirGraphOptimization", "DisableOutputPartitionGraphs", "XlaFusionAutotunerThresh", "UseTfrt", "EnableMultiHost", "TfrtUseIfrt", "BackendServerPort", "TargetTpu", "TargetGpu", "StreamMergeThreshold", "DisableFunctionalOpsLowering", "XlaPreferSingleGraphCluster", "CoordinationConfig", "DisableOptimizeForStaticGraph", "DisableEagerExecutorStreamingEnqueue", });
         internal_static_tensorflow_RunOptions_descriptor =
           getDescriptor().getMessageTypes().get(6);
         internal_static_tensorflow_RunOptions_fieldAccessorTable = new
    diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/CoordinationConfig.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/CoordinationConfig.java
    index 6c1f875d2f6..5dfed710211 100644
    --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/CoordinationConfig.java
    +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/CoordinationConfig.java
    @@ -853,6 +853,17 @@ org.tensorflow.proto.CoordinationConfig.CoordinatedJobOrBuilder getCoordinatedJo
          * @return The forceDisable.
          */
         boolean getForceDisable();
    +
    +    /**
    +     * 
    +     * Use long polling to get error from coordination service as the error
    +     * propagation mechanism.
    +     * 
    + * + * bool poll_for_error_from_service_at_startup = 13; + * @return The pollForErrorFromServiceAtStartup. + */ + boolean getPollForErrorFromServiceAtStartup(); } /** *
    @@ -1223,6 +1234,22 @@ public boolean getForceDisable() {
           return forceDisable_;
         }
     
    +    public static final int POLL_FOR_ERROR_FROM_SERVICE_AT_STARTUP_FIELD_NUMBER = 13;
    +    private boolean pollForErrorFromServiceAtStartup_;
    +    /**
    +     * 
    +     * Use long polling to get error from coordination service as the error
    +     * propagation mechanism.
    +     * 
    + * + * bool poll_for_error_from_service_at_startup = 13; + * @return The pollForErrorFromServiceAtStartup. + */ + @java.lang.Override + public boolean getPollForErrorFromServiceAtStartup() { + return pollForErrorFromServiceAtStartup_; + } + private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -1270,6 +1297,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (forceDisable_ != false) { output.writeBool(12, forceDisable_); } + if (pollForErrorFromServiceAtStartup_ != false) { + output.writeBool(13, pollForErrorFromServiceAtStartup_); + } getUnknownFields().writeTo(output); } @@ -1325,6 +1355,10 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeBoolSize(12, forceDisable_); } + if (pollForErrorFromServiceAtStartup_ != false) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(13, pollForErrorFromServiceAtStartup_); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -1362,6 +1396,8 @@ public boolean equals(final java.lang.Object obj) { != other.getAllowNewIncarnationToReconnect()) return false; if (getForceDisable() != other.getForceDisable()) return false; + if (getPollForErrorFromServiceAtStartup() + != other.getPollForErrorFromServiceAtStartup()) return false; if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @@ -1406,6 +1442,9 @@ public int hashCode() { hash = (37 * hash) + FORCE_DISABLE_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( getForceDisable()); + hash = (37 * hash) + POLL_FOR_ERROR_FROM_SERVICE_AT_STARTUP_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getPollForErrorFromServiceAtStartup()); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -1566,6 +1605,8 @@ public Builder clear() { forceDisable_ = false; + pollForErrorFromServiceAtStartup_ = 
false; + return this; } @@ -1616,6 +1657,7 @@ public org.tensorflow.proto.CoordinationConfig.CoordinationServiceConfig buildPa result.recoverableJobs_ = recoverableJobs_; result.allowNewIncarnationToReconnect_ = allowNewIncarnationToReconnect_; result.forceDisable_ = forceDisable_; + result.pollForErrorFromServiceAtStartup_ = pollForErrorFromServiceAtStartup_; onBuilt(); return result; } @@ -1729,6 +1771,9 @@ public Builder mergeFrom(org.tensorflow.proto.CoordinationConfig.CoordinationSer if (other.getForceDisable() != false) { setForceDisable(other.getForceDisable()); } + if (other.getPollForErrorFromServiceAtStartup() != false) { + setPollForErrorFromServiceAtStartup(other.getPollForErrorFromServiceAtStartup()); + } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; @@ -1819,6 +1864,11 @@ public Builder mergeFrom( break; } // case 96 + case 104: { + pollForErrorFromServiceAtStartup_ = input.readBool(); + + break; + } // case 104 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag @@ -2798,6 +2848,52 @@ public Builder clearForceDisable() { onChanged(); return this; } + + private boolean pollForErrorFromServiceAtStartup_ ; + /** + *
    +       * Use long polling to get error from coordination service as the error
    +       * propagation mechanism.
    +       * 
    + * + * bool poll_for_error_from_service_at_startup = 13; + * @return The pollForErrorFromServiceAtStartup. + */ + @java.lang.Override + public boolean getPollForErrorFromServiceAtStartup() { + return pollForErrorFromServiceAtStartup_; + } + /** + *
    +       * Use long polling to get error from coordination service as the error
    +       * propagation mechanism.
    +       * 
    + * + * bool poll_for_error_from_service_at_startup = 13; + * @param value The pollForErrorFromServiceAtStartup to set. + * @return This builder for chaining. + */ + public Builder setPollForErrorFromServiceAtStartup(boolean value) { + + pollForErrorFromServiceAtStartup_ = value; + onChanged(); + return this; + } + /** + *
    +       * Use long polling to get error from coordination service as the error
    +       * propagation mechanism.
    +       * 
    + * + * bool poll_for_error_from_service_at_startup = 13; + * @return This builder for chaining. + */ + public Builder clearPollForErrorFromServiceAtStartup() { + + pollForErrorFromServiceAtStartup_ = false; + onChanged(); + return this; + } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { @@ -2883,7 +2979,7 @@ public org.tensorflow.proto.CoordinationConfig.CoordinationServiceConfig getDefa java.lang.String[] descriptorData = { "\n&tsl/protobuf/coordination_config.proto" + "\022\ntensorflow\"1\n\016CoordinatedJob\022\014\n\004name\030\001" + - " \001(\t\022\021\n\tnum_tasks\030\002 \001(\005\"\240\003\n\031Coordination" + + " \001(\t\022\021\n\tnum_tasks\030\002 \001(\005\"\320\003\n\031Coordination" + "ServiceConfig\022\024\n\014service_type\030\001 \001(\t\022\026\n\016s" + "ervice_leader\030\002 \001(\t\022\033\n\023enable_health_che" + "ck\030\003 \001(\010\022&\n\036cluster_register_timeout_in_" + @@ -2893,11 +2989,12 @@ public org.tensorflow.proto.CoordinationConfig.CoordinationServiceConfig getDefa "timeout_in_ms\030\007 \001(\003\022*\n\"agent_destruction" + "_without_shutdown\030\010 \001(\010\022\030\n\020recoverable_j" + "obs\030\t \003(\t\022*\n\"allow_new_incarnation_to_re" + - "connect\030\013 \001(\010\022\025\n\rforce_disable\030\014 \001(\010J\004\010\006" + - "\020\007Bm\n\024org.tensorflow.protoZUgithub.com/t" + - "ensorflow/tensorflow/tensorflow/go/core/" + - "protobuf/for_core_protos_go_protob\006proto" + - "3" + "connect\030\013 \001(\010\022\025\n\rforce_disable\030\014 \001(\010\022.\n&" + + "poll_for_error_from_service_at_startup\030\r" + + " \001(\010J\004\010\006\020\007Bm\n\024org.tensorflow.protoZUgith" + + "ub.com/tensorflow/tensorflow/tensorflow/" + + "go/core/protobuf/for_core_protos_go_prot" + + "ob\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, @@ -2914,7 +3011,7 @@ public 
org.tensorflow.proto.CoordinationConfig.CoordinationServiceConfig getDefa internal_static_tensorflow_CoordinationServiceConfig_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_CoordinationServiceConfig_descriptor, - new java.lang.String[] { "ServiceType", "ServiceLeader", "EnableHealthCheck", "ClusterRegisterTimeoutInMs", "HeartbeatTimeoutInMs", "CoordinatedJobList", "ShutdownBarrierTimeoutInMs", "AgentDestructionWithoutShutdown", "RecoverableJobs", "AllowNewIncarnationToReconnect", "ForceDisable", }); + new java.lang.String[] { "ServiceType", "ServiceLeader", "EnableHealthCheck", "ClusterRegisterTimeoutInMs", "HeartbeatTimeoutInMs", "CoordinatedJobList", "ShutdownBarrierTimeoutInMs", "AgentDestructionWithoutShutdown", "RecoverableJobs", "AllowNewIncarnationToReconnect", "ForceDisable", "PollForErrorFromServiceAtStartup", }); } // @@protoc_insertion_point(outer_class_scope) diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/EntryValue.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/EntryValue.java index 44deff4cb4d..0b6ce2fef52 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/EntryValue.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/EntryValue.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
-// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/EntryValueOrBuilder.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/EntryValueOrBuilder.java index 525dfd70275..6338554d477 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/EntryValueOrBuilder.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/EntryValueOrBuilder.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/GPUInfo.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/GPUInfo.java index f07305dc1aa..858f216fb45 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/GPUInfo.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/GPUInfo.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/GPUInfoOrBuilder.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/GPUInfoOrBuilder.java index 6aefc92ee8c..02d2cc61740 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/GPUInfoOrBuilder.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/GPUInfoOrBuilder.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
-// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/GPUOptions.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/GPUOptions.java index 8eac8bc4ef1..d9db2330adb 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/GPUOptions.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/GPUOptions.java @@ -467,6 +467,43 @@ org.tensorflow.proto.GPUOptions.Experimental.VirtualDevicesOrBuilder getVirtualD * @return The gpuSystemMemorySizeInMb. */ int getGpuSystemMemorySizeInMb(); + + /** + *
    +     * If true, save information needed for created a PjRt GPU client for
    +     * creating a client with remote devices.
    +     * 
    + * + * bool populate_pjrt_gpu_client_creation_info = 17; + * @return The populatePjrtGpuClientCreationInfo. + */ + boolean getPopulatePjrtGpuClientCreationInfo(); + + /** + *
    +     * node_id for use when creating a PjRt GPU client with remote devices,
    +     * which enumerates jobs*tasks from a ServerDef.
    +     * 
    + * + * int32 node_id = 18; + * @return The nodeId. + */ + int getNodeId(); + + /** + * .tensorflow.GPUOptions.Experimental.StreamMergeOptions stream_merge_options = 19; + * @return Whether the streamMergeOptions field is set. + */ + boolean hasStreamMergeOptions(); + /** + * .tensorflow.GPUOptions.Experimental.StreamMergeOptions stream_merge_options = 19; + * @return The streamMergeOptions. + */ + org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions getStreamMergeOptions(); + /** + * .tensorflow.GPUOptions.Experimental.StreamMergeOptions stream_merge_options = 19; + */ + org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptionsOrBuilder getStreamMergeOptionsOrBuilder(); } /** * Protobuf type {@code tensorflow.GPUOptions.Experimental} @@ -1673,105 +1710,846 @@ private void ensureDeviceOrdinalIsMutable() { } /** *
    -         * Virtual Device ordinal number determines the device ID of the device.
    -         * A Virtual device with a lower ordinal number always receives the a
    -         * smaller device id. The phyiscal device id and location in the
    -         * virtual device list is used to break ties.
    +         * Virtual Device ordinal number determines the device ID of the device.
    +         * A Virtual device with a lower ordinal number always receives the a
    +         * smaller device id. The phyiscal device id and location in the
    +         * virtual device list is used to break ties.
    +         * 
    + * + * repeated int32 device_ordinal = 3; + * @return The count of deviceOrdinal. + */ + public int getDeviceOrdinalCount() { + return deviceOrdinal_.size(); + } + /** + *
    +         * Virtual Device ordinal number determines the device ID of the device.
    +         * A Virtual device with a lower ordinal number always receives the a
    +         * smaller device id. The phyiscal device id and location in the
    +         * virtual device list is used to break ties.
    +         * 
    + * + * repeated int32 device_ordinal = 3; + * @param index The index of the element to return. + * @return The deviceOrdinal at the given index. + */ + public int getDeviceOrdinal(int index) { + return deviceOrdinal_.getInt(index); + } + /** + *
    +         * Virtual Device ordinal number determines the device ID of the device.
    +         * A Virtual device with a lower ordinal number always receives the a
    +         * smaller device id. The phyiscal device id and location in the
    +         * virtual device list is used to break ties.
    +         * 
    + * + * repeated int32 device_ordinal = 3; + * @param index The index to set the value at. + * @param value The deviceOrdinal to set. + * @return This builder for chaining. + */ + public Builder setDeviceOrdinal( + int index, int value) { + ensureDeviceOrdinalIsMutable(); + deviceOrdinal_.setInt(index, value); + onChanged(); + return this; + } + /** + *
    +         * Virtual Device ordinal number determines the device ID of the device.
    +         * A Virtual device with a lower ordinal number always receives the a
    +         * smaller device id. The phyiscal device id and location in the
    +         * virtual device list is used to break ties.
    +         * 
    + * + * repeated int32 device_ordinal = 3; + * @param value The deviceOrdinal to add. + * @return This builder for chaining. + */ + public Builder addDeviceOrdinal(int value) { + ensureDeviceOrdinalIsMutable(); + deviceOrdinal_.addInt(value); + onChanged(); + return this; + } + /** + *
    +         * Virtual Device ordinal number determines the device ID of the device.
    +         * A Virtual device with a lower ordinal number always receives the a
    +         * smaller device id. The phyiscal device id and location in the
    +         * virtual device list is used to break ties.
    +         * 
    + * + * repeated int32 device_ordinal = 3; + * @param values The deviceOrdinal to add. + * @return This builder for chaining. + */ + public Builder addAllDeviceOrdinal( + java.lang.Iterable values) { + ensureDeviceOrdinalIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, deviceOrdinal_); + onChanged(); + return this; + } + /** + *
    +         * Virtual Device ordinal number determines the device ID of the device.
    +         * A Virtual device with a lower ordinal number always receives the a
    +         * smaller device id. The phyiscal device id and location in the
    +         * virtual device list is used to break ties.
    +         * 
    + * + * repeated int32 device_ordinal = 3; + * @return This builder for chaining. + */ + public Builder clearDeviceOrdinal() { + deviceOrdinal_ = emptyIntList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.GPUOptions.Experimental.VirtualDevices) + } + + // @@protoc_insertion_point(class_scope:tensorflow.GPUOptions.Experimental.VirtualDevices) + private static final org.tensorflow.proto.GPUOptions.Experimental.VirtualDevices DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.GPUOptions.Experimental.VirtualDevices(); + } + + public static org.tensorflow.proto.GPUOptions.Experimental.VirtualDevices getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public VirtualDevices parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } 
+ return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.GPUOptions.Experimental.VirtualDevices getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface StreamMergeOptionsOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.GPUOptions.Experimental.StreamMergeOptions) + com.google.protobuf.MessageOrBuilder { + + /** + *
    +       * If true, the compute stream will be used for host_to_device copy as
    +       * well. It's no longer necessary to record an event before the copy to
    +       * let the copy stream wait for the compute stream to finish. There is
    +       * also no need to wait for the copy to complete before executing the
    +       * callback function.
    +       * 
    + * + * bool merge_host_to_device_stream = 1; + * @return The mergeHostToDeviceStream. + */ + boolean getMergeHostToDeviceStream(); + + /** + *
    +       * If true, the compute stream will be used for device_to_host copy as
    +       * well. It's no longer necessary to record an event before the copy to
    +       * let the copy stream wait for the compute stream to finish.
    +       * 
    + * + * bool merge_device_to_host_stream = 2; + * @return The mergeDeviceToHostStream. + */ + boolean getMergeDeviceToHostStream(); + + /** + *
    +       * If true, the compute stream will be used for device_to_device copy as
    +       * well. It's no longer necessary to record an event before the copy to
    +       * let the copy stream wait for the compute stream of the sending device
    +       * to finish. There is also no need to wait for the compute stream of the
    +       * receiving device to finish if the copy is within the same device.
    +       * 
    + * + * bool merge_device_to_device_stream = 3; + * @return The mergeDeviceToDeviceStream. + */ + boolean getMergeDeviceToDeviceStream(); + } + /** + *
    +     * Whether to merge data transfer streams into the compute stream in the
    +     * same stream group. Stream merging helps reduce the overhead caused by
    +     * stream synchronization, especially when data transfers are frequent. For
    +     * example, setting "merge_host_to_device_stream = true" will make the
    +     * compute stream responsible for both computation and host to device memory
    +     * copy.
    +     * 
    + * + * Protobuf type {@code tensorflow.GPUOptions.Experimental.StreamMergeOptions} + */ + public static final class StreamMergeOptions extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.GPUOptions.Experimental.StreamMergeOptions) + StreamMergeOptionsOrBuilder { + private static final long serialVersionUID = 0L; + // Use StreamMergeOptions.newBuilder() to construct. + private StreamMergeOptions(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private StreamMergeOptions() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new StreamMergeOptions(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_StreamMergeOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_StreamMergeOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions.class, org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions.Builder.class); + } + + public static final int MERGE_HOST_TO_DEVICE_STREAM_FIELD_NUMBER = 1; + private boolean mergeHostToDeviceStream_; + /** + *
    +       * If true, the compute stream will be used for host_to_device copy as
    +       * well. It's no longer necessary to record an event before the copy to
    +       * let the copy stream wait for the compute stream to finish. There is
    +       * also no need to wait for the copy to complete before executing the
    +       * callback function.
    +       * 
    + * + * bool merge_host_to_device_stream = 1; + * @return The mergeHostToDeviceStream. + */ + @java.lang.Override + public boolean getMergeHostToDeviceStream() { + return mergeHostToDeviceStream_; + } + + public static final int MERGE_DEVICE_TO_HOST_STREAM_FIELD_NUMBER = 2; + private boolean mergeDeviceToHostStream_; + /** + *
    +       * If true, the compute stream will be used for device_to_host copy as
    +       * well. It's no longer necessary to record an event before the copy to
    +       * let the copy stream wait for the compute stream to finish.
    +       * 
    + * + * bool merge_device_to_host_stream = 2; + * @return The mergeDeviceToHostStream. + */ + @java.lang.Override + public boolean getMergeDeviceToHostStream() { + return mergeDeviceToHostStream_; + } + + public static final int MERGE_DEVICE_TO_DEVICE_STREAM_FIELD_NUMBER = 3; + private boolean mergeDeviceToDeviceStream_; + /** + *
    +       * If true, the compute stream will be used for device_to_device copy as
    +       * well. It's no longer necessary to record an event before the copy to
    +       * let the copy stream wait for the compute stream of the sending device
    +       * to finish. There is also no need to wait for the compute stream of the
    +       * receiving device to finish if the copy is within the same device.
    +       * 
    + * + * bool merge_device_to_device_stream = 3; + * @return The mergeDeviceToDeviceStream. + */ + @java.lang.Override + public boolean getMergeDeviceToDeviceStream() { + return mergeDeviceToDeviceStream_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (mergeHostToDeviceStream_ != false) { + output.writeBool(1, mergeHostToDeviceStream_); + } + if (mergeDeviceToHostStream_ != false) { + output.writeBool(2, mergeDeviceToHostStream_); + } + if (mergeDeviceToDeviceStream_ != false) { + output.writeBool(3, mergeDeviceToDeviceStream_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (mergeHostToDeviceStream_ != false) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, mergeHostToDeviceStream_); + } + if (mergeDeviceToHostStream_ != false) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(2, mergeDeviceToHostStream_); + } + if (mergeDeviceToDeviceStream_ != false) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(3, mergeDeviceToDeviceStream_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions)) { + return super.equals(obj); + } + org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions other = (org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions) obj; + 
+ if (getMergeHostToDeviceStream() + != other.getMergeHostToDeviceStream()) return false; + if (getMergeDeviceToHostStream() + != other.getMergeDeviceToHostStream()) return false; + if (getMergeDeviceToDeviceStream() + != other.getMergeDeviceToDeviceStream()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + MERGE_HOST_TO_DEVICE_STREAM_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getMergeHostToDeviceStream()); + hash = (37 * hash) + MERGE_DEVICE_TO_HOST_STREAM_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getMergeDeviceToHostStream()); + hash = (37 * hash) + MERGE_DEVICE_TO_DEVICE_STREAM_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getMergeDeviceToDeviceStream()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions parseFrom( + 
com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
    +       * Whether to merge data transfer streams into the compute stream in the
    +       * same stream group. Stream merging helps reduce the overhead caused by
    +       * stream synchronization, especially when data transfers are frequent. For
    +       * example, setting "merge_host_to_device_stream = true" will make the
    +       * compute stream responsible for both computation and host to device memory
    +       * copy.
    +       * 
    + * + * Protobuf type {@code tensorflow.GPUOptions.Experimental.StreamMergeOptions} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.GPUOptions.Experimental.StreamMergeOptions) + org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_StreamMergeOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_StreamMergeOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions.class, org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions.Builder.class); + } + + // Construct using org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + mergeHostToDeviceStream_ = false; + + mergeDeviceToHostStream_ = false; + + mergeDeviceToDeviceStream_ = false; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_StreamMergeOptions_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions getDefaultInstanceForType() { + return org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions.getDefaultInstance(); + } + + @java.lang.Override + public 
org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions build() { + org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions buildPartial() { + org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions result = new org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions(this); + result.mergeHostToDeviceStream_ = mergeHostToDeviceStream_; + result.mergeDeviceToHostStream_ = mergeDeviceToHostStream_; + result.mergeDeviceToDeviceStream_ = mergeDeviceToDeviceStream_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions) { + return mergeFrom((org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions)other); + } else { + 
super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions other) { + if (other == org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions.getDefaultInstance()) return this; + if (other.getMergeHostToDeviceStream() != false) { + setMergeHostToDeviceStream(other.getMergeHostToDeviceStream()); + } + if (other.getMergeDeviceToHostStream() != false) { + setMergeDeviceToHostStream(other.getMergeDeviceToHostStream()); + } + if (other.getMergeDeviceToDeviceStream() != false) { + setMergeDeviceToDeviceStream(other.getMergeDeviceToDeviceStream()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + mergeHostToDeviceStream_ = input.readBool(); + + break; + } // case 8 + case 16: { + mergeDeviceToHostStream_ = input.readBool(); + + break; + } // case 16 + case 24: { + mergeDeviceToDeviceStream_ = input.readBool(); + + break; + } // case 24 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private boolean mergeHostToDeviceStream_ ; + /** + *
    +         * If true, the compute stream will be used for host_to_device copy as
    +         * well. It's no longer necessary to record an event before the copy to
    +         * let the copy stream wait for the compute stream to finish. There is
    +         * also no need to wait for the copy to complete before executing the
    +         * callback function.
    +         * 
    + * + * bool merge_host_to_device_stream = 1; + * @return The mergeHostToDeviceStream. + */ + @java.lang.Override + public boolean getMergeHostToDeviceStream() { + return mergeHostToDeviceStream_; + } + /** + *
    +         * If true, the compute stream will be used for host_to_device copy as
    +         * well. It's no longer necessary to record an event before the copy to
    +         * let the copy stream wait for the compute stream to finish. There is
    +         * also no need to wait for the copy to complete before executing the
    +         * callback function.
    +         * 
    + * + * bool merge_host_to_device_stream = 1; + * @param value The mergeHostToDeviceStream to set. + * @return This builder for chaining. + */ + public Builder setMergeHostToDeviceStream(boolean value) { + + mergeHostToDeviceStream_ = value; + onChanged(); + return this; + } + /** + *
    +         * If true, the compute stream will be used for host_to_device copy as
    +         * well. It's no longer necessary to record an event before the copy to
    +         * let the copy stream wait for the compute stream to finish. There is
    +         * also no need to wait for the copy to complete before executing the
    +         * callback function.
              * 
    * - * repeated int32 device_ordinal = 3; - * @return The count of deviceOrdinal. + * bool merge_host_to_device_stream = 1; + * @return This builder for chaining. */ - public int getDeviceOrdinalCount() { - return deviceOrdinal_.size(); + public Builder clearMergeHostToDeviceStream() { + + mergeHostToDeviceStream_ = false; + onChanged(); + return this; } + + private boolean mergeDeviceToHostStream_ ; /** *
    -         * Virtual Device ordinal number determines the device ID of the device.
    -         * A Virtual device with a lower ordinal number always receives the a
    -         * smaller device id. The phyiscal device id and location in the
    -         * virtual device list is used to break ties.
    +         * If true, the compute stream will be used for device_to_host copy as
    +         * well. It's no longer necessary to record an event before the copy to
    +         * let the copy stream wait for the compute stream to finish.
              * 
    * - * repeated int32 device_ordinal = 3; - * @param index The index of the element to return. - * @return The deviceOrdinal at the given index. + * bool merge_device_to_host_stream = 2; + * @return The mergeDeviceToHostStream. */ - public int getDeviceOrdinal(int index) { - return deviceOrdinal_.getInt(index); + @java.lang.Override + public boolean getMergeDeviceToHostStream() { + return mergeDeviceToHostStream_; } /** *
    -         * Virtual Device ordinal number determines the device ID of the device.
    -         * A Virtual device with a lower ordinal number always receives the a
    -         * smaller device id. The phyiscal device id and location in the
    -         * virtual device list is used to break ties.
    +         * If true, the compute stream will be used for device_to_host copy as
    +         * well. It's no longer necessary to record an event before the copy to
    +         * let the copy stream wait for the compute stream to finish.
              * 
    * - * repeated int32 device_ordinal = 3; - * @param index The index to set the value at. - * @param value The deviceOrdinal to set. + * bool merge_device_to_host_stream = 2; + * @param value The mergeDeviceToHostStream to set. * @return This builder for chaining. */ - public Builder setDeviceOrdinal( - int index, int value) { - ensureDeviceOrdinalIsMutable(); - deviceOrdinal_.setInt(index, value); + public Builder setMergeDeviceToHostStream(boolean value) { + + mergeDeviceToHostStream_ = value; onChanged(); return this; } /** *
    -         * Virtual Device ordinal number determines the device ID of the device.
    -         * A Virtual device with a lower ordinal number always receives the a
    -         * smaller device id. The phyiscal device id and location in the
    -         * virtual device list is used to break ties.
    +         * If true, the compute stream will be used for device_to_host copy as
    +         * well. It's no longer necessary to record an event before the copy to
    +         * let the copy stream wait for the compute stream to finish.
              * 
    * - * repeated int32 device_ordinal = 3; - * @param value The deviceOrdinal to add. + * bool merge_device_to_host_stream = 2; * @return This builder for chaining. */ - public Builder addDeviceOrdinal(int value) { - ensureDeviceOrdinalIsMutable(); - deviceOrdinal_.addInt(value); + public Builder clearMergeDeviceToHostStream() { + + mergeDeviceToHostStream_ = false; onChanged(); return this; } + + private boolean mergeDeviceToDeviceStream_ ; /** *
    -         * Virtual Device ordinal number determines the device ID of the device.
    -         * A Virtual device with a lower ordinal number always receives the a
    -         * smaller device id. The phyiscal device id and location in the
    -         * virtual device list is used to break ties.
    +         * If true, the compute stream will be used for device_to_device copy as
    +         * well. It's no longer necessary to record an event before the copy to
    +         * let the copy stream wait for the compute stream of the sending device
    +         * to finish. There is also no need to wait for the compute stream of the
    +         * receiving device to finish if the copy is within the same device.
              * 
    * - * repeated int32 device_ordinal = 3; - * @param values The deviceOrdinal to add. + * bool merge_device_to_device_stream = 3; + * @return The mergeDeviceToDeviceStream. + */ + @java.lang.Override + public boolean getMergeDeviceToDeviceStream() { + return mergeDeviceToDeviceStream_; + } + /** + *
    +         * If true, the compute stream will be used for device_to_device copy as
    +         * well. It's no longer necessary to record an event before the copy to
    +         * let the copy stream wait for the compute stream of the sending device
    +         * to finish. There is also no need to wait for the compute stream of the
    +         * receiving device to finish if the copy is within the same device.
    +         * 
    + * + * bool merge_device_to_device_stream = 3; + * @param value The mergeDeviceToDeviceStream to set. * @return This builder for chaining. */ - public Builder addAllDeviceOrdinal( - java.lang.Iterable values) { - ensureDeviceOrdinalIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, deviceOrdinal_); + public Builder setMergeDeviceToDeviceStream(boolean value) { + + mergeDeviceToDeviceStream_ = value; onChanged(); return this; } /** *
    -         * Virtual Device ordinal number determines the device ID of the device.
    -         * A Virtual device with a lower ordinal number always receives the a
    -         * smaller device id. The phyiscal device id and location in the
    -         * virtual device list is used to break ties.
    +         * If true, the compute stream will be used for device_to_device copy as
    +         * well. It's no longer necessary to record an event before the copy to
    +         * let the copy stream wait for the compute stream of the sending device
    +         * to finish. There is also no need to wait for the compute stream of the
    +         * receiving device to finish if the copy is within the same device.
              * 
    * - * repeated int32 device_ordinal = 3; + * bool merge_device_to_device_stream = 3; * @return This builder for chaining. */ - public Builder clearDeviceOrdinal() { - deviceOrdinal_ = emptyIntList(); - bitField0_ = (bitField0_ & ~0x00000004); + public Builder clearMergeDeviceToDeviceStream() { + + mergeDeviceToDeviceStream_ = false; onChanged(); return this; } @@ -1788,23 +2566,23 @@ public final Builder mergeUnknownFields( } - // @@protoc_insertion_point(builder_scope:tensorflow.GPUOptions.Experimental.VirtualDevices) + // @@protoc_insertion_point(builder_scope:tensorflow.GPUOptions.Experimental.StreamMergeOptions) } - // @@protoc_insertion_point(class_scope:tensorflow.GPUOptions.Experimental.VirtualDevices) - private static final org.tensorflow.proto.GPUOptions.Experimental.VirtualDevices DEFAULT_INSTANCE; + // @@protoc_insertion_point(class_scope:tensorflow.GPUOptions.Experimental.StreamMergeOptions) + private static final org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.tensorflow.proto.GPUOptions.Experimental.VirtualDevices(); + DEFAULT_INSTANCE = new org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions(); } - public static org.tensorflow.proto.GPUOptions.Experimental.VirtualDevices getDefaultInstance() { + public static org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions getDefaultInstance() { return DEFAULT_INSTANCE; } - private static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { @java.lang.Override - public VirtualDevices parsePartialFrom( + public StreamMergeOptions parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -1823,17 +2601,17 @@ public VirtualDevices parsePartialFrom( } }; - 
public static com.google.protobuf.Parser parser() { + public static com.google.protobuf.Parser parser() { return PARSER; } @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override - public org.tensorflow.proto.GPUOptions.Experimental.VirtualDevices getDefaultInstanceForType() { + public org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions getDefaultInstanceForType() { return DEFAULT_INSTANCE; } @@ -2365,6 +3143,64 @@ public int getGpuSystemMemorySizeInMb() { return gpuSystemMemorySizeInMb_; } + public static final int POPULATE_PJRT_GPU_CLIENT_CREATION_INFO_FIELD_NUMBER = 17; + private boolean populatePjrtGpuClientCreationInfo_; + /** + *
    +     * If true, save information needed for created a PjRt GPU client for
    +     * creating a client with remote devices.
    +     * 
    + * + * bool populate_pjrt_gpu_client_creation_info = 17; + * @return The populatePjrtGpuClientCreationInfo. + */ + @java.lang.Override + public boolean getPopulatePjrtGpuClientCreationInfo() { + return populatePjrtGpuClientCreationInfo_; + } + + public static final int NODE_ID_FIELD_NUMBER = 18; + private int nodeId_; + /** + *
    +     * node_id for use when creating a PjRt GPU client with remote devices,
    +     * which enumerates jobs*tasks from a ServerDef.
    +     * 
    + * + * int32 node_id = 18; + * @return The nodeId. + */ + @java.lang.Override + public int getNodeId() { + return nodeId_; + } + + public static final int STREAM_MERGE_OPTIONS_FIELD_NUMBER = 19; + private org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions streamMergeOptions_; + /** + * .tensorflow.GPUOptions.Experimental.StreamMergeOptions stream_merge_options = 19; + * @return Whether the streamMergeOptions field is set. + */ + @java.lang.Override + public boolean hasStreamMergeOptions() { + return streamMergeOptions_ != null; + } + /** + * .tensorflow.GPUOptions.Experimental.StreamMergeOptions stream_merge_options = 19; + * @return The streamMergeOptions. + */ + @java.lang.Override + public org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions getStreamMergeOptions() { + return streamMergeOptions_ == null ? org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions.getDefaultInstance() : streamMergeOptions_; + } + /** + * .tensorflow.GPUOptions.Experimental.StreamMergeOptions stream_merge_options = 19; + */ + @java.lang.Override + public org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptionsOrBuilder getStreamMergeOptionsOrBuilder() { + return getStreamMergeOptions(); + } + private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -2424,6 +3260,15 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (gpuSystemMemorySizeInMb_ != 0) { output.writeInt32(16, gpuSystemMemorySizeInMb_); } + if (populatePjrtGpuClientCreationInfo_ != false) { + output.writeBool(17, populatePjrtGpuClientCreationInfo_); + } + if (nodeId_ != 0) { + output.writeInt32(18, nodeId_); + } + if (streamMergeOptions_ != null) { + output.writeMessage(19, getStreamMergeOptions()); + } getUnknownFields().writeTo(output); } @@ -2492,6 +3337,18 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeInt32Size(16, gpuSystemMemorySizeInMb_); } + if 
(populatePjrtGpuClientCreationInfo_ != false) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(17, populatePjrtGpuClientCreationInfo_); + } + if (nodeId_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(18, nodeId_); + } + if (streamMergeOptions_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(19, getStreamMergeOptions()); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -2539,6 +3396,15 @@ public boolean equals(final java.lang.Object obj) { != other.getGpuHostMemDisallowGrowth()) return false; if (getGpuSystemMemorySizeInMb() != other.getGpuSystemMemorySizeInMb()) return false; + if (getPopulatePjrtGpuClientCreationInfo() + != other.getPopulatePjrtGpuClientCreationInfo()) return false; + if (getNodeId() + != other.getNodeId()) return false; + if (hasStreamMergeOptions() != other.hasStreamMergeOptions()) return false; + if (hasStreamMergeOptions()) { + if (!getStreamMergeOptions() + .equals(other.getStreamMergeOptions())) return false; + } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @@ -2589,6 +3455,15 @@ public int hashCode() { getGpuHostMemDisallowGrowth()); hash = (37 * hash) + GPU_SYSTEM_MEMORY_SIZE_IN_MB_FIELD_NUMBER; hash = (53 * hash) + getGpuSystemMemorySizeInMb(); + hash = (37 * hash) + POPULATE_PJRT_GPU_CLIENT_CREATION_INFO_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getPopulatePjrtGpuClientCreationInfo()); + hash = (37 * hash) + NODE_ID_FIELD_NUMBER; + hash = (53 * hash) + getNodeId(); + if (hasStreamMergeOptions()) { + hash = (37 * hash) + STREAM_MERGE_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getStreamMergeOptions().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -2752,6 +3627,16 @@ public Builder clear() { gpuSystemMemorySizeInMb_ = 0; + populatePjrtGpuClientCreationInfo_ = false; + + nodeId_ 
= 0; + + if (streamMergeOptionsBuilder_ == null) { + streamMergeOptions_ = null; + } else { + streamMergeOptions_ = null; + streamMergeOptionsBuilder_ = null; + } return this; } @@ -2802,6 +3687,13 @@ public org.tensorflow.proto.GPUOptions.Experimental buildPartial() { result.gpuHostMemLimitInMb_ = gpuHostMemLimitInMb_; result.gpuHostMemDisallowGrowth_ = gpuHostMemDisallowGrowth_; result.gpuSystemMemorySizeInMb_ = gpuSystemMemorySizeInMb_; + result.populatePjrtGpuClientCreationInfo_ = populatePjrtGpuClientCreationInfo_; + result.nodeId_ = nodeId_; + if (streamMergeOptionsBuilder_ == null) { + result.streamMergeOptions_ = streamMergeOptions_; + } else { + result.streamMergeOptions_ = streamMergeOptionsBuilder_.build(); + } onBuilt(); return result; } @@ -2919,6 +3811,15 @@ public Builder mergeFrom(org.tensorflow.proto.GPUOptions.Experimental other) { if (other.getGpuSystemMemorySizeInMb() != 0) { setGpuSystemMemorySizeInMb(other.getGpuSystemMemorySizeInMb()); } + if (other.getPopulatePjrtGpuClientCreationInfo() != false) { + setPopulatePjrtGpuClientCreationInfo(other.getPopulatePjrtGpuClientCreationInfo()); + } + if (other.getNodeId() != 0) { + setNodeId(other.getNodeId()); + } + if (other.hasStreamMergeOptions()) { + mergeStreamMergeOptions(other.getStreamMergeOptions()); + } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; @@ -3028,6 +3929,23 @@ public Builder mergeFrom( break; } // case 128 + case 136: { + populatePjrtGpuClientCreationInfo_ = input.readBool(); + + break; + } // case 136 + case 144: { + nodeId_ = input.readInt32(); + + break; + } // case 144 + case 154: { + input.readMessage( + getStreamMergeOptionsFieldBuilder().getBuilder(), + extensionRegistry); + + break; + } // case 154 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag @@ -4783,6 +5701,217 @@ public Builder clearGpuSystemMemorySizeInMb() { onChanged(); return this; } + + private boolean 
populatePjrtGpuClientCreationInfo_ ; + /** + *
    +       * If true, save information needed for created a PjRt GPU client for
    +       * creating a client with remote devices.
    +       * 
    + * + * bool populate_pjrt_gpu_client_creation_info = 17; + * @return The populatePjrtGpuClientCreationInfo. + */ + @java.lang.Override + public boolean getPopulatePjrtGpuClientCreationInfo() { + return populatePjrtGpuClientCreationInfo_; + } + /** + *
    +       * If true, save information needed for created a PjRt GPU client for
    +       * creating a client with remote devices.
    +       * 
    + * + * bool populate_pjrt_gpu_client_creation_info = 17; + * @param value The populatePjrtGpuClientCreationInfo to set. + * @return This builder for chaining. + */ + public Builder setPopulatePjrtGpuClientCreationInfo(boolean value) { + + populatePjrtGpuClientCreationInfo_ = value; + onChanged(); + return this; + } + /** + *
    +       * If true, save information needed for created a PjRt GPU client for
    +       * creating a client with remote devices.
    +       * 
    + * + * bool populate_pjrt_gpu_client_creation_info = 17; + * @return This builder for chaining. + */ + public Builder clearPopulatePjrtGpuClientCreationInfo() { + + populatePjrtGpuClientCreationInfo_ = false; + onChanged(); + return this; + } + + private int nodeId_ ; + /** + *
    +       * node_id for use when creating a PjRt GPU client with remote devices,
    +       * which enumerates jobs*tasks from a ServerDef.
    +       * 
    + * + * int32 node_id = 18; + * @return The nodeId. + */ + @java.lang.Override + public int getNodeId() { + return nodeId_; + } + /** + *
    +       * node_id for use when creating a PjRt GPU client with remote devices,
    +       * which enumerates jobs*tasks from a ServerDef.
    +       * 
    + * + * int32 node_id = 18; + * @param value The nodeId to set. + * @return This builder for chaining. + */ + public Builder setNodeId(int value) { + + nodeId_ = value; + onChanged(); + return this; + } + /** + *
    +       * node_id for use when creating a PjRt GPU client with remote devices,
    +       * which enumerates jobs*tasks from a ServerDef.
    +       * 
    + * + * int32 node_id = 18; + * @return This builder for chaining. + */ + public Builder clearNodeId() { + + nodeId_ = 0; + onChanged(); + return this; + } + + private org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions streamMergeOptions_; + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions, org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions.Builder, org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptionsOrBuilder> streamMergeOptionsBuilder_; + /** + * .tensorflow.GPUOptions.Experimental.StreamMergeOptions stream_merge_options = 19; + * @return Whether the streamMergeOptions field is set. + */ + public boolean hasStreamMergeOptions() { + return streamMergeOptionsBuilder_ != null || streamMergeOptions_ != null; + } + /** + * .tensorflow.GPUOptions.Experimental.StreamMergeOptions stream_merge_options = 19; + * @return The streamMergeOptions. + */ + public org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions getStreamMergeOptions() { + if (streamMergeOptionsBuilder_ == null) { + return streamMergeOptions_ == null ? 
org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions.getDefaultInstance() : streamMergeOptions_; + } else { + return streamMergeOptionsBuilder_.getMessage(); + } + } + /** + * .tensorflow.GPUOptions.Experimental.StreamMergeOptions stream_merge_options = 19; + */ + public Builder setStreamMergeOptions(org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions value) { + if (streamMergeOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + streamMergeOptions_ = value; + onChanged(); + } else { + streamMergeOptionsBuilder_.setMessage(value); + } + + return this; + } + /** + * .tensorflow.GPUOptions.Experimental.StreamMergeOptions stream_merge_options = 19; + */ + public Builder setStreamMergeOptions( + org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions.Builder builderForValue) { + if (streamMergeOptionsBuilder_ == null) { + streamMergeOptions_ = builderForValue.build(); + onChanged(); + } else { + streamMergeOptionsBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * .tensorflow.GPUOptions.Experimental.StreamMergeOptions stream_merge_options = 19; + */ + public Builder mergeStreamMergeOptions(org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions value) { + if (streamMergeOptionsBuilder_ == null) { + if (streamMergeOptions_ != null) { + streamMergeOptions_ = + org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions.newBuilder(streamMergeOptions_).mergeFrom(value).buildPartial(); + } else { + streamMergeOptions_ = value; + } + onChanged(); + } else { + streamMergeOptionsBuilder_.mergeFrom(value); + } + + return this; + } + /** + * .tensorflow.GPUOptions.Experimental.StreamMergeOptions stream_merge_options = 19; + */ + public Builder clearStreamMergeOptions() { + if (streamMergeOptionsBuilder_ == null) { + streamMergeOptions_ = null; + onChanged(); + } else { + streamMergeOptions_ = null; + streamMergeOptionsBuilder_ = null; + } + + return this; + } + /** + * 
.tensorflow.GPUOptions.Experimental.StreamMergeOptions stream_merge_options = 19; + */ + public org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions.Builder getStreamMergeOptionsBuilder() { + + onChanged(); + return getStreamMergeOptionsFieldBuilder().getBuilder(); + } + /** + * .tensorflow.GPUOptions.Experimental.StreamMergeOptions stream_merge_options = 19; + */ + public org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptionsOrBuilder getStreamMergeOptionsOrBuilder() { + if (streamMergeOptionsBuilder_ != null) { + return streamMergeOptionsBuilder_.getMessageOrBuilder(); + } else { + return streamMergeOptions_ == null ? + org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions.getDefaultInstance() : streamMergeOptions_; + } + } + /** + * .tensorflow.GPUOptions.Experimental.StreamMergeOptions stream_merge_options = 19; + */ + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions, org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions.Builder, org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptionsOrBuilder> + getStreamMergeOptionsFieldBuilder() { + if (streamMergeOptionsBuilder_ == null) { + streamMergeOptionsBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions, org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptions.Builder, org.tensorflow.proto.GPUOptions.Experimental.StreamMergeOptionsOrBuilder>( + getStreamMergeOptions(), + getParentForChildren(), + isClean()); + streamMergeOptions_ = null; + } + return streamMergeOptionsBuilder_; + } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/MachineConfiguration.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/MachineConfiguration.java index 
56ab6b425d1..6dbc6ce6f3b 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/MachineConfiguration.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/MachineConfiguration.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/MachineConfigurationOrBuilder.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/MachineConfigurationOrBuilder.java index 5821218bf8f..e3c944d06be 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/MachineConfigurationOrBuilder.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/MachineConfigurationOrBuilder.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/MemoryInfo.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/MemoryInfo.java index 8c4b5b692a6..d351a728e2a 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/MemoryInfo.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/MemoryInfo.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
-// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/MemoryInfoOrBuilder.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/MemoryInfoOrBuilder.java index 265206a7c19..6a2f7e6c9e8 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/MemoryInfoOrBuilder.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/MemoryInfoOrBuilder.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/MetricEntry.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/MetricEntry.java index 70a5e1ba8bc..d9454e9bc70 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/MetricEntry.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/MetricEntry.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/MetricEntryOrBuilder.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/MetricEntryOrBuilder.java index 9898de2810f..e8f2867a14a 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/MetricEntryOrBuilder.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/MetricEntryOrBuilder.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
-// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/PlatformInfo.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/PlatformInfo.java index 782524cf4d4..d2875cf5041 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/PlatformInfo.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/PlatformInfo.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/PlatformInfoOrBuilder.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/PlatformInfoOrBuilder.java index fd9455571a2..caade7d2f32 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/PlatformInfoOrBuilder.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/PlatformInfoOrBuilder.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/ResourceHandleProto.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/ResourceHandleProto.java index 159c9574b47..df26d1e77cd 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/ResourceHandleProto.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/ResourceHandleProto.java @@ -59,27 +59,47 @@ public interface DtypeAndShapeOrBuilder extends com.google.protobuf.MessageOrBuilder { /** + *
    +     * Data type of the tensor.
    +     * 
    + * * .tensorflow.DataType dtype = 1; * @return The enum numeric value on the wire for dtype. */ int getDtypeValue(); /** + *
    +     * Data type of the tensor.
    +     * 
    + * * .tensorflow.DataType dtype = 1; * @return The dtype. */ org.tensorflow.proto.DataType getDtype(); /** + *
    +     * Shape of the tensor.
    +     * 
    + * * .tensorflow.TensorShapeProto shape = 2; * @return Whether the shape field is set. */ boolean hasShape(); /** + *
    +     * Shape of the tensor.
    +     * 
    + * * .tensorflow.TensorShapeProto shape = 2; * @return The shape. */ org.tensorflow.proto.TensorShapeProto getShape(); /** + *
    +     * Shape of the tensor.
    +     * 
    + * * .tensorflow.TensorShapeProto shape = 2; */ org.tensorflow.proto.TensorShapeProtoOrBuilder getShapeOrBuilder(); @@ -132,6 +152,10 @@ protected java.lang.Object newInstance( public static final int DTYPE_FIELD_NUMBER = 1; private int dtype_; /** + *
    +     * Data type of the tensor.
    +     * 
    + * * .tensorflow.DataType dtype = 1; * @return The enum numeric value on the wire for dtype. */ @@ -139,6 +163,10 @@ protected java.lang.Object newInstance( return dtype_; } /** + *
    +     * Data type of the tensor.
    +     * 
    + * * .tensorflow.DataType dtype = 1; * @return The dtype. */ @@ -151,6 +179,10 @@ protected java.lang.Object newInstance( public static final int SHAPE_FIELD_NUMBER = 2; private org.tensorflow.proto.TensorShapeProto shape_; /** + *
    +     * Shape of the tensor.
    +     * 
    + * * .tensorflow.TensorShapeProto shape = 2; * @return Whether the shape field is set. */ @@ -159,6 +191,10 @@ public boolean hasShape() { return shape_ != null; } /** + *
    +     * Shape of the tensor.
    +     * 
    + * * .tensorflow.TensorShapeProto shape = 2; * @return The shape. */ @@ -167,6 +203,10 @@ public org.tensorflow.proto.TensorShapeProto getShape() { return shape_ == null ? org.tensorflow.proto.TensorShapeProto.getDefaultInstance() : shape_; } /** + *
    +     * Shape of the tensor.
    +     * 
    + * * .tensorflow.TensorShapeProto shape = 2; */ @java.lang.Override @@ -531,6 +571,10 @@ public Builder mergeFrom( private int dtype_ = 0; /** + *
    +       * Data type of the tensor.
    +       * 
    + * * .tensorflow.DataType dtype = 1; * @return The enum numeric value on the wire for dtype. */ @@ -538,6 +582,10 @@ public Builder mergeFrom( return dtype_; } /** + *
    +       * Data type of the tensor.
    +       * 
    + * * .tensorflow.DataType dtype = 1; * @param value The enum numeric value on the wire for dtype to set. * @return This builder for chaining. @@ -549,6 +597,10 @@ public Builder setDtypeValue(int value) { return this; } /** + *
    +       * Data type of the tensor.
    +       * 
    + * * .tensorflow.DataType dtype = 1; * @return The dtype. */ @@ -559,6 +611,10 @@ public org.tensorflow.proto.DataType getDtype() { return result == null ? org.tensorflow.proto.DataType.UNRECOGNIZED : result; } /** + *
    +       * Data type of the tensor.
    +       * 
    + * * .tensorflow.DataType dtype = 1; * @param value The dtype to set. * @return This builder for chaining. @@ -573,6 +629,10 @@ public Builder setDtype(org.tensorflow.proto.DataType value) { return this; } /** + *
    +       * Data type of the tensor.
    +       * 
    + * * .tensorflow.DataType dtype = 1; * @return This builder for chaining. */ @@ -587,6 +647,10 @@ public Builder clearDtype() { private com.google.protobuf.SingleFieldBuilderV3< org.tensorflow.proto.TensorShapeProto, org.tensorflow.proto.TensorShapeProto.Builder, org.tensorflow.proto.TensorShapeProtoOrBuilder> shapeBuilder_; /** + *
    +       * Shape of the tensor.
    +       * 
    + * * .tensorflow.TensorShapeProto shape = 2; * @return Whether the shape field is set. */ @@ -594,6 +658,10 @@ public boolean hasShape() { return shapeBuilder_ != null || shape_ != null; } /** + *
    +       * Shape of the tensor.
    +       * 
    + * * .tensorflow.TensorShapeProto shape = 2; * @return The shape. */ @@ -605,6 +673,10 @@ public org.tensorflow.proto.TensorShapeProto getShape() { } } /** + *
    +       * Shape of the tensor.
    +       * 
    + * * .tensorflow.TensorShapeProto shape = 2; */ public Builder setShape(org.tensorflow.proto.TensorShapeProto value) { @@ -621,6 +693,10 @@ public Builder setShape(org.tensorflow.proto.TensorShapeProto value) { return this; } /** + *
    +       * Shape of the tensor.
    +       * 
    + * * .tensorflow.TensorShapeProto shape = 2; */ public Builder setShape( @@ -635,6 +711,10 @@ public Builder setShape( return this; } /** + *
    +       * Shape of the tensor.
    +       * 
    + * * .tensorflow.TensorShapeProto shape = 2; */ public Builder mergeShape(org.tensorflow.proto.TensorShapeProto value) { @@ -653,6 +733,10 @@ public Builder mergeShape(org.tensorflow.proto.TensorShapeProto value) { return this; } /** + *
    +       * Shape of the tensor.
    +       * 
    + * * .tensorflow.TensorShapeProto shape = 2; */ public Builder clearShape() { @@ -667,6 +751,10 @@ public Builder clearShape() { return this; } /** + *
    +       * Shape of the tensor.
    +       * 
    + * * .tensorflow.TensorShapeProto shape = 2; */ public org.tensorflow.proto.TensorShapeProto.Builder getShapeBuilder() { @@ -675,6 +763,10 @@ public org.tensorflow.proto.TensorShapeProto.Builder getShapeBuilder() { return getShapeFieldBuilder().getBuilder(); } /** + *
    +       * Shape of the tensor.
    +       * 
    + * * .tensorflow.TensorShapeProto shape = 2; */ public org.tensorflow.proto.TensorShapeProtoOrBuilder getShapeOrBuilder() { @@ -686,6 +778,10 @@ public org.tensorflow.proto.TensorShapeProtoOrBuilder getShapeOrBuilder() { } } /** + *
    +       * Shape of the tensor.
    +       * 
    + * * .tensorflow.TensorShapeProto shape = 2; */ private com.google.protobuf.SingleFieldBuilderV3< diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/RewriterConfig.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/RewriterConfig.java index ae97c9cc75f..c235fb30634 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/RewriterConfig.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/RewriterConfig.java @@ -2002,8 +2002,8 @@ public boolean getDisableModelPruning() { private int autoMixedPrecision_; /** *
    -   * Optimize data types for CUDA (default is OFF).
    -   * This will try to use float16 on GPU which is faster.
    +   * Optimize data types for CUDA/oneDNN (default is OFF).
    +   * This will try to use float16 on GPU/CPU which is faster.
        * Note that this can change the numerical stability of the graph and may
        * require the use of loss scaling to maintain model convergence.
        * 
    @@ -2016,8 +2016,8 @@ public boolean getDisableModelPruning() { } /** *
    -   * Optimize data types for CUDA (default is OFF).
    -   * This will try to use float16 on GPU which is faster.
    +   * Optimize data types for CUDA/oneDNN (default is OFF).
    +   * This will try to use float16 on GPU/CPU which is faster.
        * Note that this can change the numerical stability of the graph and may
        * require the use of loss scaling to maintain model convergence.
        * 
    @@ -5074,8 +5074,8 @@ public Builder clearImplementationSelector() { private int autoMixedPrecision_ = 0; /** *
    -     * Optimize data types for CUDA (default is OFF).
    -     * This will try to use float16 on GPU which is faster.
    +     * Optimize data types for CUDA/oneDNN (default is OFF).
    +     * This will try to use float16 on GPU/CPU which is faster.
          * Note that this can change the numerical stability of the graph and may
          * require the use of loss scaling to maintain model convergence.
          * 
    @@ -5088,8 +5088,8 @@ public Builder clearImplementationSelector() { } /** *
    -     * Optimize data types for CUDA (default is OFF).
    -     * This will try to use float16 on GPU which is faster.
    +     * Optimize data types for CUDA/oneDNN (default is OFF).
    +     * This will try to use float16 on GPU/CPU which is faster.
          * Note that this can change the numerical stability of the graph and may
          * require the use of loss scaling to maintain model convergence.
          * 
    @@ -5106,8 +5106,8 @@ public Builder setAutoMixedPrecisionValue(int value) { } /** *
    -     * Optimize data types for CUDA (default is OFF).
    -     * This will try to use float16 on GPU which is faster.
    +     * Optimize data types for CUDA/oneDNN (default is OFF).
    +     * This will try to use float16 on GPU/CPU which is faster.
          * Note that this can change the numerical stability of the graph and may
          * require the use of loss scaling to maintain model convergence.
          * 
    @@ -5123,8 +5123,8 @@ public org.tensorflow.proto.RewriterConfig.Toggle getAutoMixedPrecision() { } /** *
    -     * Optimize data types for CUDA (default is OFF).
    -     * This will try to use float16 on GPU which is faster.
    +     * Optimize data types for CUDA/oneDNN (default is OFF).
    +     * This will try to use float16 on GPU/CPU which is faster.
          * Note that this can change the numerical stability of the graph and may
          * require the use of loss scaling to maintain model convergence.
          * 
    @@ -5144,8 +5144,8 @@ public Builder setAutoMixedPrecision(org.tensorflow.proto.RewriterConfig.Toggle } /** *
    -     * Optimize data types for CUDA (default is OFF).
    -     * This will try to use float16 on GPU which is faster.
    +     * Optimize data types for CUDA/oneDNN (default is OFF).
    +     * This will try to use float16 on GPU/CPU which is faster.
          * Note that this can change the numerical stability of the graph and may
          * require the use of loss scaling to maintain model convergence.
          * 
    diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/RewriterConfigOrBuilder.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/RewriterConfigOrBuilder.java index 9ad4b3cf401..2676ca54911 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/RewriterConfigOrBuilder.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/RewriterConfigOrBuilder.java @@ -305,8 +305,8 @@ public interface RewriterConfigOrBuilder extends /** *
    -   * Optimize data types for CUDA (default is OFF).
    -   * This will try to use float16 on GPU which is faster.
    +   * Optimize data types for CUDA/oneDNN (default is OFF).
    +   * This will try to use float16 on GPU/CPU which is faster.
        * Note that this can change the numerical stability of the graph and may
        * require the use of loss scaling to maintain model convergence.
        * 
    @@ -317,8 +317,8 @@ public interface RewriterConfigOrBuilder extends int getAutoMixedPrecisionValue(); /** *
    -   * Optimize data types for CUDA (default is OFF).
    -   * This will try to use float16 on GPU which is faster.
    +   * Optimize data types for CUDA/oneDNN (default is OFF).
    +   * This will try to use float16 on GPU/CPU which is faster.
        * Note that this can change the numerical stability of the graph and may
        * require the use of loss scaling to maintain model convergence.
        * 
    diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/RunConfiguration.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/RunConfiguration.java index 2a17bdafaf1..f8f244b522c 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/RunConfiguration.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/RunConfiguration.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/RunConfigurationOrBuilder.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/RunConfigurationOrBuilder.java index a3b3ca982e8..4f2ef9a6b2c 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/RunConfigurationOrBuilder.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/RunConfigurationOrBuilder.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/SignatureDef.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/SignatureDef.java index ecb73cc96e7..b701daabd03 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/SignatureDef.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/SignatureDef.java @@ -7,58 +7,6 @@ *
      * SignatureDef defines the signature of a computation supported by a TensorFlow
      * graph.
    - * For example, a model with two loss computations, sharing a single input,
    - * might have the following signature_def map, in a MetaGraphDef message.
    - * Note that across the two SignatureDefs "loss_A" and "loss_B", the input key,
    - * output key, and method_name are identical, and will be used by system(s) that
    - * implement or rely upon this particular loss method. The output tensor names
    - * differ, demonstrating how different outputs can exist for the same method.
    - * signature_def {
    - *   key: "loss_A"
    - *   value {
    - *     inputs {
    - *       key: "input"
    - *       value {
    - *         name: "input:0"
    - *         dtype: DT_STRING
    - *         tensor_shape: ...
    - *       }
    - *     }
    - *     outputs {
    - *       key: "loss_output"
    - *       value {
    - *         name: "loss_output_A:0"
    - *         dtype: DT_FLOAT
    - *         tensor_shape: ...
    - *       }
    - *     }
    - *     method_name: "some/package/compute_loss"
    - *   }
    - *   ...
    - * }
    - * signature_def {
    - *   key: "loss_B"
    - *   value {
    - *     inputs {
    - *       key: "input"
    - *       value {
    - *         name: "input:0"
    - *         dtype: DT_STRING
    - *         tensor_shape: ...
    - *       }
    - *     }
    - *     outputs {
    - *       key: "loss_output"
    - *       value {
    - *         name: "loss_output_B:0"
    - *         dtype: DT_FLOAT
    - *         tensor_shape: ...
    - *       }
    - *     }
    - *     method_name: "some/package/compute_loss"
    - *   }
    - *   ...
    - * }
      * 
    * * Protobuf type {@code tensorflow.SignatureDef} @@ -315,13 +263,12 @@ public org.tensorflow.proto.TensorInfo getOutputsOrThrow( private volatile java.lang.Object methodName_; /** *
    -   * Extensible method_name information enabling third-party users to mark a
    -   * SignatureDef as supporting a particular method. This enables producers and
    -   * consumers of SignatureDefs, e.g. a model definition library and a serving
    -   * library to have a clear hand-off regarding the semantics of a computation.
    -   * Note that multiple SignatureDefs in a single MetaGraphDef may have the same
    -   * method_name. This is commonly used to support multi-headed computation,
    -   * where a single graph computation may return multiple results.
    +   * Deprecated: TensorFlow 2 always sets this to a fixed value;
    +   * open-source TF Serving stopped checking by default since release 2.4.
    +   * In TensorFlow 1, the method_name enabled users to mark a SignatureDef as
    +   * supporting a particular method. Multiple SignatureDefs in a single
    +   * MetaGraphDef could have the same method_name (e.g., to support multi-headed
    +   * computation).
        * 
    * * string method_name = 3; @@ -342,13 +289,12 @@ public java.lang.String getMethodName() { } /** *
    -   * Extensible method_name information enabling third-party users to mark a
    -   * SignatureDef as supporting a particular method. This enables producers and
    -   * consumers of SignatureDefs, e.g. a model definition library and a serving
    -   * library to have a clear hand-off regarding the semantics of a computation.
    -   * Note that multiple SignatureDefs in a single MetaGraphDef may have the same
    -   * method_name. This is commonly used to support multi-headed computation,
    -   * where a single graph computation may return multiple results.
    +   * Deprecated: TensorFlow 2 always sets this to a fixed value;
    +   * open-source TF Serving stopped checking by default since release 2.4.
    +   * In TensorFlow 1, the method_name enabled users to mark a SignatureDef as
    +   * supporting a particular method. Multiple SignatureDefs in a single
    +   * MetaGraphDef could have the same method_name (e.g., to support multi-headed
    +   * computation).
        * 
    * * string method_name = 3; @@ -690,58 +636,6 @@ protected Builder newBuilderForType( *
        * SignatureDef defines the signature of a computation supported by a TensorFlow
        * graph.
    -   * For example, a model with two loss computations, sharing a single input,
    -   * might have the following signature_def map, in a MetaGraphDef message.
    -   * Note that across the two SignatureDefs "loss_A" and "loss_B", the input key,
    -   * output key, and method_name are identical, and will be used by system(s) that
    -   * implement or rely upon this particular loss method. The output tensor names
    -   * differ, demonstrating how different outputs can exist for the same method.
    -   * signature_def {
    -   *   key: "loss_A"
    -   *   value {
    -   *     inputs {
    -   *       key: "input"
    -   *       value {
    -   *         name: "input:0"
    -   *         dtype: DT_STRING
    -   *         tensor_shape: ...
    -   *       }
    -   *     }
    -   *     outputs {
    -   *       key: "loss_output"
    -   *       value {
    -   *         name: "loss_output_A:0"
    -   *         dtype: DT_FLOAT
    -   *         tensor_shape: ...
    -   *       }
    -   *     }
    -   *     method_name: "some/package/compute_loss"
    -   *   }
    -   *   ...
    -   * }
    -   * signature_def {
    -   *   key: "loss_B"
    -   *   value {
    -   *     inputs {
    -   *       key: "input"
    -   *       value {
    -   *         name: "input:0"
    -   *         dtype: DT_STRING
    -   *         tensor_shape: ...
    -   *       }
    -   *     }
    -   *     outputs {
    -   *       key: "loss_output"
    -   *       value {
    -   *         name: "loss_output_B:0"
    -   *         dtype: DT_FLOAT
    -   *         tensor_shape: ...
    -   *       }
    -   *     }
    -   *     method_name: "some/package/compute_loss"
    -   *   }
    -   *   ...
    -   * }
        * 
    * * Protobuf type {@code tensorflow.SignatureDef} @@ -1296,13 +1190,12 @@ public Builder putAllOutputs( private java.lang.Object methodName_ = ""; /** *
    -     * Extensible method_name information enabling third-party users to mark a
    -     * SignatureDef as supporting a particular method. This enables producers and
    -     * consumers of SignatureDefs, e.g. a model definition library and a serving
    -     * library to have a clear hand-off regarding the semantics of a computation.
    -     * Note that multiple SignatureDefs in a single MetaGraphDef may have the same
    -     * method_name. This is commonly used to support multi-headed computation,
    -     * where a single graph computation may return multiple results.
    +     * Deprecated: TensorFlow 2 always sets this to a fixed value;
    +     * open-source TF Serving stopped checking by default since release 2.4.
    +     * In TensorFlow 1, the method_name enabled users to mark a SignatureDef as
    +     * supporting a particular method. Multiple SignatureDefs in a single
    +     * MetaGraphDef could have the same method_name (e.g., to support multi-headed
    +     * computation).
          * 
    * * string method_name = 3; @@ -1322,13 +1215,12 @@ public java.lang.String getMethodName() { } /** *
    -     * Extensible method_name information enabling third-party users to mark a
    -     * SignatureDef as supporting a particular method. This enables producers and
    -     * consumers of SignatureDefs, e.g. a model definition library and a serving
    -     * library to have a clear hand-off regarding the semantics of a computation.
    -     * Note that multiple SignatureDefs in a single MetaGraphDef may have the same
    -     * method_name. This is commonly used to support multi-headed computation,
    -     * where a single graph computation may return multiple results.
    +     * Deprecated: TensorFlow 2 always sets this to a fixed value;
    +     * open-source TF Serving stopped checking by default since release 2.4.
    +     * In TensorFlow 1, the method_name enabled users to mark a SignatureDef as
    +     * supporting a particular method. Multiple SignatureDefs in a single
    +     * MetaGraphDef could have the same method_name (e.g., to support multi-headed
    +     * computation).
          * 
    * * string method_name = 3; @@ -1349,13 +1241,12 @@ public java.lang.String getMethodName() { } /** *
    -     * Extensible method_name information enabling third-party users to mark a
    -     * SignatureDef as supporting a particular method. This enables producers and
    -     * consumers of SignatureDefs, e.g. a model definition library and a serving
    -     * library to have a clear hand-off regarding the semantics of a computation.
    -     * Note that multiple SignatureDefs in a single MetaGraphDef may have the same
    -     * method_name. This is commonly used to support multi-headed computation,
    -     * where a single graph computation may return multiple results.
    +     * Deprecated: TensorFlow 2 always sets this to a fixed value;
    +     * open-source TF Serving stopped checking by default since release 2.4.
    +     * In TensorFlow 1, the method_name enabled users to mark a SignatureDef as
    +     * supporting a particular method. Multiple SignatureDefs in a single
    +     * MetaGraphDef could have the same method_name (e.g., to support multi-headed
    +     * computation).
          * 
    * * string method_name = 3; @@ -1374,13 +1265,12 @@ public Builder setMethodName( } /** *
    -     * Extensible method_name information enabling third-party users to mark a
    -     * SignatureDef as supporting a particular method. This enables producers and
    -     * consumers of SignatureDefs, e.g. a model definition library and a serving
    -     * library to have a clear hand-off regarding the semantics of a computation.
    -     * Note that multiple SignatureDefs in a single MetaGraphDef may have the same
    -     * method_name. This is commonly used to support multi-headed computation,
    -     * where a single graph computation may return multiple results.
    +     * Deprecated: TensorFlow 2 always sets this to a fixed value;
    +     * open-source TF Serving stopped checking by default since release 2.4.
    +     * In TensorFlow 1, the method_name enabled users to mark a SignatureDef as
    +     * supporting a particular method. Multiple SignatureDefs in a single
    +     * MetaGraphDef could have the same method_name (e.g., to support multi-headed
    +     * computation).
          * 
    * * string method_name = 3; @@ -1394,13 +1284,12 @@ public Builder clearMethodName() { } /** *
    -     * Extensible method_name information enabling third-party users to mark a
    -     * SignatureDef as supporting a particular method. This enables producers and
    -     * consumers of SignatureDefs, e.g. a model definition library and a serving
    -     * library to have a clear hand-off regarding the semantics of a computation.
    -     * Note that multiple SignatureDefs in a single MetaGraphDef may have the same
    -     * method_name. This is commonly used to support multi-headed computation,
    -     * where a single graph computation may return multiple results.
    +     * Deprecated: TensorFlow 2 always sets this to a fixed value;
    +     * open-source TF Serving stopped checking by default since release 2.4.
    +     * In TensorFlow 1, the method_name enabled users to mark a SignatureDef as
    +     * supporting a particular method. Multiple SignatureDefs in a single
    +     * MetaGraphDef could have the same method_name (e.g., to support multi-headed
    +     * computation).
          * 
    * * string method_name = 3; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/SignatureDefOrBuilder.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/SignatureDefOrBuilder.java index 86ae1bcf3d1..28bd86c8f8a 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/SignatureDefOrBuilder.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/SignatureDefOrBuilder.java @@ -121,13 +121,12 @@ org.tensorflow.proto.TensorInfo getOutputsOrThrow( /** *
    -   * Extensible method_name information enabling third-party users to mark a
    -   * SignatureDef as supporting a particular method. This enables producers and
    -   * consumers of SignatureDefs, e.g. a model definition library and a serving
    -   * library to have a clear hand-off regarding the semantics of a computation.
    -   * Note that multiple SignatureDefs in a single MetaGraphDef may have the same
    -   * method_name. This is commonly used to support multi-headed computation,
    -   * where a single graph computation may return multiple results.
    +   * Deprecated: TensorFlow 2 always sets this to a fixed value;
    +   * open-source TF Serving stopped checking by default since release 2.4.
    +   * In TensorFlow 1, the method_name enabled users to mark a SignatureDef as
    +   * supporting a particular method. Multiple SignatureDefs in a single
    +   * MetaGraphDef could have the same method_name (e.g., to support multi-headed
    +   * computation).
        * 
    * * string method_name = 3; @@ -136,13 +135,12 @@ org.tensorflow.proto.TensorInfo getOutputsOrThrow( java.lang.String getMethodName(); /** *
    -   * Extensible method_name information enabling third-party users to mark a
    -   * SignatureDef as supporting a particular method. This enables producers and
    -   * consumers of SignatureDefs, e.g. a model definition library and a serving
    -   * library to have a clear hand-off regarding the semantics of a computation.
    -   * Note that multiple SignatureDefs in a single MetaGraphDef may have the same
    -   * method_name. This is commonly used to support multi-headed computation,
    -   * where a single graph computation may return multiple results.
    +   * Deprecated: TensorFlow 2 always sets this to a fixed value;
    +   * open-source TF Serving stopped checking by default since release 2.4.
    +   * In TensorFlow 1, the method_name enabled users to mark a SignatureDef as
    +   * supporting a particular method. Multiple SignatureDefs in a single
    +   * MetaGraphDef could have the same method_name (e.g., to support multi-headed
    +   * computation).
        * 
    * * string method_name = 3; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/TensorProto.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/TensorProto.java index 0440777955e..ef4157a3352 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/TensorProto.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/TensorProto.java @@ -66,6 +66,10 @@ protected java.lang.Object newInstance( public static final int DTYPE_FIELD_NUMBER = 1; private int dtype_; /** + *
    +   * Data type of the tensor.
    +   * 
    + * * .tensorflow.DataType dtype = 1; * @return The enum numeric value on the wire for dtype. */ @@ -73,6 +77,10 @@ protected java.lang.Object newInstance( return dtype_; } /** + *
    +   * Data type of the tensor.
    +   * 
    + * * .tensorflow.DataType dtype = 1; * @return The dtype. */ @@ -1929,6 +1937,10 @@ public Builder mergeFrom( private int dtype_ = 0; /** + *
    +     * Data type of the tensor.
    +     * 
    + * * .tensorflow.DataType dtype = 1; * @return The enum numeric value on the wire for dtype. */ @@ -1936,6 +1948,10 @@ public Builder mergeFrom( return dtype_; } /** + *
    +     * Data type of the tensor.
    +     * 
    + * * .tensorflow.DataType dtype = 1; * @param value The enum numeric value on the wire for dtype to set. * @return This builder for chaining. @@ -1947,6 +1963,10 @@ public Builder setDtypeValue(int value) { return this; } /** + *
    +     * Data type of the tensor.
    +     * 
    + * * .tensorflow.DataType dtype = 1; * @return The dtype. */ @@ -1957,6 +1977,10 @@ public org.tensorflow.proto.DataType getDtype() { return result == null ? org.tensorflow.proto.DataType.UNRECOGNIZED : result; } /** + *
    +     * Data type of the tensor.
    +     * 
    + * * .tensorflow.DataType dtype = 1; * @param value The dtype to set. * @return This builder for chaining. @@ -1971,6 +1995,10 @@ public Builder setDtype(org.tensorflow.proto.DataType value) { return this; } /** + *
    +     * Data type of the tensor.
    +     * 
    + * * .tensorflow.DataType dtype = 1; * @return This builder for chaining. */ diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/TensorProtoOrBuilder.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/TensorProtoOrBuilder.java index fe901586e8c..9eafe8177e2 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/TensorProtoOrBuilder.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/TensorProtoOrBuilder.java @@ -8,11 +8,19 @@ public interface TensorProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { /** + *
    +   * Data type of the tensor.
    +   * 
    + * * .tensorflow.DataType dtype = 1; * @return The enum numeric value on the wire for dtype. */ int getDtypeValue(); /** + *
    +   * Data type of the tensor.
    +   * 
    + * * .tensorflow.DataType dtype = 1; * @return The dtype. */ diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/TestLogProtos.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/TestLogProtos.java index fb587acc9e8..f56cbf6b82b 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/TestLogProtos.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/TestLogProtos.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; @@ -108,68 +108,68 @@ public static void registerAllExtensions( descriptor; static { java.lang.String[] descriptorData = { - "\n\033tsl/protobuf/test_log.proto\022\ntensorflo" + - "w\032\031google/protobuf/any.proto\032\036google/pro" + - "tobuf/wrappers.proto\"D\n\nEntryValue\022\026\n\014do" + - "uble_value\030\001 \001(\001H\000\022\026\n\014string_value\030\002 \001(\t" + - "H\000B\006\n\004kind\"\214\001\n\013MetricEntry\022\014\n\004name\030\001 \001(\t" + - "\022\r\n\005value\030\002 \001(\001\022/\n\tmin_value\030\003 \001(\0132\034.goo" + - "gle.protobuf.DoubleValue\022/\n\tmax_value\030\004 " + - "\001(\0132\034.google.protobuf.DoubleValue\"\217\002\n\016Be" + - "nchmarkEntry\022\014\n\004name\030\001 \001(\t\022\r\n\005iters\030\002 \001(" + - "\003\022\020\n\010cpu_time\030\003 \001(\001\022\021\n\twall_time\030\004 \001(\001\022\022" + - "\n\nthroughput\030\005 \001(\001\0226\n\006extras\030\006 \003(\0132&.ten" + - "sorflow.BenchmarkEntry.ExtrasEntry\022(\n\007me" + - "trics\030\007 \003(\0132\027.tensorflow.MetricEntry\032E\n\013" + - "ExtrasEntry\022\013\n\003key\030\001 \001(\t\022%\n\005value\030\002 \001(\0132" + - "\026.tensorflow.EntryValue:\0028\001\"=\n\020Benchmark" + - "Entries\022)\n\005entry\030\001 \003(\0132\032.tensorflow.Benc" + - 
"hmarkEntry\"B\n\022BuildConfiguration\022\014\n\004mode" + - "\030\001 \001(\t\022\020\n\010cc_flags\030\002 \003(\t\022\014\n\004opts\030\003 \003(\t\"f" + - "\n\010CommitId\022\024\n\nchangelist\030\001 \001(\003H\000\022\016\n\004hash" + - "\030\002 \001(\tH\000\022\020\n\010snapshot\030\003 \001(\t\022\032\n\022pending_ch" + - "angelist\030\004 \001(\003B\006\n\004kind\"\336\001\n\007CPUInfo\022\021\n\tnu" + - "m_cores\030\001 \001(\003\022\031\n\021num_cores_allowed\030\002 \001(\003" + - "\022\023\n\013mhz_per_cpu\030\003 \001(\001\022\020\n\010cpu_info\030\004 \001(\t\022" + - "\024\n\014cpu_governor\030\005 \001(\t\0226\n\ncache_size\030\006 \003(" + - "\0132\".tensorflow.CPUInfo.CacheSizeEntry\0320\n" + - "\016CacheSizeEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 " + - "\001(\003:\0028\001\".\n\nMemoryInfo\022\r\n\005total\030\001 \001(\003\022\021\n\t" + - "available\030\002 \001(\003\"6\n\007GPUInfo\022\r\n\005model\030\001 \001(" + - "\t\022\014\n\004uuid\030\002 \001(\t\022\016\n\006bus_id\030\003 \001(\t\"p\n\014Platf" + - "ormInfo\022\014\n\004bits\030\001 \001(\t\022\017\n\007linkage\030\002 \001(\t\022\017" + - "\n\007machine\030\003 \001(\t\022\017\n\007release\030\004 \001(\t\022\016\n\006syst" + - "em\030\005 \001(\t\022\017\n\007version\030\006 \001(\t\"e\n\023AvailableDe" + - "viceInfo\022\014\n\004name\030\001 \001(\t\022\014\n\004type\030\002 \001(\t\022\024\n\014" + - "memory_limit\030\003 \001(\003\022\034\n\024physical_descripti" + - "on\030\004 \001(\t\"\263\002\n\024MachineConfiguration\022\020\n\010hos" + - "tname\030\001 \001(\t\022\031\n\021serial_identifier\030\007 \001(\t\022/" + - "\n\rplatform_info\030\002 \001(\0132\030.tensorflow.Platf" + - "ormInfo\022%\n\010cpu_info\030\003 \001(\0132\023.tensorflow.C" + - "PUInfo\022)\n\013device_info\030\004 \003(\0132\024.google.pro" + - "tobuf.Any\022>\n\025available_device_info\030\005 \003(\013" + - "2\037.tensorflow.AvailableDeviceInfo\022+\n\013mem" + - 
"ory_info\030\006 \001(\0132\026.tensorflow.MemoryInfo\"\221" + - "\001\n\020RunConfiguration\022\020\n\010argument\030\001 \003(\t\022;\n" + - "\010env_vars\030\002 \003(\0132).tensorflow.RunConfigur" + - "ation.EnvVarsEntry\032.\n\014EnvVarsEntry\022\013\n\003ke" + - "y\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\320\004\n\013TestResul" + - "ts\022\016\n\006target\030\001 \001(\t\022-\n\007entries\030\002 \001(\0132\034.te" + - "nsorflow.BenchmarkEntries\022;\n\023build_confi" + - "guration\030\003 \001(\0132\036.tensorflow.BuildConfigu" + - "ration\022\'\n\tcommit_id\030\004 \001(\0132\024.tensorflow.C" + - "ommitId\022\022\n\nstart_time\030\005 \001(\003\022\020\n\010run_time\030" + - "\006 \001(\001\022?\n\025machine_configuration\030\007 \001(\0132 .t" + - "ensorflow.MachineConfiguration\0227\n\021run_co" + - "nfiguration\030\010 \001(\0132\034.tensorflow.RunConfig" + - "uration\022\014\n\004name\030\t \001(\t\022=\n\016benchmark_type\030" + - "\n \001(\0162%.tensorflow.TestResults.Benchmark" + - "Type\022\020\n\010run_mode\030\013 \001(\t\022\022\n\ntf_version\030\014 \001" + - "(\t\"\210\001\n\rBenchmarkType\022\013\n\007UNKNOWN\020\000\022\026\n\022CPP" + - "_MICROBENCHMARK\020\001\022\024\n\020PYTHON_BENCHMARK\020\002\022" + - "\025\n\021ANDROID_BENCHMARK\020\003\022\022\n\016EDGE_BENCHMARK" + - "\020\004\022\021\n\rIOS_BENCHMARK\020\005B*\n\024org.tensorflow." 
+ - "protoB\rTestLogProtosP\001\370\001\001b\006proto3" + "\n\037xla/tsl/protobuf/test_log.proto\022\ntenso" + + "rflow\032\031google/protobuf/any.proto\032\036google" + + "/protobuf/wrappers.proto\"D\n\nEntryValue\022\026" + + "\n\014double_value\030\001 \001(\001H\000\022\026\n\014string_value\030\002" + + " \001(\tH\000B\006\n\004kind\"\214\001\n\013MetricEntry\022\014\n\004name\030\001" + + " \001(\t\022\r\n\005value\030\002 \001(\001\022/\n\tmin_value\030\003 \001(\0132\034" + + ".google.protobuf.DoubleValue\022/\n\tmax_valu" + + "e\030\004 \001(\0132\034.google.protobuf.DoubleValue\"\217\002" + + "\n\016BenchmarkEntry\022\014\n\004name\030\001 \001(\t\022\r\n\005iters\030" + + "\002 \001(\003\022\020\n\010cpu_time\030\003 \001(\001\022\021\n\twall_time\030\004 \001" + + "(\001\022\022\n\nthroughput\030\005 \001(\001\0226\n\006extras\030\006 \003(\0132&" + + ".tensorflow.BenchmarkEntry.ExtrasEntry\022(" + + "\n\007metrics\030\007 \003(\0132\027.tensorflow.MetricEntry" + + "\032E\n\013ExtrasEntry\022\013\n\003key\030\001 \001(\t\022%\n\005value\030\002 " + + "\001(\0132\026.tensorflow.EntryValue:\0028\001\"=\n\020Bench" + + "markEntries\022)\n\005entry\030\001 \003(\0132\032.tensorflow." 
+ + "BenchmarkEntry\"B\n\022BuildConfiguration\022\014\n\004" + + "mode\030\001 \001(\t\022\020\n\010cc_flags\030\002 \003(\t\022\014\n\004opts\030\003 \003" + + "(\t\"f\n\010CommitId\022\024\n\nchangelist\030\001 \001(\003H\000\022\016\n\004" + + "hash\030\002 \001(\tH\000\022\020\n\010snapshot\030\003 \001(\t\022\032\n\022pendin" + + "g_changelist\030\004 \001(\003B\006\n\004kind\"\336\001\n\007CPUInfo\022\021" + + "\n\tnum_cores\030\001 \001(\003\022\031\n\021num_cores_allowed\030\002" + + " \001(\003\022\023\n\013mhz_per_cpu\030\003 \001(\001\022\020\n\010cpu_info\030\004 " + + "\001(\t\022\024\n\014cpu_governor\030\005 \001(\t\0226\n\ncache_size\030" + + "\006 \003(\0132\".tensorflow.CPUInfo.CacheSizeEntr" + + "y\0320\n\016CacheSizeEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005valu" + + "e\030\002 \001(\003:\0028\001\".\n\nMemoryInfo\022\r\n\005total\030\001 \001(\003" + + "\022\021\n\tavailable\030\002 \001(\003\"6\n\007GPUInfo\022\r\n\005model\030" + + "\001 \001(\t\022\014\n\004uuid\030\002 \001(\t\022\016\n\006bus_id\030\003 \001(\t\"p\n\014P" + + "latformInfo\022\014\n\004bits\030\001 \001(\t\022\017\n\007linkage\030\002 \001" + + "(\t\022\017\n\007machine\030\003 \001(\t\022\017\n\007release\030\004 \001(\t\022\016\n\006" + + "system\030\005 \001(\t\022\017\n\007version\030\006 \001(\t\"e\n\023Availab" + + "leDeviceInfo\022\014\n\004name\030\001 \001(\t\022\014\n\004type\030\002 \001(\t" + + "\022\024\n\014memory_limit\030\003 \001(\003\022\034\n\024physical_descr" + + "iption\030\004 \001(\t\"\263\002\n\024MachineConfiguration\022\020\n" + + "\010hostname\030\001 \001(\t\022\031\n\021serial_identifier\030\007 \001" + + "(\t\022/\n\rplatform_info\030\002 \001(\0132\030.tensorflow.P" + + "latformInfo\022%\n\010cpu_info\030\003 \001(\0132\023.tensorfl" + + "ow.CPUInfo\022)\n\013device_info\030\004 \003(\0132\024.google" + + ".protobuf.Any\022>\n\025available_device_info\030\005" + + " \003(\0132\037.tensorflow.AvailableDeviceInfo\022+\n" + + 
"\013memory_info\030\006 \001(\0132\026.tensorflow.MemoryIn" + + "fo\"\221\001\n\020RunConfiguration\022\020\n\010argument\030\001 \003(" + + "\t\022;\n\010env_vars\030\002 \003(\0132).tensorflow.RunConf" + + "iguration.EnvVarsEntry\032.\n\014EnvVarsEntry\022\013" + + "\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\320\004\n\013TestR" + + "esults\022\016\n\006target\030\001 \001(\t\022-\n\007entries\030\002 \001(\0132" + + "\034.tensorflow.BenchmarkEntries\022;\n\023build_c" + + "onfiguration\030\003 \001(\0132\036.tensorflow.BuildCon" + + "figuration\022\'\n\tcommit_id\030\004 \001(\0132\024.tensorfl" + + "ow.CommitId\022\022\n\nstart_time\030\005 \001(\003\022\020\n\010run_t" + + "ime\030\006 \001(\001\022?\n\025machine_configuration\030\007 \001(\013" + + "2 .tensorflow.MachineConfiguration\0227\n\021ru" + + "n_configuration\030\010 \001(\0132\034.tensorflow.RunCo" + + "nfiguration\022\014\n\004name\030\t \001(\t\022=\n\016benchmark_t" + + "ype\030\n \001(\0162%.tensorflow.TestResults.Bench" + + "markType\022\020\n\010run_mode\030\013 \001(\t\022\022\n\ntf_version" + + "\030\014 \001(\t\"\210\001\n\rBenchmarkType\022\013\n\007UNKNOWN\020\000\022\026\n" + + "\022CPP_MICROBENCHMARK\020\001\022\024\n\020PYTHON_BENCHMAR" + + "K\020\002\022\025\n\021ANDROID_BENCHMARK\020\003\022\022\n\016EDGE_BENCH" + + "MARK\020\004\022\021\n\rIOS_BENCHMARK\020\005B*\n\024org.tensorf" + + "low.protoB\rTestLogProtosP\001\370\001\001b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/TestResults.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/TestResults.java index 09ed588ef20..4bc27cbdde7 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/TestResults.java +++ 
b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/TestResults.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/TestResultsOrBuilder.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/TestResultsOrBuilder.java index 3afd736b478..1d6f1545988 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/TestResultsOrBuilder.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/TestResultsOrBuilder.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: tsl/protobuf/test_log.proto +// source: xla/tsl/protobuf/test_log.proto package org.tensorflow.proto; diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/data/DatasetOptions.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/data/DatasetOptions.java index 4e244df2f12..424adecedbd 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/data/DatasetOptions.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/data/DatasetOptions.java @@ -381,6 +381,17 @@ public interface AutotuneOptionsOrBuilder extends */ org.tensorflow.proto.data.model.Model.AutotuneAlgorithm getAutotuneAlgorithm(); + /** + * int64 initial_parallelism = 5; + * @return Whether the initialParallelism field is set. + */ + boolean hasInitialParallelism(); + /** + * int64 initial_parallelism = 5; + * @return The initialParallelism. 
+ */ + long getInitialParallelism(); + public org.tensorflow.proto.data.DatasetOptions.AutotuneOptions.OptionalEnabledCase getOptionalEnabledCase(); public org.tensorflow.proto.data.DatasetOptions.AutotuneOptions.OptionalCpuBudgetCase getOptionalCpuBudgetCase(); @@ -388,10 +399,12 @@ public interface AutotuneOptionsOrBuilder extends public org.tensorflow.proto.data.DatasetOptions.AutotuneOptions.OptionalRamBudgetCase getOptionalRamBudgetCase(); public org.tensorflow.proto.data.DatasetOptions.AutotuneOptions.OptionalAutotuneAlgorithmCase getOptionalAutotuneAlgorithmCase(); + + public org.tensorflow.proto.data.DatasetOptions.AutotuneOptions.OptionalInitialParallelismCase getOptionalInitialParallelismCase(); } /** *
    -   * next: 5
    +   * next: 6
        * 
    * * Protobuf type {@code tensorflow.data.AutotuneOptions} @@ -589,6 +602,45 @@ public int getNumber() { optionalAutotuneAlgorithmCase_); } + private int optionalInitialParallelismCase_ = 0; + private java.lang.Object optionalInitialParallelism_; + public enum OptionalInitialParallelismCase + implements com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + INITIAL_PARALLELISM(5), + OPTIONALINITIALPARALLELISM_NOT_SET(0); + private final int value; + private OptionalInitialParallelismCase(int value) { + this.value = value; + } + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OptionalInitialParallelismCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalInitialParallelismCase forNumber(int value) { + switch (value) { + case 5: return INITIAL_PARALLELISM; + case 0: return OPTIONALINITIALPARALLELISM_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalInitialParallelismCase + getOptionalInitialParallelismCase() { + return OptionalInitialParallelismCase.forNumber( + optionalInitialParallelismCase_); + } + public static final int ENABLED_FIELD_NUMBER = 1; /** * bool enabled = 1; @@ -684,6 +736,27 @@ public org.tensorflow.proto.data.model.Model.AutotuneAlgorithm getAutotuneAlgori return org.tensorflow.proto.data.model.Model.AutotuneAlgorithm.DEFAULT; } + public static final int INITIAL_PARALLELISM_FIELD_NUMBER = 5; + /** + * int64 initial_parallelism = 5; + * @return Whether the initialParallelism field is set. + */ + @java.lang.Override + public boolean hasInitialParallelism() { + return optionalInitialParallelismCase_ == 5; + } + /** + * int64 initial_parallelism = 5; + * @return The initialParallelism. 
+ */ + @java.lang.Override + public long getInitialParallelism() { + if (optionalInitialParallelismCase_ == 5) { + return (java.lang.Long) optionalInitialParallelism_; + } + return 0L; + } + private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -713,6 +786,10 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (optionalAutotuneAlgorithmCase_ == 4) { output.writeEnum(4, ((java.lang.Integer) optionalAutotuneAlgorithm_)); } + if (optionalInitialParallelismCase_ == 5) { + output.writeInt64( + 5, (long)((java.lang.Long) optionalInitialParallelism_)); + } getUnknownFields().writeTo(output); } @@ -741,6 +818,11 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeEnumSize(4, ((java.lang.Integer) optionalAutotuneAlgorithm_)); } + if (optionalInitialParallelismCase_ == 5) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size( + 5, (long)((java.lang.Long) optionalInitialParallelism_)); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -792,6 +874,15 @@ public boolean equals(final java.lang.Object obj) { case 0: default: } + if (!getOptionalInitialParallelismCase().equals(other.getOptionalInitialParallelismCase())) return false; + switch (optionalInitialParallelismCase_) { + case 5: + if (getInitialParallelism() + != other.getInitialParallelism()) return false; + break; + case 0: + default: + } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @@ -837,6 +928,15 @@ public int hashCode() { case 0: default: } + switch (optionalInitialParallelismCase_) { + case 5: + hash = (37 * hash) + INITIAL_PARALLELISM_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getInitialParallelism()); + break; + case 0: + default: + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -934,7 +1034,7 @@ protected Builder 
newBuilderForType( } /** *
    -     * next: 5
    +     * next: 6
          * 
    * * Protobuf type {@code tensorflow.data.AutotuneOptions} @@ -977,6 +1077,8 @@ public Builder clear() { optionalRamBudget_ = null; optionalAutotuneAlgorithmCase_ = 0; optionalAutotuneAlgorithm_ = null; + optionalInitialParallelismCase_ = 0; + optionalInitialParallelism_ = null; return this; } @@ -1015,10 +1117,14 @@ public org.tensorflow.proto.data.DatasetOptions.AutotuneOptions buildPartial() { if (optionalAutotuneAlgorithmCase_ == 4) { result.optionalAutotuneAlgorithm_ = optionalAutotuneAlgorithm_; } + if (optionalInitialParallelismCase_ == 5) { + result.optionalInitialParallelism_ = optionalInitialParallelism_; + } result.optionalEnabledCase_ = optionalEnabledCase_; result.optionalCpuBudgetCase_ = optionalCpuBudgetCase_; result.optionalRamBudgetCase_ = optionalRamBudgetCase_; result.optionalAutotuneAlgorithmCase_ = optionalAutotuneAlgorithmCase_; + result.optionalInitialParallelismCase_ = optionalInitialParallelismCase_; onBuilt(); return result; } @@ -1103,6 +1209,15 @@ public Builder mergeFrom(org.tensorflow.proto.data.DatasetOptions.AutotuneOption break; } } + switch (other.getOptionalInitialParallelismCase()) { + case INITIAL_PARALLELISM: { + setInitialParallelism(other.getInitialParallelism()); + break; + } + case OPTIONALINITIALPARALLELISM_NOT_SET: { + break; + } + } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; @@ -1150,6 +1265,11 @@ public Builder mergeFrom( optionalAutotuneAlgorithm_ = rawValue; break; } // case 32 + case 40: { + optionalInitialParallelism_ = input.readInt64(); + optionalInitialParallelismCase_ = 5; + break; + } // case 40 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag @@ -1225,6 +1345,21 @@ public Builder clearOptionalAutotuneAlgorithm() { return this; } + private int optionalInitialParallelismCase_ = 0; + private java.lang.Object optionalInitialParallelism_; + public OptionalInitialParallelismCase + getOptionalInitialParallelismCase() { + 
return OptionalInitialParallelismCase.forNumber( + optionalInitialParallelismCase_); + } + + public Builder clearOptionalInitialParallelism() { + optionalInitialParallelismCase_ = 0; + optionalInitialParallelism_ = null; + onChanged(); + return this; + } + /** * bool enabled = 1; @@ -1419,6 +1554,47 @@ public Builder clearAutotuneAlgorithm() { } return this; } + + /** + * int64 initial_parallelism = 5; + * @return Whether the initialParallelism field is set. + */ + public boolean hasInitialParallelism() { + return optionalInitialParallelismCase_ == 5; + } + /** + * int64 initial_parallelism = 5; + * @return The initialParallelism. + */ + public long getInitialParallelism() { + if (optionalInitialParallelismCase_ == 5) { + return (java.lang.Long) optionalInitialParallelism_; + } + return 0L; + } + /** + * int64 initial_parallelism = 5; + * @param value The initialParallelism to set. + * @return This builder for chaining. + */ + public Builder setInitialParallelism(long value) { + optionalInitialParallelismCase_ = 5; + optionalInitialParallelism_ = value; + onChanged(); + return this; + } + /** + * int64 initial_parallelism = 5; + * @return This builder for chaining. + */ + public Builder clearInitialParallelism() { + if (optionalInitialParallelismCase_ == 5) { + optionalInitialParallelismCase_ = 0; + optionalInitialParallelism_ = null; + onChanged(); + } + return this; + } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { @@ -2954,6 +3130,17 @@ public interface OptimizationOptionsOrBuilder extends */ boolean getInjectPrefetch(); + /** + * bool seq_interleave_prefetch = 21; + * @return Whether the seqInterleavePrefetch field is set. + */ + boolean hasSeqInterleavePrefetch(); + /** + * bool seq_interleave_prefetch = 21; + * @return The seqInterleavePrefetch. 
+ */ + boolean getSeqInterleavePrefetch(); + public org.tensorflow.proto.data.DatasetOptions.OptimizationOptions.OptionalApplyDefaultOptimizationsCase getOptionalApplyDefaultOptimizationsCase(); public org.tensorflow.proto.data.DatasetOptions.OptimizationOptions.OptionalFilterFusionCase getOptionalFilterFusionCase(); @@ -2975,10 +3162,12 @@ public interface OptimizationOptionsOrBuilder extends public org.tensorflow.proto.data.DatasetOptions.OptimizationOptions.OptionalFilterParallelizationCase getOptionalFilterParallelizationCase(); public org.tensorflow.proto.data.DatasetOptions.OptimizationOptions.OptionalInjectPrefetchCase getOptionalInjectPrefetchCase(); + + public org.tensorflow.proto.data.DatasetOptions.OptimizationOptions.OptionalSeqInterleavePrefetchCase getOptionalSeqInterleavePrefetchCase(); } /** *
    -   * next: 21
    +   * next: 22
        * 
    * * Protobuf type {@code tensorflow.data.OptimizationOptions} @@ -3449,6 +3638,45 @@ public int getNumber() { optionalInjectPrefetchCase_); } + private int optionalSeqInterleavePrefetchCase_ = 0; + private java.lang.Object optionalSeqInterleavePrefetch_; + public enum OptionalSeqInterleavePrefetchCase + implements com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + SEQ_INTERLEAVE_PREFETCH(21), + OPTIONALSEQINTERLEAVEPREFETCH_NOT_SET(0); + private final int value; + private OptionalSeqInterleavePrefetchCase(int value) { + this.value = value; + } + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OptionalSeqInterleavePrefetchCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalSeqInterleavePrefetchCase forNumber(int value) { + switch (value) { + case 21: return SEQ_INTERLEAVE_PREFETCH; + case 0: return OPTIONALSEQINTERLEAVEPREFETCH_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalSeqInterleavePrefetchCase + getOptionalSeqInterleavePrefetchCase() { + return OptionalSeqInterleavePrefetchCase.forNumber( + optionalSeqInterleavePrefetchCase_); + } + public static final int APPLY_DEFAULT_OPTIMIZATIONS_FIELD_NUMBER = 1; /** * bool apply_default_optimizations = 1; @@ -3680,6 +3908,27 @@ public boolean getInjectPrefetch() { return false; } + public static final int SEQ_INTERLEAVE_PREFETCH_FIELD_NUMBER = 21; + /** + * bool seq_interleave_prefetch = 21; + * @return Whether the seqInterleavePrefetch field is set. + */ + @java.lang.Override + public boolean hasSeqInterleavePrefetch() { + return optionalSeqInterleavePrefetchCase_ == 21; + } + /** + * bool seq_interleave_prefetch = 21; + * @return The seqInterleavePrefetch. 
+ */ + @java.lang.Override + public boolean getSeqInterleavePrefetch() { + if (optionalSeqInterleavePrefetchCase_ == 21) { + return (java.lang.Boolean) optionalSeqInterleavePrefetch_; + } + return false; + } + private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -3738,6 +3987,10 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) output.writeBool( 19, (boolean)((java.lang.Boolean) optionalInjectPrefetch_)); } + if (optionalSeqInterleavePrefetchCase_ == 21) { + output.writeBool( + 21, (boolean)((java.lang.Boolean) optionalSeqInterleavePrefetch_)); + } getUnknownFields().writeTo(output); } @@ -3802,6 +4055,11 @@ public int getSerializedSize() { .computeBoolSize( 19, (boolean)((java.lang.Boolean) optionalInjectPrefetch_)); } + if (optionalSeqInterleavePrefetchCase_ == 21) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize( + 21, (boolean)((java.lang.Boolean) optionalSeqInterleavePrefetch_)); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -3916,6 +4174,15 @@ public boolean equals(final java.lang.Object obj) { case 0: default: } + if (!getOptionalSeqInterleavePrefetchCase().equals(other.getOptionalSeqInterleavePrefetchCase())) return false; + switch (optionalSeqInterleavePrefetchCase_) { + case 21: + if (getSeqInterleavePrefetch() + != other.getSeqInterleavePrefetch()) return false; + break; + case 0: + default: + } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @@ -4026,6 +4293,15 @@ public int hashCode() { case 0: default: } + switch (optionalSeqInterleavePrefetchCase_) { + case 21: + hash = (37 * hash) + SEQ_INTERLEAVE_PREFETCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getSeqInterleavePrefetch()); + break; + case 0: + default: + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -4123,7 +4399,7 @@ protected Builder 
newBuilderForType( } /** *
    -     * next: 21
    +     * next: 22
          * 
    * * Protobuf type {@code tensorflow.data.OptimizationOptions} @@ -4180,6 +4456,8 @@ public Builder clear() { optionalFilterParallelization_ = null; optionalInjectPrefetchCase_ = 0; optionalInjectPrefetch_ = null; + optionalSeqInterleavePrefetchCase_ = 0; + optionalSeqInterleavePrefetch_ = null; return this; } @@ -4239,6 +4517,9 @@ public org.tensorflow.proto.data.DatasetOptions.OptimizationOptions buildPartial if (optionalInjectPrefetchCase_ == 19) { result.optionalInjectPrefetch_ = optionalInjectPrefetch_; } + if (optionalSeqInterleavePrefetchCase_ == 21) { + result.optionalSeqInterleavePrefetch_ = optionalSeqInterleavePrefetch_; + } result.optionalApplyDefaultOptimizationsCase_ = optionalApplyDefaultOptimizationsCase_; result.optionalFilterFusionCase_ = optionalFilterFusionCase_; result.optionalMapAndBatchFusionCase_ = optionalMapAndBatchFusionCase_; @@ -4250,6 +4531,7 @@ public org.tensorflow.proto.data.DatasetOptions.OptimizationOptions buildPartial result.optionalShuffleAndRepeatFusionCase_ = optionalShuffleAndRepeatFusionCase_; result.optionalFilterParallelizationCase_ = optionalFilterParallelizationCase_; result.optionalInjectPrefetchCase_ = optionalInjectPrefetchCase_; + result.optionalSeqInterleavePrefetchCase_ = optionalSeqInterleavePrefetchCase_; onBuilt(); return result; } @@ -4397,6 +4679,15 @@ public Builder mergeFrom(org.tensorflow.proto.data.DatasetOptions.OptimizationOp break; } } + switch (other.getOptionalSeqInterleavePrefetchCase()) { + case SEQ_INTERLEAVE_PREFETCH: { + setSeqInterleavePrefetch(other.getSeqInterleavePrefetch()); + break; + } + case OPTIONALSEQINTERLEAVEPREFETCH_NOT_SET: { + break; + } + } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; @@ -4478,6 +4769,11 @@ public Builder mergeFrom( optionalInjectPrefetchCase_ = 19; break; } // case 152 + case 168: { + optionalSeqInterleavePrefetch_ = input.readBool(); + optionalSeqInterleavePrefetchCase_ = 21; + break; + } // case 168 default: { if 
(!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag @@ -4658,6 +4954,21 @@ public Builder clearOptionalInjectPrefetch() { return this; } + private int optionalSeqInterleavePrefetchCase_ = 0; + private java.lang.Object optionalSeqInterleavePrefetch_; + public OptionalSeqInterleavePrefetchCase + getOptionalSeqInterleavePrefetchCase() { + return OptionalSeqInterleavePrefetchCase.forNumber( + optionalSeqInterleavePrefetchCase_); + } + + public Builder clearOptionalSeqInterleavePrefetch() { + optionalSeqInterleavePrefetchCase_ = 0; + optionalSeqInterleavePrefetch_ = null; + onChanged(); + return this; + } + /** * bool apply_default_optimizations = 1; @@ -5109,6 +5420,47 @@ public Builder clearInjectPrefetch() { } return this; } + + /** + * bool seq_interleave_prefetch = 21; + * @return Whether the seqInterleavePrefetch field is set. + */ + public boolean hasSeqInterleavePrefetch() { + return optionalSeqInterleavePrefetchCase_ == 21; + } + /** + * bool seq_interleave_prefetch = 21; + * @return The seqInterleavePrefetch. + */ + public boolean getSeqInterleavePrefetch() { + if (optionalSeqInterleavePrefetchCase_ == 21) { + return (java.lang.Boolean) optionalSeqInterleavePrefetch_; + } + return false; + } + /** + * bool seq_interleave_prefetch = 21; + * @param value The seqInterleavePrefetch to set. + * @return This builder for chaining. + */ + public Builder setSeqInterleavePrefetch(boolean value) { + optionalSeqInterleavePrefetchCase_ = 21; + optionalSeqInterleavePrefetch_ = value; + onChanged(); + return this; + } + /** + * bool seq_interleave_prefetch = 21; + * @return This builder for chaining. 
+ */ + public Builder clearSeqInterleavePrefetch() { + if (optionalSeqInterleavePrefetchCase_ == 21) { + optionalSeqInterleavePrefetchCase_ = 0; + optionalSeqInterleavePrefetch_ = null; + onChanged(); + } + return this; + } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { @@ -5173,60 +5525,47 @@ public org.tensorflow.proto.data.DatasetOptions.OptimizationOptions getDefaultIn } - public interface ThreadingOptionsOrBuilder extends - // @@protoc_insertion_point(interface_extends:tensorflow.data.ThreadingOptions) + public interface ServiceOptionsOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.data.ServiceOptions) com.google.protobuf.MessageOrBuilder { /** - * int32 max_intra_op_parallelism = 1; - * @return Whether the maxIntraOpParallelism field is set. - */ - boolean hasMaxIntraOpParallelism(); - /** - * int32 max_intra_op_parallelism = 1; - * @return The maxIntraOpParallelism. - */ - int getMaxIntraOpParallelism(); - - /** - * int32 private_threadpool_size = 2; - * @return Whether the privateThreadpoolSize field is set. + * bool pinned = 1; + * @return Whether the pinned field is set. */ - boolean hasPrivateThreadpoolSize(); + boolean hasPinned(); /** - * int32 private_threadpool_size = 2; - * @return The privateThreadpoolSize. + * bool pinned = 1; + * @return The pinned. */ - int getPrivateThreadpoolSize(); - - public org.tensorflow.proto.data.DatasetOptions.ThreadingOptions.OptionalMaxIntraOpParallelismCase getOptionalMaxIntraOpParallelismCase(); + boolean getPinned(); - public org.tensorflow.proto.data.DatasetOptions.ThreadingOptions.OptionalPrivateThreadpoolSizeCase getOptionalPrivateThreadpoolSizeCase(); + public org.tensorflow.proto.data.DatasetOptions.ServiceOptions.OptionalPinnedCase getOptionalPinnedCase(); } /** *
    -   * next: 3
    +   * next: 2
        * 
    * - * Protobuf type {@code tensorflow.data.ThreadingOptions} + * Protobuf type {@code tensorflow.data.ServiceOptions} */ - public static final class ThreadingOptions extends + public static final class ServiceOptions extends com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:tensorflow.data.ThreadingOptions) - ThreadingOptionsOrBuilder { + // @@protoc_insertion_point(message_implements:tensorflow.data.ServiceOptions) + ServiceOptionsOrBuilder { private static final long serialVersionUID = 0L; - // Use ThreadingOptions.newBuilder() to construct. - private ThreadingOptions(com.google.protobuf.GeneratedMessageV3.Builder builder) { + // Use ServiceOptions.newBuilder() to construct. + private ServiceOptions(com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } - private ThreadingOptions() { + private ServiceOptions() { } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { - return new ThreadingOptions(); + return new ServiceOptions(); } @java.lang.Override @@ -5236,26 +5575,26 @@ protected java.lang.Object newInstance( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.tensorflow.proto.data.DatasetOptions.internal_static_tensorflow_data_ThreadingOptions_descriptor; + return org.tensorflow.proto.data.DatasetOptions.internal_static_tensorflow_data_ServiceOptions_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.tensorflow.proto.data.DatasetOptions.internal_static_tensorflow_data_ThreadingOptions_fieldAccessorTable + return org.tensorflow.proto.data.DatasetOptions.internal_static_tensorflow_data_ServiceOptions_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.tensorflow.proto.data.DatasetOptions.ThreadingOptions.class, 
org.tensorflow.proto.data.DatasetOptions.ThreadingOptions.Builder.class); + org.tensorflow.proto.data.DatasetOptions.ServiceOptions.class, org.tensorflow.proto.data.DatasetOptions.ServiceOptions.Builder.class); } - private int optionalMaxIntraOpParallelismCase_ = 0; - private java.lang.Object optionalMaxIntraOpParallelism_; - public enum OptionalMaxIntraOpParallelismCase + private int optionalPinnedCase_ = 0; + private java.lang.Object optionalPinned_; + public enum OptionalPinnedCase implements com.google.protobuf.Internal.EnumLite, com.google.protobuf.AbstractMessage.InternalOneOfEnum { - MAX_INTRA_OP_PARALLELISM(1), - OPTIONALMAXINTRAOPPARALLELISM_NOT_SET(0); + PINNED(1), + OPTIONALPINNED_NOT_SET(0); private final int value; - private OptionalMaxIntraOpParallelismCase(int value) { + private OptionalPinnedCase(int value) { this.value = value; } /** @@ -5264,14 +5603,14 @@ private OptionalMaxIntraOpParallelismCase(int value) { * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated - public static OptionalMaxIntraOpParallelismCase valueOf(int value) { + public static OptionalPinnedCase valueOf(int value) { return forNumber(value); } - public static OptionalMaxIntraOpParallelismCase forNumber(int value) { + public static OptionalPinnedCase forNumber(int value) { switch (value) { - case 1: return MAX_INTRA_OP_PARALLELISM; - case 0: return OPTIONALMAXINTRAOPPARALLELISM_NOT_SET; + case 1: return PINNED; + case 0: return OPTIONALPINNED_NOT_SET; default: return null; } } @@ -5280,31 +5619,628 @@ public int getNumber() { } }; - public OptionalMaxIntraOpParallelismCase - getOptionalMaxIntraOpParallelismCase() { - return OptionalMaxIntraOpParallelismCase.forNumber( - optionalMaxIntraOpParallelismCase_); + public OptionalPinnedCase + getOptionalPinnedCase() { + return OptionalPinnedCase.forNumber( + optionalPinnedCase_); } - private int optionalPrivateThreadpoolSizeCase_ = 0; - private java.lang.Object optionalPrivateThreadpoolSize_; - public enum 
OptionalPrivateThreadpoolSizeCase - implements com.google.protobuf.Internal.EnumLite, - com.google.protobuf.AbstractMessage.InternalOneOfEnum { - PRIVATE_THREADPOOL_SIZE(2), - OPTIONALPRIVATETHREADPOOLSIZE_NOT_SET(0); - private final int value; - private OptionalPrivateThreadpoolSizeCase(int value) { - this.value = value; + public static final int PINNED_FIELD_NUMBER = 1; + /** + * bool pinned = 1; + * @return Whether the pinned field is set. + */ + @java.lang.Override + public boolean hasPinned() { + return optionalPinnedCase_ == 1; + } + /** + * bool pinned = 1; + * @return The pinned. + */ + @java.lang.Override + public boolean getPinned() { + if (optionalPinnedCase_ == 1) { + return (java.lang.Boolean) optionalPinned_; } - /** - * @param value The number of the enum to look for. - * @return The enum associated with the given number. - * @deprecated Use {@link #forNumber(int)} instead. - */ - @java.lang.Deprecated - public static OptionalPrivateThreadpoolSizeCase valueOf(int value) { - return forNumber(value); + return false; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (optionalPinnedCase_ == 1) { + output.writeBool( + 1, (boolean)((java.lang.Boolean) optionalPinned_)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (optionalPinnedCase_ == 1) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize( + 1, (boolean)((java.lang.Boolean) optionalPinned_)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + 
@java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.data.DatasetOptions.ServiceOptions)) { + return super.equals(obj); + } + org.tensorflow.proto.data.DatasetOptions.ServiceOptions other = (org.tensorflow.proto.data.DatasetOptions.ServiceOptions) obj; + + if (!getOptionalPinnedCase().equals(other.getOptionalPinnedCase())) return false; + switch (optionalPinnedCase_) { + case 1: + if (getPinned() + != other.getPinned()) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + switch (optionalPinnedCase_) { + case 1: + hash = (37 * hash) + PINNED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getPinned()); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.data.DatasetOptions.ServiceOptions parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.DatasetOptions.ServiceOptions parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.DatasetOptions.ServiceOptions parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.DatasetOptions.ServiceOptions parseFrom( + com.google.protobuf.ByteString data, 
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.DatasetOptions.ServiceOptions parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.DatasetOptions.ServiceOptions parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.DatasetOptions.ServiceOptions parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.DatasetOptions.ServiceOptions parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.DatasetOptions.ServiceOptions parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.DatasetOptions.ServiceOptions parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.DatasetOptions.ServiceOptions parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + 
.parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.DatasetOptions.ServiceOptions parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.data.DatasetOptions.ServiceOptions prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
    +     * next: 2
    +     * 
    + * + * Protobuf type {@code tensorflow.data.ServiceOptions} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.data.ServiceOptions) + org.tensorflow.proto.data.DatasetOptions.ServiceOptionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.DatasetOptions.internal_static_tensorflow_data_ServiceOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.DatasetOptions.internal_static_tensorflow_data_ServiceOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.DatasetOptions.ServiceOptions.class, org.tensorflow.proto.data.DatasetOptions.ServiceOptions.Builder.class); + } + + // Construct using org.tensorflow.proto.data.DatasetOptions.ServiceOptions.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + optionalPinnedCase_ = 0; + optionalPinned_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.data.DatasetOptions.internal_static_tensorflow_data_ServiceOptions_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.data.DatasetOptions.ServiceOptions getDefaultInstanceForType() { + return org.tensorflow.proto.data.DatasetOptions.ServiceOptions.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.data.DatasetOptions.ServiceOptions build() { + org.tensorflow.proto.data.DatasetOptions.ServiceOptions result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.data.DatasetOptions.ServiceOptions buildPartial() { + org.tensorflow.proto.data.DatasetOptions.ServiceOptions result = new org.tensorflow.proto.data.DatasetOptions.ServiceOptions(this); + if (optionalPinnedCase_ == 1) { + result.optionalPinned_ = optionalPinned_; + } + result.optionalPinnedCase_ = optionalPinnedCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.data.DatasetOptions.ServiceOptions) { + return mergeFrom((org.tensorflow.proto.data.DatasetOptions.ServiceOptions)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.data.DatasetOptions.ServiceOptions other) { + if (other == org.tensorflow.proto.data.DatasetOptions.ServiceOptions.getDefaultInstance()) return this; + switch (other.getOptionalPinnedCase()) { + case 
PINNED: { + setPinned(other.getPinned()); + break; + } + case OPTIONALPINNED_NOT_SET: { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + optionalPinned_ = input.readBool(); + optionalPinnedCase_ = 1; + break; + } // case 8 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int optionalPinnedCase_ = 0; + private java.lang.Object optionalPinned_; + public OptionalPinnedCase + getOptionalPinnedCase() { + return OptionalPinnedCase.forNumber( + optionalPinnedCase_); + } + + public Builder clearOptionalPinned() { + optionalPinnedCase_ = 0; + optionalPinned_ = null; + onChanged(); + return this; + } + + + /** + * bool pinned = 1; + * @return Whether the pinned field is set. + */ + public boolean hasPinned() { + return optionalPinnedCase_ == 1; + } + /** + * bool pinned = 1; + * @return The pinned. + */ + public boolean getPinned() { + if (optionalPinnedCase_ == 1) { + return (java.lang.Boolean) optionalPinned_; + } + return false; + } + /** + * bool pinned = 1; + * @param value The pinned to set. + * @return This builder for chaining. 
+ */ + public Builder setPinned(boolean value) { + optionalPinnedCase_ = 1; + optionalPinned_ = value; + onChanged(); + return this; + } + /** + * bool pinned = 1; + * @return This builder for chaining. + */ + public Builder clearPinned() { + if (optionalPinnedCase_ == 1) { + optionalPinnedCase_ = 0; + optionalPinned_ = null; + onChanged(); + } + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.data.ServiceOptions) + } + + // @@protoc_insertion_point(class_scope:tensorflow.data.ServiceOptions) + private static final org.tensorflow.proto.data.DatasetOptions.ServiceOptions DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.data.DatasetOptions.ServiceOptions(); + } + + public static org.tensorflow.proto.data.DatasetOptions.ServiceOptions getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ServiceOptions parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.data.DatasetOptions.ServiceOptions getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface ThreadingOptionsOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.data.ThreadingOptions) + com.google.protobuf.MessageOrBuilder { + + /** + * int32 max_intra_op_parallelism = 1; + * @return Whether the maxIntraOpParallelism field is set. + */ + boolean hasMaxIntraOpParallelism(); + /** + * int32 max_intra_op_parallelism = 1; + * @return The maxIntraOpParallelism. + */ + int getMaxIntraOpParallelism(); + + /** + * int32 private_threadpool_size = 2; + * @return Whether the privateThreadpoolSize field is set. + */ + boolean hasPrivateThreadpoolSize(); + /** + * int32 private_threadpool_size = 2; + * @return The privateThreadpoolSize. + */ + int getPrivateThreadpoolSize(); + + public org.tensorflow.proto.data.DatasetOptions.ThreadingOptions.OptionalMaxIntraOpParallelismCase getOptionalMaxIntraOpParallelismCase(); + + public org.tensorflow.proto.data.DatasetOptions.ThreadingOptions.OptionalPrivateThreadpoolSizeCase getOptionalPrivateThreadpoolSizeCase(); + } + /** + *
    +   * next: 3
    +   * 
    + * + * Protobuf type {@code tensorflow.data.ThreadingOptions} + */ + public static final class ThreadingOptions extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.data.ThreadingOptions) + ThreadingOptionsOrBuilder { + private static final long serialVersionUID = 0L; + // Use ThreadingOptions.newBuilder() to construct. + private ThreadingOptions(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ThreadingOptions() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new ThreadingOptions(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.DatasetOptions.internal_static_tensorflow_data_ThreadingOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.DatasetOptions.internal_static_tensorflow_data_ThreadingOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.DatasetOptions.ThreadingOptions.class, org.tensorflow.proto.data.DatasetOptions.ThreadingOptions.Builder.class); + } + + private int optionalMaxIntraOpParallelismCase_ = 0; + private java.lang.Object optionalMaxIntraOpParallelism_; + public enum OptionalMaxIntraOpParallelismCase + implements com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + MAX_INTRA_OP_PARALLELISM(1), + OPTIONALMAXINTRAOPPARALLELISM_NOT_SET(0); + private final int value; + private OptionalMaxIntraOpParallelismCase(int value) { + this.value = value; + } + /** + * @param value The number of the enum to look for. 
+ * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OptionalMaxIntraOpParallelismCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalMaxIntraOpParallelismCase forNumber(int value) { + switch (value) { + case 1: return MAX_INTRA_OP_PARALLELISM; + case 0: return OPTIONALMAXINTRAOPPARALLELISM_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalMaxIntraOpParallelismCase + getOptionalMaxIntraOpParallelismCase() { + return OptionalMaxIntraOpParallelismCase.forNumber( + optionalMaxIntraOpParallelismCase_); + } + + private int optionalPrivateThreadpoolSizeCase_ = 0; + private java.lang.Object optionalPrivateThreadpoolSize_; + public enum OptionalPrivateThreadpoolSizeCase + implements com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + PRIVATE_THREADPOOL_SIZE(2), + OPTIONALPRIVATETHREADPOOLSIZE_NOT_SET(0); + private final int value; + private OptionalPrivateThreadpoolSizeCase(int value) { + this.value = value; + } + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OptionalPrivateThreadpoolSizeCase valueOf(int value) { + return forNumber(value); } public static OptionalPrivateThreadpoolSizeCase forNumber(int value) { @@ -5935,6 +6871,64 @@ public interface OptionsOrBuilder extends // @@protoc_insertion_point(interface_extends:tensorflow.data.Options) com.google.protobuf.MessageOrBuilder { + /** + * string dataset_name = 10; + * @return Whether the datasetName field is set. + */ + boolean hasDatasetName(); + /** + * string dataset_name = 10; + * @return The datasetName. 
+ */ + java.lang.String getDatasetName(); + /** + * string dataset_name = 10; + * @return The bytes for datasetName. + */ + com.google.protobuf.ByteString + getDatasetNameBytes(); + + /** + *
    +     * List of frameworks used to generate this dataset.
    +     * 
    + * + * repeated string framework_type = 11; + * @return A list containing the frameworkType. + */ + java.util.List + getFrameworkTypeList(); + /** + *
    +     * List of frameworks used to generate this dataset.
    +     * 
    + * + * repeated string framework_type = 11; + * @return The count of frameworkType. + */ + int getFrameworkTypeCount(); + /** + *
    +     * List of frameworks used to generate this dataset.
    +     * 
    + * + * repeated string framework_type = 11; + * @param index The index of the element to return. + * @return The frameworkType at the given index. + */ + java.lang.String getFrameworkType(int index); + /** + *
    +     * List of frameworks used to generate this dataset.
    +     * 
    + * + * repeated string framework_type = 11; + * @param index The index of the value to return. + * @return The bytes of the frameworkType at the given index. + */ + com.google.protobuf.ByteString + getFrameworkTypeBytes(int index); + /** * bool deterministic = 1; * @return Whether the deterministic field is set. @@ -6027,6 +7021,33 @@ public interface OptionsOrBuilder extends */ org.tensorflow.proto.data.DatasetOptions.OptimizationOptionsOrBuilder getOptimizationOptionsOrBuilder(); + /** + *
    +     * The tf.data service options associated with the dataset.
    +     * 
    + * + * .tensorflow.data.ServiceOptions service_options = 12; + * @return Whether the serviceOptions field is set. + */ + boolean hasServiceOptions(); + /** + *
    +     * The tf.data service options associated with the dataset.
    +     * 
    + * + * .tensorflow.data.ServiceOptions service_options = 12; + * @return The serviceOptions. + */ + org.tensorflow.proto.data.DatasetOptions.ServiceOptions getServiceOptions(); + /** + *
    +     * The tf.data service options associated with the dataset.
    +     * 
    + * + * .tensorflow.data.ServiceOptions service_options = 12; + */ + org.tensorflow.proto.data.DatasetOptions.ServiceOptionsOrBuilder getServiceOptionsOrBuilder(); + /** * bool slack = 4; * @return Whether the slack field is set. @@ -6103,6 +7124,8 @@ public interface OptionsOrBuilder extends */ boolean getWarmStart(); + public org.tensorflow.proto.data.DatasetOptions.Options.OptionalDatasetNameCase getOptionalDatasetNameCase(); + public org.tensorflow.proto.data.DatasetOptions.Options.OptionalDeterministicCase getOptionalDeterministicCase(); public org.tensorflow.proto.data.DatasetOptions.Options.OptionalSlackCase getOptionalSlackCase(); @@ -6117,7 +7140,7 @@ public interface OptionsOrBuilder extends *
        * Message stored with Dataset objects to control how datasets are processed and
        * optimized.
    -   * next: 10
    +   * next: 13
        * 
    * * Protobuf type {@code tensorflow.data.Options} @@ -6132,6 +7155,7 @@ private Options(com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private Options() { + frameworkType_ = com.google.protobuf.LazyStringArrayList.EMPTY; } @java.lang.Override @@ -6159,6 +7183,45 @@ protected java.lang.Object newInstance( org.tensorflow.proto.data.DatasetOptions.Options.class, org.tensorflow.proto.data.DatasetOptions.Options.Builder.class); } + private int optionalDatasetNameCase_ = 0; + private java.lang.Object optionalDatasetName_; + public enum OptionalDatasetNameCase + implements com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + DATASET_NAME(10), + OPTIONALDATASETNAME_NOT_SET(0); + private final int value; + private OptionalDatasetNameCase(int value) { + this.value = value; + } + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OptionalDatasetNameCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalDatasetNameCase forNumber(int value) { + switch (value) { + case 10: return DATASET_NAME; + case 0: return OPTIONALDATASETNAME_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalDatasetNameCase + getOptionalDatasetNameCase() { + return OptionalDatasetNameCase.forNumber( + optionalDatasetNameCase_); + } + private int optionalDeterministicCase_ = 0; private java.lang.Object optionalDeterministic_; public enum OptionalDeterministicCase @@ -6354,6 +7417,109 @@ public int getNumber() { optionalWarmStartCase_); } + public static final int DATASET_NAME_FIELD_NUMBER = 10; + /** + * string dataset_name = 10; + * @return Whether the datasetName field is set. 
+ */ + public boolean hasDatasetName() { + return optionalDatasetNameCase_ == 10; + } + /** + * string dataset_name = 10; + * @return The datasetName. + */ + public java.lang.String getDatasetName() { + java.lang.Object ref = ""; + if (optionalDatasetNameCase_ == 10) { + ref = optionalDatasetName_; + } + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (optionalDatasetNameCase_ == 10) { + optionalDatasetName_ = s; + } + return s; + } + } + /** + * string dataset_name = 10; + * @return The bytes for datasetName. + */ + public com.google.protobuf.ByteString + getDatasetNameBytes() { + java.lang.Object ref = ""; + if (optionalDatasetNameCase_ == 10) { + ref = optionalDatasetName_; + } + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + if (optionalDatasetNameCase_ == 10) { + optionalDatasetName_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int FRAMEWORK_TYPE_FIELD_NUMBER = 11; + private com.google.protobuf.LazyStringList frameworkType_; + /** + *
    +     * List of frameworks used to generate this dataset.
    +     * 
    + * + * repeated string framework_type = 11; + * @return A list containing the frameworkType. + */ + public com.google.protobuf.ProtocolStringList + getFrameworkTypeList() { + return frameworkType_; + } + /** + *
    +     * List of frameworks used to generate this dataset.
    +     * 
    + * + * repeated string framework_type = 11; + * @return The count of frameworkType. + */ + public int getFrameworkTypeCount() { + return frameworkType_.size(); + } + /** + *
    +     * List of frameworks used to generate this dataset.
    +     * 
    + * + * repeated string framework_type = 11; + * @param index The index of the element to return. + * @return The frameworkType at the given index. + */ + public java.lang.String getFrameworkType(int index) { + return frameworkType_.get(index); + } + /** + *
    +     * List of frameworks used to generate this dataset.
    +     * 
    + * + * repeated string framework_type = 11; + * @param index The index of the value to return. + * @return The bytes of the frameworkType at the given index. + */ + public com.google.protobuf.ByteString + getFrameworkTypeBytes(int index) { + return frameworkType_.getByteString(index); + } + public static final int DETERMINISTIC_FIELD_NUMBER = 1; /** * bool deterministic = 1; @@ -6489,6 +7655,44 @@ public org.tensorflow.proto.data.DatasetOptions.OptimizationOptionsOrBuilder get return getOptimizationOptions(); } + public static final int SERVICE_OPTIONS_FIELD_NUMBER = 12; + private org.tensorflow.proto.data.DatasetOptions.ServiceOptions serviceOptions_; + /** + *
    +     * The tf.data service options associated with the dataset.
    +     * 
    + * + * .tensorflow.data.ServiceOptions service_options = 12; + * @return Whether the serviceOptions field is set. + */ + @java.lang.Override + public boolean hasServiceOptions() { + return serviceOptions_ != null; + } + /** + *
    +     * The tf.data service options associated with the dataset.
    +     * 
    + * + * .tensorflow.data.ServiceOptions service_options = 12; + * @return The serviceOptions. + */ + @java.lang.Override + public org.tensorflow.proto.data.DatasetOptions.ServiceOptions getServiceOptions() { + return serviceOptions_ == null ? org.tensorflow.proto.data.DatasetOptions.ServiceOptions.getDefaultInstance() : serviceOptions_; + } + /** + *
    +     * The tf.data service options associated with the dataset.
    +     * 
    + * + * .tensorflow.data.ServiceOptions service_options = 12; + */ + @java.lang.Override + public org.tensorflow.proto.data.DatasetOptions.ServiceOptionsOrBuilder getServiceOptionsOrBuilder() { + return getServiceOptions(); + } + public static final int SLACK_FIELD_NUMBER = 4; /** * bool slack = 4; @@ -6667,6 +7871,15 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) output.writeBool( 9, (boolean)((java.lang.Boolean) optionalWarmStart_)); } + if (optionalDatasetNameCase_ == 10) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 10, optionalDatasetName_); + } + for (int i = 0; i < frameworkType_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 11, frameworkType_.getRaw(i)); + } + if (serviceOptions_ != null) { + output.writeMessage(12, getServiceOptions()); + } getUnknownFields().writeTo(output); } @@ -6716,6 +7929,21 @@ public int getSerializedSize() { .computeBoolSize( 9, (boolean)((java.lang.Boolean) optionalWarmStart_)); } + if (optionalDatasetNameCase_ == 10) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(10, optionalDatasetName_); + } + { + int dataSize = 0; + for (int i = 0; i < frameworkType_.size(); i++) { + dataSize += computeStringSizeNoTag(frameworkType_.getRaw(i)); + } + size += dataSize; + size += 1 * getFrameworkTypeList().size(); + } + if (serviceOptions_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(12, getServiceOptions()); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -6731,6 +7959,8 @@ public boolean equals(final java.lang.Object obj) { } org.tensorflow.proto.data.DatasetOptions.Options other = (org.tensorflow.proto.data.DatasetOptions.Options) obj; + if (!getFrameworkTypeList() + .equals(other.getFrameworkTypeList())) return false; if (hasAutotuneOptions() != other.hasAutotuneOptions()) return false; if (hasAutotuneOptions()) { if (!getAutotuneOptions() @@ -6746,11 +7976,25 @@ public 
boolean equals(final java.lang.Object obj) { if (!getOptimizationOptions() .equals(other.getOptimizationOptions())) return false; } + if (hasServiceOptions() != other.hasServiceOptions()) return false; + if (hasServiceOptions()) { + if (!getServiceOptions() + .equals(other.getServiceOptions())) return false; + } if (hasThreadingOptions() != other.hasThreadingOptions()) return false; if (hasThreadingOptions()) { if (!getThreadingOptions() .equals(other.getThreadingOptions())) return false; } + if (!getOptionalDatasetNameCase().equals(other.getOptionalDatasetNameCase())) return false; + switch (optionalDatasetNameCase_) { + case 10: + if (!getDatasetName() + .equals(other.getDatasetName())) return false; + break; + case 0: + default: + } if (!getOptionalDeterministicCase().equals(other.getOptionalDeterministicCase())) return false; switch (optionalDeterministicCase_) { case 1: @@ -6807,6 +8051,10 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); + if (getFrameworkTypeCount() > 0) { + hash = (37 * hash) + FRAMEWORK_TYPE_FIELD_NUMBER; + hash = (53 * hash) + getFrameworkTypeList().hashCode(); + } if (hasAutotuneOptions()) { hash = (37 * hash) + AUTOTUNE_OPTIONS_FIELD_NUMBER; hash = (53 * hash) + getAutotuneOptions().hashCode(); @@ -6819,10 +8067,22 @@ public int hashCode() { hash = (37 * hash) + OPTIMIZATION_OPTIONS_FIELD_NUMBER; hash = (53 * hash) + getOptimizationOptions().hashCode(); } + if (hasServiceOptions()) { + hash = (37 * hash) + SERVICE_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getServiceOptions().hashCode(); + } if (hasThreadingOptions()) { hash = (37 * hash) + THREADING_OPTIONS_FIELD_NUMBER; hash = (53 * hash) + getThreadingOptions().hashCode(); } + switch (optionalDatasetNameCase_) { + case 10: + hash = (37 * hash) + DATASET_NAME_FIELD_NUMBER; + hash = (53 * hash) + getDatasetName().hashCode(); + break; + case 0: + default: + } switch (optionalDeterministicCase_) { case 1: hash = (37 * hash) + 
DETERMINISTIC_FIELD_NUMBER; @@ -6966,7 +8226,7 @@ protected Builder newBuilderForType( *
          * Message stored with Dataset objects to control how datasets are processed and
          * optimized.
    -     * next: 10
    +     * next: 13
          * 
    * * Protobuf type {@code tensorflow.data.Options} @@ -7001,6 +8261,8 @@ private Builder( @java.lang.Override public Builder clear() { super.clear(); + frameworkType_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); if (autotuneOptionsBuilder_ == null) { autotuneOptions_ = null; } else { @@ -7019,12 +8281,20 @@ public Builder clear() { optimizationOptions_ = null; optimizationOptionsBuilder_ = null; } + if (serviceOptionsBuilder_ == null) { + serviceOptions_ = null; + } else { + serviceOptions_ = null; + serviceOptionsBuilder_ = null; + } if (threadingOptionsBuilder_ == null) { threadingOptions_ = null; } else { threadingOptions_ = null; threadingOptionsBuilder_ = null; } + optionalDatasetNameCase_ = 0; + optionalDatasetName_ = null; optionalDeterministicCase_ = 0; optionalDeterministic_ = null; optionalSlackCase_ = 0; @@ -7061,6 +8331,15 @@ public org.tensorflow.proto.data.DatasetOptions.Options build() { @java.lang.Override public org.tensorflow.proto.data.DatasetOptions.Options buildPartial() { org.tensorflow.proto.data.DatasetOptions.Options result = new org.tensorflow.proto.data.DatasetOptions.Options(this); + int from_bitField0_ = bitField0_; + if (optionalDatasetNameCase_ == 10) { + result.optionalDatasetName_ = optionalDatasetName_; + } + if (((bitField0_ & 0x00000001) != 0)) { + frameworkType_ = frameworkType_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.frameworkType_ = frameworkType_; if (optionalDeterministicCase_ == 1) { result.optionalDeterministic_ = optionalDeterministic_; } @@ -7079,6 +8358,11 @@ public org.tensorflow.proto.data.DatasetOptions.Options buildPartial() { } else { result.optimizationOptions_ = optimizationOptionsBuilder_.build(); } + if (serviceOptionsBuilder_ == null) { + result.serviceOptions_ = serviceOptions_; + } else { + result.serviceOptions_ = serviceOptionsBuilder_.build(); + } if (optionalSlackCase_ == 4) { result.optionalSlack_ = optionalSlack_; 
} @@ -7096,6 +8380,7 @@ public org.tensorflow.proto.data.DatasetOptions.Options buildPartial() { if (optionalWarmStartCase_ == 9) { result.optionalWarmStart_ = optionalWarmStart_; } + result.optionalDatasetNameCase_ = optionalDatasetNameCase_; result.optionalDeterministicCase_ = optionalDeterministicCase_; result.optionalSlackCase_ = optionalSlackCase_; result.optionalExternalStatePolicyCase_ = optionalExternalStatePolicyCase_; @@ -7149,6 +8434,16 @@ public Builder mergeFrom(com.google.protobuf.Message other) { public Builder mergeFrom(org.tensorflow.proto.data.DatasetOptions.Options other) { if (other == org.tensorflow.proto.data.DatasetOptions.Options.getDefaultInstance()) return this; + if (!other.frameworkType_.isEmpty()) { + if (frameworkType_.isEmpty()) { + frameworkType_ = other.frameworkType_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureFrameworkTypeIsMutable(); + frameworkType_.addAll(other.frameworkType_); + } + onChanged(); + } if (other.hasAutotuneOptions()) { mergeAutotuneOptions(other.getAutotuneOptions()); } @@ -7158,9 +8453,23 @@ public Builder mergeFrom(org.tensorflow.proto.data.DatasetOptions.Options other) if (other.hasOptimizationOptions()) { mergeOptimizationOptions(other.getOptimizationOptions()); } + if (other.hasServiceOptions()) { + mergeServiceOptions(other.getServiceOptions()); + } if (other.hasThreadingOptions()) { mergeThreadingOptions(other.getThreadingOptions()); } + switch (other.getOptionalDatasetNameCase()) { + case DATASET_NAME: { + optionalDatasetNameCase_ = 10; + optionalDatasetName_ = other.optionalDatasetName_; + onChanged(); + break; + } + case OPTIONALDATASETNAME_NOT_SET: { + break; + } + } switch (other.getOptionalDeterministicCase()) { case DETERMINISTIC: { setDeterministic(other.getDeterministic()); @@ -7286,6 +8595,25 @@ public Builder mergeFrom( optionalWarmStartCase_ = 9; break; } // case 72 + case 82: { + java.lang.String s = input.readStringRequireUtf8(); + optionalDatasetNameCase_ = 10; + 
optionalDatasetName_ = s; + break; + } // case 82 + case 90: { + java.lang.String s = input.readStringRequireUtf8(); + ensureFrameworkTypeIsMutable(); + frameworkType_.add(s); + break; + } // case 90 + case 98: { + input.readMessage( + getServiceOptionsFieldBuilder().getBuilder(), + extensionRegistry); + + break; + } // case 98 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag @@ -7301,6 +8629,21 @@ public Builder mergeFrom( } // finally return this; } + private int optionalDatasetNameCase_ = 0; + private java.lang.Object optionalDatasetName_; + public OptionalDatasetNameCase + getOptionalDatasetNameCase() { + return OptionalDatasetNameCase.forNumber( + optionalDatasetNameCase_); + } + + public Builder clearOptionalDatasetName() { + optionalDatasetNameCase_ = 0; + optionalDatasetName_ = null; + onChanged(); + return this; + } + private int optionalDeterministicCase_ = 0; private java.lang.Object optionalDeterministic_; public OptionalDeterministicCase @@ -7376,6 +8719,250 @@ public Builder clearOptionalWarmStart() { return this; } + private int bitField0_; + + /** + * string dataset_name = 10; + * @return Whether the datasetName field is set. + */ + @java.lang.Override + public boolean hasDatasetName() { + return optionalDatasetNameCase_ == 10; + } + /** + * string dataset_name = 10; + * @return The datasetName. + */ + @java.lang.Override + public java.lang.String getDatasetName() { + java.lang.Object ref = ""; + if (optionalDatasetNameCase_ == 10) { + ref = optionalDatasetName_; + } + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (optionalDatasetNameCase_ == 10) { + optionalDatasetName_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * string dataset_name = 10; + * @return The bytes for datasetName. 
+ */ + @java.lang.Override + public com.google.protobuf.ByteString + getDatasetNameBytes() { + java.lang.Object ref = ""; + if (optionalDatasetNameCase_ == 10) { + ref = optionalDatasetName_; + } + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + if (optionalDatasetNameCase_ == 10) { + optionalDatasetName_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * string dataset_name = 10; + * @param value The datasetName to set. + * @return This builder for chaining. + */ + public Builder setDatasetName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + optionalDatasetNameCase_ = 10; + optionalDatasetName_ = value; + onChanged(); + return this; + } + /** + * string dataset_name = 10; + * @return This builder for chaining. + */ + public Builder clearDatasetName() { + if (optionalDatasetNameCase_ == 10) { + optionalDatasetNameCase_ = 0; + optionalDatasetName_ = null; + onChanged(); + } + return this; + } + /** + * string dataset_name = 10; + * @param value The bytes for datasetName to set. + * @return This builder for chaining. + */ + public Builder setDatasetNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + optionalDatasetNameCase_ = 10; + optionalDatasetName_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList frameworkType_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureFrameworkTypeIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + frameworkType_ = new com.google.protobuf.LazyStringArrayList(frameworkType_); + bitField0_ |= 0x00000001; + } + } + /** + *
    +       * List of frameworks used to generate this dataset.
    +       * 
    + * + * repeated string framework_type = 11; + * @return A list containing the frameworkType. + */ + public com.google.protobuf.ProtocolStringList + getFrameworkTypeList() { + return frameworkType_.getUnmodifiableView(); + } + /** + *
    +       * List of frameworks used to generate this dataset.
    +       * 
    + * + * repeated string framework_type = 11; + * @return The count of frameworkType. + */ + public int getFrameworkTypeCount() { + return frameworkType_.size(); + } + /** + *
    +       * List of frameworks used to generate this dataset.
    +       * 
    + * + * repeated string framework_type = 11; + * @param index The index of the element to return. + * @return The frameworkType at the given index. + */ + public java.lang.String getFrameworkType(int index) { + return frameworkType_.get(index); + } + /** + *
    +       * List of frameworks used to generate this dataset.
    +       * 
    + * + * repeated string framework_type = 11; + * @param index The index of the value to return. + * @return The bytes of the frameworkType at the given index. + */ + public com.google.protobuf.ByteString + getFrameworkTypeBytes(int index) { + return frameworkType_.getByteString(index); + } + /** + *
    +       * List of frameworks used to generate this dataset.
    +       * 
    + * + * repeated string framework_type = 11; + * @param index The index to set the value at. + * @param value The frameworkType to set. + * @return This builder for chaining. + */ + public Builder setFrameworkType( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureFrameworkTypeIsMutable(); + frameworkType_.set(index, value); + onChanged(); + return this; + } + /** + *
    +       * List of frameworks used to generate this dataset.
    +       * 
    + * + * repeated string framework_type = 11; + * @param value The frameworkType to add. + * @return This builder for chaining. + */ + public Builder addFrameworkType( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureFrameworkTypeIsMutable(); + frameworkType_.add(value); + onChanged(); + return this; + } + /** + *
    +       * List of frameworks used to generate this dataset.
    +       * 
    + * + * repeated string framework_type = 11; + * @param values The frameworkType to add. + * @return This builder for chaining. + */ + public Builder addAllFrameworkType( + java.lang.Iterable values) { + ensureFrameworkTypeIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, frameworkType_); + onChanged(); + return this; + } + /** + *
    +       * List of frameworks used to generate this dataset.
    +       * 
    + * + * repeated string framework_type = 11; + * @return This builder for chaining. + */ + public Builder clearFrameworkType() { + frameworkType_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + *
    +       * List of frameworks used to generate this dataset.
    +       * 
    + * + * repeated string framework_type = 11; + * @param value The bytes of the frameworkType to add. + * @return This builder for chaining. + */ + public Builder addFrameworkTypeBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureFrameworkTypeIsMutable(); + frameworkType_.add(value); + onChanged(); + return this; + } /** * bool deterministic = 1; @@ -7883,6 +9470,161 @@ public org.tensorflow.proto.data.DatasetOptions.OptimizationOptionsOrBuilder get return optimizationOptionsBuilder_; } + private org.tensorflow.proto.data.DatasetOptions.ServiceOptions serviceOptions_; + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.data.DatasetOptions.ServiceOptions, org.tensorflow.proto.data.DatasetOptions.ServiceOptions.Builder, org.tensorflow.proto.data.DatasetOptions.ServiceOptionsOrBuilder> serviceOptionsBuilder_; + /** + *
    +       * The tf.data service options associated with the dataset.
    +       * 
    + * + * .tensorflow.data.ServiceOptions service_options = 12; + * @return Whether the serviceOptions field is set. + */ + public boolean hasServiceOptions() { + return serviceOptionsBuilder_ != null || serviceOptions_ != null; + } + /** + *
    +       * The tf.data service options associated with the dataset.
    +       * 
    + * + * .tensorflow.data.ServiceOptions service_options = 12; + * @return The serviceOptions. + */ + public org.tensorflow.proto.data.DatasetOptions.ServiceOptions getServiceOptions() { + if (serviceOptionsBuilder_ == null) { + return serviceOptions_ == null ? org.tensorflow.proto.data.DatasetOptions.ServiceOptions.getDefaultInstance() : serviceOptions_; + } else { + return serviceOptionsBuilder_.getMessage(); + } + } + /** + *
    +       * The tf.data service options associated with the dataset.
    +       * 
    + * + * .tensorflow.data.ServiceOptions service_options = 12; + */ + public Builder setServiceOptions(org.tensorflow.proto.data.DatasetOptions.ServiceOptions value) { + if (serviceOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + serviceOptions_ = value; + onChanged(); + } else { + serviceOptionsBuilder_.setMessage(value); + } + + return this; + } + /** + *
    +       * The tf.data service options associated with the dataset.
    +       * 
    + * + * .tensorflow.data.ServiceOptions service_options = 12; + */ + public Builder setServiceOptions( + org.tensorflow.proto.data.DatasetOptions.ServiceOptions.Builder builderForValue) { + if (serviceOptionsBuilder_ == null) { + serviceOptions_ = builderForValue.build(); + onChanged(); + } else { + serviceOptionsBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
    +       * The tf.data service options associated with the dataset.
    +       * 
    + * + * .tensorflow.data.ServiceOptions service_options = 12; + */ + public Builder mergeServiceOptions(org.tensorflow.proto.data.DatasetOptions.ServiceOptions value) { + if (serviceOptionsBuilder_ == null) { + if (serviceOptions_ != null) { + serviceOptions_ = + org.tensorflow.proto.data.DatasetOptions.ServiceOptions.newBuilder(serviceOptions_).mergeFrom(value).buildPartial(); + } else { + serviceOptions_ = value; + } + onChanged(); + } else { + serviceOptionsBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
    +       * The tf.data service options associated with the dataset.
    +       * 
    + * + * .tensorflow.data.ServiceOptions service_options = 12; + */ + public Builder clearServiceOptions() { + if (serviceOptionsBuilder_ == null) { + serviceOptions_ = null; + onChanged(); + } else { + serviceOptions_ = null; + serviceOptionsBuilder_ = null; + } + + return this; + } + /** + *
    +       * The tf.data service options associated with the dataset.
    +       * 
    + * + * .tensorflow.data.ServiceOptions service_options = 12; + */ + public org.tensorflow.proto.data.DatasetOptions.ServiceOptions.Builder getServiceOptionsBuilder() { + + onChanged(); + return getServiceOptionsFieldBuilder().getBuilder(); + } + /** + *
    +       * The tf.data service options associated with the dataset.
    +       * 
    + * + * .tensorflow.data.ServiceOptions service_options = 12; + */ + public org.tensorflow.proto.data.DatasetOptions.ServiceOptionsOrBuilder getServiceOptionsOrBuilder() { + if (serviceOptionsBuilder_ != null) { + return serviceOptionsBuilder_.getMessageOrBuilder(); + } else { + return serviceOptions_ == null ? + org.tensorflow.proto.data.DatasetOptions.ServiceOptions.getDefaultInstance() : serviceOptions_; + } + } + /** + *
    +       * The tf.data service options associated with the dataset.
    +       * 
    + * + * .tensorflow.data.ServiceOptions service_options = 12; + */ + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.data.DatasetOptions.ServiceOptions, org.tensorflow.proto.data.DatasetOptions.ServiceOptions.Builder, org.tensorflow.proto.data.DatasetOptions.ServiceOptionsOrBuilder> + getServiceOptionsFieldBuilder() { + if (serviceOptionsBuilder_ == null) { + serviceOptionsBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.data.DatasetOptions.ServiceOptions, org.tensorflow.proto.data.DatasetOptions.ServiceOptions.Builder, org.tensorflow.proto.data.DatasetOptions.ServiceOptionsOrBuilder>( + getServiceOptions(), + getParentForChildren(), + isClean()); + serviceOptions_ = null; + } + return serviceOptionsBuilder_; + } + /** * bool slack = 4; * @return Whether the slack field is set. @@ -8315,6 +10057,11 @@ public org.tensorflow.proto.data.DatasetOptions.Options getDefaultInstanceForTyp private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_tensorflow_data_OptimizationOptions_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_data_ServiceOptions_descriptor; + private static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_data_ServiceOptions_fieldAccessorTable; private static final com.google.protobuf.Descriptors.Descriptor internal_static_tensorflow_data_ThreadingOptions_descriptor; private static final @@ -8336,56 +10083,64 @@ public org.tensorflow.proto.data.DatasetOptions.Options getDefaultInstanceForTyp java.lang.String[] descriptorData = { "\n/tensorflow/core/framework/dataset_opti" + "ons.proto\022\017tensorflow.data\032%tensorflow/c" + - "ore/framework/model.proto\"\371\001\n\017AutotuneOp" + + "ore/framework/model.proto\"\270\002\n\017AutotuneOp" + "tions\022\021\n\007enabled\030\001 \001(\010H\000\022\024\n\ncpu_budget\030\002" + " 
\001(\005H\001\022\024\n\nram_budget\030\003 \001(\003H\002\022F\n\022autotune" + "_algorithm\030\004 \001(\0162(.tensorflow.data.model" + - ".AutotuneAlgorithmH\003B\022\n\020optional_enabled" + - "B\025\n\023optional_cpu_budgetB\025\n\023optional_ram_" + - "budgetB\035\n\033optional_autotune_algorithm\"\321\001" + - "\n\022CardinalityOptions\022G\n\rcompute_level\030\001 " + - "\001(\01620.tensorflow.data.CardinalityOptions" + - ".ComputeLevel\"r\n\014ComputeLevel\022#\n\037CARDINA" + - "LITY_COMPUTE_UNSPECIFIED\020\000\022\033\n\027CARDINALIT" + - "Y_COMPUTE_LOW\020\001\022 \n\034CARDINALITY_COMPUTE_M" + - "ODERATE\020\002\"\177\n\021DistributeOptions\022;\n\021auto_s" + - "hard_policy\030\001 \001(\0162 .tensorflow.data.Auto" + - "ShardPolicy\022\025\n\013num_devices\030\002 \001(\005H\000B\026\n\024op" + - "tional_num_devices\"\362\005\n\023OptimizationOptio" + - "ns\022%\n\033apply_default_optimizations\030\001 \001(\010H" + - "\000\022\027\n\rfilter_fusion\030\006 \001(\010H\001\022\036\n\024map_and_ba" + - "tch_fusion\030\t \001(\010H\002\022\037\n\025map_and_filter_fus" + - "ion\030\n \001(\010H\003\022\024\n\nmap_fusion\030\013 \001(\010H\004\022\035\n\023map" + - "_parallelization\030\014 \001(\010H\005\022\032\n\020noop_elimina" + - "tion\030\016 \001(\010H\006\022\030\n\016parallel_batch\030\017 \001(\010H\007\022#" + - "\n\031shuffle_and_repeat_fusion\030\021 \001(\010H\010\022 \n\026f" + - "ilter_parallelization\030\022 \001(\010H\t\022\031\n\017inject_" + - "prefetch\030\023 \001(\010H\nB&\n$optional_apply_defau" + - "lt_optimizationsB\030\n\026optional_filter_fusi" + - "onB\037\n\035optional_map_and_batch_fusionB \n\036o" + - "ptional_map_and_filter_fusionB\025\n\023optiona" + - "l_map_fusionB\036\n\034optional_map_paralleliza" + - "tionB\033\n\031optional_noop_eliminationB\031\n\027opt" + - "ional_parallel_batchB$\n\"optional_shuffle" + - "_and_repeat_fusionB!\n\037optional_filter_pa" + - "rallelizationB\032\n\030optional_inject_prefetc" + - 
"hJ\004\010\002\020\003J\004\010\003\020\004J\004\010\004\020\005J\004\010\005\020\006J\004\010\007\020\010J\004\010\010\020\tJ\004\010" + - "\r\020\016J\004\010\020\020\021J\004\010\024\020\025\"\242\001\n\020ThreadingOptions\022\"\n\030" + - "max_intra_op_parallelism\030\001 \001(\005H\000\022!\n\027priv" + - "ate_threadpool_size\030\002 \001(\005H\001B#\n!optional_" + - "max_intra_op_parallelismB\"\n optional_pri" + - "vate_threadpool_size\"\262\004\n\007Options\022\027\n\rdete" + - "rministic\030\001 \001(\010H\000\022:\n\020autotune_options\030\007 " + - "\001(\0132 .tensorflow.data.AutotuneOptions\022>\n" + - "\022distribute_options\030\002 \001(\0132\".tensorflow.d" + - "ata.DistributeOptions\022B\n\024optimization_op" + - "tions\030\003 \001(\0132$.tensorflow.data.Optimizati" + - "onOptions\022\017\n\005slack\030\004 \001(\010H\001\022<\n\021threading_" + - "options\030\005 \001(\0132!.tensorflow.data.Threadin" + - "gOptions\022E\n\025external_state_policy\030\006 \001(\0162" + - "$.tensorflow.data.ExternalStatePolicyH\002\022" + - "\035\n\023symbolic_checkpoint\030\010 \001(\010H\003\022\024\n\nwarm_s" + - "tart\030\t \001(\010H\004B\030\n\026optional_deterministicB\020" + + ".AutotuneAlgorithmH\003\022\035\n\023initial_parallel" + + "ism\030\005 \001(\003H\004B\022\n\020optional_enabledB\025\n\023optio" + + "nal_cpu_budgetB\025\n\023optional_ram_budgetB\035\n" + + "\033optional_autotune_algorithmB\036\n\034optional" + + "_initial_parallelism\"\321\001\n\022CardinalityOpti" + + "ons\022G\n\rcompute_level\030\001 \001(\01620.tensorflow." 
+ + "data.CardinalityOptions.ComputeLevel\"r\n\014" + + "ComputeLevel\022#\n\037CARDINALITY_COMPUTE_UNSP" + + "ECIFIED\020\000\022\033\n\027CARDINALITY_COMPUTE_LOW\020\001\022 " + + "\n\034CARDINALITY_COMPUTE_MODERATE\020\002\"\177\n\021Dist" + + "ributeOptions\022;\n\021auto_shard_policy\030\001 \001(\016" + + "2 .tensorflow.data.AutoShardPolicy\022\025\n\013nu" + + "m_devices\030\002 \001(\005H\000B\026\n\024optional_num_device" + + "s\"\271\006\n\023OptimizationOptions\022%\n\033apply_defau" + + "lt_optimizations\030\001 \001(\010H\000\022\027\n\rfilter_fusio" + + "n\030\006 \001(\010H\001\022\036\n\024map_and_batch_fusion\030\t \001(\010H" + + "\002\022\037\n\025map_and_filter_fusion\030\n \001(\010H\003\022\024\n\nma" + + "p_fusion\030\013 \001(\010H\004\022\035\n\023map_parallelization\030" + + "\014 \001(\010H\005\022\032\n\020noop_elimination\030\016 \001(\010H\006\022\030\n\016p" + + "arallel_batch\030\017 \001(\010H\007\022#\n\031shuffle_and_rep" + + "eat_fusion\030\021 \001(\010H\010\022 \n\026filter_paralleliza" + + "tion\030\022 \001(\010H\t\022\031\n\017inject_prefetch\030\023 \001(\010H\n\022" + + "!\n\027seq_interleave_prefetch\030\025 \001(\010H\013B&\n$op" + + "tional_apply_default_optimizationsB\030\n\026op" + + "tional_filter_fusionB\037\n\035optional_map_and" + + "_batch_fusionB \n\036optional_map_and_filter" + + "_fusionB\025\n\023optional_map_fusionB\036\n\034option" + + "al_map_parallelizationB\033\n\031optional_noop_" + + "eliminationB\031\n\027optional_parallel_batchB$" + + "\n\"optional_shuffle_and_repeat_fusionB!\n\037" + + "optional_filter_parallelizationB\032\n\030optio" + + "nal_inject_prefetchB\"\n optional_seq_inte" + + "rleave_prefetchJ\004\010\002\020\003J\004\010\003\020\004J\004\010\004\020\005J\004\010\005\020\006J" + + "\004\010\007\020\010J\004\010\010\020\tJ\004\010\r\020\016J\004\010\020\020\021J\004\010\024\020\025\"5\n\016Service" + + "Options\022\020\n\006pinned\030\001 \001(\010H\000B\021\n\017optional_pi" + + 
"nned\"\242\001\n\020ThreadingOptions\022\"\n\030max_intra_o" + + "p_parallelism\030\001 \001(\005H\000\022!\n\027private_threadp" + + "ool_size\030\002 \001(\005H\001B#\n!optional_max_intra_o" + + "p_parallelismB\"\n optional_private_thread" + + "pool_size\"\265\005\n\007Options\022\026\n\014dataset_name\030\n " + + "\001(\tH\000\022\026\n\016framework_type\030\013 \003(\t\022\027\n\rdetermi" + + "nistic\030\001 \001(\010H\001\022:\n\020autotune_options\030\007 \001(\013" + + "2 .tensorflow.data.AutotuneOptions\022>\n\022di" + + "stribute_options\030\002 \001(\0132\".tensorflow.data" + + ".DistributeOptions\022B\n\024optimization_optio" + + "ns\030\003 \001(\0132$.tensorflow.data.OptimizationO" + + "ptions\0228\n\017service_options\030\014 \001(\0132\037.tensor" + + "flow.data.ServiceOptions\022\017\n\005slack\030\004 \001(\010H" + + "\002\022<\n\021threading_options\030\005 \001(\0132!.tensorflo" + + "w.data.ThreadingOptions\022E\n\025external_stat" + + "e_policy\030\006 \001(\0162$.tensorflow.data.Externa" + + "lStatePolicyH\003\022\035\n\023symbolic_checkpoint\030\010 " + + "\001(\010H\004\022\024\n\nwarm_start\030\t \001(\010H\005B\027\n\025optional_" + + "dataset_nameB\030\n\026optional_deterministicB\020" + "\n\016optional_slackB \n\036optional_external_st" + "ate_policyB\036\n\034optional_symbolic_checkpoi" + "ntB\025\n\023optional_warm_start*K\n\017AutoShardPo" + @@ -8407,7 +10162,7 @@ public org.tensorflow.proto.data.DatasetOptions.Options getDefaultInstanceForTyp internal_static_tensorflow_data_AutotuneOptions_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_data_AutotuneOptions_descriptor, - new java.lang.String[] { "Enabled", "CpuBudget", "RamBudget", "AutotuneAlgorithm", "OptionalEnabled", "OptionalCpuBudget", "OptionalRamBudget", "OptionalAutotuneAlgorithm", }); + new java.lang.String[] { "Enabled", "CpuBudget", "RamBudget", "AutotuneAlgorithm", "InitialParallelism", "OptionalEnabled", 
"OptionalCpuBudget", "OptionalRamBudget", "OptionalAutotuneAlgorithm", "OptionalInitialParallelism", }); internal_static_tensorflow_data_CardinalityOptions_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_tensorflow_data_CardinalityOptions_fieldAccessorTable = new @@ -8425,19 +10180,25 @@ public org.tensorflow.proto.data.DatasetOptions.Options getDefaultInstanceForTyp internal_static_tensorflow_data_OptimizationOptions_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_data_OptimizationOptions_descriptor, - new java.lang.String[] { "ApplyDefaultOptimizations", "FilterFusion", "MapAndBatchFusion", "MapAndFilterFusion", "MapFusion", "MapParallelization", "NoopElimination", "ParallelBatch", "ShuffleAndRepeatFusion", "FilterParallelization", "InjectPrefetch", "OptionalApplyDefaultOptimizations", "OptionalFilterFusion", "OptionalMapAndBatchFusion", "OptionalMapAndFilterFusion", "OptionalMapFusion", "OptionalMapParallelization", "OptionalNoopElimination", "OptionalParallelBatch", "OptionalShuffleAndRepeatFusion", "OptionalFilterParallelization", "OptionalInjectPrefetch", }); - internal_static_tensorflow_data_ThreadingOptions_descriptor = + new java.lang.String[] { "ApplyDefaultOptimizations", "FilterFusion", "MapAndBatchFusion", "MapAndFilterFusion", "MapFusion", "MapParallelization", "NoopElimination", "ParallelBatch", "ShuffleAndRepeatFusion", "FilterParallelization", "InjectPrefetch", "SeqInterleavePrefetch", "OptionalApplyDefaultOptimizations", "OptionalFilterFusion", "OptionalMapAndBatchFusion", "OptionalMapAndFilterFusion", "OptionalMapFusion", "OptionalMapParallelization", "OptionalNoopElimination", "OptionalParallelBatch", "OptionalShuffleAndRepeatFusion", "OptionalFilterParallelization", "OptionalInjectPrefetch", "OptionalSeqInterleavePrefetch", }); + internal_static_tensorflow_data_ServiceOptions_descriptor = getDescriptor().getMessageTypes().get(4); + 
internal_static_tensorflow_data_ServiceOptions_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_data_ServiceOptions_descriptor, + new java.lang.String[] { "Pinned", "OptionalPinned", }); + internal_static_tensorflow_data_ThreadingOptions_descriptor = + getDescriptor().getMessageTypes().get(5); internal_static_tensorflow_data_ThreadingOptions_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_data_ThreadingOptions_descriptor, new java.lang.String[] { "MaxIntraOpParallelism", "PrivateThreadpoolSize", "OptionalMaxIntraOpParallelism", "OptionalPrivateThreadpoolSize", }); internal_static_tensorflow_data_Options_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(6); internal_static_tensorflow_data_Options_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_data_Options_descriptor, - new java.lang.String[] { "Deterministic", "AutotuneOptions", "DistributeOptions", "OptimizationOptions", "Slack", "ThreadingOptions", "ExternalStatePolicy", "SymbolicCheckpoint", "WarmStart", "OptionalDeterministic", "OptionalSlack", "OptionalExternalStatePolicy", "OptionalSymbolicCheckpoint", "OptionalWarmStart", }); + new java.lang.String[] { "DatasetName", "FrameworkType", "Deterministic", "AutotuneOptions", "DistributeOptions", "OptimizationOptions", "ServiceOptions", "Slack", "ThreadingOptions", "ExternalStatePolicy", "SymbolicCheckpoint", "WarmStart", "OptionalDatasetName", "OptionalDeterministic", "OptionalSlack", "OptionalExternalStatePolicy", "OptionalSymbolicCheckpoint", "OptionalWarmStart", }); org.tensorflow.proto.data.model.Model.getDescriptor(); } diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/data/experimental/ServiceConfig.java 
b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/data/experimental/ServiceConfig.java index 5d143f7c9f8..de029b2baa5 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/data/experimental/ServiceConfig.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/data/experimental/ServiceConfig.java @@ -2261,7 +2261,11 @@ public interface WorkerConfigOrBuilder extends /** *
    -     * The protocol for the worker to use when transferring data to clients.
    +     * If set, the name of an alternative data transfer protocol for which the
    +     * worker starts an additional server ("data transfer server"); the trainer
    +     * can then get data from this server. If not set, no such server is started,
    +     * and the trainer can only get data from the regular worker server over
    +     * `protocol`.
          * 
    * * string data_transfer_protocol = 7; @@ -2270,7 +2274,11 @@ public interface WorkerConfigOrBuilder extends java.lang.String getDataTransferProtocol(); /** *
    -     * The protocol for the worker to use when transferring data to clients.
    +     * If set, the name of an alternative data transfer protocol for which the
    +     * worker starts an additional server ("data transfer server"); the trainer
    +     * can then get data from this server. If not set, no such server is started,
    +     * and the trainer can only get data from the regular worker server over
    +     * `protocol`.
          * 
    * * string data_transfer_protocol = 7; @@ -2281,9 +2289,21 @@ public interface WorkerConfigOrBuilder extends /** *
    -     * The data transfer address of the worker server. The substring "%port%", if
    -     * specified, will be replaced with the worker's bound port. This is useful
    -     * when the port is set to `0`.
    +     * If `data_transfer_protocol` is set, the port to which the data transfer
    +     * server binds. If set to `0`, the server binds to any available port.
    +     * 
    + * + * int64 data_transfer_port = 13; + * @return The dataTransferPort. + */ + long getDataTransferPort(); + + /** + *
    +     * If `data_transfer_protocol` is set, the address of the data transfer
    +     * server. The substring "%dts_port%" can be used to represent -- and is
    +     * replaced with -- the bound port of the data transfer server; this is useful
    +     * when `data_transfer_port` is set to `0`.
          * 
    * * string data_transfer_address = 8; @@ -2292,9 +2312,10 @@ public interface WorkerConfigOrBuilder extends java.lang.String getDataTransferAddress(); /** *
    -     * The data transfer address of the worker server. The substring "%port%", if
    -     * specified, will be replaced with the worker's bound port. This is useful
    -     * when the port is set to `0`.
    +     * If `data_transfer_protocol` is set, the address of the data transfer
    +     * server. The substring "%dts_port%" can be used to represent -- and is
    +     * replaced with -- the bound port of the data transfer server; this is useful
    +     * when `data_transfer_port` is set to `0`.
          * 
    * * string data_transfer_address = 8; @@ -2340,7 +2361,7 @@ public interface WorkerConfigOrBuilder extends /** *
        * Configuration for a tf.data service WorkerServer.
    -   * Next id: 13
    +   * Next id: 14
        * 
    * * Protobuf type {@code tensorflow.data.experimental.WorkerConfig} @@ -2646,7 +2667,11 @@ public long getDispatcherTimeoutMs() { private volatile java.lang.Object dataTransferProtocol_; /** *
    -     * The protocol for the worker to use when transferring data to clients.
    +     * If set, the name of an alternative data transfer protocol for which the
    +     * worker starts an additional server ("data transfer server"); the trainer
    +     * can then get data from this server. If not set, no such server is started,
    +     * and the trainer can only get data from the regular worker server over
    +     * `protocol`.
          * 
    * * string data_transfer_protocol = 7; @@ -2667,7 +2692,11 @@ public java.lang.String getDataTransferProtocol() { } /** *
    -     * The protocol for the worker to use when transferring data to clients.
    +     * If set, the name of an alternative data transfer protocol for which the
    +     * worker starts an additional server ("data transfer server"); the trainer
    +     * can then get data from this server. If not set, no such server is started,
    +     * and the trainer can only get data from the regular worker server over
    +     * `protocol`.
          * 
    * * string data_transfer_protocol = 7; @@ -2688,13 +2717,30 @@ public java.lang.String getDataTransferProtocol() { } } + public static final int DATA_TRANSFER_PORT_FIELD_NUMBER = 13; + private long dataTransferPort_; + /** + *
    +     * If `data_transfer_protocol` is set, the port to which the data transfer
    +     * server binds. If set to `0`, the server binds to any available port.
    +     * 
    + * + * int64 data_transfer_port = 13; + * @return The dataTransferPort. + */ + @java.lang.Override + public long getDataTransferPort() { + return dataTransferPort_; + } + public static final int DATA_TRANSFER_ADDRESS_FIELD_NUMBER = 8; private volatile java.lang.Object dataTransferAddress_; /** *
    -     * The data transfer address of the worker server. The substring "%port%", if
    -     * specified, will be replaced with the worker's bound port. This is useful
    -     * when the port is set to `0`.
    +     * If `data_transfer_protocol` is set, the address of the data transfer
    +     * server. The substring "%dts_port%" can be used to represent -- and is
    +     * replaced with -- the bound port of the data transfer server; this is useful
    +     * when `data_transfer_port` is set to `0`.
          * 
    * * string data_transfer_address = 8; @@ -2715,9 +2761,10 @@ public java.lang.String getDataTransferAddress() { } /** *
    -     * The data transfer address of the worker server. The substring "%port%", if
    -     * specified, will be replaced with the worker's bound port. This is useful
    -     * when the port is set to `0`.
    +     * If `data_transfer_protocol` is set, the address of the data transfer
    +     * server. The substring "%dts_port%" can be used to represent -- and is
    +     * replaced with -- the bound port of the data transfer server; this is useful
    +     * when `data_transfer_port` is set to `0`.
          * 
    * * string data_transfer_address = 8; @@ -2837,6 +2884,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (snapshotMaxChunkSizeBytes_ != 0L) { output.writeInt64(12, snapshotMaxChunkSizeBytes_); } + if (dataTransferPort_ != 0L) { + output.writeInt64(13, dataTransferPort_); + } getUnknownFields().writeTo(output); } @@ -2893,6 +2943,10 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeInt64Size(12, snapshotMaxChunkSizeBytes_); } + if (dataTransferPort_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(13, dataTransferPort_); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -2924,6 +2978,8 @@ public boolean equals(final java.lang.Object obj) { != other.getDispatcherTimeoutMs()) return false; if (!getDataTransferProtocol() .equals(other.getDataTransferProtocol())) return false; + if (getDataTransferPort() + != other.getDataTransferPort()) return false; if (!getDataTransferAddress() .equals(other.getDataTransferAddress())) return false; if (getCrossTrainerCacheSizeBytes() @@ -2964,6 +3020,9 @@ public int hashCode() { getDispatcherTimeoutMs()); hash = (37 * hash) + DATA_TRANSFER_PROTOCOL_FIELD_NUMBER; hash = (53 * hash) + getDataTransferProtocol().hashCode(); + hash = (37 * hash) + DATA_TRANSFER_PORT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getDataTransferPort()); hash = (37 * hash) + DATA_TRANSFER_ADDRESS_FIELD_NUMBER; hash = (53 * hash) + getDataTransferAddress().hashCode(); hash = (37 * hash) + CROSS_TRAINER_CACHE_SIZE_BYTES_FIELD_NUMBER; @@ -3073,7 +3132,7 @@ protected Builder newBuilderForType( /** *
          * Configuration for a tf.data service WorkerServer.
    -     * Next id: 13
    +     * Next id: 14
          * 
    * * Protobuf type {@code tensorflow.data.experimental.WorkerConfig} @@ -3124,6 +3183,8 @@ public Builder clear() { dataTransferProtocol_ = ""; + dataTransferPort_ = 0L; + dataTransferAddress_ = ""; crossTrainerCacheSizeBytes_ = 0L; @@ -3171,6 +3232,7 @@ public org.tensorflow.proto.data.experimental.ServiceConfig.WorkerConfig buildPa result.heartbeatIntervalMs_ = heartbeatIntervalMs_; result.dispatcherTimeoutMs_ = dispatcherTimeoutMs_; result.dataTransferProtocol_ = dataTransferProtocol_; + result.dataTransferPort_ = dataTransferPort_; result.dataTransferAddress_ = dataTransferAddress_; result.crossTrainerCacheSizeBytes_ = crossTrainerCacheSizeBytes_; result.snapshotMaxChunkSizeBytes_ = snapshotMaxChunkSizeBytes_; @@ -3258,6 +3320,9 @@ public Builder mergeFrom(org.tensorflow.proto.data.experimental.ServiceConfig.Wo dataTransferProtocol_ = other.dataTransferProtocol_; onChanged(); } + if (other.getDataTransferPort() != 0L) { + setDataTransferPort(other.getDataTransferPort()); + } if (!other.getDataTransferAddress().isEmpty()) { dataTransferAddress_ = other.dataTransferAddress_; onChanged(); @@ -3358,6 +3423,11 @@ public Builder mergeFrom( break; } // case 96 + case 104: { + dataTransferPort_ = input.readInt64(); + + break; + } // case 104 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag @@ -3990,7 +4060,11 @@ public Builder clearDispatcherTimeoutMs() { private java.lang.Object dataTransferProtocol_ = ""; /** *
    -       * The protocol for the worker to use when transferring data to clients.
    +       * If set, the name of an alternative data transfer protocol for which the
    +       * worker starts an additional server ("data transfer server"); the trainer
    +       * can then get data from this server. If not set, no such server is started,
    +       * and the trainer can only get data from the regular worker server over
    +       * `protocol`.
            * 
    * * string data_transfer_protocol = 7; @@ -4010,7 +4084,11 @@ public java.lang.String getDataTransferProtocol() { } /** *
    -       * The protocol for the worker to use when transferring data to clients.
    +       * If set, the name of an alternative data transfer protocol for which the
    +       * worker starts an additional server ("data transfer server"); the trainer
    +       * can then get data from this server. If not set, no such server is started,
    +       * and the trainer can only get data from the regular worker server over
    +       * `protocol`.
            * 
    * * string data_transfer_protocol = 7; @@ -4031,7 +4109,11 @@ public java.lang.String getDataTransferProtocol() { } /** *
    -       * The protocol for the worker to use when transferring data to clients.
    +       * If set, the name of an alternative data transfer protocol for which the
    +       * worker starts an additional server ("data transfer server"); the trainer
    +       * can then get data from this server. If not set, no such server is started,
    +       * and the trainer can only get data from the regular worker server over
    +       * `protocol`.
            * 
    * * string data_transfer_protocol = 7; @@ -4050,7 +4132,11 @@ public Builder setDataTransferProtocol( } /** *
    -       * The protocol for the worker to use when transferring data to clients.
    +       * If set, the name of an alternative data transfer protocol for which the
    +       * worker starts an additional server ("data transfer server"); the trainer
    +       * can then get data from this server. If not set, no such server is started,
    +       * and the trainer can only get data from the regular worker server over
    +       * `protocol`.
            * 
    * * string data_transfer_protocol = 7; @@ -4064,7 +4150,11 @@ public Builder clearDataTransferProtocol() { } /** *
    -       * The protocol for the worker to use when transferring data to clients.
    +       * If set, the name of an alternative data transfer protocol for which the
    +       * worker starts an additional server ("data transfer server"); the trainer
    +       * can then get data from this server. If not set, no such server is started,
    +       * and the trainer can only get data from the regular worker server over
    +       * `protocol`.
            * 
    * * string data_transfer_protocol = 7; @@ -4083,12 +4173,59 @@ public Builder setDataTransferProtocolBytes( return this; } + private long dataTransferPort_ ; + /** + *
    +       * If `data_transfer_protocol` is set, the port to which the data transfer
    +       * server binds. If set to `0`, the server binds to any available port.
    +       * 
    + * + * int64 data_transfer_port = 13; + * @return The dataTransferPort. + */ + @java.lang.Override + public long getDataTransferPort() { + return dataTransferPort_; + } + /** + *
    +       * If `data_transfer_protocol` is set, the port to which the data transfer
    +       * server binds. If set to `0`, the server binds to any available port.
    +       * 
    + * + * int64 data_transfer_port = 13; + * @param value The dataTransferPort to set. + * @return This builder for chaining. + */ + public Builder setDataTransferPort(long value) { + + dataTransferPort_ = value; + onChanged(); + return this; + } + /** + *
    +       * If `data_transfer_protocol` is set, the port to which the data transfer
    +       * server binds. If set to `0`, the server binds to any available port.
    +       * 
    + * + * int64 data_transfer_port = 13; + * @return This builder for chaining. + */ + public Builder clearDataTransferPort() { + + dataTransferPort_ = 0L; + onChanged(); + return this; + } + private java.lang.Object dataTransferAddress_ = ""; /** *
    -       * The data transfer address of the worker server. The substring "%port%", if
    -       * specified, will be replaced with the worker's bound port. This is useful
    -       * when the port is set to `0`.
    +       * If `data_transfer_protocol` is set, the address of the data transfer
    +       * server. The substring "%dts_port%" can be used to represent -- and is
    +       * replaced with -- the bound port of the data transfer server; this is useful
    +       * when `data_transfer_port` is set to `0`.
            * 
    * * string data_transfer_address = 8; @@ -4108,9 +4245,10 @@ public java.lang.String getDataTransferAddress() { } /** *
    -       * The data transfer address of the worker server. The substring "%port%", if
    -       * specified, will be replaced with the worker's bound port. This is useful
    -       * when the port is set to `0`.
    +       * If `data_transfer_protocol` is set, the address of the data transfer
    +       * server. The substring "%dts_port%" can be used to represent -- and is
    +       * replaced with -- the bound port of the data transfer server; this is useful
    +       * when `data_transfer_port` is set to `0`.
            * 
    * * string data_transfer_address = 8; @@ -4131,9 +4269,10 @@ public java.lang.String getDataTransferAddress() { } /** *
    -       * The data transfer address of the worker server. The substring "%port%", if
    -       * specified, will be replaced with the worker's bound port. This is useful
    -       * when the port is set to `0`.
    +       * If `data_transfer_protocol` is set, the address of the data transfer
    +       * server. The substring "%dts_port%" can be used to represent -- and is
    +       * replaced with -- the bound port of the data transfer server; this is useful
    +       * when `data_transfer_port` is set to `0`.
            * 
    * * string data_transfer_address = 8; @@ -4152,9 +4291,10 @@ public Builder setDataTransferAddress( } /** *
    -       * The data transfer address of the worker server. The substring "%port%", if
    -       * specified, will be replaced with the worker's bound port. This is useful
    -       * when the port is set to `0`.
    +       * If `data_transfer_protocol` is set, the address of the data transfer
    +       * server. The substring "%dts_port%" can be used to represent -- and is
    +       * replaced with -- the bound port of the data transfer server; this is useful
    +       * when `data_transfer_port` is set to `0`.
            * 
    * * string data_transfer_address = 8; @@ -4168,9 +4308,10 @@ public Builder clearDataTransferAddress() { } /** *
    -       * The data transfer address of the worker server. The substring "%port%", if
    -       * specified, will be replaced with the worker's bound port. This is useful
    -       * when the port is set to `0`.
    +       * If `data_transfer_protocol` is set, the address of the data transfer
    +       * server. The substring "%dts_port%" can be used to represent -- and is
    +       * replaced with -- the bound port of the data transfer server; this is useful
    +       * when `data_transfer_port` is set to `0`.
            * 
    * * string data_transfer_address = 8; @@ -4424,19 +4565,20 @@ public org.tensorflow.proto.data.experimental.ServiceConfig.WorkerConfig getDefa "s\030\006 \001(\003\022 \n\030gc_dynamic_sharding_jobs\030\013 \001(" + "\010\022\031\n\021client_timeout_ms\030\010 \001(\003\022\031\n\021worker_t" + "imeout_ms\030\n \001(\003\022\'\n\037worker_max_concurrent" + - "_snapshots\030\014 \001(\003\"\345\002\n\014WorkerConfig\022\014\n\004por" + + "_snapshots\030\014 \001(\003\"\201\003\n\014WorkerConfig\022\014\n\004por" + "t\030\001 \001(\003\022\020\n\010protocol\030\002 \001(\t\022\032\n\022dispatcher_" + "address\030\003 \001(\t\022\026\n\016worker_address\030\004 \001(\t\022\023\n" + "\013worker_tags\030\n \003(\t\022\035\n\025heartbeat_interval" + "_ms\030\005 \001(\003\022\035\n\025dispatcher_timeout_ms\030\006 \001(\003" + - "\022\036\n\026data_transfer_protocol\030\007 \001(\t\022\035\n\025data" + - "_transfer_address\030\010 \001(\t\022&\n\036cross_trainer" + - "_cache_size_bytes\030\013 \001(\003\022%\n\035snapshot_max_" + - "chunk_size_bytes\030\014 \001(\003\022 \n\030shutdown_quiet" + - "_period_ms\030\t \001(\003B\177\n&org.tensorflow.proto" + - ".data.experimentalZUgithub.com/tensorflo" + - "w/tensorflow/tensorflow/go/core/protobuf" + - "/for_core_protos_go_protob\006proto3" + "\022\036\n\026data_transfer_protocol\030\007 \001(\t\022\032\n\022data" + + "_transfer_port\030\r \001(\003\022\035\n\025data_transfer_ad" + + "dress\030\010 \001(\t\022&\n\036cross_trainer_cache_size_" + + "bytes\030\013 \001(\003\022%\n\035snapshot_max_chunk_size_b" + + "ytes\030\014 \001(\003\022 \n\030shutdown_quiet_period_ms\030\t" + + " \001(\003B\177\n&org.tensorflow.proto.data.experi" + + "mentalZUgithub.com/tensorflow/tensorflow" + + "/tensorflow/go/core/protobuf/for_core_pr" + + "otos_go_protob\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, @@ -4454,7 +4596,7 @@ public 
org.tensorflow.proto.data.experimental.ServiceConfig.WorkerConfig getDefa internal_static_tensorflow_data_experimental_WorkerConfig_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_data_experimental_WorkerConfig_descriptor, - new java.lang.String[] { "Port", "Protocol", "DispatcherAddress", "WorkerAddress", "WorkerTags", "HeartbeatIntervalMs", "DispatcherTimeoutMs", "DataTransferProtocol", "DataTransferAddress", "CrossTrainerCacheSizeBytes", "SnapshotMaxChunkSizeBytes", "ShutdownQuietPeriodMs", }); + new java.lang.String[] { "Port", "Protocol", "DispatcherAddress", "WorkerAddress", "WorkerTags", "HeartbeatIntervalMs", "DispatcherTimeoutMs", "DataTransferProtocol", "DataTransferPort", "DataTransferAddress", "CrossTrainerCacheSizeBytes", "SnapshotMaxChunkSizeBytes", "ShutdownQuietPeriodMs", }); org.tensorflow.proto.data.DataService.getDescriptor(); } diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/data/model/Model.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/data/model/Model.java index f03d10c2626..e89b1024d4c 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/data/model/Model.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/data/model/Model.java @@ -314,6 +314,26 @@ public interface ModelProtoOrBuilder extends // @@protoc_insertion_point(interface_extends:tensorflow.data.model.ModelProto) com.google.protobuf.MessageOrBuilder { + /** + *
    +     * User-defined name for the dataset. Empty if no name was set.
    +     * 
    + * + * string dataset_name = 7; + * @return The datasetName. + */ + java.lang.String getDatasetName(); + /** + *
    +     * User-defined name for the dataset. Empty if no name was set.
    +     * 
    + * + * string dataset_name = 7; + * @return The bytes for datasetName. + */ + com.google.protobuf.ByteString + getDatasetNameBytes(); + /** *
          * Map of node IDs to nodes of this model.
    @@ -440,6 +460,7 @@ private ModelProto(com.google.protobuf.GeneratedMessageV3.Builder builder) {
           super(builder);
         }
         private ModelProto() {
    +      datasetName_ = "";
           gapTimes_ = emptyLongList();
         }
     
    @@ -5007,6 +5028,52 @@ public org.tensorflow.proto.data.model.Model.ModelProto.OptimizationParams getDe
     
         }
     
    +    public static final int DATASET_NAME_FIELD_NUMBER = 7;
    +    private volatile java.lang.Object datasetName_;
    +    /**
    +     * 
    +     * User-defined name for the dataset. Empty if no name was set.
    +     * 
    + * + * string dataset_name = 7; + * @return The datasetName. + */ + @java.lang.Override + public java.lang.String getDatasetName() { + java.lang.Object ref = datasetName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + datasetName_ = s; + return s; + } + } + /** + *
    +     * User-defined name for the dataset. Empty if no name was set.
    +     * 
    + * + * string dataset_name = 7; + * @return The bytes for datasetName. + */ + @java.lang.Override + public com.google.protobuf.ByteString + getDatasetNameBytes() { + java.lang.Object ref = datasetName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + datasetName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + public static final int NODES_FIELD_NUMBER = 1; private static final class NodesDefaultEntryHolder { static final com.google.protobuf.MapEntry< @@ -5225,6 +5292,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) for (int i = 0; i < gapTimes_.size(); i++) { output.writeUInt64NoTag(gapTimes_.getLong(i)); } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(datasetName_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 7, datasetName_); + } getUnknownFields().writeTo(output); } @@ -5270,6 +5340,9 @@ public int getSerializedSize() { } gapTimesMemoizedSerializedSize = dataSize; } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(datasetName_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(7, datasetName_); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -5285,6 +5358,8 @@ public boolean equals(final java.lang.Object obj) { } org.tensorflow.proto.data.model.Model.ModelProto other = (org.tensorflow.proto.data.model.Model.ModelProto) obj; + if (!getDatasetName() + .equals(other.getDatasetName())) return false; if (!internalGetNodes().equals( other.internalGetNodes())) return false; if (getOutput() @@ -5309,6 +5384,8 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + DATASET_NAME_FIELD_NUMBER; + hash = (53 * hash) + getDatasetName().hashCode(); if (!internalGetNodes().getMap().isEmpty()) { hash = (37 * hash) + NODES_FIELD_NUMBER; hash = (53 * 
hash) + internalGetNodes().hashCode(); @@ -5482,6 +5559,8 @@ private Builder( @java.lang.Override public Builder clear() { super.clear(); + datasetName_ = ""; + internalGetMutableNodes().clear(); output_ = 0L; @@ -5522,6 +5601,7 @@ public org.tensorflow.proto.data.model.Model.ModelProto build() { public org.tensorflow.proto.data.model.Model.ModelProto buildPartial() { org.tensorflow.proto.data.model.Model.ModelProto result = new org.tensorflow.proto.data.model.Model.ModelProto(this); int from_bitField0_ = bitField0_; + result.datasetName_ = datasetName_; result.nodes_ = internalGetNodes(); result.nodes_.makeImmutable(); result.output_ = output_; @@ -5584,6 +5664,10 @@ public Builder mergeFrom(com.google.protobuf.Message other) { public Builder mergeFrom(org.tensorflow.proto.data.model.Model.ModelProto other) { if (other == org.tensorflow.proto.data.model.Model.ModelProto.getDefaultInstance()) return this; + if (!other.getDatasetName().isEmpty()) { + datasetName_ = other.datasetName_; + onChanged(); + } internalGetMutableNodes().mergeFrom( other.internalGetNodes()); if (other.getOutput() != 0L) { @@ -5672,6 +5756,11 @@ public Builder mergeFrom( input.popLimit(limit); break; } // case 50 + case 58: { + datasetName_ = input.readStringRequireUtf8(); + + break; + } // case 58 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag @@ -5689,6 +5778,102 @@ public Builder mergeFrom( } private int bitField0_; + private java.lang.Object datasetName_ = ""; + /** + *
    +       * User-defined name for the dataset. Empty if no name was set.
    +       * 
    + * + * string dataset_name = 7; + * @return The datasetName. + */ + public java.lang.String getDatasetName() { + java.lang.Object ref = datasetName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + datasetName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
    +       * User-defined name for the dataset. Empty if no name was set.
    +       * 
    + * + * string dataset_name = 7; + * @return The bytes for datasetName. + */ + public com.google.protobuf.ByteString + getDatasetNameBytes() { + java.lang.Object ref = datasetName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + datasetName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
    +       * User-defined name for the dataset. Empty if no name was set.
    +       * 
    + * + * string dataset_name = 7; + * @param value The datasetName to set. + * @return This builder for chaining. + */ + public Builder setDatasetName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + datasetName_ = value; + onChanged(); + return this; + } + /** + *
    +       * User-defined name for the dataset. Empty if no name was set.
    +       * 
    + * + * string dataset_name = 7; + * @return This builder for chaining. + */ + public Builder clearDatasetName() { + + datasetName_ = getDefaultInstance().getDatasetName(); + onChanged(); + return this; + } + /** + *
    +       * User-defined name for the dataset. Empty if no name was set.
    +       * 
    + * + * string dataset_name = 7; + * @param value The bytes for datasetName to set. + * @return This builder for chaining. + */ + public Builder setDatasetNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + datasetName_ = value; + onChanged(); + return this; + } + private com.google.protobuf.MapField< java.lang.Long, org.tensorflow.proto.data.model.Model.ModelProto.Node> nodes_; private com.google.protobuf.MapField @@ -6230,43 +6415,43 @@ public org.tensorflow.proto.data.model.Model.ModelProto getDefaultInstanceForTyp static { java.lang.String[] descriptorData = { "\n%tensorflow/core/framework/model.proto\022" + - "\025tensorflow.data.model\"\207\010\n\nModelProto\022;\n" + - "\005nodes\030\001 \003(\0132,.tensorflow.data.model.Mod" + - "elProto.NodesEntry\022\016\n\006output\030\002 \001(\003\022\022\n\nid" + - "_counter\030\003 \001(\003\022Q\n\023optimization_params\030\005 " + - "\001(\01324.tensorflow.data.model.ModelProto.O" + - "ptimizationParams\022\021\n\tgap_times\030\006 \003(\004\032\277\004\n" + - "\004Node\022\n\n\002id\030\001 \001(\003\022\014\n\004name\030\002 \001(\t\022\020\n\010autot" + - "une\030\003 \001(\010\022\026\n\016buffered_bytes\030\004 \001(\003\022\031\n\021buf" + - "fered_elements\030\005 \001(\003\022\026\n\016bytes_consumed\030\006" + - " \001(\003\022\026\n\016bytes_produced\030\007 \001(\003\022\024\n\014num_elem" + - "ents\030\010 \001(\003\022\027\n\017processing_time\030\t \001(\003\022\026\n\016r" + - "ecord_metrics\030\n \001(\010\022D\n\nparameters\030\013 \003(\0132" + - "0.tensorflow.data.model.ModelProto.Node." + - "Parameter\022!\n\031input_processing_time_sum\030\014" + - " \001(\001\022#\n\033input_processing_time_count\030\r \001(" + - "\003\022\016\n\006inputs\030\016 \003(\003\0224\n\nnode_class\030\017 \001(\0162 ." 
+ - "tensorflow.data.model.NodeClass\022\r\n\005ratio" + - "\030\020 \001(\001\022\024\n\014memory_ratio\030\021 \001(\001\032h\n\tParamete" + - "r\022\014\n\004name\030\001 \001(\t\022\r\n\005value\030\002 \001(\001\022\023\n\013state_" + - "value\030\003 \001(\001\022\013\n\003min\030\004 \001(\001\022\013\n\003max\030\005 \001(\001\022\017\n" + - "\007tunable\030\006 \001(\010\032T\n\nNodesEntry\022\013\n\003key\030\001 \001(" + - "\003\0225\n\005value\030\002 \001(\0132&.tensorflow.data.model" + - ".ModelProto.Node:\0028\001\032\223\001\n\022OptimizationPar" + - "ams\022;\n\talgorithm\030\001 \001(\0162(.tensorflow.data" + - ".model.AutotuneAlgorithm\022\022\n\ncpu_budget\030\002" + - " \001(\003\022\022\n\nram_budget\030\003 \001(\003\022\030\n\020model_input_" + - "time\030\004 \001(\001J\004\010\004\020\005*\234\001\n\tNodeClass\022\013\n\007UNKNOW" + - "N\020\000\022\023\n\017INTERLEAVE_MANY\020\001\022\031\n\025ASYNC_INTERL" + - "EAVE_MANY\020\002\022\017\n\013KNOWN_RATIO\020\003\022\025\n\021ASYNC_KN" + - "OWN_RATIO\020\004\022\021\n\rUNKNOWN_RATIO\020\005\022\027\n\023ASYNC_" + - "UNKNOWN_RATIO\020\006*l\n\021AutotuneAlgorithm\022\013\n\007" + - "DEFAULT\020\000\022\016\n\nHILL_CLIMB\020\001\022\024\n\020GRADIENT_DE" + - "SCENT\020\002\022\023\n\017MAX_PARALLELISM\020\003\022\017\n\013STAGE_BA" + - "SED\020\004Br\n\037org.tensorflow.proto.data.model" + - "ZLgithub.com/tensorflow/tensorflow/tenso" + - "rflow/go/core/framework/model_go_proto\370\001" + - "\001b\006proto3" + "\025tensorflow.data.model\"\235\010\n\nModelProto\022\024\n" + + "\014dataset_name\030\007 \001(\t\022;\n\005nodes\030\001 \003(\0132,.ten" + + "sorflow.data.model.ModelProto.NodesEntry" + + "\022\016\n\006output\030\002 \001(\003\022\022\n\nid_counter\030\003 \001(\003\022Q\n\023" + + "optimization_params\030\005 \001(\01324.tensorflow.d" + + "ata.model.ModelProto.OptimizationParams\022" + + "\021\n\tgap_times\030\006 \003(\004\032\277\004\n\004Node\022\n\n\002id\030\001 
\001(\003\022" + + "\014\n\004name\030\002 \001(\t\022\020\n\010autotune\030\003 \001(\010\022\026\n\016buffe" + + "red_bytes\030\004 \001(\003\022\031\n\021buffered_elements\030\005 \001" + + "(\003\022\026\n\016bytes_consumed\030\006 \001(\003\022\026\n\016bytes_prod" + + "uced\030\007 \001(\003\022\024\n\014num_elements\030\010 \001(\003\022\027\n\017proc" + + "essing_time\030\t \001(\003\022\026\n\016record_metrics\030\n \001(" + + "\010\022D\n\nparameters\030\013 \003(\01320.tensorflow.data." + + "model.ModelProto.Node.Parameter\022!\n\031input" + + "_processing_time_sum\030\014 \001(\001\022#\n\033input_proc" + + "essing_time_count\030\r \001(\003\022\016\n\006inputs\030\016 \003(\003\022" + + "4\n\nnode_class\030\017 \001(\0162 .tensorflow.data.mo" + + "del.NodeClass\022\r\n\005ratio\030\020 \001(\001\022\024\n\014memory_r" + + "atio\030\021 \001(\001\032h\n\tParameter\022\014\n\004name\030\001 \001(\t\022\r\n" + + "\005value\030\002 \001(\001\022\023\n\013state_value\030\003 \001(\001\022\013\n\003min" + + "\030\004 \001(\001\022\013\n\003max\030\005 \001(\001\022\017\n\007tunable\030\006 \001(\010\032T\n\n" + + "NodesEntry\022\013\n\003key\030\001 \001(\003\0225\n\005value\030\002 \001(\0132&" + + ".tensorflow.data.model.ModelProto.Node:\002" + + "8\001\032\223\001\n\022OptimizationParams\022;\n\talgorithm\030\001" + + " \001(\0162(.tensorflow.data.model.AutotuneAlg" + + "orithm\022\022\n\ncpu_budget\030\002 \001(\003\022\022\n\nram_budget" + + "\030\003 \001(\003\022\030\n\020model_input_time\030\004 \001(\001J\004\010\004\020\005*\234" + + "\001\n\tNodeClass\022\013\n\007UNKNOWN\020\000\022\023\n\017INTERLEAVE_" + + "MANY\020\001\022\031\n\025ASYNC_INTERLEAVE_MANY\020\002\022\017\n\013KNO" + + "WN_RATIO\020\003\022\025\n\021ASYNC_KNOWN_RATIO\020\004\022\021\n\rUNK" + + "NOWN_RATIO\020\005\022\027\n\023ASYNC_UNKNOWN_RATIO\020\006*l\n" + + "\021AutotuneAlgorithm\022\013\n\007DEFAULT\020\000\022\016\n\nHILL_" + + 
"CLIMB\020\001\022\024\n\020GRADIENT_DESCENT\020\002\022\023\n\017MAX_PAR" + + "ALLELISM\020\003\022\017\n\013STAGE_BASED\020\004Br\n\037org.tenso" + + "rflow.proto.data.modelZLgithub.com/tenso" + + "rflow/tensorflow/tensorflow/go/core/fram" + + "ework/model_go_proto\370\001\001b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, @@ -6277,7 +6462,7 @@ public org.tensorflow.proto.data.model.Model.ModelProto getDefaultInstanceForTyp internal_static_tensorflow_data_model_ModelProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_data_model_ModelProto_descriptor, - new java.lang.String[] { "Nodes", "Output", "IdCounter", "OptimizationParams", "GapTimes", }); + new java.lang.String[] { "DatasetName", "Nodes", "Output", "IdCounter", "OptimizationParams", "GapTimes", }); internal_static_tensorflow_data_model_ModelProto_Node_descriptor = internal_static_tensorflow_data_model_ModelProto_descriptor.getNestedTypes().get(0); internal_static_tensorflow_data_model_ModelProto_Node_fieldAccessorTable = new diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/dummy/BfcMemoryMap.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/dummy/BfcMemoryMap.java index 9ddd1a3d74f..38f0ce96ef4 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/dummy/BfcMemoryMap.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/dummy/BfcMemoryMap.java @@ -24,11 +24,11 @@ public static void registerAllExtensions( static { java.lang.String[] descriptorData = { "\n-tensorflow/core/protobuf/bfc_memory_ma" + - "p.proto\022\020tensorflow.dummy\032!tsl/protobuf/" + - "bfc_memory_map.protoBs\n\032org.tensorflow.p" + - "roto.dummyZUgithub.com/tensorflow/tensor" + - "flow/tensorflow/go/core/protobuf/for_cor" + - "e_protos_go_protoP\000b\006proto3" + 
"p.proto\022\020tensorflow.dummy\032%xla/tsl/proto" + + "buf/bfc_memory_map.protoBs\n\032org.tensorfl" + + "ow.proto.dummyZUgithub.com/tensorflow/te" + + "nsorflow/tensorflow/go/core/protobuf/for" + + "_core_protos_go_protoP\000b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, diff --git a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/dummy/TestLog.java b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/dummy/TestLog.java index 7f4925fa6b5..95f0ab4c9c2 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/dummy/TestLog.java +++ b/tensorflow-core/tensorflow-core-native/src/gen/java/org/tensorflow/proto/dummy/TestLog.java @@ -24,9 +24,9 @@ public static void registerAllExtensions( static { java.lang.String[] descriptorData = { "\n#tensorflow/core/util/test_log.proto\022\020t" + - "ensorflow.dummy\032\033tsl/protobuf/test_log.p" + - "rotoB\034\n\032org.tensorflow.proto.dummyP\000b\006pr" + - "oto3" + "ensorflow.dummy\032\037xla/tsl/protobuf/test_l" + + "og.protoB\034\n\032org.tensorflow.proto.dummyP\000" + + "b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, diff --git a/tensorflow-core/tensorflow-core-native/src/gen/resources/org/tensorflow/base_api/api_def_AssignVariableXlaConcatND.pbtxt b/tensorflow-core/tensorflow-core-native/src/gen/resources/org/tensorflow/base_api/api_def_AssignVariableXlaConcatND.pbtxt index 646f602af22..6bd6bcd8d05 100644 --- a/tensorflow-core/tensorflow-core-native/src/gen/resources/org/tensorflow/base_api/api_def_AssignVariableXlaConcatND.pbtxt +++ b/tensorflow-core/tensorflow-core-native/src/gen/resources/org/tensorflow/base_api/api_def_AssignVariableXlaConcatND.pbtxt @@ -5,17 +5,13 @@ op { name: "resource" description: <

    The above computation has a replicated output of two replicas. * - * @param data type for {@code outputs} output - * * @deprecated use {@link org.tensorflow.op.tpu.ReplicatedOutput} instead */ @OpMetadata( @@ -54,6 +53,9 @@ inputsClass = TPUReplicatedOutput.Inputs.class ) @Deprecated +@Operator( + group = "tpu" +) public final class TPUReplicatedOutput extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReshardVariables.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReshardVariables.java index c513012f920..c1ddadbc8a5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReshardVariables.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReshardVariables.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -44,6 +45,9 @@ opType = TPUReshardVariables.OP_NAME, inputsClass = TPUReshardVariables.Inputs.class ) +@Operator( + group = "tpu" +) public final class TPUReshardVariables extends RawOp { /** * The name of this op, as known by TensorFlow core engine diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/UpdateTaskIdAndGlobalCoreArray.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/UpdateTaskIdAndGlobalCoreArray.java new file mode 100644 index 00000000000..1a0fb866178 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/UpdateTaskIdAndGlobalCoreArray.java @@ -0,0 +1,86 @@ +/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.tpu; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.types.TInt32; + +/** + * An op to update the task ID and global core array. + * This op is to update the task ID and global core array. + */ +@OpMetadata( + opType = UpdateTaskIdAndGlobalCoreArray.OP_NAME, + inputsClass = UpdateTaskIdAndGlobalCoreArray.Inputs.class +) +public final class UpdateTaskIdAndGlobalCoreArray extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "UpdateTaskIdAndGlobalCoreArray"; + + public UpdateTaskIdAndGlobalCoreArray(Operation operation) { + super(operation, OP_NAME); + } + + /** + * Factory method to create a class wrapping a new UpdateTaskIdAndGlobalCoreArray operation. + * + * @param scope current scope + * @param tpuTaskIdToShardId An array of int32 that maps TPU task ID to shard ID. 
+ * @return a new instance of UpdateTaskIdAndGlobalCoreArray + */ + @Endpoint( + describeByClass = true + ) + public static UpdateTaskIdAndGlobalCoreArray create(Scope scope, + Iterable> tpuTaskIdToShardId) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "UpdateTaskIdAndGlobalCoreArray"); + opBuilder.addInputList(Operands.asOutputs(tpuTaskIdToShardId)); + return new UpdateTaskIdAndGlobalCoreArray(opBuilder.build()); + } + + @OpInputsMetadata( + outputsClass = UpdateTaskIdAndGlobalCoreArray.class + ) + public static class Inputs extends RawOpInputs { + /** + * An array of int32 that maps TPU task ID to shard ID. + */ + public final Iterable> tpuTaskIdToShardId; + + public Inputs(GraphOperation op) { + super(new UpdateTaskIdAndGlobalCoreArray(op), op, Arrays.asList()); + int inputIndex = 0; + int tpuTaskIdToShardIdLength = op.inputListLength("tpu_task_id_to_shard_id"); + tpuTaskIdToShardId = Arrays.asList((Operand[]) op.inputList(inputIndex, tpuTaskIdToShardIdLength)); + inputIndex += tpuTaskIdToShardIdLength; + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/WorkerHeartbeat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/WorkerHeartbeat.java index 690ac095ebc..46a9eaba027 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/WorkerHeartbeat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/WorkerHeartbeat.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; /** @@ -40,6 +41,9 @@ opType = WorkerHeartbeat.OP_NAME, inputsClass = WorkerHeartbeat.Inputs.class ) +@Operator( + group = "tpu" +) public final class WorkerHeartbeat extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine diff 
--git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorTakeGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorTakeGradient.java index a2d152ab93e..e7c94866732 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorTakeGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorTakeGradient.java @@ -43,8 +43,6 @@ * aggregated more than num_required gradients, it returns the average of * the accumulated gradients. Also automatically increments the recorded * global_step in the accumulator by 1, and resets the aggregate to 0. - * - * @param data type for {@code average} output */ @OpMetadata( opType = AccumulatorTakeGradient.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdaMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdaMax.java index b2b297a82d5..0bdb47444ad 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdaMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdaMax.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -37,13 +38,14 @@ * m_t <- beta1 * m_{t-1} + (1 - beta1) * g * v_t <- max(beta2 * v_{t-1}, abs(g)) * variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon) - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyAdaMax.OP_NAME, inputsClass = ApplyAdaMax.Inputs.class ) +@Operator( + group = "train" +) public final class ApplyAdaMax extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core 
engine @@ -205,7 +207,7 @@ public static class Inputs extends RawOpInputs> public final DataType T; /** - * If `True`, updating of the var, m, and v tensors will be protected + * If {@code True}, updating of the var, m, and v tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdadelta.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdadelta.java index be5bdc297ea..7d53245fe2a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdadelta.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdadelta.java @@ -39,8 +39,6 @@ * update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; * update_accum = rho() * update_accum + (1 - rho()) * update.square(); * var -= update; - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyAdadelta.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagrad.java index 94ede4d4a85..0d243bfce4b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagrad.java @@ -37,8 +37,6 @@ * Update '*var' according to the adagrad scheme. 
* accum += grad * grad * var -= lr * grad * (1 / sqrt(accum)) - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyAdagrad.OP_NAME, @@ -198,7 +196,7 @@ public static class Inputs extends RawOpInputs> public final DataType T; /** - * If `True`, updating of the var and accum tensors will be protected + * If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradDa.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradDa.java index b1577260bf8..a2769eae2e7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradDa.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradDa.java @@ -36,8 +36,6 @@ /** * Update '*var' according to the proximal adagrad scheme. - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyAdagradDa.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradV2.java index f13433b87e8..22d0edd340e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradV2.java @@ -29,6 +29,7 @@ import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.DataType; import org.tensorflow.types.family.TType; @@ -36,13 +37,14 @@ * Update '*var' according to the adagrad scheme. 
* accum += grad * grad * var -= lr * grad * (1 / sqrt(accum)) - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyAdagradV2.OP_NAME, inputsClass = ApplyAdagradV2.Inputs.class ) +@Operator( + group = "train" +) public final class ApplyAdagradV2 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -201,7 +203,7 @@ public static class Inputs extends RawOpInputs{t-1} + (1 - \beta_1) \cdot g$$ * $$v_t := \beta_2 \cdot v_{t-1} + (1 - \beta_2) \cdot g^2$$ * $$\text{var} := \begin{cases} \text{var} - (m_t \beta_1 + g \cdot (1 - \beta_1))\cdot\text{lr}_t/(\sqrt{v_t} + \epsilon), &\text{if use_nesterov}\\ \text{var} - m_t \cdot \text{lr}_t /(\sqrt{v_t} + \epsilon), &\text{otherwise} \end{cases}$$ - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyAdam.OP_NAME, @@ -243,14 +241,14 @@ public static class Inputs extends RawOpInputs> { public final DataType T; /** - * If `True`, updating of the var, m, and v tensors will be protected + * If {@code True}, updating of the var, m, and v tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. */ public final boolean useLocking; /** - * If `True`, uses the nesterov update. + * If {@code True}, uses the nesterov update. 
*/ public final boolean useNesterov; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAddSign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAddSign.java index 6adc7810232..69127231eb1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAddSign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAddSign.java @@ -38,8 +38,6 @@ * m_t <- beta1 * m_{t-1} + (1 - beta1) * g * update <- (alpha + sign_decay * sign(g) *sign(m)) * g * variable <- variable - lr_t * update - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyAddSign.OP_NAME, @@ -195,7 +193,7 @@ public static class Inputs extends RawOpInputs> public final DataType T; /** - * If `True`, updating of the var and m tensors is + * If {@code True}, updating of the var and m tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. 
*/ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyCenteredRmsProp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyCenteredRmsProp.java index ff482191f8c..f7801bf277e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyCenteredRmsProp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyCenteredRmsProp.java @@ -49,8 +49,6 @@ * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) * var <- var - mom - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyCenteredRmsProp.OP_NAME, @@ -220,7 +218,7 @@ public static class Inputs extends RawOpInputs data type for {@code out} output */ @OpMetadata( opType = ApplyFtrl.OP_NAME, @@ -239,7 +237,7 @@ public static class Inputs extends RawOpInputs> { public final DataType T; /** - * If `True`, updating of the var and accum tensors will be protected + * If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyGradientDescent.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyGradientDescent.java index 3ecfae71626..5ebb7b31330 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyGradientDescent.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyGradientDescent.java @@ -35,8 +35,6 @@ /** * Update '*var' by subtracting 'alpha' * 'delta' from it. 
- * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyGradientDescent.OP_NAME, @@ -161,7 +159,7 @@ public static class Inputs extends RawOpInputsaccum = accum * momentum + grad * var -= lr * accum - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyMomentum.OP_NAME, @@ -210,14 +208,14 @@ public static class Inputs extends RawOpInputs public final DataType T; /** - * If `True`, updating of the var and accum tensors will be protected + * If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. */ public final boolean useLocking; /** - * If `True`, the tensor passed to compute grad will be + * If {@code True}, the tensor passed to compute grad will be * var - lr * momentum * accum, so in the end, the var you get is actually * var - lr * momentum * accum. */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyPowerSign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyPowerSign.java index 800923076c3..f298f853be2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyPowerSign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyPowerSign.java @@ -38,8 +38,6 @@ * m_t <- beta1 * m_{t-1} + (1 - beta1) * g * update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g * variable <- variable - lr_t * update - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyPowerSign.OP_NAME, @@ -195,7 +193,7 @@ public static class Inputs extends RawOpInputsl2) * max{|prox_v|-lrl1,0} - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyProximalAdagrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalGradientDescent.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalGradientDescent.java index 488faf4d559..ffd6ee70e68 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalGradientDescent.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalGradientDescent.java @@ -37,8 +37,6 @@ * Update '*var' as FOBOS algorithm with fixed learning rate. * prox_v = var - alpha * delta * var = sign(prox_v)/(1+alphal2) * max{|prox_v|-alphal1,0} - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyProximalGradientDescent.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyRmsProp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyRmsProp.java index f4ea6418cbb..fcfeb5b895a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyRmsProp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyRmsProp.java @@ -43,8 +43,6 @@ *