From e0eec4a6c616120e2b7d2e568388baa740829639 Mon Sep 17 00:00:00 2001
From: Ryan Nett
Date: Wed, 10 Nov 2021 17:38:37 -0800
Subject: [PATCH 01/21] Graph custom gradient support (#292)

---
 tensorflow-core/tensorflow-core-api/WORKSPACE | 2 +
 .../external/custom-grad-helpers.patch | 57 +++
 .../external/custom-grad-symbols.patch | 151 ++++++++
 tensorflow-core/tensorflow-core-api/pom.xml | 18 +
 .../annotations/org/tensorflow/op/Ops.java | 8 +-
 .../tensorflow/internal/c_api/GradFunc.java | 25 ++
 .../internal/c_api/GradOpRegistry.java | 48 +++
 .../tensorflow/internal/c_api/NameMap.java | 41 ++
 .../internal/c_api/NativeGraphPointer.java | 18 +
 .../internal/c_api/NativeOperation.java | 49 +++
 .../internal/c_api/NativeOutput.java | 45 +++
 .../internal/c_api/NativeOutputVector.java | 79 ++++
 .../internal/c_api/NativeStatus.java | 94 +++++
 .../org/tensorflow/internal/c_api/Node.java | 155 ++++++++
 .../internal/c_api/NodeBuilder.java | 17 +
 .../tensorflow/internal/c_api/TF_Graph.java | 45 ++-
 .../internal/c_api/TF_Operation.java | 11 +-
 .../c_api/TF_OperationDescription.java | 12 +-
 .../tensorflow/internal/c_api/TF_Scope.java | 216 +++++++++++
 .../internal/c_api/global/tensorflow.java | 358 +++++++++++++++++-
 .../src/gen/java/org/tensorflow/op/... | 13 +-  (the same 13-line diffstat is repeated for every generated op wrapper class under op/audio, op/bitwise, op/cluster, op/collective, op/core, op/data, op/debugging, op/distribute, op/dtypes, op/estimator, op/image, op/io, op/linalg, op/math, op/nn, op/quantization, op/ragged, op/random, op/rawops, op/risc, op/signal, op/sparse, op/strings, op/summary, op/tpu, op/train, and op/xla)
+- .../op/xla/RemoveDynamicDimensionSize.java | 13 +- .../java/org/tensorflow/op/xla/ReplicaId.java | 13 +- .../java/org/tensorflow/op/xla/Scatter.java | 13 +- .../tensorflow/op/xla/SelectAndScatter.java | 13 +- .../org/tensorflow/op/xla/SelfAdjointEig.java | 13 +- .../gen/java/org/tensorflow/op/xla/Send.java | 13 +- .../op/xla/SetDynamicDimensionSize.java | 13 +- .../java/org/tensorflow/op/xla/Sharding.java | 13 +- .../gen/java/org/tensorflow/op/xla/Sort.java | 13 +- .../op/xla/SpmdFullToShardShape.java | 13 +- .../op/xla/SpmdShardToFullShape.java | 13 +- .../gen/java/org/tensorflow/op/xla/Svd.java | 13 +- .../gen/java/org/tensorflow/op/xla/While.java | 13 +- .../org/tensorflow/op/xla/XlaHostCompute.java | 13 +- .../java/org/tensorflow/op/xla/XlaLaunch.java | 13 +- .../tensorflow/op/xla/XlaRecvFromHost.java | 13 +- .../org/tensorflow/op/xla/XlaSendToHost.java | 13 +- .../org/tensorflow/op/xla/XlaSetBound.java | 13 +- .../tensorflow/op/xla/XlaVariadicReduce.java | 13 +- .../tensorflow/op/xla/XlaVariadicSort.java | 13 +- .../org/tensorflow/BaseGradientAdapter.java | 90 +++++ .../java/org/tensorflow/EagerSession.java | 3 +- .../src/main/java/org/tensorflow/Graph.java | 70 +++- .../java/org/tensorflow/GraphOperation.java | 20 +- .../org/tensorflow/GraphOperationBuilder.java | 71 +++- .../main/java/org/tensorflow/TensorFlow.java | 125 +++++- .../internal/c_api/presets/tensorflow.java | 165 +++++++- .../org/tensorflow/op/CustomGradient.java | 62 +++ .../java/org/tensorflow/op/GradientScope.java | 159 ++++++++ .../main/java/org/tensorflow/op/OpScope.java | 153 ++++++++ .../org/tensorflow/op/RawCustomGradient.java | 59 +++ .../org/tensorflow/op/RawGradientAdapter.java | 71 ++++ .../main/java/org/tensorflow/op/RawOp.java | 13 +- .../main/java/org/tensorflow/op/Scope.java | 157 ++------ .../tensorflow/op/TypedGradientAdapter.java | 82 ++++ .../op/annotation/OpInputsMetadata.java | 36 ++ .../tensorflow/op/annotation/OpMetadata.java | 40 ++ .../java/org/tensorflow/op/core/Constant.java | 2 +- .../c_api/include/tensorflow_adapters.h | 42 ++ .../org/tensorflow/CustomGradientTest.java | 81 ++++ .../org/tensorflow/GraphOperationTest.java | 1 + .../test/java/org/tensorflow/GraphTest.java | 3 +- .../test/java/org/tensorflow/SessionTest.java | 2 +- .../java/org/tensorflow/op/RawOpTest.java | 18 +- .../java/org/tensorflow/op/ScopeTest.java | 18 +- .../tensorflow/op/core/BooleanMaskTest.java | 33 +- .../op/core/BooleanMaskUpdateTest.java | 62 +-- .../org/tensorflow/op/core/ConstantTest.java | 11 +- .../org/tensorflow/op/core/IndexingTest.java | 39 +- .../org/tensorflow/op/core/ShapesTest.java | 108 +++--- .../org/tensorflow/op/core/ZerosTest.java | 39 +- .../src/main/java/org/tensorflow/Names.java | 4 + .../op/generator/ClassGenerator.java | 51 ++- .../processor/operator/OperatorProcessor.java | 4 +- .../tensorflow/framework/op/FrameworkOps.java | 5 +- .../framework/optimizers/Optimizer.java | 3 +- 1367 files changed, 17408 insertions(+), 2986 deletions(-) create mode 100644 tensorflow-core/tensorflow-core-api/external/custom-grad-helpers.patch create mode 100644 tensorflow-core/tensorflow-core-api/external/custom-grad-symbols.patch create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/GradFunc.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/GradOpRegistry.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NameMap.java create mode 100644 
tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeGraphPointer.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOperation.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOutput.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOutputVector.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeStatus.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Node.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NodeBuilder.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Scope.java create mode 100644 tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/BaseGradientAdapter.java create mode 100644 tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/CustomGradient.java create mode 100644 tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/GradientScope.java create mode 100644 tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/OpScope.java create mode 100644 tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/RawCustomGradient.java create mode 100644 tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/RawGradientAdapter.java create mode 100644 tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/TypedGradientAdapter.java create mode 100644 tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/annotation/OpInputsMetadata.java create mode 100644 tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/annotation/OpMetadata.java create mode 100644 tensorflow-core/tensorflow-core-api/src/main/resources/org/tensorflow/internal/c_api/include/tensorflow_adapters.h create mode 100644 tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/CustomGradientTest.java diff --git a/tensorflow-core/tensorflow-core-api/WORKSPACE b/tensorflow-core/tensorflow-core-api/WORKSPACE index f6aa07115ed..013338cfbba 100644 --- a/tensorflow-core/tensorflow-core-api/WORKSPACE +++ b/tensorflow-core/tensorflow-core-api/WORKSPACE @@ -12,6 +12,8 @@ http_archive( # ":tensorflow-macosx.patch", # ":tensorflow-windows.patch", # https://github.com/tensorflow/tensorflow/issues/25213 ":tensorflow-proto.patch", + ":custom-grad-helpers.patch", + ":custom-grad-symbols.patch", ], patch_tool = "patch", patch_args = ["-p1"], diff --git a/tensorflow-core/tensorflow-core-api/external/custom-grad-helpers.patch b/tensorflow-core/tensorflow-core-api/external/custom-grad-helpers.patch new file mode 100644 index 00000000000..aedf53e2e56 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/external/custom-grad-helpers.patch @@ -0,0 +1,57 @@ +diff --git a/tensorflow/c/c_api.cc b/tensorflow/c/c_api.cc +index f3bf7b98a1e6b..c9194c36c116b 100644 +--- a/tensorflow/c/c_api.cc ++++ b/tensorflow/c/c_api.cc +@@ -782,9 +782,9 @@ void TF_GraphGetTensorShape(TF_Graph* graph, TF_Output output, int64_t* dims, + + extern "C" { + +-static TF_OperationDescription* TF_NewOperationLocked(TF_Graph* graph, +- const char* op_type, +- const char* oper_name) ++TF_OperationDescription* TF_NewOperationLocked(TF_Graph* graph, ++ const char* op_type, ++ const char* oper_name) + TF_EXCLUSIVE_LOCKS_REQUIRED(graph->mu) 
{ + return new TF_OperationDescription(graph, op_type, oper_name); + } +@@ -1041,8 +1041,8 @@ void TF_SetAttrValueProto(TF_OperationDescription* desc, const char* attr_name, + status->status = Status::OK(); + } + +-static TF_Operation* TF_FinishOperationLocked(TF_OperationDescription* desc, +- TF_Status* status) ++TF_Operation* TF_FinishOperationLocked(TF_OperationDescription* desc, ++ TF_Status* status) + TF_EXCLUSIVE_LOCKS_REQUIRED(desc->graph->mu) { + Node* ret = nullptr; + +diff --git a/tensorflow/c/c_api.h b/tensorflow/c/c_api.h +index 705cf85e0512f..fb746dd4af94f 100644 +--- a/tensorflow/c/c_api.h ++++ b/tensorflow/c/c_api.h +@@ -255,6 +255,12 @@ TF_CAPI_EXPORT extern void TF_GraphGetTensorShape(TF_Graph* graph, + int64_t* dims, int num_dims, + TF_Status* status); + ++// TF_NewOperation, but without locking the graph. ++// Should prefer TF_NewOperation when possible. ++TF_CAPI_EXPORT extern TF_OperationDescription* TF_NewOperationLocked(TF_Graph* graph, ++ const char* op_type, ++ const char* oper_name); ++ + // Operation will only be added to *graph when TF_FinishOperation() is + // called (assuming TF_FinishOperation() does not return an error). + // *graph must not be deleted until after TF_FinishOperation() is +@@ -406,6 +412,11 @@ TF_CAPI_EXPORT extern void TF_SetAttrValueProto(TF_OperationDescription* desc, + size_t proto_len, + TF_Status* status); + ++// TF_FinishOperation, but without locking the graph. ++// TF_FinishOperation should be preferred when possible. ++TF_CAPI_EXPORT extern TF_Operation* TF_FinishOperationLocked(TF_OperationDescription* desc, ++ TF_Status* status); ++ + // If this function succeeds: + // * *status is set to an OK value, + // * a TF_Operation is added to the graph, diff --git a/tensorflow-core/tensorflow-core-api/external/custom-grad-symbols.patch b/tensorflow-core/tensorflow-core-api/external/custom-grad-symbols.patch new file mode 100644 index 00000000000..c47b9da0127 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/external/custom-grad-symbols.patch @@ -0,0 +1,151 @@ +Index: tensorflow/tools/def_file_filter/BUILD +IDEA additional info: +Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP +<+>UTF-8 +=================================================================== +diff --git a/tensorflow/tools/def_file_filter/BUILD b/tensorflow/tools/def_file_filter/BUILD +--- a/tensorflow/tools/def_file_filter/BUILD (revision 5e5cc35b4c0f629a1e092b540fdf2b63367aa5ad) ++++ b/tensorflow/tools/def_file_filter/BUILD (date 1629063191558) +@@ -12,3 +12,8 @@ + name = "symbols_pybind", + srcs = ["symbols_pybind.txt"], + ) ++ ++filegroup( ++ name = "symbols_java", ++ srcs = ["symbols_java.txt"], ++) +Index: tensorflow/BUILD +IDEA additional info: +Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP +<+>UTF-8 +=================================================================== +diff --git a/tensorflow/BUILD b/tensorflow/BUILD +--- a/tensorflow/BUILD (revision 5e5cc35b4c0f629a1e092b540fdf2b63367aa5ad) ++++ b/tensorflow/BUILD (date 1629063361078) +@@ -1069,13 +1069,20 @@ + # the dynamic libraries of custom ops can find it at runtime. 
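The helper patch above exports TF_NewOperationLocked and TF_FinishOperationLocked so that ops can be added to a graph whose mutex is already held, which is the situation a Java gradient callback runs in. Below is a minimal, hypothetical sketch of driving those entry points; it assumes the patched c_api.h declarations are picked up into org.tensorflow.internal.c_api.global.tensorflow alongside the existing TF_AddInput / TF_GetCode / TF_Message bindings, and that the caller already holds the graph lock.

```java
import org.tensorflow.internal.c_api.TF_Graph;
import org.tensorflow.internal.c_api.TF_Operation;
import org.tensorflow.internal.c_api.TF_OperationDescription;
import org.tensorflow.internal.c_api.TF_Output;
import org.tensorflow.internal.c_api.TF_Status;

import static org.tensorflow.internal.c_api.global.tensorflow.*;

/** Sketch: add an Identity op while the graph mutex is already locked. */
final class LockedOpSketch {
  static TF_Operation addIdentityLocked(TF_Graph graph, TF_Output input, String name) {
    // Unlocked variant of TF_NewOperation, per the c_api.h additions above (assumed binding).
    TF_OperationDescription desc = TF_NewOperationLocked(graph, "Identity", name);
    TF_AddInput(desc, input);
    try (TF_Status status = TF_Status.newStatus()) {
      // Unlocked variant of TF_FinishOperation (assumed binding).
      TF_Operation op = TF_FinishOperationLocked(desc, status);
      if (TF_GetCode(status) != TF_OK) {
        throw new IllegalStateException(TF_Message(status).getString());
      }
      return op;
    }
  }
}
```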
+ genrule( + name = "tensorflow_filtered_def_file", +- srcs = [":tensorflow_def_file"], ++ srcs = [ ++ ":tensorflow_def_file", ++ ":java_symbol_target_libs_file", ++ ":win_lib_files_for_java_exported_symbols", ++ "//tensorflow/tools/def_file_filter:symbols_java", ++ ], + outs = ["tensorflow_filtered_def_file.def"], + cmd = select({ + "//tensorflow:windows": """ + $(location @local_config_def_file_filter//:def_file_filter) \\ + --input $(location :tensorflow_def_file) \\ +- --output $@ ++ --output $@ \\ ++ --symbols $(location //tensorflow/tools/def_file_filter:symbols_java) \\ ++ --lib_paths_file $(location :java_symbol_target_libs_file) + """, + "//conditions:default": "touch $@", # Just a placeholder for Unix platforms + }), +@@ -1083,6 +1090,34 @@ + visibility = ["//visibility:public"], + ) + ++# Write to a file a list of all cc_library targets that we need for exporting symbols on Windows. ++genrule( ++ name = "java_symbol_target_libs_file", ++ srcs = [":win_lib_files_for_java_exported_symbols"], ++ outs = ["java_symbol_target_libs_file.txt"], ++ cmd = select({ ++ "//tensorflow:windows": """ ++ for SRC in $(SRCS); do ++ echo $$SRC | sed 's/third_party\\///g' >> $@ ++ done ++ """, ++ "//conditions:default": "touch $@", # Just a placeholder for Unix platforms ++ }), ++ visibility = ["//visibility:public"], ++) ++ ++filegroup( ++ name = "win_lib_files_for_java_exported_symbols", ++ srcs = [ ++ "//tensorflow/cc:scope", ++ "//tensorflow/cc:grad_op_registry", ++ "//tensorflow/c:tf_status_helper", ++ "//tensorflow/cc:ops" ++ ], ++ visibility = ["//visibility:private"], ++) ++ ++ + # The interface library (tensorflow.dll.if.lib) for linking tensorflow DLL library (tensorflow.dll) on Windows. + # To learn more about import library (called interface library in Bazel): + # https://docs.microsoft.com/en-us/cpp/build/linking-an-executable-to-a-dll?view=vs-2017#linking-implicitly +Index: tensorflow/tools/def_file_filter/BUILD.tpl +IDEA additional info: +Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP +<+>UTF-8 +=================================================================== +diff --git a/tensorflow/tools/def_file_filter/BUILD.tpl b/tensorflow/tools/def_file_filter/BUILD.tpl +--- a/tensorflow/tools/def_file_filter/BUILD.tpl (revision 5e5cc35b4c0f629a1e092b540fdf2b63367aa5ad) ++++ b/tensorflow/tools/def_file_filter/BUILD.tpl (date 1629063191583) +@@ -18,3 +18,8 @@ + name = "symbols_pybind", + srcs = ["symbols_pybind.txt"], + ) ++ ++filegroup( ++ name = "symbols_java", ++ srcs = ["symbols_java.txt"], ++) +Index: tensorflow/tools/def_file_filter/symbols_java.txt +IDEA additional info: +Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP +<+>UTF-8 +=================================================================== +diff --git a/tensorflow/tools/def_file_filter/symbols_java.txt b/tensorflow/tools/def_file_filter/symbols_java.txt +new file mode 100644 +--- /dev/null (date 1629063607794) ++++ b/tensorflow/tools/def_file_filter/symbols_java.txt (date 1629063607794) +@@ -0,0 +1,26 @@ ++[//tensorflow/cc:scope] # scope ++tensorflow::Scope::graph ++tensorflow::Scope::ok ++tensorflow::Scope::UpdateBuilder ++tensorflow::Scope::GetUniqueNameForOp ++tensorflow::Scope::ExitOnError ++tensorflow::Scope::WithDevice ++tensorflow::Scope::WithNoControlDependencies ++tensorflow::Scope::WithControlDependencies ++tensorflow::Scope::NewSubScope ++tensorflow::Scope::NewRootScope ++tensorflow::Scope::operator= ++tensorflow::Scope::~Scope ++tensorflow::Scope::Scope ++ ++[//tensorflow/cc:ops] 
++tensorflow::Operation::Operation ++ ++[//tensorflow/cc:grad_op_registry] # custom gradients for graph ++tensorflow::ops::GradOpRegistry::Global ++tensorflow::ops::GradOpRegistry::Lookup ++tensorflow::ops::GradOpRegistry::Register ++ ++[//tensorflow/c:tf_status_helper] # status helpers ++tensorflow::Set_TF_Status_from_Status ++tensorflow::StatusFromTF_Status +=================================================================== +diff --git a/tensorflow/tools/def_file_filter/def_file_filter.py.tpl b/tensorflow/tools/def_file_filter/def_file_filter.py.tpl +--- a/tensorflow/tools/def_file_filter/def_file_filter.py.tpl (revision 919f693420e35d00c8d0a42100837ae3718f7927) ++++ b/tensorflow/tools/def_file_filter/def_file_filter.py.tpl (date 1632048268359) +@@ -143,8 +143,8 @@ + re_filter_comp = re.compile(r"{}".format(re_filter)) + + # Filter out symbol from the split line (`sym_split` in the for loop below). +- sym_line_filter = r".*\s+\| (.*) \(.*" +- sym_line_filter_anomaly = r".*\s+\| (.*)" ++ sym_line_filter = r".*\s+\| (.*?) \(.*" ++ sym_line_filter_anomaly = r".*\s+\| (.*?)" + + for sym_line in sym_split: + if re_filter_comp.search(sym_line): diff --git a/tensorflow-core/tensorflow-core-api/pom.xml b/tensorflow-core/tensorflow-core-api/pom.xml index 9f23757e83d..5c35a6e2c62 100644 --- a/tensorflow-core/tensorflow-core-api/pom.xml +++ b/tensorflow-core/tensorflow-core-api/pom.xml @@ -143,6 +143,19 @@ + + maven-resources-plugin + 3.1.0 + + + javacpp-parser + generate-sources + + resources + + + + maven-compiler-plugin 3.8.0 @@ -212,6 +225,11 @@ ${project.basedir}/ ${project.basedir}/bazel-${project.artifactId}/external/org_tensorflow/ + ${project.basedir}/bazel-bin/external/org_tensorflow/ + ${project.basedir}/bazel-${project.artifactId}/external/com_google_absl/ + ${project.basedir}/bazel-${project.artifactId}/external/eigen_archive/ + ${project.basedir}/bazel-${project.artifactId}/external/com_google_protobuf/src/ + ${project.basedir}/target/classes/org/tensorflow/internal/c_api/include/ ${project.basedir}/bazel-bin/external/llvm_openmp/ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java index 8da481a6c72..4397483d606 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java @@ -367,10 +367,10 @@ public final class Ops { public final SparseOps sparse; - public final BitwiseOps bitwise; - public final TpuOps tpu; + public final BitwiseOps bitwise; + public final MathOps math; public final AudioOps audio; @@ -383,7 +383,7 @@ public final class Ops { private final Scope scope; - private Ops(Scope scope) { + Ops(Scope scope) { this.scope = scope; nn = new NnOps(this); summary = new SummaryOps(this); @@ -398,8 +398,8 @@ private Ops(Scope scope) { random = new RandomOps(this); strings = new StringsOps(this); sparse = new SparseOps(this); - bitwise = new BitwiseOps(this); tpu = new TpuOps(this); + bitwise = new BitwiseOps(this); math = new MathOps(this); audio = new AudioOps(this); signal = new SignalOps(this); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/GradFunc.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/GradFunc.java new file mode 100644 index 00000000000..df5ceb98746 --- /dev/null +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/GradFunc.java @@ -0,0 +1,25 @@ +// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE + +package org.tensorflow.internal.c_api; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.tensorflow.internal.c_api.global.tensorflow.*; + + +/** GradFunc is the signature for all gradient functions in GradOpRegistry. + * Implementations should add operations to compute the gradient outputs of + * 'op' (returned in 'grad_outputs') using 'scope' and 'grad_inputs'. */ +@Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) +public class GradFunc extends FunctionPointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public GradFunc(Pointer p) { super(p); } + protected GradFunc() { allocate(); } + private native void allocate(); + public native @ByVal NativeStatus call(@Const @ByRef TF_Scope scope, @Const @ByRef NativeOperation op, + @Const @ByRef NativeOutputVector grad_inputs, + NativeOutputVector grad_outputs); +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/GradOpRegistry.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/GradOpRegistry.java new file mode 100644 index 00000000000..7a1d69bafbd --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/GradOpRegistry.java @@ -0,0 +1,48 @@ +// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE + +package org.tensorflow.internal.c_api; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.tensorflow.internal.c_api.global.tensorflow.*; + + +/** GradOpRegistry maintains a static registry of gradient functions. + * Gradient functions are indexed in the registry by the forward op name (i.e. + * "MatMul" -> MatMulGrad func). */ +@Namespace("tensorflow::ops") @Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) +public class GradOpRegistry extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public GradOpRegistry() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public GradOpRegistry(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public GradOpRegistry(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public GradOpRegistry position(long position) { + return (GradOpRegistry)super.position(position); + } + @Override public GradOpRegistry getPointer(long i) { + return new GradOpRegistry((Pointer)this).offsetAddress(i); + } + + /** Registers 'func' as the gradient function for 'op'. + * Returns true if registration was successful, check fails otherwise. */ + public native @Cast("bool") boolean Register(@StdString BytePointer op, GradFunc func); + public native @Cast("bool") boolean Register(@StdString String op, GradFunc func); + + /** Sets 'func' to the gradient function for 'op' and returns Status OK if + * the gradient function for 'op' exists in the registry. + * Note that 'func' can be null for ops that have registered no-gradient with + * the registry. + * Returns error status otherwise. 
*/ + public native @ByVal NativeStatus Lookup(@StdString BytePointer op, @ByPtrPtr GradFunc func); + public native @ByVal NativeStatus Lookup(@StdString String op, @ByPtrPtr GradFunc func); + + /** Returns a pointer to the global gradient function registry. */ + public static native GradOpRegistry Global(); +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NameMap.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NameMap.java new file mode 100644 index 00000000000..0be7fab2798 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NameMap.java @@ -0,0 +1,41 @@ +// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE + +package org.tensorflow.internal.c_api; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.tensorflow.internal.c_api.global.tensorflow.*; + +@Name("std::unordered_map") @Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) +public class NameMap extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public NameMap(Pointer p) { super(p); } + public NameMap() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef NameMap put(@ByRef NameMap x); + + public boolean empty() { return size() == 0; } + public native long size(); + + @Index public native Node get(@StdString BytePointer i); + public native NameMap put(@StdString BytePointer i, Node value); + + public native void erase(@ByVal Iterator pos); + public native @ByVal Iterator begin(); + public native @ByVal Iterator end(); + @NoOffset @Name("iterator") public static class Iterator extends Pointer { + public Iterator(Pointer p) { super(p); } + public Iterator() { } + + public native @Name("operator ++") @ByRef Iterator increment(); + public native @Name("operator ==") boolean equals(@ByRef Iterator it); + public native @Name("operator *().first") @MemberGetter @StdString BytePointer first(); + public native @Name("operator *().second") @MemberGetter @Const Node second(); + } + + public native long erase(@StdString BytePointer key); +} + diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeGraphPointer.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeGraphPointer.java new file mode 100644 index 00000000000..b947a4b322f --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeGraphPointer.java @@ -0,0 +1,18 @@ +// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE + +package org.tensorflow.internal.c_api; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.tensorflow.internal.c_api.global.tensorflow.*; + + +@Name("tensorflow::Graph") @Opaque @Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) +public class NativeGraphPointer extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public NativeGraphPointer() { super((Pointer)null); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
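GradFunc and GradOpRegistry above are the JavaCPP views of tensorflow::ops::GradFunc and tensorflow::ops::GradOpRegistry, the registry that the new graph gradient support plugs into. The sketch below shows, under stated assumptions, how a gradient callback could be registered through these bindings: it assumes GradFunc can be subclassed as a JavaCPP callback (the adapter classes added elsewhere in this patch follow that pattern), it uses the NativeOutputVector and NativeStatus bindings declared later in this diff, and "MyCustomOp" is a placeholder op name.

```java
import org.tensorflow.internal.c_api.GradFunc;
import org.tensorflow.internal.c_api.GradOpRegistry;
import org.tensorflow.internal.c_api.NativeOperation;
import org.tensorflow.internal.c_api.NativeOutputVector;
import org.tensorflow.internal.c_api.NativeStatus;
import org.tensorflow.internal.c_api.TF_Scope;

/** Sketch: a gradient callback for an identity-like op that forwards incoming gradients. */
final class IdentityLikeGradient extends GradFunc {

  // Held in a static field so the Java callback object (and its native trampoline)
  // is not garbage collected while the native registry still refers to it.
  private static final GradFunc INSTANCE = new IdentityLikeGradient();

  static void register() {
    // Registers the callback for the (placeholder) forward op type "MyCustomOp".
    GradOpRegistry.Global().Register("MyCustomOp", INSTANCE);
  }

  @Override
  public NativeStatus call(TF_Scope scope, NativeOperation op,
                           NativeOutputVector gradInputs, NativeOutputVector gradOutputs) {
    // For an identity-like op, the gradient of each input is simply the incoming
    // gradient of the corresponding output.
    for (long i = 0; i < gradInputs.size(); i++) {
      gradOutputs.push_back(gradInputs.get(i));
    }
    return NativeStatus.OK();
  }
}
```

In practice the higher-level CustomGradient / RawCustomGradient API added by this patch wraps this plumbing; the sketch only illustrates what the native bindings express.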
*/ + public NativeGraphPointer(Pointer p) { super(p); } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOperation.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOperation.java new file mode 100644 index 00000000000..af771a0aa12 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOperation.java @@ -0,0 +1,49 @@ +// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE + +package org.tensorflow.internal.c_api; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.tensorflow.internal.c_api.global.tensorflow.*; + + +/** \addtogroup core + * \{ +

+ * Represents a node in the computation graph. */ +@Name("tensorflow::Operation") @NoOffset @Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) +public class NativeOperation extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public NativeOperation(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public NativeOperation(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public NativeOperation position(long position) { + return (NativeOperation)super.position(position); + } + @Override public NativeOperation getPointer(long i) { + return new NativeOperation((Pointer)this).offsetAddress(i); + } + + public NativeOperation() { super((Pointer)null); allocate(); } + private native void allocate(); + public NativeOperation(Node n) { super((Pointer)null); allocate(n); } + private native void allocate(Node n); + + + + + + + + + + public native Node node(); + + + + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef NativeOperation other); +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOutput.java new file mode 100644 index 00000000000..55bcac60f5b --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOutput.java @@ -0,0 +1,45 @@ +// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE + +package org.tensorflow.internal.c_api; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.tensorflow.internal.c_api.global.tensorflow.*; + + +/** Represents a tensor value produced by an Operation. */ +@Name("tensorflow::Output") @NoOffset @Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) +public class NativeOutput extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public NativeOutput(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ + public NativeOutput(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public NativeOutput position(long position) { + return (NativeOutput)super.position(position); + } + @Override public NativeOutput getPointer(long i) { + return new NativeOutput((Pointer)this).offsetAddress(i); + } + + public NativeOutput() { super((Pointer)null); allocate(); } + private native void allocate(); + public NativeOutput(Node n) { super((Pointer)null); allocate(n); } + private native void allocate(Node n); + public NativeOutput(Node n, @Cast("tensorflow::int32") int index) { super((Pointer)null); allocate(n, index); } + private native void allocate(Node n, @Cast("tensorflow::int32") int index); + public NativeOutput(@Const @ByRef NativeOperation op, @Cast("tensorflow::int32") int index) { super((Pointer)null); allocate(op, index); } + private native void allocate(@Const @ByRef NativeOperation op, @Cast("tensorflow::int32") int index); + + public native @ByVal NativeOperation op(); + public native Node node(); + public native @Cast("tensorflow::int32") int index(); + + public native @StdString BytePointer name(); + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef NativeOutput other); + + +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOutputVector.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOutputVector.java new file mode 100644 index 00000000000..9dc9dd5b971 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOutputVector.java @@ -0,0 +1,79 @@ +// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE + +package org.tensorflow.internal.c_api; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.tensorflow.internal.c_api.global.tensorflow.*; + +@Name("std::vector") @Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) +public class NativeOutputVector extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public NativeOutputVector(Pointer p) { super(p); } + public NativeOutputVector(NativeOutput value) { this(1); put(0, value); } + public NativeOutputVector(NativeOutput ... 
array) { this(array.length); put(array); } + public NativeOutputVector() { allocate(); } + public NativeOutputVector(long n) { allocate(n); } + private native void allocate(); + private native void allocate(@Cast("size_t") long n); + public native @Name("operator =") @ByRef NativeOutputVector put(@ByRef NativeOutputVector x); + + public boolean empty() { return size() == 0; } + public native long size(); + public void clear() { resize(0); } + public native void resize(@Cast("size_t") long n); + + @Index(function = "at") public native @ByRef NativeOutput get(@Cast("size_t") long i); + public native NativeOutputVector put(@Cast("size_t") long i, NativeOutput value); + + public native @ByVal Iterator insert(@ByVal Iterator pos, @ByRef NativeOutput value); + public native @ByVal Iterator erase(@ByVal Iterator pos); + public native @ByVal Iterator begin(); + public native @ByVal Iterator end(); + @NoOffset @Name("iterator") public static class Iterator extends Pointer { + public Iterator(Pointer p) { super(p); } + public Iterator() { } + + public native @Name("operator ++") @ByRef Iterator increment(); + public native @Name("operator ==") boolean equals(@ByRef Iterator it); + public native @Name("operator *") @ByRef @Const NativeOutput get(); + } + + public NativeOutput[] get() { + NativeOutput[] array = new NativeOutput[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE]; + for (int i = 0; i < array.length; i++) { + array[i] = get(i); + } + return array; + } + @Override public String toString() { + return java.util.Arrays.toString(get()); + } + + public NativeOutput pop_back() { + long size = size(); + NativeOutput value = get(size - 1); + resize(size - 1); + return value; + } + public NativeOutputVector push_back(NativeOutput value) { + long size = size(); + resize(size + 1); + return put(size, value); + } + public NativeOutputVector put(NativeOutput value) { + if (size() != 1) { resize(1); } + return put(0, value); + } + public NativeOutputVector put(NativeOutput ... array) { + if (size() != array.length) { resize(array.length); } + for (int i = 0; i < array.length; i++) { + put(i, array[i]); + } + return this; + } +} + diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeStatus.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeStatus.java new file mode 100644 index 00000000000..7e21aae659b --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeStatus.java @@ -0,0 +1,94 @@ +// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE + +package org.tensorflow.internal.c_api; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.tensorflow.internal.c_api.global.tensorflow.*; + +// #endif + +/** \ingroup core + * Denotes success or failure of a call in Tensorflow. */ +@Name("tensorflow::Status") @NoOffset @Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) +public class NativeStatus extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public NativeStatus(Pointer p) { super(p); } + + /** Create a success status. */ + + /** \brief Create a status with the specified error code and msg as a + * human-readable string containing more detailed information. 
*/ + + /** \brief Create a status with the specified error code, msg, and stack trace + * as a human-readable string containing more detailed information. */ +// #ifndef SWIG +// #endif + + /** Copy the specified status. */ + public native @ByRef @Name("operator =") NativeStatus put(@Const @ByRef NativeStatus s); +// #ifndef SWIG +// #endif // SWIG + + public static native @ByVal NativeStatus OK(); + + /** Returns true iff the status indicates success. */ + public native @Cast("bool") boolean ok(); + + + + public native @StdString BytePointer error_message(); + + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef NativeStatus x); + + /// + public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef NativeStatus x); + + /** \brief If {@code ok()}, stores {@code new_status} into {@code *this}. If {@code !ok()}, + * preserves the current status, but may augment with additional + * information about {@code new_status}. + * + * Convenient way of keeping track of the first error encountered. + * Instead of: + * {@code if (overall_status.ok()) overall_status = new_status} + * Use: + * {@code overall_status.Update(new_status);} */ + public native void Update(@Const @ByRef NativeStatus new_status); + + /** \brief Return a string representation of this status suitable for + * printing. Returns the string {@code "OK"} for success. */ + public native @StdString BytePointer ToString(); + + // Ignores any errors. This method does nothing except potentially suppress + // complaints from any tools that are checking that errors are not dropped on + // the floor. + public native void IgnoreError(); + + // The Payload-related APIs are cloned from absl::Status. + // + // Returns the payload of a status given its unique `type_url` key, if + // present. Returns an empty StringPiece if the status is ok, or if the key is + // not present. + + + // Sets the payload for a non-ok status using a `type_url` key, overwriting + // any existing payload for that `type_url`. + // + // This function does nothing if the Status is ok. + + + // Erases the payload corresponding to the `type_url` key. Returns `true` if + // the payload was present. + + + // Returns all the payload information. + // Returns an empty result if status is ok. + + + // Copies all the payloads using the input and discards existing payloads. + // Does nothing if status is ok or 'payloads' is empty. + +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Node.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Node.java new file mode 100644 index 00000000000..f3baf914963 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Node.java @@ -0,0 +1,155 @@ +// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE + +package org.tensorflow.internal.c_api; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.tensorflow.internal.c_api.global.tensorflow.*; + +// Parsed from tensorflow/core/graph/graph.h + +@Name("tensorflow::Node") @NoOffset @Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) +public class Node extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public Node(Pointer p) { super(p); } + + public native @StdString BytePointer DebugString(); + public native int id(); + public native int cost_id(); + public native @StdString BytePointer name(); + public native void set_name(@StdString BytePointer name); + public native void set_name(@StdString String name); + public native @StdString BytePointer type_string(); + + // def() provides the NodeDef the user supplied, but the specifics + // of this Node may have changed due to placement, optimization, etc. + // In particular: + // * def().name() will match name(); + // * def().op() will match type_string() and op_def().name(); + // * def().input() is not reliable, use "in_edges()" below instead; + // * def().device() is the "user's requested device" and may not match + // the actual assigned device, see assigned_device_name() below; + // * def().attr() is authoritative. + // TODO(irving): Replace with NodeInfo. + + // TODO(mdan): This is only used by control_flow_deps_o_chains. Remove? + + // input and output types + public native @Cast("tensorflow::int32") int num_inputs(); + + public native @Cast("tensorflow::int32") int num_outputs(); + + // The device requested by the user. For the actual assigned device, + // use assigned_device_name() below. + public native @StdString BytePointer requested_device(); + + // This changes the user requested device but not necessarily the device that + // on which the operation will run. + public native void set_requested_device(@StdString BytePointer device); + public native void set_requested_device(@StdString String device); + + // This gives the device the runtime has assigned this node to. If + // you want the device the user requested, use def().device() instead. + // TODO(josh11b): Validate that the assigned_device, if not empty: + // fully specifies a device, and satisfies def().device(). + // TODO(josh11b): Move assigned_device_name outside of Node into a + // NodeId->DeviceName map. + public native @StdString BytePointer assigned_device_name(); + public native void set_assigned_device_name(@StdString BytePointer device_name); + public native void set_assigned_device_name(@StdString String device_name); + public native @Cast("bool") boolean has_assigned_device_name(); + public native int assigned_device_name_index(); + public native void set_assigned_device_name_index(int index); + + // Sets 'original_node_names' field of this node's DebugInfo proto to + // 'names'. + + + // Read only access to attributes + + // Inputs requested by the NodeDef. For the actual inputs, use in_edges. + + // Get the neighboring nodes via edges either in or out of this node. This + // includes control edges. + + // Node type helpers. + public native @Cast("bool") boolean IsSource(); + public native @Cast("bool") boolean IsSink(); + // Anything other than the special Source & Sink nodes. 
+ public native @Cast("bool") boolean IsOp(); + + // Node class helpers + public native @Cast("bool") boolean IsSwitch(); + public native @Cast("bool") boolean IsMerge(); + public native @Cast("bool") boolean IsEnter(); + public native @Cast("bool") boolean IsExit(); + public native @Cast("bool") boolean IsNextIteration(); + public native @Cast("bool") boolean IsLoopCond(); + public native @Cast("bool") boolean IsControlTrigger(); + public native @Cast("bool") boolean IsSend(); + public native @Cast("bool") boolean IsRecv(); + public native @Cast("bool") boolean IsConstant(); + public native @Cast("bool") boolean IsVariable(); + public native @Cast("bool") boolean IsIdentity(); + public native @Cast("bool") boolean IsGetSessionHandle(); + public native @Cast("bool") boolean IsGetSessionTensor(); + public native @Cast("bool") boolean IsDeleteSessionTensor(); + public native @Cast("bool") boolean IsControlFlow(); + public native @Cast("bool") boolean IsHostSend(); + public native @Cast("bool") boolean IsHostRecv(); + public native @Cast("bool") boolean IsScopedAllocator(); + public native @Cast("bool") boolean IsCollective(); + + public native @Cast("bool") boolean IsMetadata(); + public native @Cast("bool") boolean IsFakeParam(); + public native @Cast("bool") boolean IsPartitionedCall(); + + // Returns true if this node is any kind of function call node. + // + // NOTE: "function call nodes" include partitioned call ops, symbolic gradient + // ops, and ops whose type_string is the name of a function ("function ops"). + public native @Cast("bool") boolean IsFunctionCall(); + + public native @Cast("bool") boolean IsIfNode(); + public native @Cast("bool") boolean IsWhileNode(); + public native @Cast("bool") boolean IsCaseNode(); + // Is this node a function input + public native @Cast("bool") boolean IsArg(); + // Is this node a function output + public native @Cast("bool") boolean IsRetval(); + + public native @Cast("bool") boolean IsDistributedCommunication(); + + + + + + + + // Returns into '*e' the edge connecting to the 'idx' input of this Node. + + // Returns into '*edges' the input data edges of this Node, indexed by input + // number. Does not return control edges. + + // Returns into '*n' the node that has an output connected to the + // 'idx' input of this Node. + + + + // Returns into '*t' the idx-th input tensor of this node, represented as the + // output tensor of input_node(idx). + + // Sets the stack trace for the node. Assumes that getting and setting the + // stack trace for a given node will not race. + + + // Get the stack trace for when the node was instantiated. + + + // Called after an attr has changed. Decides whether we need to update some + // property of the node (stored in props_). 
+ public native void UpdateProperties(); +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NodeBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NodeBuilder.java new file mode 100644 index 00000000000..5922ff38b0a --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NodeBuilder.java @@ -0,0 +1,17 @@ +// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE + +package org.tensorflow.internal.c_api; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.tensorflow.internal.c_api.global.tensorflow.*; + +@Namespace("tensorflow") @Opaque @Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) +public class NodeBuilder extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public NodeBuilder() { super((Pointer)null); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public NodeBuilder(Pointer p) { super(p); } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Graph.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Graph.java index f4521d36625..c4d88baf176 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Graph.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Graph.java @@ -8,19 +8,42 @@ import static org.tensorflow.internal.c_api.global.tensorflow.*; +// Parsed from tensorflow/c/c_api_internal.h -// TODO(jeff,sanjay): -// - export functions to set Config fields - -// -------------------------------------------------------------------------- -// The new graph construction API, still under development. - -// Represents a computation graph. Graphs may be shared between sessions. -// Graphs are thread-safe when used as directed below. -@Opaque @Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) +@NoOffset @Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) public class TF_Graph extends org.tensorflow.internal.c_api.AbstractTF_Graph { - /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public TF_Graph() { super((Pointer)null); } + static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TF_Graph(Pointer p) { super(p); } + + + + public native @MemberGetter @ByRef NativeGraphPointer graph(); + + // Runs shape inference. + + + // Maps from name of an operation to the Node* in 'graph'. + public native @ByRef NameMap name_map(); public native TF_Graph name_map(NameMap setter); + + // The keys of this map are all the active sessions using this graph. Each + // value records whether the graph has been mutated since the corresponding + // session has been run (this is detected in RecordMutation function). If the + // string is empty, no mutation has occurred. Otherwise the string is a + // description of the mutation suitable for returning to the user. + // + // Sessions are added to this map in TF_NewSession, and removed in + // TF_DeleteSession. + // TF_Graph may only / must be deleted when + // sessions.size() == 0 && delete_requested == true + // + // TODO(b/74949947): mutations currently trigger a warning instead of a bad + // status, this should be reverted when possible. 
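With TF_Graph no longer opaque, the Java side can reach the graph state that gradient construction needs: graph() exposes the underlying tensorflow::Graph and name_map() maps operation names to their Node pointers. A minimal sketch of walking that map, assuming a live TF_Graph from the existing runtime and using the NameMap and Node bindings declared earlier in this diff (the iterator is used instead of get() to avoid operator[]-style insertion of missing keys):

```java
import org.tensorflow.internal.c_api.NameMap;
import org.tensorflow.internal.c_api.Node;
import org.tensorflow.internal.c_api.TF_Graph;

final class GraphNodeLookup {
  /**
   * Scans the graph's name map for {@code opName} and returns a short description
   * of the matching node, or {@code null} if no node has that name.
   */
  static String describe(TF_Graph graph, String opName) {
    NameMap names = graph.name_map();
    for (NameMap.Iterator it = names.begin(); !it.equals(names.end()); it.increment()) {
      if (opName.equals(it.first().getString())) {
        Node node = it.second();
        return node.name().getString()
            + " (" + node.type_string().getString() + "): "
            + node.num_inputs() + " input(s), "
            + node.num_outputs() + " output(s)"
            + (node.has_assigned_device_name()
                ? " on " + node.assigned_device_name().getString()
                : "");
      }
    }
    return null;
  }
}
```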
+ + // set true by TF_DeleteGraph + + // Used to link graphs contained in TF_WhileParams to the parent graph that + // will eventually contain the full while loop. + public native TF_Graph parent(); public native TF_Graph parent(TF_Graph setter); + public native TF_Output parent_inputs(); public native TF_Graph parent_inputs(TF_Output setter); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Operation.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Operation.java index 4daad4f8a2a..96e5ef47b38 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Operation.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Operation.java @@ -8,14 +8,11 @@ import static org.tensorflow.internal.c_api.global.tensorflow.*; - -// Operation that has been added to the graph. Valid until the graph is -// deleted -- in particular adding a new operation to the graph does not -// invalidate old TF_Operation* pointers. -@Opaque @Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) +@Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) public class TF_Operation extends Pointer { - /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public TF_Operation() { super((Pointer)null); } + static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TF_Operation(Pointer p) { super(p); } + + public native @MemberGetter @ByRef Node node(); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_OperationDescription.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_OperationDescription.java index 490ca238753..71738f4ac02 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_OperationDescription.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_OperationDescription.java @@ -8,12 +8,14 @@ import static org.tensorflow.internal.c_api.global.tensorflow.*; - -// Operation being built. The underlying graph must outlive this. -@Opaque @Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) +@NoOffset @Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) public class TF_OperationDescription extends Pointer { - /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public TF_OperationDescription() { super((Pointer)null); } + static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
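TF_Operation above is likewise no longer opaque: node() hands back the tensorflow::Node behind the C API handle, which is what is needed to wrap an existing graph operation in the C++ Operation/Output types that gradient functions consume. A small sketch, assuming a valid TF_Operation handle and the NativeOperation and NativeOutput bindings declared earlier in this diff:

```java
import org.tensorflow.internal.c_api.NativeOperation;
import org.tensorflow.internal.c_api.NativeOutput;
import org.tensorflow.internal.c_api.Node;
import org.tensorflow.internal.c_api.TF_Operation;

final class OperationBridge {
  /** Wraps a C API operation handle in the C++ Operation type used by gradient functions. */
  static NativeOperation toNativeOperation(TF_Operation handle) {
    Node node = handle.node();        // the graph node behind the TF_Operation struct
    return new NativeOperation(node); // tensorflow::Operation(Node*) constructor binding
  }

  /** Wraps one output of the handle as a tensorflow::Output. */
  static NativeOutput output(TF_Operation handle, int index) {
    return new NativeOutput(handle.node(), index);
  }
}
```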
*/ public TF_OperationDescription(Pointer p) { super(p); } + + + public native @ByRef NodeBuilder node_builder(); public native TF_OperationDescription node_builder(NodeBuilder setter); + public native TF_Graph graph(); public native TF_OperationDescription graph(TF_Graph setter); + } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Scope.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Scope.java new file mode 100644 index 00000000000..bea7ec98a25 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Scope.java @@ -0,0 +1,216 @@ +// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE + +package org.tensorflow.internal.c_api; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.tensorflow.internal.c_api.global.tensorflow.*; + + +/** \addtogroup core + * \{ +

+ * A {@code Scope} object represents a set of related TensorFlow ops that have the + * same properties such as a common name prefix. + * + * A Scope object is a container for TensorFlow Op properties. Op constructors + * get a Scope object as a mandatory first argument and the constructed op + * acquires the properties in the object. + * + * A simple example: + * + * using namespace ops; + * Scope root = Scope::NewRootScope(); + * auto c1 = Const(root, { {1, 1} }); + * auto m = MatMul(root, c1, { {41}, {1} }); + * GraphDef gdef; + * Status s = root.ToGraphDef(&gdef); + * if (!s.ok()) { ... } + * + * Scope hierarchy: + * + * The Scope class provides various With<> functions that create a new scope. + * The new scope typically has one property changed while other properties are + * inherited from the parent scope. + * NewSubScope(name) method appends {@code name} to the prefix of names for ops + * created within the scope, and WithOpName() changes the suffix which + * otherwise defaults to the type of the op. + * + * Name examples: + * + * Scope root = Scope::NewRootScope(); + * Scope linear = root.NewSubScope("linear"); + * // W will be named "linear/W" + * auto W = Variable(linear.WithOpName("W"), + * {2, 2}, DT_FLOAT); + * // b will be named "linear/b_3" + * int idx = 3; + * auto b = Variable(linear.WithOpName("b_", idx), + * {2}, DT_FLOAT); + * auto x = Const(linear, {...}); // name: "linear/Const" + * auto m = MatMul(linear, x, W); // name: "linear/MatMul" + * auto r = BiasAdd(linear, m, b); // name: "linear/BiasAdd" + * + * Scope lifetime: + * + * A new scope is created by calling Scope::NewRootScope. This creates some + * resources that are shared by all the child scopes that inherit from this + * scope, directly or transitively. For instance, a new scope creates a new + * Graph object to which operations are added when the new scope or its + * children are used by an Op constructor. The new scope also has a Status + * object which will be used to indicate errors by Op-constructor functions + * called on any child scope. The Op-constructor functions have to check the + * scope's status by calling the ok() method before proceeding to construct the + * op. + * + * Thread safety: + * + * A {@code Scope} object is NOT thread-safe. Threads cannot concurrently call + * op-constructor functions on the same {@code Scope} object. */ +@Name("tensorflow::Scope") @NoOffset @Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) +public class TF_Scope extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TF_Scope(Pointer p) { super(p); } + + public TF_Scope(@Const @ByRef TF_Scope other) { super((Pointer)null); allocate(other); } + private native void allocate(@Const @ByRef TF_Scope other); + public native @ByRef @Name("operator =") TF_Scope put(@Const @ByRef TF_Scope other); + + // The following functions are for users making graphs. They return brand new + // scopes, or scopes derived from an existing scope object. + + /** Return a new scope. + * This creates a new graph and all operations constructed in this graph + * should use the returned object as the "root" scope. */ + public static native @ByVal TF_Scope NewRootScope(); + + /** Return a new scope. Ops created with this scope will have + * {@code name/child_scope_name} as the prefix. The actual name will be unique + * in the current scope. All other properties are inherited from the current + * scope. 
If {@code child_scope_name} is empty, the {@code /} is elided. */ + public native @ByVal TF_Scope NewSubScope(@StdString BytePointer child_scope_name); + public native @ByVal TF_Scope NewSubScope(@StdString String child_scope_name); + + /** Return a new scope. All ops created within the returned scope will have + * names of the form {@code name/StrCat(fragments...)[_suffix]} */ + + /** Return a new scope. All ops created within the returned scope will have as + * control dependencies the union of operations in the control_deps vector + * and the control dependencies of the current scope. */ + public native @ByVal TF_Scope WithControlDependencies( + @Span NativeOperation control_deps); + /** Same as above, but convenient to add control dependency on the operation + * producing the control_dep output. */ + public native @ByVal TF_Scope WithControlDependencies(@Const @ByRef NativeOutput control_dep); + + /** Return a new scope. All ops created within the returned scope will have no + * control dependencies on other operations. */ + public native @ByVal TF_Scope WithNoControlDependencies(); + + /** Return a new scope. All ops created within the returned scope will have + * the device field set to 'device'. */ + public native @ByVal TF_Scope WithDevice(@StdString BytePointer device); + public native @ByVal TF_Scope WithDevice(@StdString String device); + + /** Returns a new scope. All ops created within the returned scope will have + * their assigned device set to {@code assigned_device}. */ + + + /** Returns a new scope. All ops created within the returned scope will have + * their _XlaCluster attribute set to {@code xla_cluster}. */ + + + /** Return a new scope. All ops created within the returned scope will be + * co-located on the device where op is placed. + * NOTE: This function is intended to be use internal libraries only for + * controlling placement of ops on to devices. Public use is not encouraged + * because the implementation of device placement is subject to change. */ + + /** Convenience function for above. */ + + /** Clear all colocation constraints. */ + + + /** Return a new scope. The op-constructor functions taking the returned scope + * as the scope argument will exit as soon as an error is detected, instead + * of setting the status on the scope. */ + public native @ByVal TF_Scope ExitOnError(); + + /** Return a new scope. All ops created with the new scope will have + * kernel_label as the value for their '_kernel' attribute; */ + + + // The following functions are for scope object consumers. + + /** Return a unique name, using default_name if an op name has not been + * specified. */ + public native @StdString BytePointer GetUniqueNameForOp(@StdString BytePointer default_name); + public native @StdString String GetUniqueNameForOp(@StdString String default_name); + + /** Update the status on this scope. + * Note: The status object is shared between all children of this scope. + * If the resulting status is not Status::OK() and exit_on_error_ is set on + * this scope, this function exits by calling LOG(FATAL). */ + + + // START_SKIP_DOXYGEN + + /** Update the builder with properties accumulated in this scope. Does not set + * status(). 
*/ + // TODO(skyewm): NodeBuilder is not part of public API + public native void UpdateBuilder(NodeBuilder builder); + // END_SKIP_DOXYGEN + + public native @Cast("bool") boolean ok(); + + // TODO(skyewm): Graph is not part of public API + public native NativeGraphPointer graph(); + + // TODO(skyewm): Graph is not part of public API + + + + + /** If status() is Status::OK(), convert the Graph object stored in this scope + * to a GraphDef proto and return Status::OK(). Otherwise, return the error + * status as is without performing GraphDef conversion. */ + + + // START_SKIP_DOXYGEN + + /** If status() is Status::OK(), construct a Graph object using {@code opts} as the + * GraphConstructorOptions, and return Status::OK if graph construction was + * successful. Otherwise, return the error status. */ + // TODO(josh11b, keveman): Make this faster; right now it converts + // Graph->GraphDef->Graph. This cleans up the graph (e.g. adds + // edges from the source and to the sink node, resolves back edges + // by name), and makes sure the resulting graph is valid. + + + // Calls AddNode() using this scope's ShapeRefiner. This exists in the public + // API to prevent custom op wrappers from needing access to shape_refiner.h or + // scope_internal.h. + // TODO(skyewm): remove this from public API + + + // Creates a new root scope that causes all DoShapeInference() calls to return + // Status::OK() (on the returned scope and any subscopes). Used for testing. + // TODO(skyewm): fix tests that still require this and eventually remove, or + // at least remove from public API + + // END_SKIP_DOXYGEN + + + + // START_SKIP_DOXYGEN + @Opaque public static class Impl extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public Impl() { super((Pointer)null); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public Impl(Pointer p) { super(p); } + } + public native Impl impl(); +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/global/tensorflow.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/global/tensorflow.java index 56ed191ae28..3128389e255 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/global/tensorflow.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/global/tensorflow.java @@ -11,6 +11,12 @@ public class tensorflow extends org.tensorflow.internal.c_api.presets.tensorflow { static { Loader.load(); } +// Targeting ../NativeOutputVector.java + + +// Targeting ../NameMap.java + + // Parsed from tensorflow/core/platform/ctstring_internal.h /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. @@ -982,9 +988,15 @@ public static native void TF_SetConfig(TF_SessionOptions options, // Destroy an options object. public static native void TF_DeleteSessionOptions(TF_SessionOptions arg0); -// Targeting ../TF_Graph.java +// TODO(jeff,sanjay): +// - export functions to set Config fields + +// -------------------------------------------------------------------------- +// The new graph construction API, still under development. +// Represents a computation graph. Graphs may be shared between sessions. +// Graphs are thread-safe when used as directed below. // Return a new graph object. public static native TF_Graph TF_NewGraph(); @@ -992,12 +1004,12 @@ public static native void TF_SetConfig(TF_SessionOptions options, // Destroy an options object. 
Graph will be deleted once no more // TFSession's are referencing it. public static native void TF_DeleteGraph(TF_Graph arg0); -// Targeting ../TF_OperationDescription.java - - -// Targeting ../TF_Operation.java +// Operation being built. The underlying graph must outlive this. +// Operation that has been added to the graph. Valid until the graph is +// deleted -- in particular adding a new operation to the graph does not +// invalidate old TF_Operation* pointers. // Targeting ../TF_Input.java @@ -1079,6 +1091,15 @@ public static native void TF_GraphGetTensorShape(TF_Graph graph, @Cast("int64_t*") long[] dims, int num_dims, TF_Status status); +// TF_NewOperation, but without locking the graph. +// Should prefer TF_NewOperation when possible. +public static native TF_OperationDescription TF_NewOperationLocked(TF_Graph graph, + @Cast("const char*") BytePointer op_type, + @Cast("const char*") BytePointer oper_name); +public static native TF_OperationDescription TF_NewOperationLocked(TF_Graph graph, + String op_type, + String oper_name); + // Operation will only be added to *graph when TF_FinishOperation() is // called (assuming TF_FinishOperation() does not return an error). // *graph must not be deleted until after TF_FinishOperation() is @@ -1418,6 +1439,11 @@ public static native void TF_SetAttrValueProto(TF_OperationDescription desc, @Cast("size_t") long proto_len, TF_Status status); +// TF_FinishOperation, but without locking the graph. +// TF_FinishOperation should be preferred when possible. +public static native TF_Operation TF_FinishOperationLocked(TF_OperationDescription desc, + TF_Status status); + // If this function succeeds: // * *status is set to an OK value, // * a TF_Operation is added to the graph, @@ -4084,6 +4110,26 @@ public static native void TF_ShapeInferenceContextConcatenateShapes( // #endif // TENSORFLOW_C_OPS_H_ +// Parsed from tensorflow_adapters.h + +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ======================================================================= + */ + +// #include "absl/types/span.h" + + + // Parsed from tensorflow/c/eager/c_api.h /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. @@ -4783,4 +4829,306 @@ public static native void TFE_OpSetAttrValueProto(@Const TFE_Op op, public static final int TFE_CUSTOM_DEVICE_VERSION = 4; +// Parsed from tensorflow/cc/framework/scope.h + +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +// #ifndef TENSORFLOW_CC_FRAMEWORK_SCOPE_H_ +// #define TENSORFLOW_CC_FRAMEWORK_SCOPE_H_ + +// #include +// #include +// #include +// #include +// #include + +// #include "absl/strings/str_cat.h" +// #include "tensorflow/cc/framework/ops.h" +// #include "tensorflow/core/common_runtime/graph_constructor.h" +// #include "tensorflow/core/lib/core/status.h" +// #include "tensorflow/core/lib/gtl/array_slice.h" +// Targeting ../NativeGraphPointer.java + + +// Targeting ../NodeBuilder.java + + +// Targeting ../TF_Scope.java + + + +/** A helper struct to hold the scopes that would be used by a function + * constructing a composite op. */ + +// Creates a node of the given operation, with the given inputs, and assigns the +// result to output. This does not support the ability to add additional +// attributes. + +/** \} */ + + // namespace tensorflow + +// #endif // TENSORFLOW_CC_FRAMEWORK_SCOPE_H_ + + +// Parsed from tensorflow/cc/framework/grad_op_registry.h + +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// #ifndef TENSORFLOW_CC_FRAMEWORK_GRAD_OP_REGISTRY_H_ +// #define TENSORFLOW_CC_FRAMEWORK_GRAD_OP_REGISTRY_H_ + +// #include + +// #include "tensorflow/cc/framework/ops.h" +// #include "tensorflow/cc/framework/scope.h" +// Targeting ../GradFunc.java + + +// Targeting ../GradOpRegistry.java + + + + // namespace ops + +// Macros used to define gradient functions for ops. +// #define REGISTER_GRADIENT_OP(name, fn) +// REGISTER_GRADIENT_OP_UNIQ_HELPER(__COUNTER__, name, fn) + +// #define REGISTER_NO_GRADIENT_OP(name) +// REGISTER_GRADIENT_OP_UNIQ_HELPER(__COUNTER__, name, nullptr) + +// #define REGISTER_GRADIENT_OP_UNIQ_HELPER(ctr, name, fn) +// REGISTER_GRADIENT_OP_UNIQ(ctr, name, fn) + +// #define REGISTER_GRADIENT_OP_UNIQ(ctr, name, fn) +// static bool unused_ret_val_##ctr = +// ::tensorflow::ops::GradOpRegistry::Global()->Register(name, fn) + + // namespace tensorflow + +// #endif // TENSORFLOW_CC_FRAMEWORK_GRAD_OP_REGISTRY_H_ + + +// Parsed from tensorflow/core/platform/status.h + +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +// #ifndef TENSORFLOW_CORE_PLATFORM_STATUS_H_ +// #define TENSORFLOW_CORE_PLATFORM_STATUS_H_ + +// #include +// #include +// #include +// #include +// #include + +// #include "tensorflow/core/platform/logging.h" +// #include "tensorflow/core/platform/macros.h" +// #include "tensorflow/core/platform/stack_frame.h" +// #include "tensorflow/core/platform/stringpiece.h" +// #include "tensorflow/core/platform/types.h" +// #include "tensorflow/core/protobuf/error_codes.pb.h" + +// #if defined(__clang__) +// Only clang supports warn_unused_result as a type annotation. +// Targeting ../NativeStatus.java + + + +// Helper class to manage multiple child status values. + + + + + +// #ifndef SWIG + + + +// #endif // SWIG + + + + + +/** \ingroup core */ +@Namespace("tensorflow") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os, @Const @ByRef NativeStatus x); + +@Namespace("tensorflow") public static native @StdString BytePointer TfCheckOpHelperOutOfLine( + @Const @ByRef NativeStatus v, @Cast("const char*") BytePointer msg); +@Namespace("tensorflow") public static native @StdString BytePointer TfCheckOpHelperOutOfLine( + @Const @ByRef NativeStatus v, String msg); + +@Namespace("tensorflow") public static native @StdString BytePointer TfCheckOpHelper(@ByVal NativeStatus v, + @Cast("const char*") BytePointer msg); +@Namespace("tensorflow") public static native @StdString BytePointer TfCheckOpHelper(@ByVal NativeStatus v, + String msg); + +// #define TF_DO_CHECK_OK(val, level) +// while (auto _result = ::tensorflow::TfCheckOpHelper(val, #val)) +// LOG(level) << *(_result) + +// #define TF_CHECK_OK(val) TF_DO_CHECK_OK(val, FATAL) +// #define TF_QCHECK_OK(val) TF_DO_CHECK_OK(val, QFATAL) + +// DEBUG only version of TF_CHECK_OK. Compiler still parses 'val' even in opt +// mode. +// #ifndef NDEBUG +// #define TF_DCHECK_OK(val) TF_CHECK_OK(val) +// #else +// #define TF_DCHECK_OK(val) +// while (false && (::tensorflow::Status::OK() == (val))) LOG(FATAL) +// #endif + + // namespace tensorflow + +// #endif // TENSORFLOW_CORE_PLATFORM_STATUS_H_ + + +// Targeting ../Node.java + + + +// Stores debug information associated with the Node. + + +// Parsed from tensorflow/c/tf_status_helper.h + +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// #ifndef TENSORFLOW_C_TF_STATUS_HELPER_H_ +// #define TENSORFLOW_C_TF_STATUS_HELPER_H_ + +// #include "tensorflow/c/tf_status.h" +// #include "tensorflow/core/platform/status.h" + +// Set the attribute of "tf_status" from the attributes of "status". +@Namespace("tensorflow") public static native void Set_TF_Status_from_Status(TF_Status tf_status, + @Const @ByRef NativeStatus status); + +// Returns a "status" from "tf_status". 
+@Namespace("tensorflow") public static native @ByVal NativeStatus StatusFromTF_Status(@Const TF_Status tf_status); + // namespace internal + + // namespace tensorflow + +// #endif // TENSORFLOW_C_TF_STATUS_HELPER_H_ + + +// Parsed from tensorflow/cc/framework/ops.h + +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// #ifndef TENSORFLOW_CC_FRAMEWORK_OPS_H_ +// #define TENSORFLOW_CC_FRAMEWORK_OPS_H_ + +// #include + +// #include "tensorflow/core/framework/tensor.h" +// #include "tensorflow/core/framework/tensor.pb.h" +// #include "tensorflow/core/graph/graph.h" +// #include "tensorflow/core/lib/hash/hash.h" +// #include "tensorflow/core/lib/strings/strcat.h" + +/** \defgroup core Core Tensorflow API */ +// Targeting ../NativeOperation.java + + +// Targeting ../NativeOutput.java + + + +/** Hash class that can be used for e.g. storing Outputs in an unordered_map */ + +/** Represents a tensor value that can be used as an operand to an Operation. */ + +/** A type for representing the output of ops that produce more than one output, + * or a list of tensors. */ + +/** A type for representing the input to ops that require a list of tensors. */ + +/** \} */ + + // namespace tensorflow + +// #endif // TENSORFLOW_CC_FRAMEWORK_OPS_H_ + + +// Targeting ../TF_Graph.java + + +// Targeting ../TF_OperationDescription.java + + +// Targeting ../TF_Operation.java + + + + } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/AudioSpectrogram.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/AudioSpectrogram.java index 12fca67165e..4bc3f61c197 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/AudioSpectrogram.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/AudioSpectrogram.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; @@ -54,6 +56,10 @@ * tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the * resulting spectrogram as a PNG image. 
*/ +@OpMetadata( + opType = AudioSpectrogram.OP_NAME, + inputsClass = AudioSpectrogram.Inputs.class +) @Operator( group = "audio" ) @@ -65,8 +71,8 @@ public final class AudioSpectrogram extends RawOp implements Operand { private Output spectrogram; - private AudioSpectrogram(Operation operation) { - super(operation); + public AudioSpectrogram(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; spectrogram = operation.output(outputIdx++); } @@ -148,6 +154,9 @@ public Options magnitudeSquared(Boolean magnitudeSquared) { } } + @OpInputsMetadata( + outputsClass = AudioSpectrogram.class + ) public static class Inputs extends RawOpInputs { /** * Float representation of audio data. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/DecodeWav.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/DecodeWav.java index 0caa473a0db..1666d475d57 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/DecodeWav.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/DecodeWav.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -46,6 +48,10 @@ * number of samples. For example, a ten-sample-long stereo WAV file should give an * output shape of [10, 2]. */ +@OpMetadata( + opType = DecodeWav.OP_NAME, + inputsClass = DecodeWav.Inputs.class +) @Operator( group = "audio" ) @@ -59,8 +65,8 @@ public final class DecodeWav extends RawOp { private Output sampleRate; - private DecodeWav(Operation operation) { - super(operation); + public DecodeWav(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; audio = operation.output(outputIdx++); sampleRate = operation.output(outputIdx++); @@ -165,6 +171,9 @@ public Options desiredSamples(Long desiredSamples) { } } + @OpInputsMetadata( + outputsClass = DecodeWav.class + ) public static class Inputs extends RawOpInputs { /** * The WAV-encoded audio, usually from a file. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/EncodeWav.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/EncodeWav.java index 8e535d1d48b..ecbafeb2d4e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/EncodeWav.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/EncodeWav.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -41,6 +43,10 @@ *

{@code audio} is a 2-D float Tensor of shape {@code [length, channels]}. * {@code sample_rate} is a scalar Tensor holding the rate to use (e.g. 44100). */ +@OpMetadata( + opType = EncodeWav.OP_NAME, + inputsClass = EncodeWav.Inputs.class +) @Operator( group = "audio" ) @@ -52,8 +58,8 @@ public final class EncodeWav extends RawOp implements Operand { private Output contents; - private EncodeWav(Operation operation) { - super(operation); + public EncodeWav(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; contents = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return contents; } + @OpInputsMetadata( + outputsClass = EncodeWav.class + ) public static class Inputs extends RawOpInputs { /** * 2-D with shape {@code [length, channels]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/Mfcc.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/Mfcc.java index b75d638879c..cf1f13ea4ac 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/Mfcc.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/audio/Mfcc.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -40,6 +42,10 @@ * history in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum * is a good resource to learn more. */ +@OpMetadata( + opType = Mfcc.OP_NAME, + inputsClass = Mfcc.Inputs.class +) @Operator( group = "audio" ) @@ -51,8 +57,8 @@ public final class Mfcc extends RawOp implements Operand { private Output output; - private Mfcc(Operation operation) { - super(operation); + public Mfcc(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -212,6 +218,9 @@ public Options dctCoefficientCount(Long dctCoefficientCount) { } } + @OpInputsMetadata( + outputsClass = Mfcc.class + ) public static class Inputs extends RawOpInputs { /** * Typically produced by the Spectrogram op, with magnitude_squared diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseAnd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseAnd.java index 0a208566534..75e1d6e8a5f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseAnd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseAnd.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -53,6 +55,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = BitwiseAnd.OP_NAME, + inputsClass = BitwiseAnd.Inputs.class +) @Operator( group = "bitwise" ) @@ -64,8 +70,8 @@ public final class BitwiseAnd extends RawOp implements Operan private Output z; - private BitwiseAnd(Operation operation) { - super(operation); + public BitwiseAnd(Operation operation) { + 
super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -103,6 +109,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = BitwiseAnd.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseOr.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseOr.java index ba212ec5596..28865bc0cc8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseOr.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseOr.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -53,6 +55,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = BitwiseOr.OP_NAME, + inputsClass = BitwiseOr.Inputs.class +) @Operator( group = "bitwise" ) @@ -64,8 +70,8 @@ public final class BitwiseOr extends RawOp implements Operand private Output z; - private BitwiseOr(Operation operation) { - super(operation); + public BitwiseOr(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -103,6 +109,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = BitwiseOr.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseXor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseXor.java index 57248c5cc6d..e4655d95f8f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseXor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseXor.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -53,6 +55,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = BitwiseXor.OP_NAME, + inputsClass = BitwiseXor.Inputs.class +) @Operator( group = "bitwise" ) @@ -64,8 +70,8 @@ public final class BitwiseXor extends RawOp implements Operan private Output z; - private BitwiseXor(Operation operation) { - super(operation); + public BitwiseXor(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -103,6 +109,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = BitwiseXor.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/Invert.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/Invert.java index f5f2765eb33..4dd4375304c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/Invert.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/Invert.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -74,6 +76,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Invert.OP_NAME, + inputsClass = Invert.Inputs.class +) @Operator( group = "bitwise" ) @@ -85,8 +91,8 @@ public final class Invert extends RawOp implements Operand private Output y; - private Invert(Operation operation) { - super(operation); + public Invert(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -122,6 +128,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Invert.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/LeftShift.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/LeftShift.java index 6420dcc7f65..0173cdc694d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/LeftShift.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/LeftShift.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -64,6 +66,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = LeftShift.OP_NAME, + inputsClass = LeftShift.Inputs.class +) @Operator( group = "bitwise" ) @@ -75,8 +81,8 @@ public final class LeftShift extends RawOp implements Operand private Output z; - private LeftShift(Operation operation) { - super(operation); + public LeftShift(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -114,6 +120,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = LeftShift.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/RightShift.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/RightShift.java index 5fa84781682..00990db0ce0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/RightShift.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/RightShift.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -66,6 +68,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = RightShift.OP_NAME, + inputsClass = RightShift.Inputs.class +) @Operator( group = "bitwise" ) @@ -77,8 
+83,8 @@ public final class RightShift extends RawOp implements Operan private Output z; - private RightShift(Operation operation) { - super(operation); + public RightShift(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -116,6 +122,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = RightShift.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/cluster/KMC2ChainInitialization.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/cluster/KMC2ChainInitialization.java index 39eb7f70ce5..7c2432bcc34 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/cluster/KMC2ChainInitialization.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/cluster/KMC2ChainInitialization.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; @@ -37,6 +39,10 @@ * of the k-MC^2 algorithm and returns the index of one candidate point to be added * as an additional cluster center. */ +@OpMetadata( + opType = KMC2ChainInitialization.OP_NAME, + inputsClass = KMC2ChainInitialization.Inputs.class +) public final class KMC2ChainInitialization extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class KMC2ChainInitialization extends RawOp implements Operand index; - private KMC2ChainInitialization(Operation operation) { - super(operation); + public KMC2ChainInitialization(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; index = operation.output(outputIdx++); } @@ -85,6 +91,9 @@ public Output asOutput() { return index; } + @OpInputsMetadata( + outputsClass = KMC2ChainInitialization.class + ) public static class Inputs extends RawOpInputs { /** * Vector with squared distances to the closest previously sampled cluster center diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/cluster/KmeansPlusPlusInitialization.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/cluster/KmeansPlusPlusInitialization.java index 65058cc1268..e2d7fc5f79a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/cluster/KmeansPlusPlusInitialization.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/cluster/KmeansPlusPlusInitialization.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; @@ -37,6 +39,10 @@ * distance from the nearest row selected thus far till num_to_sample rows have * been sampled. 
*/ +@OpMetadata( + opType = KmeansPlusPlusInitialization.OP_NAME, + inputsClass = KmeansPlusPlusInitialization.Inputs.class +) public final class KmeansPlusPlusInitialization extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class KmeansPlusPlusInitialization extends RawOp implements Operand private Output samples; - private KmeansPlusPlusInitialization(Operation operation) { - super(operation); + public KmeansPlusPlusInitialization(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; samples = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return samples; } + @OpInputsMetadata( + outputsClass = KmeansPlusPlusInitialization.class + ) public static class Inputs extends RawOpInputs { /** * Matrix of shape (n, d). Rows are assumed to be input points. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/AllReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/AllReduce.java index e338973185a..53e72874745 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/AllReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/AllReduce.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @deprecated use {@link org.tensorflow.op.collective.Reduce} instead */ +@OpMetadata( + opType = AllReduce.OP_NAME, + inputsClass = AllReduce.Inputs.class +) @Deprecated public final class AllReduce extends RawOp implements Operand { /** @@ -47,8 +53,8 @@ public final class AllReduce extends RawOp implements Operand private Output data; - private AllReduce(Operation operation) { - super(operation); + public AllReduce(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; data = operation.output(outputIdx++); } @@ -218,6 +224,9 @@ public Options timeoutSeconds(Float timeoutSeconds) { } } + @OpInputsMetadata( + outputsClass = AllReduce.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/BroadcastRecv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/BroadcastRecv.java index 76cdc37a0c4..94dbc145772 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/BroadcastRecv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/BroadcastRecv.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * * @param data type for {@code data} output */ +@OpMetadata( + opType = BroadcastRecv.OP_NAME, + inputsClass = BroadcastRecv.Inputs.class +) public final class BroadcastRecv extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class BroadcastRecv extends RawOp 
implements Opera private Output data; - private BroadcastRecv(Operation operation) { - super(operation); + public BroadcastRecv(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; data = operation.output(outputIdx++); } @@ -156,6 +162,9 @@ public Options timeoutSeconds(Float timeoutSeconds) { } } + @OpInputsMetadata( + outputsClass = BroadcastRecv.class + ) public static class Inputs extends RawOpInputs> { /** * The T attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/BroadcastSend.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/BroadcastSend.java index 707032af259..cc18c528581 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/BroadcastSend.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/BroadcastSend.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code data} output */ +@OpMetadata( + opType = BroadcastSend.OP_NAME, + inputsClass = BroadcastSend.Inputs.class +) public final class BroadcastSend extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class BroadcastSend extends RawOp implements Opera private Output data; - private BroadcastSend(Operation operation) { - super(operation); + public BroadcastSend(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; data = operation.output(outputIdx++); } @@ -155,6 +161,9 @@ public Options timeoutSeconds(Float timeoutSeconds) { } } + @OpInputsMetadata( + outputsClass = BroadcastSend.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/Gather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/Gather.java index b058a53afea..9140ca240ca 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/Gather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/Gather.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code data} output */ +@OpMetadata( + opType = Gather.OP_NAME, + inputsClass = Gather.Inputs.class +) public final class Gather extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class Gather extends RawOp implements Operand private Output data; - private Gather(Operation operation) { - super(operation); + public Gather(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; data = operation.output(outputIdx++); } @@ -155,6 +161,9 @@ public Options timeoutSeconds(Float timeoutSeconds) { } } + @OpInputsMetadata( + outputsClass = Gather.class + ) public static class Inputs extends RawOpInputs> { /** * The input input 
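Every generated op wrapper in this patch receives the same two mechanical changes: an @OpMetadata / @OpInputsMetadata annotation pair tying the class to its op type and its Inputs companion, and a constructor widened from private to public that now calls super(operation, OP_NAME). Below is a minimal sketch (not part of the patch) of what the public constructor enables, namely re-wrapping an existing graph Operation in its typed class. It assumes the standard TF Java entry points (Graph, Ops.create, tf.bitwise.bitwiseAnd, tf.constant), which are outside this diff, and the class name RewrapSketch is purely illustrative.

    import org.tensorflow.Graph;
    import org.tensorflow.op.Ops;
    import org.tensorflow.op.bitwise.BitwiseAnd;
    import org.tensorflow.types.TInt32;

    public final class RewrapSketch {
      public static void main(String[] args) {
        try (Graph g = new Graph()) {
          Ops tf = Ops.create(g);
          // Build a BitwiseAnd node through the generated API.
          BitwiseAnd<TInt32> bitAnd = tf.bitwise.bitwiseAnd(tf.constant(6), tf.constant(3));
          // Re-wrap the underlying Operation in its typed class; before this
          // patch the BitwiseAnd(Operation) constructor was private.
          BitwiseAnd<TInt32> rewrapped = new BitwiseAnd<>(bitAnd.op());
          System.out.println(rewrapped.asOutput().shape());
        }
      }
    }

The same constructor and annotation change repeats verbatim for every generated class in the remainder of the diff.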
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/GatherV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/GatherV2.java index 77e32799164..2ee7eda0773 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/GatherV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/GatherV2.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code data} output */ +@OpMetadata( + opType = GatherV2.OP_NAME, + inputsClass = GatherV2.Inputs.class +) public final class GatherV2 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class GatherV2 extends RawOp implements Operand< private Output data; - private GatherV2(Operation operation) { - super(operation); + public GatherV2(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; data = operation.output(outputIdx++); } @@ -184,6 +190,9 @@ public Options NorderingToken(Long NorderingToken) { } } + @OpInputsMetadata( + outputsClass = GatherV2.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/Reduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/Reduce.java index 3b86c960d1a..7d503474d02 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/Reduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/Reduce.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code data} output */ +@OpMetadata( + opType = Reduce.OP_NAME, + inputsClass = Reduce.Inputs.class +) public final class Reduce extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class Reduce extends RawOp implements Operand private Output data; - private Reduce(Operation operation) { - super(operation); + public Reduce(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; data = operation.output(outputIdx++); } @@ -215,6 +221,9 @@ public Options timeoutSeconds(Float timeoutSeconds) { } } + @OpInputsMetadata( + outputsClass = Reduce.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/ReduceV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/ReduceV2.java index da55ef12535..0c09c73f020 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/ReduceV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/ReduceV2.java @@ -28,6 +28,8 @@ import 
org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code data} output */ +@OpMetadata( + opType = ReduceV2.OP_NAME, + inputsClass = ReduceV2.Inputs.class +) public final class ReduceV2 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class ReduceV2 extends RawOp implements Operand< private Output data; - private ReduceV2(Operation operation) { - super(operation); + public ReduceV2(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; data = operation.output(outputIdx++); } @@ -215,6 +221,9 @@ public Options maxSubdivsPerDevice(Long maxSubdivsPerDevice) { } } + @OpInputsMetadata( + outputsClass = ReduceV2.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Abort.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Abort.java index 455cb214124..c51158b43dc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Abort.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Abort.java @@ -25,6 +25,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; /** @@ -33,6 +35,10 @@ * otherwise it will exit with a SIGABORT signal. *

Returns nothing but an exception. */ +@OpMetadata( + opType = Abort.OP_NAME, + inputsClass = Abort.Inputs.class +) @Operator public final class Abort extends RawOp { /** @@ -40,8 +46,8 @@ public final class Abort extends RawOp { */ public static final String OP_NAME = "Abort"; - private Abort(Operation operation) { - super(operation); + public Abort(Operation operation) { + super(operation, OP_NAME); } /** @@ -123,6 +129,9 @@ public Options exitWithoutError(Boolean exitWithoutError) { } } + @OpInputsMetadata( + outputsClass = Abort.class + ) public static class Inputs extends RawOpInputs { /** * A string which is the message associated with the exception. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/All.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/All.java index ca17e9a1ba0..81d49962fc1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/All.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/All.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -39,6 +41,10 @@ * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. */ +@OpMetadata( + opType = All.OP_NAME, + inputsClass = All.Inputs.class +) @Operator public final class All extends RawOp implements Operand { /** @@ -48,8 +54,8 @@ public final class All extends RawOp implements Operand { private Output output; - private All(Operation operation) { - super(operation); + public All(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -127,6 +133,9 @@ public Options keepDims(Boolean keepDims) { } } + @OpInputsMetadata( + outputsClass = All.class + ) public static class Inputs extends RawOpInputs { /** * The tensor to reduce. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Any.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Any.java index c853cf365ea..8ab3c606ad4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Any.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Any.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -39,6 +41,10 @@ * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. 
*/ +@OpMetadata( + opType = Any.OP_NAME, + inputsClass = Any.Inputs.class +) @Operator public final class Any extends RawOp implements Operand { /** @@ -48,8 +54,8 @@ public final class Any extends RawOp implements Operand { private Output output; - private Any(Operation operation) { - super(operation); + public Any(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -127,6 +133,9 @@ public Options keepDims(Boolean keepDims) { } } + @OpInputsMetadata( + outputsClass = Any.class + ) public static class Inputs extends RawOpInputs { /** * The tensor to reduce. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssertThat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssertThat.java index fdd9f9628c7..af5c1dd9f9e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssertThat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssertThat.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -36,6 +38,10 @@ * If {@code condition} evaluates to false, print the list of tensors in {@code data}. * {@code summarize} determines how many entries of the tensors to print. */ +@OpMetadata( + opType = AssertThat.OP_NAME, + inputsClass = AssertThat.Inputs.class +) @Operator public final class AssertThat extends RawOp { /** @@ -43,8 +49,8 @@ public final class AssertThat extends RawOp { */ public static final String OP_NAME = "Assert"; - private AssertThat(Operation operation) { - super(operation); + public AssertThat(Operation operation) { + super(operation, OP_NAME); } /** @@ -105,6 +111,9 @@ public Options summarize(Long summarize) { } } + @OpInputsMetadata( + outputsClass = AssertThat.class + ) public static class Inputs extends RawOpInputs { /** * The condition to evaluate. 
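The @OpMetadata annotation records the op type and the Inputs class for each wrapper; for AssertThat above, opType is its OP_NAME, "Assert". A short reflection sketch (not part of the patch), assuming @OpMetadata is retained at runtime; the class name MetadataSketch is illustrative only.

    import org.tensorflow.op.annotation.OpMetadata;
    import org.tensorflow.op.core.AssertThat;

    public final class MetadataSketch {
      public static void main(String[] args) {
        // getAnnotation returns null if @OpMetadata is not runtime-retained.
        OpMetadata meta = AssertThat.class.getAnnotation(OpMetadata.class);
        if (meta != null) {
          System.out.println(meta.opType());      // "Assert"
          System.out.println(meta.inputsClass()); // class org.tensorflow.op.core.AssertThat$Inputs
        }
      }
    }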
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Assign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Assign.java index 9af134e388e..ba1e413af71 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Assign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Assign.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code output_ref} output */ +@OpMetadata( + opType = Assign.OP_NAME, + inputsClass = Assign.Inputs.class +) @Operator public final class Assign extends RawOp implements Operand { /** @@ -47,8 +53,8 @@ public final class Assign extends RawOp implements Operand { private Output outputRef; - private Assign(Operation operation) { - super(operation); + public Assign(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputRef = operation.output(outputIdx++); } @@ -159,6 +165,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = Assign.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a {@code Variable} node. May be uninitialized. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignAdd.java index b863c0aed92..3a6df4bed6a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignAdd.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code output_ref} output */ +@OpMetadata( + opType = AssignAdd.OP_NAME, + inputsClass = AssignAdd.Inputs.class +) @Operator public final class AssignAdd extends RawOp implements Operand { /** @@ -47,8 +53,8 @@ public final class AssignAdd extends RawOp implements Operand outputRef; - private AssignAdd(Operation operation) { - super(operation); + public AssignAdd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputRef = operation.output(outputIdx++); } @@ -129,6 +135,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = AssignAdd.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a {@code Variable} node. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignAddVariableOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignAddVariableOp.java index 618f646be84..e7dadaaec95 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignAddVariableOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignAddVariableOp.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -35,6 +37,10 @@ * Any ReadVariableOp with a control dependency on this op is guaranteed to * see the incremented value or a subsequent newer one. */ +@OpMetadata( + opType = AssignAddVariableOp.OP_NAME, + inputsClass = AssignAddVariableOp.Inputs.class +) @Operator public final class AssignAddVariableOp extends RawOp { /** @@ -42,8 +48,8 @@ public final class AssignAddVariableOp extends RawOp { */ public static final String OP_NAME = "AssignAddVariableOp"; - private AssignAddVariableOp(Operation operation) { - super(operation); + public AssignAddVariableOp(Operation operation) { + super(operation, OP_NAME); } /** @@ -65,6 +71,9 @@ public static AssignAddVariableOp create(Scope scope, Operand r return new AssignAddVariableOp(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = AssignAddVariableOp.class + ) public static class Inputs extends RawOpInputs { /** * handle to the resource in which to store the variable. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignSub.java index a1d6c5bfb3a..8ae31dde667 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignSub.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code output_ref} output */ +@OpMetadata( + opType = AssignSub.OP_NAME, + inputsClass = AssignSub.Inputs.class +) @Operator public final class AssignSub extends RawOp implements Operand { /** @@ -47,8 +53,8 @@ public final class AssignSub extends RawOp implements Operand outputRef; - private AssignSub(Operation operation) { - super(operation); + public AssignSub(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputRef = operation.output(outputIdx++); } @@ -129,6 +135,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = AssignSub.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a {@code Variable} node. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignSubVariableOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignSubVariableOp.java index ddd83dc834e..6de4c159ee1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignSubVariableOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignSubVariableOp.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -35,6 +37,10 @@ * Any ReadVariableOp with a control dependency on this op is guaranteed to * see the decremented value or a subsequent newer one. */ +@OpMetadata( + opType = AssignSubVariableOp.OP_NAME, + inputsClass = AssignSubVariableOp.Inputs.class +) @Operator public final class AssignSubVariableOp extends RawOp { /** @@ -42,8 +48,8 @@ public final class AssignSubVariableOp extends RawOp { */ public static final String OP_NAME = "AssignSubVariableOp"; - private AssignSubVariableOp(Operation operation) { - super(operation); + public AssignSubVariableOp(Operation operation) { + super(operation, OP_NAME); } /** @@ -65,6 +71,9 @@ public static AssignSubVariableOp create(Scope scope, Operand r return new AssignSubVariableOp(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = AssignSubVariableOp.class + ) public static class Inputs extends RawOpInputs { /** * handle to the resource in which to store the variable. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignVariableOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignVariableOp.java index 98dd0682d0d..8a158a0f014 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignVariableOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignVariableOp.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -35,6 +37,10 @@ * Any ReadVariableOp with a control dependency on this op is guaranteed to return * this value or a subsequent newer value of the variable. */ +@OpMetadata( + opType = AssignVariableOp.OP_NAME, + inputsClass = AssignVariableOp.Inputs.class +) @Operator public final class AssignVariableOp extends RawOp { /** @@ -42,8 +48,8 @@ public final class AssignVariableOp extends RawOp { */ public static final String OP_NAME = "AssignVariableOp"; - private AssignVariableOp(Operation operation) { - super(operation); + public AssignVariableOp(Operation operation) { + super(operation, OP_NAME); } /** @@ -65,6 +71,9 @@ public static AssignVariableOp create(Scope scope, Operand reso return new AssignVariableOp(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = AssignVariableOp.class + ) public static class Inputs extends RawOpInputs { /** * handle to the resource in which to store the variable. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Barrier.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Barrier.java index a722c116dd7..0bb0e71fc7a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Barrier.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Barrier.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -45,6 +47,10 @@ * incomplete element has some undefined components in its value tuple, * and may be updated using BarrierInsertMany. */ +@OpMetadata( + opType = Barrier.OP_NAME, + inputsClass = Barrier.Inputs.class +) @Operator public final class Barrier extends RawOp implements Operand { /** @@ -54,8 +60,8 @@ public final class Barrier extends RawOp implements Operand { private Output handle; - private Barrier(Operation operation) { - super(operation); + public Barrier(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -247,6 +253,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = Barrier.class + ) public static class Inputs extends RawOpInputs { /** * The type of each component in a value. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierClose.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierClose.java index 509f714fb88..ba5c6c37b2b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierClose.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierClose.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; @@ -38,6 +40,10 @@ * continue to succeed if sufficient completed elements remain in the barrier. * Subsequent TakeMany operations that would block will fail immediately. */ +@OpMetadata( + opType = BarrierClose.OP_NAME, + inputsClass = BarrierClose.Inputs.class +) @Operator public final class BarrierClose extends RawOp { /** @@ -45,8 +51,8 @@ public final class BarrierClose extends RawOp { */ public static final String OP_NAME = "BarrierClose"; - private BarrierClose(Operation operation) { - super(operation); + public BarrierClose(Operation operation) { + super(operation, OP_NAME); } /** @@ -108,6 +114,9 @@ public Options cancelPendingEnqueues(Boolean cancelPendingEnqueues) { } } + @OpInputsMetadata( + outputsClass = BarrierClose.class + ) public static class Inputs extends RawOpInputs { /** * The handle to a barrier. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierIncompleteSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierIncompleteSize.java index 1fe4a2be77e..88528da89eb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierIncompleteSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierIncompleteSize.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; import org.tensorflow.types.TString; @@ -34,6 +36,10 @@ /** * Computes the number of incomplete elements in the given barrier. */ +@OpMetadata( + opType = BarrierIncompleteSize.OP_NAME, + inputsClass = BarrierIncompleteSize.Inputs.class +) @Operator public final class BarrierIncompleteSize extends RawOp implements Operand { /** @@ -43,8 +49,8 @@ public final class BarrierIncompleteSize extends RawOp implements Operand output; - private BarrierIncompleteSize(Operation operation) { - super(operation); + public BarrierIncompleteSize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = BarrierIncompleteSize.class + ) public static class Inputs extends RawOpInputs { /** * The handle to a barrier. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierInsertMany.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierInsertMany.java index b5d28bdc4da..c660005a872 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierInsertMany.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierInsertMany.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -38,6 +40,10 @@ * already has a value at component_index, this operation will fail with * INVALID_ARGUMENT, and leave the barrier in an undefined state. */ +@OpMetadata( + opType = BarrierInsertMany.OP_NAME, + inputsClass = BarrierInsertMany.Inputs.class +) @Operator public final class BarrierInsertMany extends RawOp { /** @@ -45,8 +51,8 @@ public final class BarrierInsertMany extends RawOp { */ public static final String OP_NAME = "BarrierInsertMany"; - private BarrierInsertMany(Operation operation) { - super(operation); + public BarrierInsertMany(Operation operation) { + super(operation, OP_NAME); } /** @@ -73,6 +79,9 @@ public static BarrierInsertMany create(Scope scope, Operand handle, return new BarrierInsertMany(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = BarrierInsertMany.class + ) public static class Inputs extends RawOpInputs { /** * The handle to a barrier. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierReadySize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierReadySize.java index f38db0fd368..ded885bc84d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierReadySize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierReadySize.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; import org.tensorflow.types.TString; @@ -34,6 +36,10 @@ /** * Computes the number of complete elements in the given barrier. */ +@OpMetadata( + opType = BarrierReadySize.OP_NAME, + inputsClass = BarrierReadySize.Inputs.class +) @Operator public final class BarrierReadySize extends RawOp implements Operand { /** @@ -43,8 +49,8 @@ public final class BarrierReadySize extends RawOp implements Operand { private Output output; - private BarrierReadySize(Operation operation) { - super(operation); + public BarrierReadySize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = BarrierReadySize.class + ) public static class Inputs extends RawOpInputs { /** * The handle to a barrier. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierTakeMany.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierTakeMany.java index baf77154ae3..18dacc2b92d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierTakeMany.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BarrierTakeMany.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -45,6 +47,10 @@ * information about the batch in which each element was originally inserted * into the barrier. */ +@OpMetadata( + opType = BarrierTakeMany.OP_NAME, + inputsClass = BarrierTakeMany.Inputs.class +) @Operator public final class BarrierTakeMany extends RawOp { /** @@ -59,8 +65,8 @@ public final class BarrierTakeMany extends RawOp { private List> values; @SuppressWarnings("unchecked") - private BarrierTakeMany(Operation operation) { - super(operation); + public BarrierTakeMany(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; indices = operation.output(outputIdx++); keys = operation.output(outputIdx++); @@ -219,6 +225,9 @@ public Options timeoutMs(Long timeoutMs) { } } + @OpInputsMetadata( + outputsClass = BarrierTakeMany.class + ) public static class Inputs extends RawOpInputs { /** * The handle to a barrier. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Batch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Batch.java index ce2a30e4fce..b6437531c11 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Batch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Batch.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -67,6 +69,10 @@ * empty, the op name will be used as the shared name. * T: the types of tensors to be batched. */ +@OpMetadata( + opType = Batch.OP_NAME, + inputsClass = Batch.Inputs.class +) @Operator public final class Batch extends RawOp { /** @@ -81,8 +87,8 @@ public final class Batch extends RawOp { private Output id; @SuppressWarnings("unchecked") - private Batch(Operation operation) { - super(operation); + public Batch(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int batchedTensorsLength = operation.outputListLength("batched_tensors"); batchedTensors = Arrays.asList(operation.outputList(outputIdx, batchedTensorsLength)); @@ -311,6 +317,9 @@ public Options batchingQueue(String batchingQueue) { } } + @OpInputsMetadata( + outputsClass = Batch.class + ) public static class Inputs extends RawOpInputs { /** * The inTensors input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchFunction.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchFunction.java index 25cba54989e..59537a13d93 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchFunction.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchFunction.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -70,6 +72,10 @@ *
SparseTensor is not supported. The return value of the decorated function * must be a Tensor or a list/tuple of Tensors. */ +@OpMetadata( + opType = BatchFunction.OP_NAME, + inputsClass = BatchFunction.Inputs.class +) @Operator public final class BatchFunction extends RawOp implements Iterable> { /** @@ -80,8 +86,8 @@ public final class BatchFunction extends RawOp implements Iterable> outTensors; @SuppressWarnings("unchecked") - private BatchFunction(Operation operation) { - super(operation); + public BatchFunction(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outTensorsLength = operation.outputListLength("out_tensors"); outTensors = Arrays.asList(operation.outputList(outputIdx, outTensorsLength)); @@ -353,6 +359,9 @@ public Options enableLargeBatchSplitting(Boolean enableLargeBatchSplitting) { } } + @OpInputsMetadata( + outputsClass = BatchFunction.class + ) public static class Inputs extends RawOpInputs { /** * The tensors to be batched. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpace.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpace.java index 9562debefb7..b4752272b0e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpace.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpace.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -43,6 +45,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = BatchToSpace.OP_NAME, + inputsClass = BatchToSpace.Inputs.class +) @Operator public final class BatchToSpace extends RawOp implements Operand { /** @@ -52,8 +58,8 @@ public final class BatchToSpace extends RawOp implements Operan private Output output; - private BatchToSpace(Operation operation) { - super(operation); + public BatchToSpace(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -150,6 +156,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = BatchToSpace.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D tensor with shape diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpaceNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpaceNd.java index f197bf88fe1..c056ee528d9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpaceNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpaceNd.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -43,6 +45,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = BatchToSpaceNd.OP_NAME, + inputsClass = BatchToSpaceNd.Inputs.class +) @Operator public final class BatchToSpaceNd 
extends RawOp implements Operand { /** @@ -52,8 +58,8 @@ public final class BatchToSpaceNd extends RawOp implements Oper private Output output; - private BatchToSpaceNd(Operation operation) { - super(operation); + public BatchToSpaceNd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -185,6 +191,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = BatchToSpaceNd.class + ) public static class Inputs extends RawOpInputs> { /** * N-D with shape {@code input_shape = [batch] + spatial_shape + remaining_shape}, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Bitcast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Bitcast.java index e0982cfbd92..7a191514e42 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Bitcast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Bitcast.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -95,6 +97,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Bitcast.OP_NAME, + inputsClass = Bitcast.Inputs.class +) @Operator public final class Bitcast extends RawOp implements Operand { /** @@ -104,8 +110,8 @@ public final class Bitcast extends RawOp implements Operand private Output output; - private Bitcast(Operation operation) { - super(operation); + public Bitcast(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -144,6 +150,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Bitcast.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastDynamicShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastDynamicShape.java index 55d084bf691..90df4598114 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastDynamicShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastDynamicShape.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code r0} output */ +@OpMetadata( + opType = BroadcastDynamicShape.OP_NAME, + inputsClass = BroadcastDynamicShape.Inputs.class +) @Operator public final class BroadcastDynamicShape extends RawOp implements Operand { /** @@ -47,8 +53,8 @@ public final class BroadcastDynamicShape extends RawOp implem private Output r0; - private BroadcastDynamicShape(Operation operation) { - super(operation); + public BroadcastDynamicShape(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; r0 = operation.output(outputIdx++); } @@ -87,6 +93,9 @@ public Output 
asOutput() { return r0; } + @OpInputsMetadata( + outputsClass = BroadcastDynamicShape.class + ) public static class Inputs extends RawOpInputs> { /** * The s0 input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastGradientArgs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastGradientArgs.java index 807cf34adb8..c70e82caeb9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastGradientArgs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastGradientArgs.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code r0} output */ +@OpMetadata( + opType = BroadcastGradientArgs.OP_NAME, + inputsClass = BroadcastGradientArgs.Inputs.class +) public final class BroadcastGradientArgs extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class BroadcastGradientArgs extends RawOp { private Output r1; - private BroadcastGradientArgs(Operation operation) { - super(operation); + public BroadcastGradientArgs(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; r0 = operation.output(outputIdx++); r1 = operation.output(outputIdx++); @@ -91,6 +97,9 @@ public Output r1() { return r1; } + @OpInputsMetadata( + outputsClass = BroadcastGradientArgs.class + ) public static class Inputs extends RawOpInputs> { /** * The s0 input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastTo.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastTo.java index 1761611649e..02f7187275f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastTo.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastTo.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -65,6 +67,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = BroadcastTo.OP_NAME, + inputsClass = BroadcastTo.Inputs.class +) @Operator public final class BroadcastTo extends RawOp implements Operand { /** @@ -74,8 +80,8 @@ public final class BroadcastTo extends RawOp implements Operand private Output output; - private BroadcastTo(Operation operation) { - super(operation); + public BroadcastTo(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -114,6 +120,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = BroadcastTo.class + ) public static class Inputs extends RawOpInputs> { /** * A Tensor to broadcast. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Bucketize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Bucketize.java index 9ce50864f2b..68c99f15181 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Bucketize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Bucketize.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -45,6 +47,10 @@ * [3, 2] * [1, 3]] */ +@OpMetadata( + opType = Bucketize.OP_NAME, + inputsClass = Bucketize.Inputs.class +) @Operator public final class Bucketize extends RawOp implements Operand { /** @@ -54,8 +60,8 @@ public final class Bucketize extends RawOp implements Operand { private Output output; - private Bucketize(Operation operation) { - super(operation); + public Bucketize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -100,6 +106,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Bucketize.class + ) public static class Inputs extends RawOpInputs { /** * Any shape of Tensor contains with int or float type. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ClipByValue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ClipByValue.java index 0936dc33ac8..a031889ee23 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ClipByValue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ClipByValue.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -40,6 +42,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ClipByValue.OP_NAME, + inputsClass = ClipByValue.Inputs.class +) @Operator public final class ClipByValue extends RawOp implements Operand { /** @@ -49,8 +55,8 @@ public final class ClipByValue extends RawOp implements Operand private Output output; - private ClipByValue(Operation operation) { - super(operation); + public ClipByValue(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -93,6 +99,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = ClipByValue.class + ) public static class Inputs extends RawOpInputs> { /** * A {@code Tensor}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CollectiveGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CollectiveGather.java index 65d455900d6..d30e8e38019 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CollectiveGather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CollectiveGather.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @deprecated use {@link org.tensorflow.op.collective.Gather} instead */ +@OpMetadata( + opType = CollectiveGather.OP_NAME, + inputsClass = CollectiveGather.Inputs.class +) @Deprecated public final class CollectiveGather extends RawOp implements Operand { /** @@ -47,8 +53,8 @@ public final class CollectiveGather extends RawOp implements private Output data; - private CollectiveGather(Operation operation) { - super(operation); + public CollectiveGather(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; data = operation.output(outputIdx++); } @@ -158,6 +164,9 @@ public Options timeoutSeconds(Float timeoutSeconds) { } } + @OpInputsMetadata( + outputsClass = CollectiveGather.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CompositeTensorVariantFromComponents.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CompositeTensorVariantFromComponents.java index 863f03c3063..05a2295e86a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CompositeTensorVariantFromComponents.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CompositeTensorVariantFromComponents.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * Returns a scalar variant tensor containing a single {@code CompositeTensorVariant} * with the specified Tensor components and TypeSpec. */ +@OpMetadata( + opType = CompositeTensorVariantFromComponents.OP_NAME, + inputsClass = CompositeTensorVariantFromComponents.Inputs.class +) public final class CompositeTensorVariantFromComponents extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class CompositeTensorVariantFromComponents extends RawOp implements private Output encoded; @SuppressWarnings("unchecked") - private CompositeTensorVariantFromComponents(Operation operation) { - super(operation); + public CompositeTensorVariantFromComponents(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; encoded = operation.output(outputIdx++); } @@ -86,6 +92,9 @@ public Output asOutput() { return (Output) encoded; } + @OpInputsMetadata( + outputsClass = CompositeTensorVariantFromComponents.class + ) public static class Inputs extends RawOpInputs { /** * The component tensors for the extension type value. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CompositeTensorVariantToComponents.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CompositeTensorVariantToComponents.java index 061ee7bf0be..b1bf76d5de7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CompositeTensorVariantToComponents.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CompositeTensorVariantToComponents.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ *
Raises an error if {@code type_spec_proto} doesn't match the TypeSpec * in {@code encoded}. */ +@OpMetadata( + opType = CompositeTensorVariantToComponents.OP_NAME, + inputsClass = CompositeTensorVariantToComponents.Inputs.class +) public final class CompositeTensorVariantToComponents extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine @@ -48,8 +54,8 @@ public final class CompositeTensorVariantToComponents extends RawOp implements I private List> components; @SuppressWarnings("unchecked") - private CompositeTensorVariantToComponents(Operation operation) { - super(operation); + public CompositeTensorVariantToComponents(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int componentsLength = operation.outputListLength("components"); components = Arrays.asList(operation.outputList(outputIdx, componentsLength)); @@ -94,6 +100,9 @@ public Iterator> iterator() { return (Iterator) components.iterator(); } + @OpInputsMetadata( + outputsClass = CompositeTensorVariantToComponents.class + ) public static class Inputs extends RawOpInputs { /** * A scalar {@code variant} Tensor containing an encoded ExtensionType value. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Concat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Concat.java index d4b8127f644..4abd12fe0d8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Concat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Concat.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Concat.OP_NAME, + inputsClass = Concat.Inputs.class +) @Operator public final class Concat extends RawOp implements Operand { /** @@ -47,8 +53,8 @@ public final class Concat extends RawOp implements Operand { private Output output; - private Concat(Operation operation) { - super(operation); + public Concat(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Concat.class + ) public static class Inputs extends RawOpInputs> { /** * List of {@code N} Tensors to concatenate. Their ranks and types must match, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ConsumeMutexLock.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ConsumeMutexLock.java index 3f66976d9c6..bb847edc7cf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ConsumeMutexLock.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ConsumeMutexLock.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ *
NOTE: This operation must run on the same device as its input. This may * be enforced via the {@code colocate_with} mechanism. */ +@OpMetadata( + opType = ConsumeMutexLock.OP_NAME, + inputsClass = ConsumeMutexLock.Inputs.class +) @Operator public final class ConsumeMutexLock extends RawOp { /** @@ -45,8 +51,8 @@ public final class ConsumeMutexLock extends RawOp { */ public static final String OP_NAME = "ConsumeMutexLock"; - private ConsumeMutexLock(Operation operation) { - super(operation); + public ConsumeMutexLock(Operation operation) { + super(operation, OP_NAME); } /** @@ -65,6 +71,9 @@ public static ConsumeMutexLock create(Scope scope, Operand mute return new ConsumeMutexLock(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = ConsumeMutexLock.class + ) public static class Inputs extends RawOpInputs { /** * A tensor returned by {@code MutexLock}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ControlTrigger.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ControlTrigger.java index 03a2c6485d2..a17d125586f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ControlTrigger.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ControlTrigger.java @@ -25,12 +25,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; /** * Does nothing. Serves as a control trigger for scheduling. * Only useful as a placeholder for control edges. */ +@OpMetadata( + opType = ControlTrigger.OP_NAME, + inputsClass = ControlTrigger.Inputs.class +) @Operator public final class ControlTrigger extends RawOp { /** @@ -38,8 +44,8 @@ public final class ControlTrigger extends RawOp { */ public static final String OP_NAME = "ControlTrigger"; - private ControlTrigger(Operation operation) { - super(operation); + public ControlTrigger(Operation operation) { + super(operation, OP_NAME); } /** @@ -56,6 +62,9 @@ public static ControlTrigger create(Scope scope) { return new ControlTrigger(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = ControlTrigger.class + ) public static class Inputs extends RawOpInputs { public Inputs(GraphOperation op) { super(new ControlTrigger(op), op, Arrays.asList()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Copy.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Copy.java index 4138011fac8..4fb2dbf1269 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Copy.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Copy.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -43,6 +45,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Copy.OP_NAME, + inputsClass = Copy.Inputs.class +) public final class Copy extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -51,8 +57,8 @@ public final class Copy extends RawOp implements Operand { private Output output; - 
private Copy(Operation operation) { - super(operation); + public Copy(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -195,6 +201,9 @@ public Options debugOpsSpec(String... debugOpsSpec) { } } + @OpInputsMetadata( + outputsClass = Copy.class + ) public static class Inputs extends RawOpInputs> { /** * Input tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyHost.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyHost.java index d35bd8e9b81..0af9c1ac794 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyHost.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyHost.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -41,6 +43,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = CopyHost.OP_NAME, + inputsClass = CopyHost.Inputs.class +) public final class CopyHost extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -49,8 +55,8 @@ public final class CopyHost extends RawOp implements Operand private Output output; - private CopyHost(Operation operation) { - super(operation); + public CopyHost(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -193,6 +199,9 @@ public Options debugOpsSpec(String... debugOpsSpec) { } } + @OpInputsMetadata( + outputsClass = CopyHost.class + ) public static class Inputs extends RawOpInputs> { /** * Input tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CountUpTo.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CountUpTo.java index 94ca8305dd7..7ce1ece5a75 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CountUpTo.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CountUpTo.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = CountUpTo.OP_NAME, + inputsClass = CountUpTo.Inputs.class +) @Operator public final class CountUpTo extends RawOp implements Operand { /** @@ -45,8 +51,8 @@ public final class CountUpTo extends RawOp implements Operand private Output output; - private CountUpTo(Operation operation) { - super(operation); + public CountUpTo(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -86,6 +92,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = CountUpTo.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a scalar {@code Variable} node. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DecodeProto.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DecodeProto.java index 1011cac53b7..7c1818a97eb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DecodeProto.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DecodeProto.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -89,6 +91,10 @@ * * */ +@OpMetadata( + opType = DecodeProto.OP_NAME, + inputsClass = DecodeProto.Inputs.class +) @Operator public final class DecodeProto extends RawOp { /** @@ -101,8 +107,8 @@ public final class DecodeProto extends RawOp { private List> values; @SuppressWarnings("unchecked") - private DecodeProto(Operation operation) { - super(operation); + public DecodeProto(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; sizes = operation.output(outputIdx++); int valuesLength = operation.outputListLength("values"); @@ -253,6 +259,9 @@ public Options sanitize(Boolean sanitize) { } } + @OpInputsMetadata( + outputsClass = DecodeProto.class + ) public static class Inputs extends RawOpInputs { /** * Tensor of serialized protos with shape {@code batch_shape}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeepCopy.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeepCopy.java index ea55c769019..01d71313c42 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeepCopy.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeepCopy.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = DeepCopy.OP_NAME, + inputsClass = DeepCopy.Inputs.class +) @Operator public final class DeepCopy extends RawOp implements Operand { /** @@ -45,8 +51,8 @@ public final class DeepCopy extends RawOp implements Operand private Output y; - private DeepCopy(Operation operation) { - super(operation); + public DeepCopy(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -85,6 +91,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = DeepCopy.class + ) public static class Inputs extends RawOpInputs> { /** * The source tensor of type {@code T}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeleteSessionTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeleteSessionTensor.java index 47e499eba70..de259a8599b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeleteSessionTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeleteSessionTensor.java @@ -26,12 +26,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; /** * Delete the tensor specified by its handle in the session. */ +@OpMetadata( + opType = DeleteSessionTensor.OP_NAME, + inputsClass = DeleteSessionTensor.Inputs.class +) @Operator public final class DeleteSessionTensor extends RawOp { /** @@ -39,8 +45,8 @@ public final class DeleteSessionTensor extends RawOp { */ public static final String OP_NAME = "DeleteSessionTensor"; - private DeleteSessionTensor(Operation operation) { - super(operation); + public DeleteSessionTensor(Operation operation) { + super(operation, OP_NAME); } /** @@ -59,6 +65,9 @@ public static DeleteSessionTensor create(Scope scope, Operand handle) { return new DeleteSessionTensor(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = DeleteSessionTensor.class + ) public static class Inputs extends RawOpInputs { /** * The handle for a tensor stored in the session state. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DestroyResourceOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DestroyResourceOp.java index 040c054f085..b4baf185453 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DestroyResourceOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DestroyResourceOp.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; @@ -34,6 +36,10 @@ * All subsequent operations using the resource will result in a NotFound * error status. */ +@OpMetadata( + opType = DestroyResourceOp.OP_NAME, + inputsClass = DestroyResourceOp.Inputs.class +) @Operator public final class DestroyResourceOp extends RawOp { /** @@ -41,8 +47,8 @@ public final class DestroyResourceOp extends RawOp { */ public static final String OP_NAME = "DestroyResourceOp"; - private DestroyResourceOp(Operation operation) { - super(operation); + public DestroyResourceOp(Operation operation) { + super(operation, OP_NAME); } /** @@ -103,6 +109,9 @@ public Options ignoreLookupError(Boolean ignoreLookupError) { } } + @OpInputsMetadata( + outputsClass = DestroyResourceOp.class + ) public static class Inputs extends RawOpInputs { /** * handle to the resource to delete. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DestroyTemporaryVariable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DestroyTemporaryVariable.java index 7d828ee733b..527f7179901 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DestroyTemporaryVariable.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DestroyTemporaryVariable.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +44,10 @@ * * @param data type for {@code value} output */ +@OpMetadata( + opType = DestroyTemporaryVariable.OP_NAME, + inputsClass = DestroyTemporaryVariable.Inputs.class +) @Operator public final class DestroyTemporaryVariable extends RawOp implements Operand { /** @@ -51,8 +57,8 @@ public final class DestroyTemporaryVariable extends RawOp imple private Output value; - private DestroyTemporaryVariable(Operation operation) { - super(operation); + public DestroyTemporaryVariable(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; value = operation.output(outputIdx++); } @@ -92,6 +98,9 @@ public Output asOutput() { return value; } + @OpInputsMetadata( + outputsClass = DestroyTemporaryVariable.class + ) public static class Inputs extends RawOpInputs> { /** * A reference to the temporary variable tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeviceIndex.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeviceIndex.java index a06f456040c..fdb7d9c0b10 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeviceIndex.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeviceIndex.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TInt32; /** @@ -37,6 +39,10 @@ * (1) Device does not exist in the given device list. * (2) It is in XLA compilation. 
*/ +@OpMetadata( + opType = DeviceIndex.OP_NAME, + inputsClass = DeviceIndex.Inputs.class +) public final class DeviceIndex extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class DeviceIndex extends RawOp implements Operand { private Output index; - private DeviceIndex(Operation operation) { - super(operation); + public DeviceIndex(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; index = operation.output(outputIdx++); } @@ -85,6 +91,9 @@ public Output asOutput() { return index; } + @OpInputsMetadata( + outputsClass = DeviceIndex.class + ) public static class Inputs extends RawOpInputs { /** * The deviceNames attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DummyMemoryCache.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DummyMemoryCache.java index 3415b800196..3be75c630c9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DummyMemoryCache.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DummyMemoryCache.java @@ -27,11 +27,17 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.family.TType; /** * The DummyMemoryCache operation */ +@OpMetadata( + opType = DummyMemoryCache.OP_NAME, + inputsClass = DummyMemoryCache.Inputs.class +) public final class DummyMemoryCache extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -41,8 +47,8 @@ public final class DummyMemoryCache extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private DummyMemoryCache(Operation operation) { - super(operation); + public DummyMemoryCache(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -76,6 +82,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = DummyMemoryCache.class + ) public static class Inputs extends RawOpInputs { public Inputs(GraphOperation op) { super(new DummyMemoryCache(op), op, Arrays.asList()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicPartition.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicPartition.java index 5c65adef756..cebd26df319 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicPartition.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicPartition.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -70,6 +72,10 @@ * * @param data type for {@code outputs} output */ +@OpMetadata( + opType = DynamicPartition.OP_NAME, + inputsClass = DynamicPartition.Inputs.class +) @Operator public final class DynamicPartition extends RawOp implements Iterable> { /** @@ -80,8 +86,8 @@ public final class DynamicPartition extends RawOp implements It private List> outputs; @SuppressWarnings("unchecked") - 
private DynamicPartition(Operation operation) { - super(operation); + public DynamicPartition(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputsLength = operation.outputListLength("outputs"); outputs = Arrays.asList((Output[]) operation.outputList(outputIdx, outputsLength)); @@ -125,6 +131,9 @@ public Iterator> iterator() { return (Iterator) outputs.iterator(); } + @OpInputsMetadata( + outputsClass = DynamicPartition.class + ) public static class Inputs extends RawOpInputs> { /** * The data input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicStitch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicStitch.java index da206083909..012d1231a37 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicStitch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicStitch.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -91,6 +93,10 @@ * * @param data type for {@code merged} output */ +@OpMetadata( + opType = DynamicStitch.OP_NAME, + inputsClass = DynamicStitch.Inputs.class +) @Operator public final class DynamicStitch extends RawOp implements Operand { /** @@ -100,8 +106,8 @@ public final class DynamicStitch extends RawOp implements Opera private Output merged; - private DynamicStitch(Operation operation) { - super(operation); + public DynamicStitch(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; merged = operation.output(outputIdx++); } @@ -140,6 +146,9 @@ public Output asOutput() { return merged; } + @OpInputsMetadata( + outputsClass = DynamicStitch.class + ) public static class Inputs extends RawOpInputs> { /** * The indices input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EditDistance.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EditDistance.java index 63ab6da64a9..8e3557d5022 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EditDistance.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EditDistance.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -41,6 +43,10 @@ * (truth_indices, truth_values, truth_shape). *

The inputs are: */ +@OpMetadata( + opType = EditDistance.OP_NAME, + inputsClass = EditDistance.Inputs.class +) @Operator public final class EditDistance extends RawOp implements Operand { /** @@ -50,8 +56,8 @@ public final class EditDistance extends RawOp implements Operand { private Output output; - private EditDistance(Operation operation) { - super(operation); + public EditDistance(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -175,6 +181,9 @@ public Options normalize(Boolean normalize) { } } + @OpInputsMetadata( + outputsClass = EditDistance.class + ) public static class Inputs extends RawOpInputs { /** * The indices of the hypothesis list SparseTensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Empty.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Empty.java index 0a06df36054..6aea00cf250 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Empty.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Empty.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Empty.OP_NAME, + inputsClass = Empty.Inputs.class +) @Operator public final class Empty extends RawOp implements Operand { /** @@ -48,8 +54,8 @@ public final class Empty extends RawOp implements Operand { private Output output; - private Empty(Operation operation) { - super(operation); + public Empty(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -127,6 +133,9 @@ public Options init(Boolean init) { } } + @OpInputsMetadata( + outputsClass = Empty.class + ) public static class Inputs extends RawOpInputs> { /** * 1-D. Represents the shape of the output tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EmptyTensorList.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EmptyTensorList.java index 25fc298a92a..1ebd1112e2e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EmptyTensorList.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EmptyTensorList.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -42,6 +44,10 @@ * element_dtype: the type of elements in the list. * element_shape: a shape compatible with that of elements in the list. 
*/ +@OpMetadata( + opType = EmptyTensorList.OP_NAME, + inputsClass = EmptyTensorList.Inputs.class +) @Operator public final class EmptyTensorList extends RawOp implements Operand { /** @@ -52,8 +58,8 @@ public final class EmptyTensorList extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private EmptyTensorList(Operation operation) { - super(operation); + public EmptyTensorList(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -96,6 +102,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = EmptyTensorList.class + ) public static class Inputs extends RawOpInputs { /** * The elementShape input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EmptyTensorMap.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EmptyTensorMap.java index ffdf712e26e..a6fb97ceda1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EmptyTensorMap.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EmptyTensorMap.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; @@ -34,6 +36,10 @@ * Creates and returns an empty tensor map. * handle: an empty tensor map */ +@OpMetadata( + opType = EmptyTensorMap.OP_NAME, + inputsClass = EmptyTensorMap.Inputs.class +) @Operator public final class EmptyTensorMap extends RawOp implements Operand { /** @@ -44,8 +50,8 @@ public final class EmptyTensorMap extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private EmptyTensorMap(Operation operation) { - super(operation); + public EmptyTensorMap(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -79,6 +85,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = EmptyTensorMap.class + ) public static class Inputs extends RawOpInputs { public Inputs(GraphOperation op) { super(new EmptyTensorMap(op), op, Arrays.asList()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EncodeProto.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EncodeProto.java index 3beb517b637..2eb65960230 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EncodeProto.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EncodeProto.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -82,6 +84,10 @@ * * */ +@OpMetadata( + opType = EncodeProto.OP_NAME, + inputsClass = EncodeProto.Inputs.class +) @Operator public final class EncodeProto extends RawOp implements Operand { /** @@ -91,8 +97,8 @@ public final class EncodeProto extends RawOp implements Operand { private Output bytes; - private EncodeProto(Operation operation) { - super(operation); + 
public EncodeProto(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; bytes = operation.output(outputIdx++); } @@ -177,6 +183,9 @@ public Options descriptorSource(String descriptorSource) { } } + @OpInputsMetadata( + outputsClass = EncodeProto.class + ) public static class Inputs extends RawOpInputs { /** * Tensor of int32 with shape {@code [batch_shape, len(field_names)]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EnsureShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EnsureShape.java index 3c7443c40b7..13bdd8f7e94 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EnsureShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EnsureShape.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = EnsureShape.OP_NAME, + inputsClass = EnsureShape.Inputs.class +) @Operator public final class EnsureShape extends RawOp implements Operand { /** @@ -48,8 +54,8 @@ public final class EnsureShape extends RawOp implements Operand private Output output; - private EnsureShape(Operation operation) { - super(operation); + public EnsureShape(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = EnsureShape.class + ) public static class Inputs extends RawOpInputs> { /** * A tensor, whose shape is to be validated. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Enter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Enter.java index 17562be32ee..82e1831b5f1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Enter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Enter.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -40,6 +42,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Enter.OP_NAME, + inputsClass = Enter.Inputs.class +) public final class Enter extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -48,8 +54,8 @@ public final class Enter extends RawOp implements Operand { private Output output; - private Enter(Operation operation) { - super(operation); + public Enter(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -153,6 +159,9 @@ public Options parallelIterations(Long parallelIterations) { } } + @OpInputsMetadata( + outputsClass = Enter.class + ) public static class Inputs extends RawOpInputs> { /** * The tensor to be made available to the child frame. 
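Every hunk in this patch applies the same three-part change to a generated op class: an @OpMetadata annotation linking the op type to its nested Inputs class, a constructor widened from private to public that now forwards OP_NAME to the RawOp super constructor, and an @OpInputsMetadata annotation on the Inputs class pointing back at the op. A condensed sketch of the resulting shape, using a hypothetical single-output op named MyOp rather than any class from this patch (real generated classes also carry generics, javadoc and, where exported, an @Operator annotation):

    import java.util.Arrays;
    import org.tensorflow.GraphOperation;
    import org.tensorflow.Operand;
    import org.tensorflow.Operation;
    import org.tensorflow.Output;
    import org.tensorflow.op.RawOp;
    import org.tensorflow.op.RawOpInputs;
    import org.tensorflow.op.annotation.OpInputsMetadata;
    import org.tensorflow.op.annotation.OpMetadata;
    import org.tensorflow.types.family.TType;

    // Hypothetical op class illustrating the post-patch pattern; MyOp is not a real op.
    @OpMetadata(
        opType = MyOp.OP_NAME,
        inputsClass = MyOp.Inputs.class
    )
    public final class MyOp extends RawOp implements Operand<TType> {
      /** The name of this op, as known by TensorFlow core engine */
      public static final String OP_NAME = "MyOp";

      private Output<TType> output;

      // Previously: private MyOp(Operation operation) { super(operation); ... }
      // Now public, and OP_NAME is forwarded so the base class knows the op type.
      public MyOp(Operation operation) {
        super(operation, OP_NAME);
        int outputIdx = 0;
        output = operation.output(outputIdx++);
      }

      @Override
      public Output<TType> asOutput() {
        return output;
      }

      // Links the Inputs helper back to the op class it describes.
      @OpInputsMetadata(
          outputsClass = MyOp.class
      )
      public static class Inputs extends RawOpInputs<MyOp> {
        public Inputs(GraphOperation op) {
          // The now-public constructor is what makes this call possible.
          super(new MyOp(op), op, Arrays.asList());
        }
      }
    }

The opType value is always the existing OP_NAME constant, so the annotation introduces no new strings, only a compile-time link between the op class, its op-definition name and its Inputs class.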
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Exit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Exit.java index 2942db758b4..ee8dcfb8ceb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Exit.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Exit.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Exit.OP_NAME, + inputsClass = Exit.Inputs.class +) public final class Exit extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class Exit extends RawOp implements Operand { private Output output; - private Exit(Operation operation) { - super(operation); + public Exit(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -81,6 +87,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Exit.class + ) public static class Inputs extends RawOpInputs> { /** * The tensor to be made available to the parent frame. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExpandDims.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExpandDims.java index 0086c4259b3..4d7c1eea3cc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExpandDims.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExpandDims.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -60,6 +62,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ExpandDims.OP_NAME, + inputsClass = ExpandDims.Inputs.class +) @Operator public final class ExpandDims extends RawOp implements Operand { /** @@ -69,8 +75,8 @@ public final class ExpandDims extends RawOp implements Operand< private Output output; - private ExpandDims(Operation operation) { - super(operation); + public ExpandDims(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -112,6 +118,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = ExpandDims.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExtractVolumePatches.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExtractVolumePatches.java index 9db29df2224..669b81d1ae0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExtractVolumePatches.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExtractVolumePatches.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import 
org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code patches} output */ +@OpMetadata( + opType = ExtractVolumePatches.OP_NAME, + inputsClass = ExtractVolumePatches.Inputs.class +) @Operator public final class ExtractVolumePatches extends RawOp implements Operand { /** @@ -46,8 +52,8 @@ public final class ExtractVolumePatches extends RawOp impleme private Output patches; - private ExtractVolumePatches(Operation operation) { - super(operation); + public ExtractVolumePatches(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; patches = operation.output(outputIdx++); } @@ -107,6 +113,9 @@ public Output asOutput() { return patches; } + @OpInputsMetadata( + outputsClass = ExtractVolumePatches.class + ) public static class Inputs extends RawOpInputs> { /** * 5-D Tensor with shape {@code [batch, in_planes, in_rows, in_cols, depth]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Fill.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Fill.java index 1df3bb32e39..3041b796443 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Fill.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Fill.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -54,6 +56,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Fill.OP_NAME, + inputsClass = Fill.Inputs.class +) @Operator public final class Fill extends RawOp implements Operand { /** @@ -63,8 +69,8 @@ public final class Fill extends RawOp implements Operand { private Output output; - private Fill(Operation operation) { - super(operation); + public Fill(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -106,6 +112,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Fill.class + ) public static class Inputs extends RawOpInputs> { /** * 1-D. Represents the shape of the output tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Fingerprint.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Fingerprint.java index 805a0ad1025..d6ef4704e45 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Fingerprint.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Fingerprint.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -59,6 +61,10 @@ * *

For string data, one should expect {@code Fingerprint(data) != Fingerprint(ReduceJoin(data))} in general. */ +@OpMetadata( + opType = Fingerprint.OP_NAME, + inputsClass = Fingerprint.Inputs.class +) @Operator public final class Fingerprint extends RawOp implements Operand { /** @@ -68,8 +74,8 @@ public final class Fingerprint extends RawOp implements Operand { private Output fingerprint; - private Fingerprint(Operation operation) { - super(operation); + public Fingerprint(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; fingerprint = operation.output(outputIdx++); } @@ -110,6 +116,9 @@ public Output asOutput() { return fingerprint; } + @OpInputsMetadata( + outputsClass = Fingerprint.class + ) public static class Inputs extends RawOpInputs { /** * Must have rank 1 or higher. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/For.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/For.java index 8089e05d434..e7778c59f31 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/For.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/For.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -43,6 +45,10 @@ * output = body(i, output); * */ +@OpMetadata( + opType = For.OP_NAME, + inputsClass = For.Inputs.class +) @Operator public final class For extends RawOp implements Iterable> { /** @@ -53,8 +59,8 @@ public final class For extends RawOp implements Iterable> { private List> output; @SuppressWarnings("unchecked") - private For(Operation operation) { - super(operation); + public For(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputLength = operation.outputListLength("output"); output = Arrays.asList(operation.outputList(outputIdx, outputLength)); @@ -105,6 +111,9 @@ public Iterator> iterator() { return (Iterator) output.iterator(); } + @OpInputsMetadata( + outputsClass = For.class + ) public static class Inputs extends RawOpInputs { /** * The lower bound. 
An int32 diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Gather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Gather.java index 1507b1ca9ca..2d0275f26f2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Gather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Gather.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -59,6 +61,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Gather.OP_NAME, + inputsClass = Gather.Inputs.class +) @Operator public final class Gather extends RawOp implements Operand { /** @@ -68,8 +74,8 @@ public final class Gather extends RawOp implements Operand { private Output output; - private Gather(Operation operation) { - super(operation); + public Gather(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -152,6 +158,9 @@ public Options batchDims(Long batchDims) { } } + @OpInputsMetadata( + outputsClass = Gather.class + ) public static class Inputs extends RawOpInputs> { /** * The tensor from which to gather values. Must be at least rank diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GatherNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GatherNd.java index 0b4ee538a18..0528f5d3fbe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GatherNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GatherNd.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -126,6 +128,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = GatherNd.OP_NAME, + inputsClass = GatherNd.Inputs.class +) @Operator public final class GatherNd extends RawOp implements Operand { /** @@ -135,8 +141,8 @@ public final class GatherNd extends RawOp implements Operand private Output output; - private GatherNd(Operation operation) { - super(operation); + public GatherNd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -176,6 +182,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = GatherNd.class + ) public static class Inputs extends RawOpInputs> { /** * The tensor from which to gather values. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetSessionHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetSessionHandle.java index 60a52b39aaa..0538620d314 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetSessionHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetSessionHandle.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -34,6 +36,10 @@ /** * Store the input tensor in the state of the current session. */ +@OpMetadata( + opType = GetSessionHandle.OP_NAME, + inputsClass = GetSessionHandle.Inputs.class +) @Operator public final class GetSessionHandle extends RawOp implements Operand { /** @@ -44,8 +50,8 @@ public final class GetSessionHandle extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private GetSessionHandle(Operation operation) { - super(operation); + public GetSessionHandle(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = GetSessionHandle.class + ) public static class Inputs extends RawOpInputs { /** * The tensor to be stored. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetSessionTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetSessionTensor.java index c8a85d0b714..417eba05f1d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetSessionTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetSessionTensor.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -38,6 +40,10 @@ * * @param data type for {@code value} output */ +@OpMetadata( + opType = GetSessionTensor.OP_NAME, + inputsClass = GetSessionTensor.Inputs.class +) @Operator public final class GetSessionTensor extends RawOp implements Operand { /** @@ -47,8 +53,8 @@ public final class GetSessionTensor extends RawOp implements Op private Output value; - private GetSessionTensor(Operation operation) { - super(operation); + public GetSessionTensor(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; value = operation.output(outputIdx++); } @@ -87,6 +93,9 @@ public Output asOutput() { return value; } + @OpInputsMetadata( + outputsClass = GetSessionTensor.class + ) public static class Inputs extends RawOpInputs> { /** * The handle for a tensor stored in the session state. 
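One practical consequence of the private-to-public constructor change is that an operation already present in a graph can be re-wrapped as its typed op class, which is exactly what the generated Inputs constructors above do. A minimal usage sketch, assuming a Graph that already contains a DeviceIndex operation named "device_index" (the lookup-by-name call and the op name are illustrative assumptions, not part of this patch):

    import org.tensorflow.Graph;
    import org.tensorflow.Operation;
    import org.tensorflow.Output;
    import org.tensorflow.op.core.DeviceIndex;
    import org.tensorflow.types.TInt32;

    class RewrapSketch {
      static Output<TInt32> deviceIndexOutput(Graph g) {
        // Assumption: g already holds a DeviceIndex op under this name.
        Operation op = g.operation("device_index");
        // Post-patch, the generated constructor is public, so the raw operation
        // can be wrapped back into its typed class directly.
        DeviceIndex deviceIndex = new DeviceIndex(op);
        return deviceIndex.asOutput();
      }
    }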
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GuaranteeConst.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GuaranteeConst.java index 2f6e8c9dda2..4a2a67691b9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GuaranteeConst.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GuaranteeConst.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -40,6 +42,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = GuaranteeConst.OP_NAME, + inputsClass = GuaranteeConst.Inputs.class +) @Operator public final class GuaranteeConst extends RawOp implements Operand { /** @@ -49,8 +55,8 @@ public final class GuaranteeConst extends RawOp implements Oper private Output output; - private GuaranteeConst(Operation operation) { - super(operation); + public GuaranteeConst(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -86,6 +92,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = GuaranteeConst.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HashTable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HashTable.java index f81970293ed..d2791779391 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HashTable.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HashTable.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * Before using the table you will have to initialize it. After initialization the * table will be immutable. */ +@OpMetadata( + opType = HashTable.OP_NAME, + inputsClass = HashTable.Inputs.class +) @Operator public final class HashTable extends RawOp implements Operand { /** @@ -48,8 +54,8 @@ public final class HashTable extends RawOp implements Operand { private Output tableHandle; @SuppressWarnings("unchecked") - private HashTable(Operation operation) { - super(operation); + public HashTable(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; tableHandle = operation.output(outputIdx++); } @@ -187,6 +193,9 @@ public Options useNodeNameSharing(Boolean useNodeNameSharing) { } } + @OpInputsMetadata( + outputsClass = HashTable.class + ) public static class Inputs extends RawOpInputs { /** * If non-empty, this table is placed in the given container. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HistogramFixedWidth.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HistogramFixedWidth.java index fc7ac24dbaf..13011fdd573 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HistogramFixedWidth.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HistogramFixedWidth.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -52,6 +54,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = HistogramFixedWidth.OP_NAME, + inputsClass = HistogramFixedWidth.Inputs.class +) @Operator public final class HistogramFixedWidth extends RawOp implements Operand { /** @@ -61,8 +67,8 @@ public final class HistogramFixedWidth extends RawOp implemen private Output out; - private HistogramFixedWidth(Operation operation) { - super(operation); + public HistogramFixedWidth(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -128,6 +134,9 @@ public Output asOutput() { return out; } + @OpInputsMetadata( + outputsClass = HistogramFixedWidth.class + ) public static class Inputs extends RawOpInputs> { /** * Numeric {@code Tensor}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Identity.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Identity.java index a07329e1b19..e8b8f336869 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Identity.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Identity.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Identity.OP_NAME, + inputsClass = Identity.Inputs.class +) @Operator public final class Identity extends RawOp implements Operand { /** @@ -45,8 +51,8 @@ public final class Identity extends RawOp implements Operand private Output output; - private Identity(Operation operation) { - super(operation); + public Identity(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Identity.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/IdentityN.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/IdentityN.java index ec6b7c51e61..fc06bb10126 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/IdentityN.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/IdentityN.java @@ -30,6 +30,8 @@ import 
org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -50,6 +52,10 @@ * return [None, g(dy)] # Do not backprop to f(x). * */ +@OpMetadata( + opType = IdentityN.OP_NAME, + inputsClass = IdentityN.Inputs.class +) @Operator public final class IdentityN extends RawOp implements Iterable> { /** @@ -60,8 +66,8 @@ public final class IdentityN extends RawOp implements Iterable> { private List> output; @SuppressWarnings("unchecked") - private IdentityN(Operation operation) { - super(operation); + public IdentityN(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputLength = operation.outputListLength("output"); output = Arrays.asList(operation.outputList(outputIdx, outputLength)); @@ -99,6 +105,9 @@ public Iterator> iterator() { return (Iterator) output.iterator(); } + @OpInputsMetadata( + outputsClass = IdentityN.class + ) public static class Inputs extends RawOpInputs { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ImmutableConst.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ImmutableConst.java index 8dbe85ffc6e..beacc9d4de7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ImmutableConst.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ImmutableConst.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * * @param data type for {@code tensor} output */ +@OpMetadata( + opType = ImmutableConst.OP_NAME, + inputsClass = ImmutableConst.Inputs.class +) @Operator public final class ImmutableConst extends RawOp implements Operand { /** @@ -48,8 +54,8 @@ public final class ImmutableConst extends RawOp implements Oper private Output tensor; - private ImmutableConst(Operation operation) { - super(operation); + public ImmutableConst(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; tensor = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return tensor; } + @OpInputsMetadata( + outputsClass = ImmutableConst.class + ) public static class Inputs extends RawOpInputs> { /** * Type of the returned tensor. 
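Ops with a variable-length output list, such as DynamicPartition, For and IdentityN above, receive the same visibility and OP_NAME change but populate a list of outputs instead of a single field. A hypothetical condensed variant (MyListOp is not part of this patch):

    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;
    import org.tensorflow.GraphOperation;
    import org.tensorflow.Operand;
    import org.tensorflow.Operation;
    import org.tensorflow.Output;
    import org.tensorflow.op.RawOp;
    import org.tensorflow.op.RawOpInputs;
    import org.tensorflow.op.annotation.OpInputsMetadata;
    import org.tensorflow.op.annotation.OpMetadata;
    import org.tensorflow.types.family.TType;

    // Hypothetical list-output op; mirrors the outputList handling of
    // DynamicPartition, For and IdentityN in the hunks above.
    @OpMetadata(
        opType = MyListOp.OP_NAME,
        inputsClass = MyListOp.Inputs.class
    )
    public final class MyListOp extends RawOp implements Iterable<Operand<TType>> {
      public static final String OP_NAME = "MyListOp";

      private List<Output<?>> output;

      public MyListOp(Operation operation) {
        super(operation, OP_NAME);
        int outputIdx = 0;
        // The length of the "output" list is looked up by name, then the
        // corresponding slice of outputs is captured.
        int outputLength = operation.outputListLength("output");
        output = Arrays.asList(operation.outputList(outputIdx, outputLength));
      }

      @Override
      @SuppressWarnings({"rawtypes", "unchecked"})
      public Iterator<Operand<TType>> iterator() {
        return (Iterator) output.iterator();
      }

      @OpInputsMetadata(
          outputsClass = MyListOp.class
      )
      public static class Inputs extends RawOpInputs<MyListOp> {
        public Inputs(GraphOperation op) {
          super(new MyListOp(op), op, Arrays.asList());
        }
      }
    }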
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InitializeTable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InitializeTable.java index fa23039a392..2350327dfa7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InitializeTable.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InitializeTable.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -33,6 +35,10 @@ /** * Table initializer that takes two tensors for keys and values respectively. */ +@OpMetadata( + opType = InitializeTable.OP_NAME, + inputsClass = InitializeTable.Inputs.class +) @Operator public final class InitializeTable extends RawOp { /** @@ -40,8 +46,8 @@ public final class InitializeTable extends RawOp { */ public static final String OP_NAME = "InitializeTableV2"; - private InitializeTable(Operation operation) { - super(operation); + public InitializeTable(Operation operation) { + super(operation, OP_NAME); } /** @@ -65,6 +71,9 @@ public static InitializeTable create(Scope scope, Operand table return new InitializeTable(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = InitializeTable.class + ) public static class Inputs extends RawOpInputs { /** * Handle to a table which will be initialized. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InitializeTableFromTextFile.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InitializeTableFromTextFile.java index 96f0274ddf7..f01af12b7bf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InitializeTableFromTextFile.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InitializeTableFromTextFile.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -44,6 +46,10 @@ * on {@code delimiter}. * */ +@OpMetadata( + opType = InitializeTableFromTextFile.OP_NAME, + inputsClass = InitializeTableFromTextFile.Inputs.class +) @Operator public final class InitializeTableFromTextFile extends RawOp { /** @@ -51,8 +57,8 @@ public final class InitializeTableFromTextFile extends RawOp { */ public static final String OP_NAME = "InitializeTableFromTextFileV2"; - private InitializeTableFromTextFile(Operation operation) { - super(operation); + public InitializeTableFromTextFile(Operation operation) { + super(operation, OP_NAME); } /** @@ -171,6 +177,9 @@ public Options offset(Long offset) { } } + @OpInputsMetadata( + outputsClass = InitializeTableFromTextFile.class + ) public static class Inputs extends RawOpInputs { /** * Handle to a table which will be initialized. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceAdd.java index b0f8ad1113c..71fcbe428d4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceAdd.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -40,6 +42,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = InplaceAdd.OP_NAME, + inputsClass = InplaceAdd.Inputs.class +) @Operator public final class InplaceAdd extends RawOp implements Operand { /** @@ -49,8 +55,8 @@ public final class InplaceAdd extends RawOp implements Operand< private Output y; - private InplaceAdd(Operation operation) { - super(operation); + public InplaceAdd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = InplaceAdd.class + ) public static class Inputs extends RawOpInputs> { /** * A {@code Tensor} of type T. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceSub.java index 87329d1405f..596a281f92c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceSub.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -41,6 +43,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = InplaceSub.OP_NAME, + inputsClass = InplaceSub.Inputs.class +) @Operator public final class InplaceSub extends RawOp implements Operand { /** @@ -50,8 +56,8 @@ public final class InplaceSub extends RawOp implements Operand< private Output y; - private InplaceSub(Operation operation) { - super(operation); + public InplaceSub(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -92,6 +98,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = InplaceSub.class + ) public static class Inputs extends RawOpInputs> { /** * A {@code Tensor} of type T. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceUpdate.java index b3cc7b8ca35..72237006fe3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceUpdate.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -40,6 +42,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = InplaceUpdate.OP_NAME, + inputsClass = InplaceUpdate.Inputs.class +) @Operator public final class InplaceUpdate extends RawOp implements Operand { /** @@ -49,8 +55,8 @@ public final class InplaceUpdate extends RawOp implements Opera private Output y; - private InplaceUpdate(Operation operation) { - super(operation); + public InplaceUpdate(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = InplaceUpdate.class + ) public static class Inputs extends RawOpInputs> { /** * A tensor of type {@code T}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/IsVariableInitialized.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/IsVariableInitialized.java index 2d386193212..b838486c19f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/IsVariableInitialized.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/IsVariableInitialized.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -36,6 +38,10 @@ * Checks whether a tensor has been initialized. * Outputs boolean scalar indicating whether the tensor has been initialized. */ +@OpMetadata( + opType = IsVariableInitialized.OP_NAME, + inputsClass = IsVariableInitialized.Inputs.class +) @Operator public final class IsVariableInitialized extends RawOp implements Operand { /** @@ -45,8 +51,8 @@ public final class IsVariableInitialized extends RawOp implements Operand private Output isInitialized; - private IsVariableInitialized(Operation operation) { - super(operation); + public IsVariableInitialized(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; isInitialized = operation.output(outputIdx++); } @@ -81,6 +87,9 @@ public Output asOutput() { return isInitialized; } + @OpInputsMetadata( + outputsClass = IsVariableInitialized.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a {@code Variable} node. May be uninitialized. 
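The two annotations make the op type string, the op class and its Inputs class discoverable from one another. These hunks only show the annotations being applied, not declared, so runtime retention is an assumption here; under that assumption, reading the links back could look like this sketch:

    import org.tensorflow.op.annotation.OpInputsMetadata;
    import org.tensorflow.op.annotation.OpMetadata;
    import org.tensorflow.op.core.Fill;

    class MetadataSketch {
      static void inspect() {
        // Assumption: both annotations use RUNTIME retention, which is not
        // shown in this patch.
        OpMetadata meta = Fill.class.getAnnotation(OpMetadata.class);
        String opType = meta.opType();              // equals Fill.OP_NAME
        Class<?> inputsClass = meta.inputsClass();  // Fill.Inputs.class

        OpInputsMetadata inputsMeta =
            Fill.Inputs.class.getAnnotation(OpInputsMetadata.class);
        Class<?> outputsClass = inputsMeta.outputsClass();  // Fill.class
      }
    }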
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/KthOrderStatistic.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/KthOrderStatistic.java index d8d7a3c2411..fe0b69c2112 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/KthOrderStatistic.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/KthOrderStatistic.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; @@ -47,6 +49,10 @@ * equal to the Kth order statistic. The semantics are not the same as * top_k_unique. */ +@OpMetadata( + opType = KthOrderStatistic.OP_NAME, + inputsClass = KthOrderStatistic.Inputs.class +) @Operator public final class KthOrderStatistic extends RawOp implements Operand { /** @@ -56,8 +62,8 @@ public final class KthOrderStatistic extends RawOp implements Operand private Output output; - private KthOrderStatistic(Operation operation) { - super(operation); + public KthOrderStatistic(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -94,6 +100,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = KthOrderStatistic.class + ) public static class Inputs extends RawOpInputs { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LinSpace.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LinSpace.java index b8d1000d67e..085181e808e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LinSpace.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LinSpace.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -42,6 +44,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = LinSpace.OP_NAME, + inputsClass = LinSpace.Inputs.class +) public final class LinSpace extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class LinSpace extends RawOp implements Operand< private Output output; - private LinSpace(Operation operation) { - super(operation); + public LinSpace(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -92,6 +98,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = LinSpace.class + ) public static class Inputs extends RawOpInputs> { /** * 0-D tensor. First entry in the range. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableExport.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableExport.java index d36982e1128..bfc4a05a26c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableExport.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableExport.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * * @param data type for {@code values} output */ +@OpMetadata( + opType = LookupTableExport.OP_NAME, + inputsClass = LookupTableExport.Inputs.class +) @Operator public final class LookupTableExport extends RawOp { /** @@ -50,8 +56,8 @@ public final class LookupTableExport extends R private Output values; - private LookupTableExport(Operation operation) { - super(operation); + public LookupTableExport(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; keys = operation.output(outputIdx++); values = operation.output(outputIdx++); @@ -98,6 +104,9 @@ public Output values() { return values; } + @OpInputsMetadata( + outputsClass = LookupTableExport.class + ) public static class Inputs extends RawOpInputs> { /** * Handle to the table. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableFind.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableFind.java index fb9508e2d30..c9c192867a9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableFind.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableFind.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -40,6 +42,10 @@ * * @param data type for {@code values} output */ +@OpMetadata( + opType = LookupTableFind.OP_NAME, + inputsClass = LookupTableFind.Inputs.class +) @Operator public final class LookupTableFind extends RawOp implements Operand { /** @@ -49,8 +55,8 @@ public final class LookupTableFind extends RawOp implements Ope private Output values; - private LookupTableFind(Operation operation) { - super(operation); + public LookupTableFind(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; values = operation.output(outputIdx++); } @@ -93,6 +99,9 @@ public Output asOutput() { return values; } + @OpInputsMetadata( + outputsClass = LookupTableFind.class + ) public static class Inputs extends RawOpInputs> { /** * Handle to the table. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableImport.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableImport.java index b2483f12b25..0223c4370a8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableImport.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableImport.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -35,6 +37,10 @@ * The tensor {@code keys} must be of the same type as the keys of the table. * The tensor {@code values} must be of the type of the table values. */ +@OpMetadata( + opType = LookupTableImport.OP_NAME, + inputsClass = LookupTableImport.Inputs.class +) @Operator public final class LookupTableImport extends RawOp { /** @@ -42,8 +48,8 @@ public final class LookupTableImport extends RawOp { */ public static final String OP_NAME = "LookupTableImportV2"; - private LookupTableImport(Operation operation) { - super(operation); + public LookupTableImport(Operation operation) { + super(operation, OP_NAME); } /** @@ -67,6 +73,9 @@ public static LookupTableImport create(Scope scope, Operand tab return new LookupTableImport(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = LookupTableImport.class + ) public static class Inputs extends RawOpInputs { /** * Handle to the table. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableInsert.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableInsert.java index 143032f960a..a3b7207c594 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableInsert.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableInsert.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -35,6 +37,10 @@ * The tensor {@code keys} must be of the same type as the keys of the table. * The tensor {@code values} must be of the type of the table values. */ +@OpMetadata( + opType = LookupTableInsert.OP_NAME, + inputsClass = LookupTableInsert.Inputs.class +) @Operator public final class LookupTableInsert extends RawOp { /** @@ -42,8 +48,8 @@ public final class LookupTableInsert extends RawOp { */ public static final String OP_NAME = "LookupTableInsertV2"; - private LookupTableInsert(Operation operation) { - super(operation); + public LookupTableInsert(Operation operation) { + super(operation, OP_NAME); } /** @@ -67,6 +73,9 @@ public static LookupTableInsert create(Scope scope, Operand tab return new LookupTableInsert(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = LookupTableInsert.class + ) public static class Inputs extends RawOpInputs { /** * Handle to the table. 
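For ops with no outputs, such as InitializeTable, LookupTableImport and LookupTableInsert above, the constructor change reduces to forwarding OP_NAME; there is no output field to capture. A hypothetical minimal form (MySideEffectOp is illustrative only):

    import java.util.Arrays;
    import org.tensorflow.GraphOperation;
    import org.tensorflow.Operation;
    import org.tensorflow.op.RawOp;
    import org.tensorflow.op.RawOpInputs;
    import org.tensorflow.op.annotation.OpInputsMetadata;
    import org.tensorflow.op.annotation.OpMetadata;

    // Hypothetical side-effect-only op, mirroring the no-output classes above.
    @OpMetadata(
        opType = MySideEffectOp.OP_NAME,
        inputsClass = MySideEffectOp.Inputs.class
    )
    public final class MySideEffectOp extends RawOp {
      public static final String OP_NAME = "MySideEffectOp";

      public MySideEffectOp(Operation operation) {
        super(operation, OP_NAME);  // nothing else to do: the op has no outputs
      }

      @OpInputsMetadata(
          outputsClass = MySideEffectOp.class
      )
      public static class Inputs extends RawOpInputs<MySideEffectOp> {
        public Inputs(GraphOperation op) {
          super(new MySideEffectOp(op), op, Arrays.asList());
        }
      }
    }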
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableRemove.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableRemove.java
index 1e6a0864a29..26b91e332cc 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableRemove.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableRemove.java
@@ -26,6 +26,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.family.TType;
 
@@ -34,14 +36,18 @@
  * The tensor {@code keys} must of the same type as the keys of the table. Keys not
  * already in the table are silently ignored.
  */
+@OpMetadata(
+    opType = LookupTableRemove.OP_NAME,
+    inputsClass = LookupTableRemove.Inputs.class
+)
 public final class LookupTableRemove extends RawOp {
   /**
    * The name of this op, as known by TensorFlow core engine
    */
   public static final String OP_NAME = "LookupTableRemoveV2";
 
-  private LookupTableRemove(Operation operation) {
-    super(operation);
+  public LookupTableRemove(Operation operation) {
+    super(operation, OP_NAME);
   }
 
   /**
@@ -63,6 +69,9 @@ public static LookupTableRemove create(Scope scope, Operand tab
     return new LookupTableRemove(opBuilder.build());
   }
 
+  @OpInputsMetadata(
+      outputsClass = LookupTableRemove.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      * Handle to the table.
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableSize.java
index e437e22fa0e..406e4a7c8f4 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableSize.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableSize.java
@@ -27,6 +27,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.types.TInt64;
 import org.tensorflow.types.family.TType;
@@ -34,6 +36,10 @@
 /**
  * Computes the number of elements in the given table.
  */
+@OpMetadata(
+    opType = LookupTableSize.OP_NAME,
+    inputsClass = LookupTableSize.Inputs.class
+)
 @Operator
 public final class LookupTableSize extends RawOp implements Operand {
   /**
@@ -43,8 +49,8 @@ public final class LookupTableSize extends RawOp implements Operand {
 
   private Output output;
 
-  private LookupTableSize(Operation operation) {
-    super(operation);
+  public LookupTableSize(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     output = operation.output(outputIdx++);
   }
@@ -79,6 +85,9 @@ public Output asOutput() {
     return output;
   }
 
+  @OpInputsMetadata(
+      outputsClass = LookupTableSize.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      * Handle to the table.
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LoopCond.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LoopCond.java
index 1322b0c6e8f..48a363cda0f 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LoopCond.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LoopCond.java
@@ -27,6 +27,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.types.TBool;
 
@@ -35,6 +37,10 @@
  * This operator represents the loop termination condition used by the
  * "pivot" switches of a loop.
  */
+@OpMetadata(
+    opType = LoopCond.OP_NAME,
+    inputsClass = LoopCond.Inputs.class
+)
 @Operator
 public final class LoopCond extends RawOp implements Operand {
   /**
@@ -44,8 +50,8 @@ public final class LoopCond extends RawOp implements Operand {
 
   private Output output;
 
-  private LoopCond(Operation operation) {
-    super(operation);
+  public LoopCond(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     output = operation.output(outputIdx++);
   }
@@ -80,6 +86,9 @@ public Output asOutput() {
     return output;
   }
 
+  @OpInputsMetadata(
+      outputsClass = LoopCond.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      * A boolean scalar, representing the branch predicate of the Switch op.
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LowerBound.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LowerBound.java
index 53a0a5b7028..7950ad2686b 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LowerBound.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LowerBound.java
@@ -28,6 +28,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.TInt32;
 import org.tensorflow.types.family.TNumber;
@@ -51,6 +53,10 @@
  *
  * @param  data type for {@code output} output
  */
+@OpMetadata(
+    opType = LowerBound.OP_NAME,
+    inputsClass = LowerBound.Inputs.class
+)
 public final class LowerBound extends RawOp implements Operand {
   /**
    * The name of this op, as known by TensorFlow core engine
@@ -59,8 +65,8 @@ public final class LowerBound extends RawOp implements Operan
 
   private Output output;
 
-  private LowerBound(Operation operation) {
-    super(operation);
+  public LowerBound(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     output = operation.output(outputIdx++);
   }
@@ -123,6 +129,9 @@ public Output asOutput() {
     return output;
   }
 
+  @OpInputsMetadata(
+      outputsClass = LowerBound.class
+  )
   public static class Inputs extends RawOpInputs> {
     /**
      * 2-D Tensor where each row is ordered.
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MakeUnique.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MakeUnique.java
index 6e4ba6ab699..bb738fe065b 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MakeUnique.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MakeUnique.java
@@ -27,6 +27,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.types.TFloat32;
 
@@ -37,6 +39,10 @@
  * of the corresponding output element. Behavior for infinite elements is
  * undefined. Behavior for subnormal elements is undefined.
  */
+@OpMetadata(
+    opType = MakeUnique.OP_NAME,
+    inputsClass = MakeUnique.Inputs.class
+)
 @Operator
 public final class MakeUnique extends RawOp implements Operand {
   /**
@@ -46,8 +52,8 @@ public final class MakeUnique extends RawOp implements Operand {
 
   private Output output;
 
-  private MakeUnique(Operation operation) {
-    super(operation);
+  public MakeUnique(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     output = operation.output(outputIdx++);
   }
@@ -82,6 +88,9 @@ public Output asOutput() {
     return output;
   }
 
+  @OpInputsMetadata(
+      outputsClass = MakeUnique.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      * The input input
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapClear.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapClear.java
index 035faa1eac7..25616fab77d 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapClear.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapClear.java
@@ -27,6 +27,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.family.TType;
@@ -34,6 +36,10 @@
 /**
  * Op removes all elements in the underlying container.
  */
+@OpMetadata(
+    opType = MapClear.OP_NAME,
+    inputsClass = MapClear.Inputs.class
+)
 @Operator
 public final class MapClear extends RawOp {
   /**
@@ -41,8 +47,8 @@ public final class MapClear extends RawOp {
    */
   public static final String OP_NAME = "MapClear";
 
-  private MapClear(Operation operation) {
-    super(operation);
+  public MapClear(Operation operation) {
+    super(operation, OP_NAME);
   }
 
   /**
@@ -179,6 +185,9 @@ public Options sharedName(String sharedName) {
     }
   }
 
+  @OpInputsMetadata(
+      outputsClass = MapClear.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      * The capacity attribute
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapDefun.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapDefun.java
index f7762d6dbce..1be93e14437 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapDefun.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapDefun.java
@@ -32,6 +32,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.family.TType;
 
@@ -47,6 +49,10 @@
  *
  * Note that this op is not exposed to users directly, but is invoked in tf.data
  * rewrites.
  */
+@OpMetadata(
+    opType = MapDefun.OP_NAME,
+    inputsClass = MapDefun.Inputs.class
+)
 public final class MapDefun extends RawOp implements Iterable> {
   /**
    * The name of this op, as known by TensorFlow core engine
@@ -56,8 +62,8 @@ public final class MapDefun extends RawOp implements Iterable> {
   private List> output;
 
   @SuppressWarnings("unchecked")
-  private MapDefun(Operation operation) {
-    super(operation);
+  public MapDefun(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     int outputLength = operation.outputListLength("output");
     output = Arrays.asList(operation.outputList(outputIdx, outputLength));
@@ -158,6 +164,9 @@ public Options maxIntraOpParallelism(Long maxIntraOpParallelism) {
     }
   }
 
+  @OpInputsMetadata(
+      outputsClass = MapDefun.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      *
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapIncompleteSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapIncompleteSize.java
index 1f93fd8fae3..01f5a5db918 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapIncompleteSize.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapIncompleteSize.java
@@ -29,6 +29,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.TInt32;
@@ -37,6 +39,10 @@
 /**
  * Op returns the number of incomplete elements in the underlying container.
  */
+@OpMetadata(
+    opType = MapIncompleteSize.OP_NAME,
+    inputsClass = MapIncompleteSize.Inputs.class
+)
 @Operator
 public final class MapIncompleteSize extends RawOp implements Operand {
   /**
@@ -46,8 +52,8 @@ public final class MapIncompleteSize extends RawOp implements Operand {
 
   private Output output;
 
-  private MapIncompleteSize(Operation operation) {
-    super(operation);
+  public MapIncompleteSize(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     output = operation.output(outputIdx++);
   }
@@ -200,6 +206,9 @@ public Options sharedName(String sharedName) {
     }
   }
 
+  @OpInputsMetadata(
+      outputsClass = MapIncompleteSize.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      * The capacity attribute
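
Every file in this patch receives the same three-part change: two new imports, an @OpMetadata annotation that links the op wrapper to its OP_NAME and nested Inputs class, and a constructor widened from private to public that forwards OP_NAME to the RawOp super constructor. For orientation, the two annotations could be declared roughly as in the sketch below. The member names (opType, inputsClass, outputsClass) are taken from how the generated code uses them; the single-file, package-private layout and the retention and target choices are assumptions made only for this sketch, not the actual org.tensorflow.op.annotation sources.

// Sketch only: approximates the two annotations applied throughout this patch.
// Retention, targets, and the package-private one-file layout are illustrative
// assumptions; the real declarations live in org.tensorflow.op.annotation.
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
@interface OpMetadata {
  String opType();        // the wrapper's OP_NAME, e.g. "MapIncompleteSize"

  Class<?> inputsClass(); // the wrapper's nested Inputs class
}

@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
@interface OpInputsMetadata {
  Class<?> outputsClass(); // the op wrapper that owns this Inputs class
}
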
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapPeek.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapPeek.java
index 6c0a543d536..651f1fad54c 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapPeek.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapPeek.java
@@ -30,6 +30,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.TInt32;
@@ -41,6 +43,10 @@
  * underlying container does not contain this key
  * this op will block until it does.
  */
+@OpMetadata(
+    opType = MapPeek.OP_NAME,
+    inputsClass = MapPeek.Inputs.class
+)
 @Operator
 public final class MapPeek extends RawOp implements Iterable> {
   /**
@@ -51,8 +57,8 @@ public final class MapPeek extends RawOp implements Iterable> {
   private List> values;
 
   @SuppressWarnings("unchecked")
-  private MapPeek(Operation operation) {
-    super(operation);
+  public MapPeek(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     int valuesLength = operation.outputListLength("values");
     values = Arrays.asList(operation.outputList(outputIdx, valuesLength));
@@ -212,6 +218,9 @@ public Options sharedName(String sharedName) {
     }
   }
 
+  @OpInputsMetadata(
+      outputsClass = MapPeek.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      * The key input
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapSize.java
index 30beae4600a..df150021bc0 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapSize.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapSize.java
@@ -29,6 +29,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.TInt32;
@@ -37,6 +39,10 @@
 /**
  * Op returns the number of elements in the underlying container.
  */
+@OpMetadata(
+    opType = MapSize.OP_NAME,
+    inputsClass = MapSize.Inputs.class
+)
 @Operator
 public final class MapSize extends RawOp implements Operand {
   /**
@@ -46,8 +52,8 @@ public final class MapSize extends RawOp implements Operand {
 
   private Output output;
 
-  private MapSize(Operation operation) {
-    super(operation);
+  public MapSize(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     output = operation.output(outputIdx++);
   }
@@ -200,6 +206,9 @@ public Options sharedName(String sharedName) {
     }
   }
 
+  @OpInputsMetadata(
+      outputsClass = MapSize.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      * The capacity attribute
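
The repeated constructor change above is not cosmetic: the old private X(Operation) form was reachable only through the static create(...) factory, whereas the new public X(Operation) form lets other code re-wrap an operation that already exists in the graph. Passing OP_NAME to the RawOp super constructor presumably allows the base class to confirm that the wrapped operation really has the expected type. Below is a minimal self-contained sketch of that idea, with OperationLike and RawOpLike standing in for the real org.tensorflow types, whose implementations are not part of this patch.

// Stand-ins for org.tensorflow.Operation and org.tensorflow.op.RawOp; the real
// classes are not shown in this patch, so the checking behaviour is an assumption.
interface OperationLike {
  String type(); // registered op type name, e.g. "MapSize"
}

abstract class RawOpLike {
  protected final OperationLike operation;

  protected RawOpLike(OperationLike operation, String expectedOpType) {
    if (!expectedOpType.equals(operation.type())) {
      throw new IllegalArgumentException(
          "Expected a " + expectedOpType + " operation, got " + operation.type());
    }
    this.operation = operation;
  }
}

// Mirrors the generated pattern: a public constructor that re-wraps an existing
// operation and forwards the expected op name to the base class.
final class MapSizeLike extends RawOpLike {
  static final String OP_NAME = "MapSize";

  MapSizeLike(OperationLike operation) {
    super(operation, OP_NAME);
  }
}
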
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapStage.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapStage.java
index 2a23b4711f3..f6638c34873 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapStage.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapStage.java
@@ -28,6 +28,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.TInt32;
@@ -37,6 +39,10 @@
 /**
  * Stage (key, values) in the underlying container which behaves like a hashtable.
  */
+@OpMetadata(
+    opType = MapStage.OP_NAME,
+    inputsClass = MapStage.Inputs.class
+)
 @Operator
 public final class MapStage extends RawOp {
   /**
@@ -44,8 +50,8 @@ public final class MapStage extends RawOp {
    */
   public static final String OP_NAME = "MapStage";
 
-  private MapStage(Operation operation) {
-    super(operation);
+  public MapStage(Operation operation) {
+    super(operation, OP_NAME);
   }
 
   /**
@@ -193,6 +199,9 @@ public Options sharedName(String sharedName) {
     }
   }
 
+  @OpInputsMetadata(
+      outputsClass = MapStage.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      * int64
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapUnstage.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapUnstage.java
index 7eb18247706..f5d26ae1171 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapUnstage.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapUnstage.java
@@ -30,6 +30,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.TInt32;
@@ -41,6 +43,10 @@
  * from the underlying container.   If the underlying container
  * does not contain this key, the op will block until it does.
  */
+@OpMetadata(
+    opType = MapUnstage.OP_NAME,
+    inputsClass = MapUnstage.Inputs.class
+)
 @Operator
 public final class MapUnstage extends RawOp implements Iterable> {
   /**
@@ -51,8 +57,8 @@ public final class MapUnstage extends RawOp implements Iterable>
   private List> values;
 
   @SuppressWarnings("unchecked")
-  private MapUnstage(Operation operation) {
-    super(operation);
+  public MapUnstage(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     int valuesLength = operation.outputListLength("values");
     values = Arrays.asList(operation.outputList(outputIdx, valuesLength));
@@ -212,6 +218,9 @@ public Options sharedName(String sharedName) {
     }
   }
 
+  @OpInputsMetadata(
+      outputsClass = MapUnstage.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      * The key input
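
Note that the link created by these annotations runs in both directions: @OpMetadata on the op class names its Inputs class, and @OpInputsMetadata on the Inputs class names the op class, so tooling can walk from either side to the other. Assuming the annotations are retained at runtime (if they are class-retained, the same walk would happen inside an annotation processor instead), a consistency check over that two-way link could look like the following sketch.

// Illustrative only: verifies that an op wrapper and its Inputs class point at
// each other through @OpMetadata / @OpInputsMetadata, as generated in this patch.
import org.tensorflow.op.annotation.OpInputsMetadata;
import org.tensorflow.op.annotation.OpMetadata;

final class MetadataLinkCheck {
  static boolean isConsistent(Class<?> opClass) {
    OpMetadata op = opClass.getAnnotation(OpMetadata.class);
    if (op == null) {
      return false; // not an annotated op wrapper
    }
    OpInputsMetadata inputs = op.inputsClass().getAnnotation(OpInputsMetadata.class);
    return inputs != null && inputs.outputsClass().equals(opClass);
  }

  private MetadataLinkCheck() {
  }
}
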
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapUnstageNoKey.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapUnstageNoKey.java
index 753817a5d3b..e1975a4a4e2 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapUnstageNoKey.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MapUnstageNoKey.java
@@ -29,6 +29,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.TInt32;
@@ -40,6 +42,10 @@
  * from the underlying container.   If the underlying container
  * does not contain elements, the op will block until it does.
  */
+@OpMetadata(
+    opType = MapUnstageNoKey.OP_NAME,
+    inputsClass = MapUnstageNoKey.Inputs.class
+)
 @Operator
 public final class MapUnstageNoKey extends RawOp {
   /**
@@ -52,8 +58,8 @@ public final class MapUnstageNoKey extends RawOp {
   private List> values;
 
   @SuppressWarnings("unchecked")
-  private MapUnstageNoKey(Operation operation) {
-    super(operation);
+  public MapUnstageNoKey(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     key = operation.output(outputIdx++);
     int valuesLength = operation.outputListLength("values");
@@ -215,6 +221,9 @@ public Options sharedName(String sharedName) {
     }
   }
 
+  @OpInputsMetadata(
+      outputsClass = MapUnstageNoKey.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      * The indices input
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Max.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Max.java
index 71ae0f7c0e5..bc5577a29f3 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Max.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Max.java
@@ -27,6 +27,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.family.TNumber;
@@ -40,6 +42,10 @@
  *
  * @param  data type for {@code output} output
  */
+@OpMetadata(
+    opType = Max.OP_NAME,
+    inputsClass = Max.Inputs.class
+)
 @Operator
 public final class Max extends RawOp implements Operand {
   /**
@@ -49,8 +55,8 @@ public final class Max extends RawOp implements Operand {
 
   private Output output;
 
-  private Max(Operation operation) {
-    super(operation);
+  public Max(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     output = operation.output(outputIdx++);
   }
@@ -129,6 +135,9 @@ public Options keepDims(Boolean keepDims) {
     }
   }
 
+  @OpInputsMetadata(
+      outputsClass = Max.class
+  )
   public static class Inputs extends RawOpInputs> {
     /**
      * The tensor to reduce.
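
Because each wrapper now exposes its op type as metadata rather than only as a detail of a private constructor, a registry can map TensorFlow op names (such as "Max" above or "Merge" in the next file) back to the generated Java classes. The sketch below shows such an index under the same runtime-retention assumption; the real PR wires this up inside the library rather than through a hand-rolled map like this one.

// Illustrative index from op type name to annotated wrapper class; not part of
// the patch itself, just a demonstration of what @OpMetadata makes possible.
import java.util.HashMap;
import java.util.Map;
import org.tensorflow.op.annotation.OpMetadata;

final class OpWrapperIndex {
  private final Map<String, Class<?>> byOpType = new HashMap<>();

  void register(Class<?> wrapperClass) {
    OpMetadata meta = wrapperClass.getAnnotation(OpMetadata.class);
    if (meta != null) {
      byOpType.put(meta.opType(), wrapperClass);
    }
  }

  Class<?> wrapperFor(String opType) {
    return byOpType.get(opType);
  }
}
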
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Merge.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Merge.java
index 1117e800c59..e06fe25d533 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Merge.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Merge.java
@@ -28,6 +28,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.TInt32;
@@ -42,6 +44,10 @@
  *
  * @param  data type for {@code output} output
  */
+@OpMetadata(
+    opType = Merge.OP_NAME,
+    inputsClass = Merge.Inputs.class
+)
 @Operator
 public final class Merge extends RawOp {
   /**
@@ -53,8 +59,8 @@ public final class Merge extends RawOp {
 
   private Output valueIndex;
 
-  private Merge(Operation operation) {
-    super(operation);
+  public Merge(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     output = operation.output(outputIdx++);
     valueIndex = operation.output(outputIdx++);
@@ -95,6 +101,9 @@ public Output valueIndex() {
     return valueIndex;
   }
 
+  @OpInputsMetadata(
+      outputsClass = Merge.class
+  )
   public static class Inputs extends RawOpInputs> {
     /**
      * The input tensors, exactly one of which will become available.
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Min.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Min.java
index 695c92a70e8..8e7700b2dba 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Min.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Min.java
@@ -27,6 +27,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.family.TNumber;
@@ -40,6 +42,10 @@
  *
  * @param  data type for {@code output} output
  */
+@OpMetadata(
+    opType = Min.OP_NAME,
+    inputsClass = Min.Inputs.class
+)
 @Operator
 public final class Min extends RawOp implements Operand {
   /**
@@ -49,8 +55,8 @@ public final class Min extends RawOp implements Operand {
 
   private Output output;
 
-  private Min(Operation operation) {
-    super(operation);
+  public Min(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     output = operation.output(outputIdx++);
   }
@@ -129,6 +135,9 @@ public Options keepDims(Boolean keepDims) {
     }
   }
 
+  @OpInputsMetadata(
+      outputsClass = Min.class
+  )
   public static class Inputs extends RawOpInputs> {
     /**
      * The tensor to reduce.
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPad.java
index d557fc71ea5..192e74449da 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPad.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPad.java
@@ -27,6 +27,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.family.TNumber;
@@ -58,6 +60,10 @@
  *
  * @param  data type for {@code output} output
  */
+@OpMetadata(
+    opType = MirrorPad.OP_NAME,
+    inputsClass = MirrorPad.Inputs.class
+)
 @Operator
 public final class MirrorPad extends RawOp implements Operand {
   /**
@@ -67,8 +73,8 @@ public final class MirrorPad extends RawOp implements Operand output;
 
-  private MirrorPad(Operation operation) {
-    super(operation);
+  public MirrorPad(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     output = operation.output(outputIdx++);
   }
@@ -114,6 +120,9 @@ public Output asOutput() {
     return output;
   }
 
+  @OpInputsMetadata(
+      outputsClass = MirrorPad.class
+  )
   public static class Inputs extends RawOpInputs> {
     /**
      * The input tensor to be padded.
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPadGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPadGrad.java
index 6d4efbf43c3..8f14d7d0ddb 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPadGrad.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPadGrad.java
@@ -27,6 +27,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.family.TNumber;
 import org.tensorflow.types.family.TType;
@@ -50,6 +52,10 @@
  *
  * @param  data type for {@code output} output
  */
+@OpMetadata(
+    opType = MirrorPadGrad.OP_NAME,
+    inputsClass = MirrorPadGrad.Inputs.class
+)
 public final class MirrorPadGrad extends RawOp implements Operand {
   /**
    * The name of this op, as known by TensorFlow core engine
@@ -58,8 +64,8 @@ public final class MirrorPadGrad extends RawOp implements Opera
 
   private Output output;
 
-  private MirrorPadGrad(Operation operation) {
-    super(operation);
+  public MirrorPadGrad(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     output = operation.output(outputIdx++);
   }
@@ -101,6 +107,9 @@ public Output asOutput() {
     return output;
   }
 
+  @OpInputsMetadata(
+      outputsClass = MirrorPadGrad.class
+  )
   public static class Inputs extends RawOpInputs> {
     /**
      * The input tensor to be folded.
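
The pattern is applied even to wrappers without an @Operator annotation, such as MirrorPadGrad above and the deprecated Nccl ops later in the patch, which suggests the metadata is aimed at internal machinery (for example gradient registration) rather than at the public Ops surface. With the constructors now public, such a wrapper can also be instantiated reflectively from a plain operation; the sketch below assumes the org.tensorflow.Operation parameter type used by these constructors, while the reflective approach itself is only an illustration, not the mechanism this PR uses.

// Illustrative: instantiate a generated wrapper through its now-public
// (Operation) constructor, e.g. a class previously looked up by op type name.
import java.lang.reflect.Constructor;
import org.tensorflow.Operation;

final class WrapperFactory {
  static Object wrap(Class<?> wrapperClass, Operation operation)
      throws ReflectiveOperationException {
    Constructor<?> ctor = wrapperClass.getConstructor(Operation.class);
    return ctor.newInstance(operation);
  }

  private WrapperFactory() {
  }
}
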
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MlirPassthroughOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MlirPassthroughOp.java
index 67dda410e1f..8881c597d79 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MlirPassthroughOp.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MlirPassthroughOp.java
@@ -30,6 +30,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.family.TType;
@@ -65,6 +67,10 @@
  * graph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), tf.TensorSpec([10], tf.float32)).graph.as_graph_def()
  * 
*/ +@OpMetadata( + opType = MlirPassthroughOp.OP_NAME, + inputsClass = MlirPassthroughOp.Inputs.class +) @Operator public final class MlirPassthroughOp extends RawOp implements Iterable> { /** @@ -75,8 +81,8 @@ public final class MlirPassthroughOp extends RawOp implements Iterable> outputs; @SuppressWarnings("unchecked") - private MlirPassthroughOp(Operation operation) { - super(operation); + public MlirPassthroughOp(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputsLength = operation.outputListLength("outputs"); outputs = Arrays.asList(operation.outputList(outputIdx, outputsLength)); @@ -119,6 +125,9 @@ public Iterator> iterator() { return (Iterator) outputs.iterator(); } + @OpInputsMetadata( + outputsClass = MlirPassthroughOp.class + ) public static class Inputs extends RawOpInputs { /** * The inputs input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutableDenseHashTable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutableDenseHashTable.java index ad2418679a5..6e92465db2b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutableDenseHashTable.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutableDenseHashTable.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -41,6 +43,10 @@ * values. Each value must be a scalar. Data can be inserted into the table using * the insert operations. It does not support the initialization operation. */ +@OpMetadata( + opType = MutableDenseHashTable.OP_NAME, + inputsClass = MutableDenseHashTable.Inputs.class +) @Operator public final class MutableDenseHashTable extends RawOp implements Operand { /** @@ -51,8 +57,8 @@ public final class MutableDenseHashTable extends RawOp implements Operand private Output tableHandle; @SuppressWarnings("unchecked") - private MutableDenseHashTable(Operation operation) { - super(operation); + public MutableDenseHashTable(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; tableHandle = operation.output(outputIdx++); } @@ -273,6 +279,9 @@ public Options maxLoadFactor(Float maxLoadFactor) { } } + @OpInputsMetadata( + outputsClass = MutableDenseHashTable.class + ) public static class Inputs extends RawOpInputs { /** * The key used to represent empty key buckets internally. Must not diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutableHashTable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutableHashTable.java index dec668bf3b0..80f45de4bd2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutableHashTable.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutableHashTable.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * values. 
Each value must be a scalar. Data can be inserted into the table using * the insert operations. It does not support the initialization operation. */ +@OpMetadata( + opType = MutableHashTable.OP_NAME, + inputsClass = MutableHashTable.Inputs.class +) @Operator public final class MutableHashTable extends RawOp implements Operand { /** @@ -48,8 +54,8 @@ public final class MutableHashTable extends RawOp implements Operand { private Output tableHandle; @SuppressWarnings("unchecked") - private MutableHashTable(Operation operation) { - super(operation); + public MutableHashTable(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; tableHandle = operation.output(outputIdx++); } @@ -187,6 +193,9 @@ public Options useNodeNameSharing(Boolean useNodeNameSharing) { } } + @OpInputsMetadata( + outputsClass = MutableHashTable.class + ) public static class Inputs extends RawOpInputs { /** * If non-empty, this table is placed in the given container. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutableHashTableOfTensors.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutableHashTableOfTensors.java index 8c11236dc3f..8e6220eb7eb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutableHashTableOfTensors.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutableHashTableOfTensors.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * values. Each value must be a vector. Data can be inserted into the table using * the insert operations. It does not support the initialization operation. */ +@OpMetadata( + opType = MutableHashTableOfTensors.OP_NAME, + inputsClass = MutableHashTableOfTensors.Inputs.class +) @Operator public final class MutableHashTableOfTensors extends RawOp implements Operand { /** @@ -49,8 +55,8 @@ public final class MutableHashTableOfTensors extends RawOp implements Operand tableHandle; @SuppressWarnings("unchecked") - private MutableHashTableOfTensors(Operation operation) { - super(operation); + public MutableHashTableOfTensors(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; tableHandle = operation.output(outputIdx++); } @@ -212,6 +218,9 @@ public Options valueShape(Shape valueShape) { } } + @OpInputsMetadata( + outputsClass = MutableHashTableOfTensors.class + ) public static class Inputs extends RawOpInputs { /** * If non-empty, this table is placed in the given container. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Mutex.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Mutex.java index 0c50b069a1d..5bcaa8f998a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Mutex.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Mutex.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** * Creates a Mutex resource that can be locked by {@code MutexLock}. */ +@OpMetadata( + opType = Mutex.OP_NAME, + inputsClass = Mutex.Inputs.class +) @Operator public final class Mutex extends RawOp implements Operand { /** @@ -43,8 +49,8 @@ public final class Mutex extends RawOp implements Operand { private Output resource; @SuppressWarnings("unchecked") - private Mutex(Operation operation) { - super(operation); + public Mutex(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; resource = operation.output(outputIdx++); } @@ -147,6 +153,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = Mutex.class + ) public static class Inputs extends RawOpInputs { /** * If non-empty, this variable is placed in the given container. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutexLock.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutexLock.java index 080673e5d78..2e837ea676f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutexLock.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MutexLock.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; @@ -66,6 +68,10 @@ *

It is also useful if two separate functions must share a resource, but we * wish to ensure the usage is exclusive. */ +@OpMetadata( + opType = MutexLock.OP_NAME, + inputsClass = MutexLock.Inputs.class +) @Operator public final class MutexLock extends RawOp implements Operand { /** @@ -76,8 +82,8 @@ public final class MutexLock extends RawOp implements Operand { private Output mutexLock; @SuppressWarnings("unchecked") - private MutexLock(Operation operation) { - super(operation); + public MutexLock(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; mutexLock = operation.output(outputIdx++); } @@ -115,6 +121,9 @@ public Output asOutput() { return (Output) mutexLock; } + @OpInputsMetadata( + outputsClass = MutexLock.class + ) public static class Inputs extends RawOpInputs { /** * The mutex resource to lock. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclAllReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclAllReduce.java index a59d41302c5..9756119224b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclAllReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclAllReduce.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -47,6 +49,10 @@ * * @deprecated use {@link org.tensorflow.op.distribute.NcclAllReduce} instead */ +@OpMetadata( + opType = NcclAllReduce.OP_NAME, + inputsClass = NcclAllReduce.Inputs.class +) @Deprecated public final class NcclAllReduce extends RawOp implements Operand { /** @@ -56,8 +62,8 @@ public final class NcclAllReduce extends RawOp implements Ope private Output data; - private NcclAllReduce(Operation operation) { - super(operation); + public NcclAllReduce(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; data = operation.output(outputIdx++); } @@ -100,6 +106,9 @@ public Output asOutput() { return data; } + @OpInputsMetadata( + outputsClass = NcclAllReduce.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclBroadcast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclBroadcast.java index c8b9e6d3d88..328ef159e3b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclBroadcast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclBroadcast.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -44,6 +46,10 @@ * * @deprecated use {@link org.tensorflow.op.distribute.NcclBroadcast} instead */ +@OpMetadata( + opType = NcclBroadcast.OP_NAME, + inputsClass = NcclBroadcast.Inputs.class +) @Deprecated public final class NcclBroadcast extends RawOp implements Operand { /** @@ -53,8 +59,8 @@ public final class NcclBroadcast extends RawOp implements Ope private Output output; - private NcclBroadcast(Operation 
operation) { - super(operation); + public NcclBroadcast(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -93,6 +99,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = NcclBroadcast.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclReduce.java index 97f3b57983d..f748163d26b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclReduce.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -44,6 +46,10 @@ * * @deprecated use {@link org.tensorflow.op.distribute.NcclReduce} instead */ +@OpMetadata( + opType = NcclReduce.OP_NAME, + inputsClass = NcclReduce.Inputs.class +) @Deprecated public final class NcclReduce extends RawOp implements Operand { /** @@ -53,8 +59,8 @@ public final class NcclReduce extends RawOp implements Operan private Output data; - private NcclReduce(Operation operation) { - super(operation); + public NcclReduce(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; data = operation.output(outputIdx++); } @@ -93,6 +99,9 @@ public Output asOutput() { return data; } + @OpInputsMetadata( + outputsClass = NcclReduce.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NextIteration.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NextIteration.java index 63f3133570e..89639dc0df2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NextIteration.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NextIteration.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = NextIteration.OP_NAME, + inputsClass = NextIteration.Inputs.class +) @Operator public final class NextIteration extends RawOp implements Operand { /** @@ -45,8 +51,8 @@ public final class NextIteration extends RawOp implements Opera private Output output; - private NextIteration(Operation operation) { - super(operation); + public NextIteration(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = NextIteration.class + ) public static class Inputs extends RawOpInputs> { /** * The tensor to be made available to the next iteration. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NoOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NoOp.java index 1fd31154b5d..c0a17a52b07 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NoOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NoOp.java @@ -25,11 +25,17 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; /** * Does nothing. Only useful as a placeholder for control edges. */ +@OpMetadata( + opType = NoOp.OP_NAME, + inputsClass = NoOp.Inputs.class +) @Operator public final class NoOp extends RawOp { /** @@ -37,8 +43,8 @@ public final class NoOp extends RawOp { */ public static final String OP_NAME = "NoOp"; - private NoOp(Operation operation) { - super(operation); + public NoOp(Operation operation) { + super(operation, OP_NAME); } /** @@ -55,6 +61,9 @@ public static NoOp create(Scope scope) { return new NoOp(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = NoOp.class + ) public static class Inputs extends RawOpInputs { public Inputs(GraphOperation op) { super(new NoOp(op), op, Arrays.asList()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OneHot.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OneHot.java index 52e2c895139..90a6701ea87 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OneHot.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OneHot.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -112,6 +114,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = OneHot.OP_NAME, + inputsClass = OneHot.Inputs.class +) @Operator public final class OneHot extends RawOp implements Operand { /** @@ -121,8 +127,8 @@ public final class OneHot extends RawOp implements Operand { private Output output; - private OneHot(Operation operation) { - super(operation); + public OneHot(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -204,6 +210,9 @@ public Options axis(Long axis) { } } + @OpInputsMetadata( + outputsClass = OneHot.class + ) public static class Inputs extends RawOpInputs> { /** * A tensor of indices. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OnesLike.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OnesLike.java index cba88e78d14..9e5255618ed 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OnesLike.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OnesLike.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = OnesLike.OP_NAME, + inputsClass = OnesLike.Inputs.class +) @Operator public final class OnesLike extends RawOp implements Operand { /** @@ -45,8 +51,8 @@ public final class OnesLike extends RawOp implements Operand private Output y; - private OnesLike(Operation operation) { - super(operation); + public OnesLike(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = OnesLike.class + ) public static class Inputs extends RawOpInputs> { /** * a tensor of type T. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapClear.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapClear.java index 7899b26b3a3..7590b5e7c7b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapClear.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapClear.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -34,6 +36,10 @@ /** * Op removes all elements in the underlying container. 
*/ +@OpMetadata( + opType = OrderedMapClear.OP_NAME, + inputsClass = OrderedMapClear.Inputs.class +) @Operator public final class OrderedMapClear extends RawOp { /** @@ -41,8 +47,8 @@ public final class OrderedMapClear extends RawOp { */ public static final String OP_NAME = "OrderedMapClear"; - private OrderedMapClear(Operation operation) { - super(operation); + public OrderedMapClear(Operation operation) { + super(operation, OP_NAME); } /** @@ -179,6 +185,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = OrderedMapClear.class + ) public static class Inputs extends RawOpInputs { /** * The capacity attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapIncompleteSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapIncompleteSize.java index f04fbc1c64b..9ea80c8e8fb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapIncompleteSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapIncompleteSize.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -37,6 +39,10 @@ /** * Op returns the number of incomplete elements in the underlying container. */ +@OpMetadata( + opType = OrderedMapIncompleteSize.OP_NAME, + inputsClass = OrderedMapIncompleteSize.Inputs.class +) @Operator public final class OrderedMapIncompleteSize extends RawOp implements Operand { /** @@ -46,8 +52,8 @@ public final class OrderedMapIncompleteSize extends RawOp implements Operand output; - private OrderedMapIncompleteSize(Operation operation) { - super(operation); + public OrderedMapIncompleteSize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -200,6 +206,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = OrderedMapIncompleteSize.class + ) public static class Inputs extends RawOpInputs { /** * The capacity attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapPeek.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapPeek.java index 91e50b0b013..47a76d27214 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapPeek.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapPeek.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -42,6 +44,10 @@ * this op will block until it does. This Op is optimized for * performance. 
*/ +@OpMetadata( + opType = OrderedMapPeek.OP_NAME, + inputsClass = OrderedMapPeek.Inputs.class +) @Operator public final class OrderedMapPeek extends RawOp implements Iterable> { /** @@ -52,8 +58,8 @@ public final class OrderedMapPeek extends RawOp implements Iterable> values; @SuppressWarnings("unchecked") - private OrderedMapPeek(Operation operation) { - super(operation); + public OrderedMapPeek(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int valuesLength = operation.outputListLength("values"); values = Arrays.asList(operation.outputList(outputIdx, valuesLength)); @@ -213,6 +219,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = OrderedMapPeek.class + ) public static class Inputs extends RawOpInputs { /** * The key input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapSize.java index f8f939ba8f4..d680834b582 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapSize.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -37,6 +39,10 @@ /** * Op returns the number of elements in the underlying container. */ +@OpMetadata( + opType = OrderedMapSize.OP_NAME, + inputsClass = OrderedMapSize.Inputs.class +) @Operator public final class OrderedMapSize extends RawOp implements Operand { /** @@ -46,8 +52,8 @@ public final class OrderedMapSize extends RawOp implements Operand { private Output output; - private OrderedMapSize(Operation operation) { - super(operation); + public OrderedMapSize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -200,6 +206,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = OrderedMapSize.class + ) public static class Inputs extends RawOpInputs { /** * The capacity attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapStage.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapStage.java index 14edbf02357..320da76943d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapStage.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapStage.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -38,6 +40,10 @@ * Stage (key, values) in the underlying container which behaves like a ordered * associative container. Elements are ordered by key. 
*/ +@OpMetadata( + opType = OrderedMapStage.OP_NAME, + inputsClass = OrderedMapStage.Inputs.class +) @Operator public final class OrderedMapStage extends RawOp { /** @@ -45,8 +51,8 @@ public final class OrderedMapStage extends RawOp { */ public static final String OP_NAME = "OrderedMapStage"; - private OrderedMapStage(Operation operation) { - super(operation); + public OrderedMapStage(Operation operation) { + super(operation, OP_NAME); } /** @@ -194,6 +200,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = OrderedMapStage.class + ) public static class Inputs extends RawOpInputs { /** * int64 diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapUnstage.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapUnstage.java index e6596e04550..da638467953 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapUnstage.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapUnstage.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -41,6 +43,10 @@ * from the underlying container. If the underlying container * does not contain this key, the op will block until it does. */ +@OpMetadata( + opType = OrderedMapUnstage.OP_NAME, + inputsClass = OrderedMapUnstage.Inputs.class +) @Operator public final class OrderedMapUnstage extends RawOp implements Iterable> { /** @@ -51,8 +57,8 @@ public final class OrderedMapUnstage extends RawOp implements Iterable> values; @SuppressWarnings("unchecked") - private OrderedMapUnstage(Operation operation) { - super(operation); + public OrderedMapUnstage(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int valuesLength = operation.outputListLength("values"); values = Arrays.asList(operation.outputList(outputIdx, valuesLength)); @@ -212,6 +218,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = OrderedMapUnstage.class + ) public static class Inputs extends RawOpInputs { /** * The key input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapUnstageNoKey.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapUnstageNoKey.java index 7bc8fb6219b..cc08b12987b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapUnstageNoKey.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OrderedMapUnstageNoKey.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -40,6 +42,10 @@ * key from the underlying container. If the underlying container * does not contain elements, the op will block until it does. 
*/ +@OpMetadata( + opType = OrderedMapUnstageNoKey.OP_NAME, + inputsClass = OrderedMapUnstageNoKey.Inputs.class +) @Operator public final class OrderedMapUnstageNoKey extends RawOp { /** @@ -52,8 +58,8 @@ public final class OrderedMapUnstageNoKey extends RawOp { private List> values; @SuppressWarnings("unchecked") - private OrderedMapUnstageNoKey(Operation operation) { - super(operation); + public OrderedMapUnstageNoKey(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; key = operation.output(outputIdx++); int valuesLength = operation.outputListLength("values"); @@ -215,6 +221,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = OrderedMapUnstageNoKey.class + ) public static class Inputs extends RawOpInputs { /** * The indices input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Pad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Pad.java index 434a94076fa..84dc8946ab9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Pad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Pad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -57,6 +59,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Pad.OP_NAME, + inputsClass = Pad.Inputs.class +) @Operator public final class Pad extends RawOp implements Operand { /** @@ -66,8 +72,8 @@ public final class Pad extends RawOp implements Operand { private Output output; - private Pad(Operation operation) { - super(operation); + public Pad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -108,6 +114,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Pad.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelConcat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelConcat.java index 093ac6c690d..7f8e1fb9b88 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelConcat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelConcat.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -51,6 +53,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ParallelConcat.OP_NAME, + inputsClass = ParallelConcat.Inputs.class +) @Operator public final class ParallelConcat extends RawOp implements Operand { /** @@ -60,8 +66,8 @@ public final class ParallelConcat extends RawOp implements Oper private Output output; - private ParallelConcat(Operation operation) { - super(operation); + public ParallelConcat(Operation operation) { + super(operation, 
OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -102,6 +108,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = ParallelConcat.class + ) public static class Inputs extends RawOpInputs> { /** * Tensors to be concatenated. All must have size 1 in the first dimension diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelDynamicStitch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelDynamicStitch.java index abba8e503f3..04a68a9162c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelDynamicStitch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelDynamicStitch.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -90,6 +92,10 @@ * * @param data type for {@code merged} output */ +@OpMetadata( + opType = ParallelDynamicStitch.OP_NAME, + inputsClass = ParallelDynamicStitch.Inputs.class +) @Operator public final class ParallelDynamicStitch extends RawOp implements Operand { /** @@ -99,8 +105,8 @@ public final class ParallelDynamicStitch extends RawOp implemen private Output merged; - private ParallelDynamicStitch(Operation operation) { - super(operation); + public ParallelDynamicStitch(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; merged = operation.output(outputIdx++); } @@ -139,6 +145,9 @@ public Output asOutput() { return merged; } + @OpInputsMetadata( + outputsClass = ParallelDynamicStitch.class + ) public static class Inputs extends RawOpInputs> { /** * The indices input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Placeholder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Placeholder.java index 11a54982337..58663caf629 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Placeholder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Placeholder.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -41,6 +43,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Placeholder.OP_NAME, + inputsClass = Placeholder.Inputs.class +) @Operator public final class Placeholder extends RawOp implements Operand { /** @@ -50,8 +56,8 @@ public final class Placeholder extends RawOp implements Operand private Output output; - private Placeholder(Operation operation) { - super(operation); + public Placeholder(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -129,6 +135,9 @@ public Options shape(Shape shape) { } } + @OpInputsMetadata( + outputsClass = Placeholder.class + ) public static class Inputs extends RawOpInputs> { /** * The type of elements in the tensor. 
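Every hunk in this region applies the same generated pattern: the op class gains @OpMetadata(opType = ..., inputsClass = ...), its nested Inputs class gains @OpInputsMetadata(outputsClass = ...), and the wrapper constructor becomes public and passes OP_NAME to the RawOp super constructor. A minimal sketch of reading that metadata back via reflection, assuming the two annotations are retained at runtime (retention is not shown in these hunks):

    import org.tensorflow.op.annotation.OpInputsMetadata;
    import org.tensorflow.op.annotation.OpMetadata;
    import org.tensorflow.op.core.Pad;

    public class OpMetadataSketch {
      public static void main(String[] args) {
        // Assumption: both annotations use runtime retention; otherwise
        // getAnnotation(...) returns null and this sketch prints nothing.
        OpMetadata op = Pad.class.getAnnotation(OpMetadata.class);
        OpInputsMetadata inputs = Pad.Inputs.class.getAnnotation(OpInputsMetadata.class);
        if (op != null && inputs != null) {
          System.out.println(op.opType());           // the value of Pad.OP_NAME
          System.out.println(op.inputsClass());      // class org.tensorflow.op.core.Pad$Inputs
          System.out.println(inputs.outputsClass()); // class org.tensorflow.op.core.Pad
        }
      }
    }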
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/PlaceholderWithDefault.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/PlaceholderWithDefault.java index dec98e433f9..c12c8cbe9a9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/PlaceholderWithDefault.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/PlaceholderWithDefault.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = PlaceholderWithDefault.OP_NAME, + inputsClass = PlaceholderWithDefault.Inputs.class +) @Operator public final class PlaceholderWithDefault extends RawOp implements Operand { /** @@ -46,8 +52,8 @@ public final class PlaceholderWithDefault extends RawOp impleme private Output output; - private PlaceholderWithDefault(Operation operation) { - super(operation); + public PlaceholderWithDefault(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -86,6 +92,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = PlaceholderWithDefault.class + ) public static class Inputs extends RawOpInputs> { /** * The default value to produce when {@code output} is not fed. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Print.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Print.java index d957b42e290..5314118c811 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Print.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Print.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; @@ -33,6 +35,10 @@ * Prints a string scalar. * Prints a string scalar to the desired output_stream. */ +@OpMetadata( + opType = Print.OP_NAME, + inputsClass = Print.Inputs.class +) @Operator public final class Print extends RawOp { /** @@ -40,8 +46,8 @@ public final class Print extends RawOp { */ public static final String OP_NAME = "PrintV2"; - private Print(Operation operation) { - super(operation); + public Print(Operation operation) { + super(operation, OP_NAME); } /** @@ -125,6 +131,9 @@ public Options end(String end) { } } + @OpInputsMetadata( + outputsClass = Print.class + ) public static class Inputs extends RawOpInputs { /** * The string scalar to print. 
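The Print wrapper above keeps its Options setters (end, output_stream) unchanged; only the annotations and constructor visibility differ. A small eager-mode usage sketch, assuming the @Operator endpoint keeps the default name "print" derived from the class name:

    import org.tensorflow.op.Ops;
    import org.tensorflow.op.core.Print;

    public class PrintSketch {
      public static void main(String[] args) {
        Ops tf = Ops.create();  // eager session
        // Prints the string scalar, terminated by the configured end string.
        tf.print(tf.constant("hello from PrintV2"), Print.end("\n"));
      }
    }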
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Prod.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Prod.java index 7404306824e..fc75390d995 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Prod.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Prod.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -41,6 +43,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Prod.OP_NAME, + inputsClass = Prod.Inputs.class +) @Operator public final class Prod extends RawOp implements Operand { /** @@ -50,8 +56,8 @@ public final class Prod extends RawOp implements Operand { private Output output; - private Prod(Operation operation) { - super(operation); + public Prod(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -130,6 +136,9 @@ public Options keepDims(Boolean keepDims) { } } + @OpInputsMetadata( + outputsClass = Prod.class + ) public static class Inputs extends RawOpInputs> { /** * The tensor to reduce. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/QuantizedReshape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/QuantizedReshape.java index b301d427311..1b9bbcf38eb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/QuantizedReshape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/QuantizedReshape.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizedReshape.OP_NAME, + inputsClass = QuantizedReshape.Inputs.class +) @Operator public final class QuantizedReshape extends RawOp { /** @@ -51,8 +57,8 @@ public final class QuantizedReshape extends RawOp { private Output outputMax; - private QuantizedReshape(Operation operation) { - super(operation); + public QuantizedReshape(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); outputMin = operation.output(outputIdx++); @@ -110,6 +116,9 @@ public Output outputMax() { return outputMax; } + @OpInputsMetadata( + outputsClass = QuantizedReshape.class + ) public static class Inputs extends RawOpInputs> { /** * The tensor input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Range.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Range.java index d06a61b77d7..3acfff8153b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Range.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Range.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; 
import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -45,6 +47,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Range.OP_NAME, + inputsClass = Range.Inputs.class +) @Operator public final class Range extends RawOp implements Operand { /** @@ -54,8 +60,8 @@ public final class Range extends RawOp implements Operand private Output output; - private Range(Operation operation) { - super(operation); + public Range(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -96,6 +102,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Range.class + ) public static class Inputs extends RawOpInputs> { /** * 0-D (scalar). First entry in the sequence. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Rank.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Rank.java index 8561087dad8..ca96957eeda 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Rank.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Rank.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -45,6 +47,10 @@ * of a tensor is the number of indices required to uniquely select each element * of the tensor. Rank is also known as "order", "degree", or "ndims." 
*/ +@OpMetadata( + opType = Rank.OP_NAME, + inputsClass = Rank.Inputs.class +) @Operator public final class Rank extends RawOp implements Operand { /** @@ -54,8 +60,8 @@ public final class Rank extends RawOp implements Operand { private Output output; - private Rank(Operation operation) { - super(operation); + public Rank(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Rank.class + ) public static class Inputs extends RawOpInputs { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReadVariableOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReadVariableOp.java index f461b9fd855..699f3a2b6d9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReadVariableOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReadVariableOp.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +44,10 @@ * * @param data type for {@code value} output */ +@OpMetadata( + opType = ReadVariableOp.OP_NAME, + inputsClass = ReadVariableOp.Inputs.class +) @Operator public final class ReadVariableOp extends RawOp implements Operand { /** @@ -51,8 +57,8 @@ public final class ReadVariableOp extends RawOp implements Oper private Output value; - private ReadVariableOp(Operation operation) { - super(operation); + public ReadVariableOp(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; value = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return value; } + @OpInputsMetadata( + outputsClass = ReadVariableOp.class + ) public static class Inputs extends RawOpInputs> { /** * handle to the resource in which to store the variable. 
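Placeholder's shape option and Rank's scalar output, described in the Javadoc above, can be exercised through the generated Ops endpoints. A small graph-mode sketch; the endpoint names are assumed from the default @Operator mapping, which these hunks do not change:

    import org.tensorflow.Graph;
    import org.tensorflow.ndarray.Shape;
    import org.tensorflow.op.Ops;
    import org.tensorflow.op.core.Placeholder;
    import org.tensorflow.op.core.Rank;
    import org.tensorflow.types.TFloat32;

    public class RankSketch {
      public static void main(String[] args) {
        try (Graph g = new Graph()) {
          Ops tf = Ops.create(g);
          // A 2 x 3 float placeholder; shape(...) is the Options setter shown above.
          Placeholder<TFloat32> x =
              tf.placeholder(TFloat32.class, Placeholder.shape(Shape.of(2, 3)));
          // Rank is a scalar TInt32; for this 2-D input it evaluates to 2.
          Rank rank = tf.rank(x);
          System.out.println(rank.op().name());
        }
      }
    }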
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Recv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Recv.java index 5235cae884c..2b62d24bb82 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Recv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Recv.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code tensor} output */ +@OpMetadata( + opType = Recv.OP_NAME, + inputsClass = Recv.Inputs.class +) public final class Recv extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class Recv extends RawOp implements Operand { private Output tensor; - private Recv(Operation operation) { - super(operation); + public Recv(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; tensor = operation.output(outputIdx++); } @@ -136,6 +142,9 @@ public Options clientTerminated(Boolean clientTerminated) { } } + @OpInputsMetadata( + outputsClass = Recv.class + ) public static class Inputs extends RawOpInputs> { /** * The tensorType attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceAll.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceAll.java index 8858cdeb7f4..0560ec3fe42 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceAll.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceAll.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -39,6 +41,10 @@ * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. */ +@OpMetadata( + opType = ReduceAll.OP_NAME, + inputsClass = ReduceAll.Inputs.class +) @Operator public final class ReduceAll extends RawOp implements Operand { /** @@ -48,8 +54,8 @@ public final class ReduceAll extends RawOp implements Operand { private Output output; - private ReduceAll(Operation operation) { - super(operation); + public ReduceAll(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -127,6 +133,9 @@ public Options keepDims(Boolean keepDims) { } } + @OpInputsMetadata( + outputsClass = ReduceAll.class + ) public static class Inputs extends RawOpInputs { /** * The tensor to reduce. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceAny.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceAny.java index 5dcc0185ac8..3d37b5b7f48 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceAny.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceAny.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -39,6 +41,10 @@ * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. */ +@OpMetadata( + opType = ReduceAny.OP_NAME, + inputsClass = ReduceAny.Inputs.class +) @Operator public final class ReduceAny extends RawOp implements Operand { /** @@ -48,8 +54,8 @@ public final class ReduceAny extends RawOp implements Operand { private Output output; - private ReduceAny(Operation operation) { - super(operation); + public ReduceAny(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -127,6 +133,9 @@ public Options keepDims(Boolean keepDims) { } } + @OpInputsMetadata( + outputsClass = ReduceAny.class + ) public static class Inputs extends RawOpInputs { /** * The tensor to reduce. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMax.java index 913b043560d..1327ee68ba8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMax.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -40,6 +42,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ReduceMax.OP_NAME, + inputsClass = ReduceMax.Inputs.class +) @Operator public final class ReduceMax extends RawOp implements Operand { /** @@ -49,8 +55,8 @@ public final class ReduceMax extends RawOp implements Operand private Output output; - private ReduceMax(Operation operation) { - super(operation); + public ReduceMax(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -129,6 +135,9 @@ public Options keepDims(Boolean keepDims) { } } + @OpInputsMetadata( + outputsClass = ReduceMax.class + ) public static class Inputs extends RawOpInputs> { /** * The tensor to reduce. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMin.java index bf69583091c..928d6affc56 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMin.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -40,6 +42,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ReduceMin.OP_NAME, + inputsClass = ReduceMin.Inputs.class +) @Operator public final class ReduceMin extends RawOp implements Operand { /** @@ -49,8 +55,8 @@ public final class ReduceMin extends RawOp implements Operand private Output output; - private ReduceMin(Operation operation) { - super(operation); + public ReduceMin(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -129,6 +135,9 @@ public Options keepDims(Boolean keepDims) { } } + @OpInputsMetadata( + outputsClass = ReduceMin.class + ) public static class Inputs extends RawOpInputs> { /** * The tensor to reduce. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceProd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceProd.java index eb133e52ded..d4c9284eb4c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceProd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceProd.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -41,6 +43,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ReduceProd.OP_NAME, + inputsClass = ReduceProd.Inputs.class +) @Operator public final class ReduceProd extends RawOp implements Operand { /** @@ -50,8 +56,8 @@ public final class ReduceProd extends RawOp implements Operand< private Output output; - private ReduceProd(Operation operation) { - super(operation); + public ReduceProd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -130,6 +136,9 @@ public Options keepDims(Boolean keepDims) { } } + @OpInputsMetadata( + outputsClass = ReduceProd.class + ) public static class Inputs extends RawOpInputs> { /** * The tensor to reduce. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceSum.java index 45222cee07a..0057c41c1de 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceSum.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -41,6 +43,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ReduceSum.OP_NAME, + inputsClass = ReduceSum.Inputs.class +) @Operator public final class ReduceSum extends RawOp implements Operand { /** @@ -50,8 +56,8 @@ public final class ReduceSum extends RawOp implements Operand output; - private ReduceSum(Operation operation) { - super(operation); + public ReduceSum(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -130,6 +136,9 @@ public Options keepDims(Boolean keepDims) { } } + @OpInputsMetadata( + outputsClass = ReduceSum.class + ) public static class Inputs extends RawOpInputs> { /** * The tensor to reduce. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefEnter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefEnter.java index aad1e84ea0d..987b8e95ef8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefEnter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefEnter.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RefEnter.OP_NAME, + inputsClass = RefEnter.Inputs.class +) public final class RefEnter extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -47,8 +53,8 @@ public final class RefEnter extends RawOp implements Operand private Output output; - private RefEnter(Operation operation) { - super(operation); + public RefEnter(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -152,6 +158,9 @@ public Options parallelIterations(Long parallelIterations) { } } + @OpInputsMetadata( + outputsClass = RefEnter.class + ) public static class Inputs extends RawOpInputs> { /** * The tensor to be made available to the child frame. 
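The reduction wrappers above (ReduceAll through ReduceSum) all expose the same keepDims option, and their constructors are now public, so an existing graph operation can be wrapped again in its generated class. A sketch of both, assuming the reduceSum endpoint name; the re-wrapping at the end is a hypothetical use, since the hunks themselves only change visibility:

    import org.tensorflow.Graph;
    import org.tensorflow.Operation;
    import org.tensorflow.op.Ops;
    import org.tensorflow.op.core.ReduceSum;
    import org.tensorflow.types.TInt32;

    public class ReduceSumSketch {
      public static void main(String[] args) {
        try (Graph g = new Graph()) {
          Ops tf = Ops.create(g);
          // Sum over axis 0, keeping the reduced dimension (keepDims option shown above).
          ReduceSum<TInt32> sum = tf.withName("total").reduceSum(
              tf.constant(new int[][] {{1, 2, 3}, {4, 5, 6}}),
              tf.constant(0),
              ReduceSum.keepDims(true));
          // With the public constructor, the underlying operation can be re-wrapped.
          Operation raw = g.operation("total");
          ReduceSum<TInt32> rewrapped = new ReduceSum<>(raw);
          System.out.println(rewrapped.asOutput().shape()); // shape [1, 3]
        }
      }
    }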
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefExit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefExit.java index 42a11c140ed..1b7b349f507 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefExit.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefExit.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RefExit.OP_NAME, + inputsClass = RefExit.Inputs.class +) public final class RefExit extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class RefExit extends RawOp implements Operand private Output output; - private RefExit(Operation operation) { - super(operation); + public RefExit(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -81,6 +87,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = RefExit.class + ) public static class Inputs extends RawOpInputs> { /** * The tensor to be made available to the parent frame. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefIdentity.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefIdentity.java index b9623c181f1..20d8e137307 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefIdentity.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefIdentity.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -35,6 +37,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RefIdentity.OP_NAME, + inputsClass = RefIdentity.Inputs.class +) public final class RefIdentity extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RefIdentity extends RawOp implements Operand private Output output; - private RefIdentity(Operation operation) { - super(operation); + public RefIdentity(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = RefIdentity.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefMerge.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefMerge.java index 911e7d845cf..c5e9183fb95 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefMerge.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefMerge.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import 
org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -41,6 +43,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RefMerge.OP_NAME, + inputsClass = RefMerge.Inputs.class +) public final class RefMerge extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -51,8 +57,8 @@ public final class RefMerge extends RawOp { private Output valueIndex; - private RefMerge(Operation operation) { - super(operation); + public RefMerge(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); valueIndex = operation.output(outputIdx++); @@ -93,6 +99,9 @@ public Output valueIndex() { return valueIndex; } + @OpInputsMetadata( + outputsClass = RefMerge.class + ) public static class Inputs extends RawOpInputs> { /** * The input tensors, exactly one of which will become available. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefNextIteration.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefNextIteration.java index 4a56e3f5394..2497ea77192 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefNextIteration.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefNextIteration.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RefNextIteration.OP_NAME, + inputsClass = RefNextIteration.Inputs.class +) @Operator public final class RefNextIteration extends RawOp implements Operand { /** @@ -45,8 +51,8 @@ public final class RefNextIteration extends RawOp implements Op private Output output; - private RefNextIteration(Operation operation) { - super(operation); + public RefNextIteration(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = RefNextIteration.class + ) public static class Inputs extends RawOpInputs> { /** * The tensor to be made available to the next iteration. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSelect.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSelect.java index 4e449068fdd..4b1a228e4c3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSelect.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSelect.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RefSelect.OP_NAME, + inputsClass = RefSelect.Inputs.class +) @Operator public final class RefSelect extends RawOp implements Operand { /** @@ -47,8 +53,8 @@ public final class RefSelect extends RawOp implements Operand output; - private RefSelect(Operation operation) { - super(operation); + public RefSelect(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -87,6 +93,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = RefSelect.class + ) public static class Inputs extends RawOpInputs> { /** * A scalar that determines the input that gets selected. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSwitch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSwitch.java index 47c7a63df80..34a38b08917 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSwitch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSwitch.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -40,6 +42,10 @@ * * @param data type for {@code output_false} output */ +@OpMetadata( + opType = RefSwitch.OP_NAME, + inputsClass = RefSwitch.Inputs.class +) @Operator public final class RefSwitch extends RawOp { /** @@ -51,8 +57,8 @@ public final class RefSwitch extends RawOp { private Output outputTrue; - private RefSwitch(Operation operation) { - super(operation); + public RefSwitch(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputFalse = operation.output(outputIdx++); outputTrue = operation.output(outputIdx++); @@ -96,6 +102,9 @@ public Output outputTrue() { return outputTrue; } + @OpInputsMetadata( + outputsClass = RefSwitch.class + ) public static class Inputs extends RawOpInputs> { /** * The ref tensor to be forwarded to the appropriate output. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RemoteCall.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RemoteCall.java index 4f53a7940a6..e2dc2c02a80 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RemoteCall.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RemoteCall.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -39,6 +41,10 @@ /** * Runs function {@code f} on a remote device indicated by {@code target}. */ +@OpMetadata( + opType = RemoteCall.OP_NAME, + inputsClass = RemoteCall.Inputs.class +) @Operator public final class RemoteCall extends RawOp implements Iterable> { /** @@ -49,8 +55,8 @@ public final class RemoteCall extends RawOp implements Iterable> private List> output; @SuppressWarnings("unchecked") - private RemoteCall(Operation operation) { - super(operation); + public RemoteCall(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputLength = operation.outputListLength("output"); output = Arrays.asList(operation.outputList(outputIdx, outputLength)); @@ -95,6 +101,9 @@ public Iterator> iterator() { return (Iterator) output.iterator(); } + @OpInputsMetadata( + outputsClass = RemoteCall.class + ) public static class Inputs extends RawOpInputs { /** * A fully specified device name where we want to run the function. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reshape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reshape.java index e412bf4ccc7..c6fa9bdcb01 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reshape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reshape.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -91,6 +93,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Reshape.OP_NAME, + inputsClass = Reshape.Inputs.class +) @Operator public final class Reshape extends RawOp implements Operand { /** @@ -100,8 +106,8 @@ public final class Reshape extends RawOp implements Operand private Output output; - private Reshape(Operation operation) { - super(operation); + public Reshape(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -140,6 +146,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Reshape.class + ) public static class Inputs extends RawOpInputs> { /** * The tensor input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceCountUpTo.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceCountUpTo.java index 40d808c3cbd..afcf81f3454 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceCountUpTo.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceCountUpTo.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ResourceCountUpTo.OP_NAME, + inputsClass = ResourceCountUpTo.Inputs.class +) @Operator public final class ResourceCountUpTo extends RawOp implements Operand { /** @@ -47,8 +53,8 @@ public final class ResourceCountUpTo extends RawOp implements private Output output; - private ResourceCountUpTo(Operation operation) { - super(operation); + public ResourceCountUpTo(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = ResourceCountUpTo.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a scalar {@code Variable} node. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGather.java index 195677c0764..c583cafab53 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGather.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -50,6 +52,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ResourceGather.OP_NAME, + inputsClass = ResourceGather.Inputs.class +) @Operator public final class ResourceGather extends RawOp implements Operand { /** @@ -59,8 +65,8 @@ public final class ResourceGather extends RawOp implements Oper private Output output; - private ResourceGather(Operation operation) { - super(operation); + public ResourceGather(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -167,6 +173,9 @@ public Options validateIndices(Boolean validateIndices) { } } + @OpInputsMetadata( + outputsClass = ResourceGather.class + ) public static class Inputs extends RawOpInputs> { /** * The resource input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGatherNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGatherNd.java index 42737e67583..3c5680dff27 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGatherNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGatherNd.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import 
org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ResourceGatherNd.OP_NAME, + inputsClass = ResourceGatherNd.Inputs.class +) @Operator public final class ResourceGatherNd extends RawOp implements Operand { /** @@ -47,8 +53,8 @@ public final class ResourceGatherNd extends RawOp implements Op private Output output; - private ResourceGatherNd(Operation operation) { - super(operation); + public ResourceGatherNd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -89,6 +95,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = ResourceGatherNd.class + ) public static class Inputs extends RawOpInputs> { /** * The resource input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterAdd.java index 237c62609ed..d349b92d941 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterAdd.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -51,6 +53,10 @@ * * */ +@OpMetadata( + opType = ResourceScatterAdd.OP_NAME, + inputsClass = ResourceScatterAdd.Inputs.class +) @Operator public final class ResourceScatterAdd extends RawOp { /** @@ -58,8 +64,8 @@ public final class ResourceScatterAdd extends RawOp { */ public static final String OP_NAME = "ResourceScatterAdd"; - private ResourceScatterAdd(Operation operation) { - super(operation); + public ResourceScatterAdd(Operation operation) { + super(operation, OP_NAME); } /** @@ -83,6 +89,9 @@ public static ResourceScatterAdd create(Scope scope, Operand re return new ResourceScatterAdd(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = ResourceScatterAdd.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a {@code Variable} node. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterDiv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterDiv.java index 8568d28df4c..5f29312f25c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterDiv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterDiv.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -51,6 +53,10 @@ * * */ +@OpMetadata( + opType = ResourceScatterDiv.OP_NAME, + inputsClass = ResourceScatterDiv.Inputs.class +) @Operator public final class ResourceScatterDiv extends RawOp { /** @@ -58,8 +64,8 @@ public final class ResourceScatterDiv extends RawOp { */ public static final String OP_NAME = "ResourceScatterDiv"; - private ResourceScatterDiv(Operation operation) { - super(operation); + public ResourceScatterDiv(Operation operation) { + super(operation, OP_NAME); } /** @@ -83,6 +89,9 @@ public static ResourceScatterDiv create(Scope scope, Operand re return new ResourceScatterDiv(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = ResourceScatterDiv.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a {@code Variable} node. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterMax.java index 68252db27f2..0773be58a8c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterMax.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -51,6 +53,10 @@ * * */ +@OpMetadata( + opType = ResourceScatterMax.OP_NAME, + inputsClass = ResourceScatterMax.Inputs.class +) @Operator public final class ResourceScatterMax extends RawOp { /** @@ -58,8 +64,8 @@ public final class ResourceScatterMax extends RawOp { */ public static final String OP_NAME = "ResourceScatterMax"; - private ResourceScatterMax(Operation operation) { - super(operation); + public ResourceScatterMax(Operation operation) { + super(operation, OP_NAME); } /** @@ -83,6 +89,9 @@ public static ResourceScatterMax create(Scope scope, Operand re return new ResourceScatterMax(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = ResourceScatterMax.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a {@code Variable} node. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterMin.java index 1b8a9871b5d..408a3089249 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterMin.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -51,6 +53,10 @@ * * */ +@OpMetadata( + opType = ResourceScatterMin.OP_NAME, + inputsClass = ResourceScatterMin.Inputs.class +) @Operator public final class ResourceScatterMin extends RawOp { /** @@ -58,8 +64,8 @@ public final class ResourceScatterMin extends RawOp { */ public static final String OP_NAME = "ResourceScatterMin"; - private ResourceScatterMin(Operation operation) { - super(operation); + public ResourceScatterMin(Operation operation) { + super(operation, OP_NAME); } /** @@ -83,6 +89,9 @@ public static ResourceScatterMin create(Scope scope, Operand re return new ResourceScatterMin(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = ResourceScatterMin.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a {@code Variable} node. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterMul.java index bcf7c629746..4bdb292741a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterMul.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -51,6 +53,10 @@ * * */ +@OpMetadata( + opType = ResourceScatterMul.OP_NAME, + inputsClass = ResourceScatterMul.Inputs.class +) @Operator public final class ResourceScatterMul extends RawOp { /** @@ -58,8 +64,8 @@ public final class ResourceScatterMul extends RawOp { */ public static final String OP_NAME = "ResourceScatterMul"; - private ResourceScatterMul(Operation operation) { - super(operation); + public ResourceScatterMul(Operation operation) { + super(operation, OP_NAME); } /** @@ -83,6 +89,9 @@ public static ResourceScatterMul create(Scope scope, Operand re return new ResourceScatterMul(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = ResourceScatterMul.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a {@code Variable} node. 
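The ResourceScatterAdd/Div/Max/Min/Mul hunks above are identical in shape; the ops combine updates into a resource variable at the given indices. A plain-Java sketch of the add variant's update rule on a 1-D variable (an array stands in for the resource; this illustrates the semantics, it is not the library call):

    import java.util.Arrays;

    public class ScatterAddSketch {
      // ref[indices[i]] += updates[i]; duplicate indices accumulate.
      static int[] scatterAdd(int[] ref, int[] indices, int[] updates) {
        for (int i = 0; i < indices.length; i++) {
          ref[indices[i]] += updates[i];
        }
        return ref;
      }

      public static void main(String[] args) {
        int[] ref = {1, 2, 3, 4};
        scatterAdd(ref, new int[] {1, 3, 3}, new int[] {10, 20, 5});
        System.out.println(Arrays.toString(ref)); // [1, 12, 3, 29]
      }
    }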
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdAdd.java index f3a8d9746d6..f9856bdb9e9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdAdd.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -60,6 +62,10 @@ *

See {@code tf.scatter_nd} for more details about how to make updates to * slices. */ +@OpMetadata( + opType = ResourceScatterNdAdd.OP_NAME, + inputsClass = ResourceScatterNdAdd.Inputs.class +) @Operator public final class ResourceScatterNdAdd extends RawOp { /** @@ -67,8 +73,8 @@ public final class ResourceScatterNdAdd extends RawOp { */ public static final String OP_NAME = "ResourceScatterNdAdd"; - private ResourceScatterNdAdd(Operation operation) { - super(operation); + public ResourceScatterNdAdd(Operation operation) { + super(operation, OP_NAME); } /** @@ -137,6 +143,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ResourceScatterNdAdd.class + ) public static class Inputs extends RawOpInputs { /** * A resource handle. Must be from a VarHandleOp. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdMax.java index 0899d59f311..c16711b04d0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdMax.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -34,6 +36,10 @@ /** * The ResourceScatterNdMax operation */ +@OpMetadata( + opType = ResourceScatterNdMax.OP_NAME, + inputsClass = ResourceScatterNdMax.Inputs.class +) @Operator public final class ResourceScatterNdMax extends RawOp { /** @@ -41,8 +47,8 @@ public final class ResourceScatterNdMax extends RawOp { */ public static final String OP_NAME = "ResourceScatterNdMax"; - private ResourceScatterNdMax(Operation operation) { - super(operation); + public ResourceScatterNdMax(Operation operation) { + super(operation, OP_NAME); } /** @@ -111,6 +117,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ResourceScatterNdMax.class + ) public static class Inputs extends RawOpInputs { /** * A resource handle. Must be from a VarHandleOp. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdMin.java index bf135c7094e..86ac1326e49 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdMin.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -34,6 +36,10 @@ /** * The ResourceScatterNdMin operation */ +@OpMetadata( + opType = ResourceScatterNdMin.OP_NAME, + inputsClass = ResourceScatterNdMin.Inputs.class +) @Operator public final class ResourceScatterNdMin extends RawOp { /** @@ -41,8 +47,8 @@ public final class ResourceScatterNdMin extends RawOp { */ public static final String OP_NAME = "ResourceScatterNdMin"; - private ResourceScatterNdMin(Operation operation) { - super(operation); + public ResourceScatterNdMin(Operation operation) { + super(operation, OP_NAME); } /** @@ -111,6 +117,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ResourceScatterNdMin.class + ) public static class Inputs extends RawOpInputs { /** * A resource handle. Must be from a VarHandleOp. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdSub.java index 5270a1e9bf9..054164d9f9d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdSub.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -60,6 +62,10 @@ *

See {@code tf.scatter_nd} for more details about how to make updates to * slices. */ +@OpMetadata( + opType = ResourceScatterNdSub.OP_NAME, + inputsClass = ResourceScatterNdSub.Inputs.class +) @Operator public final class ResourceScatterNdSub extends RawOp { /** @@ -67,8 +73,8 @@ public final class ResourceScatterNdSub extends RawOp { */ public static final String OP_NAME = "ResourceScatterNdSub"; - private ResourceScatterNdSub(Operation operation) { - super(operation); + public ResourceScatterNdSub(Operation operation) { + super(operation, OP_NAME); } /** @@ -137,6 +143,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ResourceScatterNdSub.class + ) public static class Inputs extends RawOpInputs { /** * A resource handle. Must be from a VarHandleOp. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdUpdate.java index ebcfd808172..95d7f8b97be 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterNdUpdate.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -61,6 +63,10 @@ *

See {@code tf.scatter_nd} for more details about how to make updates to * slices. */ +@OpMetadata( + opType = ResourceScatterNdUpdate.OP_NAME, + inputsClass = ResourceScatterNdUpdate.Inputs.class +) @Operator public final class ResourceScatterNdUpdate extends RawOp { /** @@ -68,8 +74,8 @@ public final class ResourceScatterNdUpdate extends RawOp { */ public static final String OP_NAME = "ResourceScatterNdUpdate"; - private ResourceScatterNdUpdate(Operation operation) { - super(operation); + public ResourceScatterNdUpdate(Operation operation) { + super(operation, OP_NAME); } /** @@ -138,6 +144,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ResourceScatterNdUpdate.class + ) public static class Inputs extends RawOpInputs { /** * A resource handle. Must be from a VarHandleOp. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterSub.java index 73050f58930..7ffb653fc5b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterSub.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -51,6 +53,10 @@ * * */ +@OpMetadata( + opType = ResourceScatterSub.OP_NAME, + inputsClass = ResourceScatterSub.Inputs.class +) @Operator public final class ResourceScatterSub extends RawOp { /** @@ -58,8 +64,8 @@ public final class ResourceScatterSub extends RawOp { */ public static final String OP_NAME = "ResourceScatterSub"; - private ResourceScatterSub(Operation operation) { - super(operation); + public ResourceScatterSub(Operation operation) { + super(operation, OP_NAME); } /** @@ -83,6 +89,9 @@ public static ResourceScatterSub create(Scope scope, Operand re return new ResourceScatterSub(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = ResourceScatterSub.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a {@code Variable} node. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterUpdate.java index ae7645b7d38..ce6757da55b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceScatterUpdate.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -45,6 +47,10 @@ * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] 
* */ +@OpMetadata( + opType = ResourceScatterUpdate.OP_NAME, + inputsClass = ResourceScatterUpdate.Inputs.class +) @Operator public final class ResourceScatterUpdate extends RawOp { /** @@ -52,8 +58,8 @@ public final class ResourceScatterUpdate extends RawOp { */ public static final String OP_NAME = "ResourceScatterUpdate"; - private ResourceScatterUpdate(Operation operation) { - super(operation); + public ResourceScatterUpdate(Operation operation) { + super(operation, OP_NAME); } /** @@ -77,6 +83,9 @@ public static ResourceScatterUpdate create(Scope scope, Operand return new ResourceScatterUpdate(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = ResourceScatterUpdate.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a {@code Variable} node. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceStridedSliceAssign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceStridedSliceAssign.java index fa3670c11f9..5da15961a6a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceStridedSliceAssign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceStridedSliceAssign.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -39,6 +41,10 @@ *

NOTE this op currently does not support broadcasting and so {@code value}'s * shape must be exactly the shape produced by the slice of {@code ref}. */ +@OpMetadata( + opType = ResourceStridedSliceAssign.OP_NAME, + inputsClass = ResourceStridedSliceAssign.Inputs.class +) @Operator public final class ResourceStridedSliceAssign extends RawOp { /** @@ -46,8 +52,8 @@ public final class ResourceStridedSliceAssign extends RawOp { */ public static final String OP_NAME = "ResourceStridedSliceAssign"; - private ResourceStridedSliceAssign(Operation operation) { - super(operation); + public ResourceStridedSliceAssign(Operation operation) { + super(operation, OP_NAME); } /** @@ -220,6 +226,9 @@ public Options shrinkAxisMask(Long shrinkAxisMask) { } } + @OpInputsMetadata( + outputsClass = ResourceStridedSliceAssign.class + ) public static class Inputs extends RawOpInputs { /** * The ref input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reverse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reverse.java index 6cb5d89fa7f..5bdcfc6b3dd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reverse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reverse.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -79,6 +81,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Reverse.OP_NAME, + inputsClass = Reverse.Inputs.class +) @Operator public final class Reverse extends RawOp implements Operand { /** @@ -88,8 +94,8 @@ public final class Reverse extends RawOp implements Operand private Output output; - private Reverse(Operation operation) { - super(operation); + public Reverse(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -129,6 +135,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Reverse.class + ) public static class Inputs extends RawOpInputs> { /** * Up to 8-D. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReverseSequence.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReverseSequence.java index 00bc24272c8..5fdc96725c4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReverseSequence.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReverseSequence.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -85,6 +87,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ReverseSequence.OP_NAME, + inputsClass = ReverseSequence.Inputs.class +) @Operator public final class ReverseSequence extends RawOp implements Operand { /** @@ -94,8 +100,8 @@ public final class ReverseSequence extends RawOp implements Ope private Output output; - private ReverseSequence(Operation operation) { - super(operation); + public ReverseSequence(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -176,6 +182,9 @@ public Options batchDim(Long batchDim) { } } + @OpInputsMetadata( + outputsClass = ReverseSequence.class + ) public static class Inputs extends RawOpInputs> { /** * The input to reverse. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Roll.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Roll.java index 83983cf1983..70aaf92a7d1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Roll.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Roll.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -55,6 +57,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Roll.OP_NAME, + inputsClass = Roll.Inputs.class +) @Operator public final class Roll extends RawOp implements Operand { /** @@ -64,8 +70,8 @@ public final class Roll extends RawOp implements Operand { private Output output; - private Roll(Operation operation) { - super(operation); + public Roll(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -114,6 +120,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Roll.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterAdd.java index 1f2bb33ba0d..60ee20a9add 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterAdd.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; 
import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -56,6 +58,10 @@ * * @param data type for {@code output_ref} output */ +@OpMetadata( + opType = ScatterAdd.OP_NAME, + inputsClass = ScatterAdd.Inputs.class +) @Operator public final class ScatterAdd extends RawOp implements Operand { /** @@ -65,8 +71,8 @@ public final class ScatterAdd extends RawOp implements Operand< private Output outputRef; - private ScatterAdd(Operation operation) { - super(operation); + public ScatterAdd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputRef = operation.output(outputIdx++); } @@ -149,6 +155,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ScatterAdd.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a {@code Variable} node. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterDiv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterDiv.java index da43641571e..18920cebab5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterDiv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterDiv.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -53,6 +55,10 @@ * * @param data type for {@code output_ref} output */ +@OpMetadata( + opType = ScatterDiv.OP_NAME, + inputsClass = ScatterDiv.Inputs.class +) @Operator public final class ScatterDiv extends RawOp implements Operand { /** @@ -62,8 +68,8 @@ public final class ScatterDiv extends RawOp implements Operand< private Output outputRef; - private ScatterDiv(Operation operation) { - super(operation); + public ScatterDiv(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputRef = operation.output(outputIdx++); } @@ -146,6 +152,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ScatterDiv.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a {@code Variable} node. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMax.java index 136fa5c92e1..05563fcc7e1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMax.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -55,6 +57,10 @@ * * @param data type for {@code output_ref} output */ +@OpMetadata( + opType = ScatterMax.OP_NAME, + inputsClass = ScatterMax.Inputs.class +) @Operator public final class ScatterMax extends RawOp implements Operand { /** @@ -64,8 +70,8 @@ public final class ScatterMax extends RawOp implements Operan private Output outputRef; - private ScatterMax(Operation operation) { - super(operation); + public ScatterMax(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputRef = operation.output(outputIdx++); } @@ -148,6 +154,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ScatterMax.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a {@code Variable} node. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMin.java index 8b48aecb808..1e05925515a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMin.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -55,6 +57,10 @@ * * @param data type for {@code output_ref} output */ +@OpMetadata( + opType = ScatterMin.OP_NAME, + inputsClass = ScatterMin.Inputs.class +) @Operator public final class ScatterMin extends RawOp implements Operand { /** @@ -64,8 +70,8 @@ public final class ScatterMin extends RawOp implements Operan private Output outputRef; - private ScatterMin(Operation operation) { - super(operation); + public ScatterMin(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputRef = operation.output(outputIdx++); } @@ -148,6 +154,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ScatterMin.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a {@code Variable} node. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMul.java index 8a979cf3a05..bc60bb4ecc9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMul.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -53,6 +55,10 @@ * * @param data type for {@code output_ref} output */ +@OpMetadata( + opType = ScatterMul.OP_NAME, + inputsClass = ScatterMul.Inputs.class +) @Operator public final class ScatterMul extends RawOp implements Operand { /** @@ -62,8 +68,8 @@ public final class ScatterMul extends RawOp implements Operand< private Output outputRef; - private ScatterMul(Operation operation) { - super(operation); + public ScatterMul(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputRef = operation.output(outputIdx++); } @@ -146,6 +152,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ScatterMul.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a {@code Variable} node. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNd.java index ae84b7cf6ae..24b251c016e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNd.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -105,6 +107,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ScatterNd.OP_NAME, + inputsClass = ScatterNd.Inputs.class +) @Operator public final class ScatterNd extends RawOp implements Operand { /** @@ -114,8 +120,8 @@ public final class ScatterNd extends RawOp implements Operand output; - private ScatterNd(Operation operation) { - super(operation); + public ScatterNd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -158,6 +164,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = ScatterNd.class + ) public static class Inputs extends RawOpInputs> { /** * Index tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdAdd.java index 50d897a1941..9236cc60451 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdAdd.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -63,6 +65,10 @@ * * @param data type for {@code output_ref} output */ +@OpMetadata( + opType = ScatterNdAdd.OP_NAME, + inputsClass = ScatterNdAdd.Inputs.class +) @Operator public final class ScatterNdAdd extends RawOp implements Operand { /** @@ -72,8 +78,8 @@ public final class ScatterNdAdd extends RawOp implements Operan private Output outputRef; - private ScatterNdAdd(Operation operation) { - super(operation); + public ScatterNdAdd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputRef = operation.output(outputIdx++); } @@ -160,6 +166,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ScatterNdAdd.class + ) public static class Inputs extends RawOpInputs> { /** * A mutable Tensor. Should be from a Variable node. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMax.java index 2783d423634..f0e0e82032d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMax.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code output_ref} output */ +@OpMetadata( + opType = ScatterNdMax.OP_NAME, + inputsClass = ScatterNdMax.Inputs.class +) public final class ScatterNdMax extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class ScatterNdMax extends RawOp implements Operan private Output outputRef; - private ScatterNdMax(Operation operation) { - super(operation); + public ScatterNdMax(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputRef = operation.output(outputIdx++); } @@ -132,6 +138,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ScatterNdMax.class + ) public static class Inputs extends RawOpInputs> { /** * A mutable Tensor. Should be from a Variable node. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMin.java index 19415c2abd9..29c0bf8e43f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMin.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code output_ref} output */ +@OpMetadata( + opType = ScatterNdMin.OP_NAME, + inputsClass = ScatterNdMin.Inputs.class +) public final class ScatterNdMin extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class ScatterNdMin extends RawOp implements Operan private Output outputRef; - private ScatterNdMin(Operation operation) { - super(operation); + public ScatterNdMin(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputRef = operation.output(outputIdx++); } @@ -132,6 +138,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ScatterNdMin.class + ) public static class Inputs extends RawOpInputs> { /** * A mutable Tensor. Should be from a Variable node. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdNonAliasingAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdNonAliasingAdd.java index d8e6cdb20de..95ceaa892eb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdNonAliasingAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdNonAliasingAdd.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -64,6 +66,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ScatterNdNonAliasingAdd.OP_NAME, + inputsClass = ScatterNdNonAliasingAdd.Inputs.class +) @Operator public final class ScatterNdNonAliasingAdd extends RawOp implements Operand { /** @@ -73,8 +79,8 @@ public final class ScatterNdNonAliasingAdd extends RawOp implem private Output output; - private ScatterNdNonAliasingAdd(Operation operation) { - super(operation); + public ScatterNdNonAliasingAdd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -118,6 +124,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = ScatterNdNonAliasingAdd.class + ) public static class Inputs extends RawOpInputs> { /** * A Tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdSub.java index 71100507d7d..da227ea2af8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdSub.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -64,6 +66,10 @@ * * @param data type for {@code output_ref} output */ +@OpMetadata( + opType = ScatterNdSub.OP_NAME, + inputsClass = ScatterNdSub.Inputs.class +) @Operator public final class ScatterNdSub extends RawOp implements Operand { /** @@ -73,8 +79,8 @@ public final class ScatterNdSub extends RawOp implements Operan private Output outputRef; - private ScatterNdSub(Operation operation) { - super(operation); + public ScatterNdSub(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputRef = operation.output(outputIdx++); } @@ -161,6 +167,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ScatterNdSub.class + ) public static class Inputs extends RawOpInputs> { /** * A mutable Tensor. Should be from a Variable node. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdUpdate.java index 9ee766d0c98..e4f24e90bc0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdUpdate.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -63,6 +65,10 @@ * * @param data type for {@code output_ref} output */ +@OpMetadata( + opType = ScatterNdUpdate.OP_NAME, + inputsClass = ScatterNdUpdate.Inputs.class +) @Operator public final class ScatterNdUpdate extends RawOp implements Operand { /** @@ -72,8 +78,8 @@ public final class ScatterNdUpdate extends RawOp implements Ope private Output outputRef; - private ScatterNdUpdate(Operation operation) { - super(operation); + public ScatterNdUpdate(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputRef = operation.output(outputIdx++); } @@ -160,6 +166,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ScatterNdUpdate.class + ) public static class Inputs extends RawOpInputs> { /** * A mutable Tensor. Should be from a Variable node. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterSub.java index 7ebbd6730ad..387b0708afe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterSub.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -55,6 +57,10 @@ * * @param data type for {@code output_ref} output */ +@OpMetadata( + opType = ScatterSub.OP_NAME, + inputsClass = ScatterSub.Inputs.class +) @Operator public final class ScatterSub extends RawOp implements Operand { /** @@ -64,8 +70,8 @@ public final class ScatterSub extends RawOp implements Operand< private Output outputRef; - private ScatterSub(Operation operation) { - super(operation); + public ScatterSub(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputRef = operation.output(outputIdx++); } @@ -148,6 +154,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ScatterSub.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a {@code Variable} node. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterUpdate.java index cc9cebb665f..d31d14d25e7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterUpdate.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -58,6 +60,10 @@ * * @param data type for {@code output_ref} output */ +@OpMetadata( + opType = ScatterUpdate.OP_NAME, + inputsClass = ScatterUpdate.Inputs.class +) @Operator public final class ScatterUpdate extends RawOp implements Operand { /** @@ -67,8 +73,8 @@ public final class ScatterUpdate extends RawOp implements Opera private Output outputRef; - private ScatterUpdate(Operation operation) { - super(operation); + public ScatterUpdate(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputRef = operation.output(outputIdx++); } @@ -151,6 +157,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ScatterUpdate.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a {@code Variable} node. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Select.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Select.java index b112acf4e8c..6a3f8bf1feb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Select.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Select.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -37,6 +39,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Select.OP_NAME, + inputsClass = Select.Inputs.class +) @Operator public final class Select extends RawOp implements Operand { /** @@ -46,8 +52,8 @@ public final class Select extends RawOp implements Operand { private Output output; - private Select(Operation operation) { - super(operation); + public Select(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Select.class + ) public static class Inputs extends RawOpInputs> { /** * The condition input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Send.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Send.java index 7822dbe0ab6..9cc0f7861de 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Send.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Send.java @@ -26,20 +26,26 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; /** * Sends the named tensor from send_device to recv_device. */ +@OpMetadata( + opType = Send.OP_NAME, + inputsClass = Send.Inputs.class +) public final class Send extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "Send"; - private Send(Operation operation) { - super(operation); + public Send(Operation operation) { + super(operation, OP_NAME); } /** @@ -112,6 +118,9 @@ public Options clientTerminated(Boolean clientTerminated) { } } + @OpInputsMetadata( + outputsClass = Send.class + ) public static class Inputs extends RawOpInputs { /** * The tensor to send. 
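One practical consequence of the constructor change visible in the Select and Send hunks above: a typed wrapper can now be constructed directly around an operation looked up in an existing graph, which previously required going through the static create(...) factory. A minimal sketch, assuming a graph that already contains a Select node named "my_select" (that node name is made up for illustration), and assuming the two-argument super(operation, OP_NAME) call validates the operation's type:

    import org.tensorflow.Graph;
    import org.tensorflow.GraphOperation;
    import org.tensorflow.op.core.Select;
    import org.tensorflow.types.TFloat32;

    public class WrapExistingOp {
      /** Re-wraps an already-built graph operation in its typed generated class. */
      public static Select<TFloat32> wrapSelect(Graph graph) {
        GraphOperation op = graph.operation("my_select"); // hypothetical node name; returns null if absent
        // The public constructor is new in this patch; it used to be private.
        return new Select<>(op);
      }
    }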
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SetDiff1d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SetDiff1d.java index b777c48ccbc..790802fa3a4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SetDiff1d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SetDiff1d.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -57,6 +59,10 @@ * * @param data type for {@code idx} output */ +@OpMetadata( + opType = SetDiff1d.OP_NAME, + inputsClass = SetDiff1d.Inputs.class +) @Operator public final class SetDiff1d extends RawOp { /** @@ -68,8 +74,8 @@ public final class SetDiff1d extends RawOp { private Output idx; - private SetDiff1d(Operation operation) { - super(operation); + public SetDiff1d(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); idx = operation.output(outputIdx++); @@ -133,6 +139,9 @@ public Output idx() { return idx; } + @OpInputsMetadata( + outputsClass = SetDiff1d.class + ) public static class Inputs extends RawOpInputs> { /** * 1-D. Values to keep. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SetSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SetSize.java index 8872c75a20f..8d6494d7c89 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SetSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SetSize.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -41,6 +43,10 @@ *

If {@code validate_indices} is {@code True}, this op validates the order and range of {@code set} * indices. */ +@OpMetadata( + opType = SetSize.OP_NAME, + inputsClass = SetSize.Inputs.class +) @Operator public final class SetSize extends RawOp implements Operand { /** @@ -50,8 +56,8 @@ public final class SetSize extends RawOp implements Operand { private Output output; - private SetSize(Operation operation) { - super(operation); + public SetSize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -132,6 +138,9 @@ public Options validateIndices(Boolean validateIndices) { } } + @OpInputsMetadata( + outputsClass = SetSize.class + ) public static class Inputs extends RawOpInputs { /** * 2D {@code Tensor}, indices of a {@code SparseTensor}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Shape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Shape.java index 836ace328b7..a4b24786234 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Shape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Shape.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -45,6 +47,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Shape.OP_NAME, + inputsClass = Shape.Inputs.class +) @Operator public final class Shape extends RawOp implements Operand { /** @@ -54,8 +60,8 @@ public final class Shape extends RawOp implements Operand private Output output; - private Shape(Operation operation) { - super(operation); + public Shape(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -108,6 +114,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Shape.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ShapeN.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ShapeN.java index 34a95d4de91..766e6e27880 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ShapeN.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ShapeN.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -42,6 +44,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ShapeN.OP_NAME, + inputsClass = ShapeN.Inputs.class +) @Operator public final class ShapeN extends RawOp implements Iterable> { /** @@ -52,8 +58,8 @@ public final class ShapeN extends RawOp implements Iterable> output; @SuppressWarnings("unchecked") - private ShapeN(Operation operation) { - super(operation); + public ShapeN(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputLength = 
operation.outputListLength("output"); output = Arrays.asList((Output[]) operation.outputList(outputIdx, outputLength)); @@ -109,6 +115,9 @@ public Iterator> iterator() { return (Iterator) output.iterator(); } + @OpInputsMetadata( + outputsClass = ShapeN.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Size.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Size.java index 2a0eed3e437..a15035267cc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Size.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Size.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -46,6 +48,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Size.OP_NAME, + inputsClass = Size.Inputs.class +) @Operator public final class Size extends RawOp implements Operand { /** @@ -55,8 +61,8 @@ public final class Size extends RawOp implements Operand { private Output output; - private Size(Operation operation) { - super(operation); + public Size(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -109,6 +115,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Size.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Skipgram.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Skipgram.java index f4ef6d93e92..950b9a6e164 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Skipgram.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Skipgram.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; @@ -34,6 +36,10 @@ /** * Parses a text file and creates a batch of examples. */ +@OpMetadata( + opType = Skipgram.OP_NAME, + inputsClass = Skipgram.Inputs.class +) @Operator public final class Skipgram extends RawOp { /** @@ -55,8 +61,8 @@ public final class Skipgram extends RawOp { private Output labels; - private Skipgram(Operation operation) { - super(operation); + public Skipgram(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; vocabWord = operation.output(outputIdx++); vocabFreq = operation.output(outputIdx++); @@ -243,6 +249,9 @@ public Options subsample(Float subsample) { } } + @OpInputsMetadata( + outputsClass = Skipgram.class + ) public static class Inputs extends RawOpInputs { /** * The corpus's text file name. 
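For list-output ops such as ShapeN above (and Split and SplitV below), the now-public constructor also rebuilds the output list from the wrapped operation. For readability, here is the constructor body from the ShapeN hunk reformatted onto separate lines, with the stripped generic parameters restored as a best guess; it is a fragment of the generated class, not standalone code.

    @SuppressWarnings("unchecked")
    public ShapeN(Operation operation) {
      super(operation, OP_NAME);
      int outputIdx = 0;
      // Ask the operation how many tensors the "output" list holds, then slice them out.
      int outputLength = operation.outputListLength("output");
      output = Arrays.asList((Output<U>[]) operation.outputList(outputIdx, outputLength));
    }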
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Slice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Slice.java index 733a500cca8..14e2bffac60 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Slice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Slice.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -42,6 +44,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Slice.OP_NAME, + inputsClass = Slice.Inputs.class +) @Operator public final class Slice extends RawOp implements Operand { /** @@ -51,8 +57,8 @@ public final class Slice extends RawOp implements Operand { private Output output; - private Slice(Operation operation) { - super(operation); + public Slice(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -98,6 +104,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Slice.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Snapshot.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Snapshot.java index 10ccbddac2f..e14bca38cb2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Snapshot.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Snapshot.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Snapshot.OP_NAME, + inputsClass = Snapshot.Inputs.class +) @Operator public final class Snapshot extends RawOp implements Operand { /** @@ -45,8 +51,8 @@ public final class Snapshot extends RawOp implements Operand private Output output; - private Snapshot(Operation operation) { - super(operation); + public Snapshot(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Snapshot.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SpaceToBatchNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SpaceToBatchNd.java index d7a39406724..740db3931cc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SpaceToBatchNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SpaceToBatchNd.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import 
org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -133,6 +135,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SpaceToBatchNd.OP_NAME, + inputsClass = SpaceToBatchNd.Inputs.class +) @Operator public final class SpaceToBatchNd extends RawOp implements Operand { /** @@ -142,8 +148,8 @@ public final class SpaceToBatchNd extends RawOp implements Oper private Output output; - private SpaceToBatchNd(Operation operation) { - super(operation); + public SpaceToBatchNd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -188,6 +194,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SpaceToBatchNd.class + ) public static class Inputs extends RawOpInputs> { /** * N-D with shape {@code input_shape = [batch] + spatial_shape + remaining_shape}, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Split.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Split.java index def301c84d3..85ff8a9bd2d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Split.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Split.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Split.OP_NAME, + inputsClass = Split.Inputs.class +) @Operator public final class Split extends RawOp implements Iterable> { /** @@ -49,8 +55,8 @@ public final class Split extends RawOp implements Iterable> output; @SuppressWarnings("unchecked") - private Split(Operation operation) { - super(operation); + public Split(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputLength = operation.outputListLength("output"); output = Arrays.asList((Output[]) operation.outputList(outputIdx, outputLength)); @@ -98,6 +104,9 @@ public Iterator> iterator() { return (Iterator) output.iterator(); } + @OpInputsMetadata( + outputsClass = Split.class + ) public static class Inputs extends RawOpInputs> { /** * 0-D. The dimension along which to split. 
Must be in the range diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SplitV.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SplitV.java index 26d0cc172ce..77aad26aae6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SplitV.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SplitV.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -40,6 +42,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SplitV.OP_NAME, + inputsClass = SplitV.Inputs.class +) @Operator public final class SplitV extends RawOp implements Iterable> { /** @@ -50,8 +56,8 @@ public final class SplitV extends RawOp implements Iterable> output; @SuppressWarnings("unchecked") - private SplitV(Operation operation) { - super(operation); + public SplitV(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputLength = operation.outputListLength("output"); output = Arrays.asList((Output[]) operation.outputList(outputIdx, outputLength)); @@ -102,6 +108,9 @@ public Iterator> iterator() { return (Iterator) output.iterator(); } + @OpInputsMetadata( + outputsClass = SplitV.class + ) public static class Inputs extends RawOpInputs> { /** * The tensor to split. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Squeeze.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Squeeze.java index d9c8956f5a1..d7a877da72c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Squeeze.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Squeeze.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -51,6 +53,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Squeeze.OP_NAME, + inputsClass = Squeeze.Inputs.class +) @Operator public final class Squeeze extends RawOp implements Operand { /** @@ -60,8 +66,8 @@ public final class Squeeze extends RawOp implements Operand private Output output; - private Squeeze(Operation operation) { - super(operation); + public Squeeze(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -171,6 +177,9 @@ public Options axis(Long... axis) { } } + @OpInputsMetadata( + outputsClass = Squeeze.class + ) public static class Inputs extends RawOpInputs> { /** * The {@code input} to squeeze. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stack.java index be4cdfe3921..a5378efd62e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stack.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stack.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -52,6 +54,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Stack.OP_NAME, + inputsClass = Stack.Inputs.class +) @Operator public final class Stack extends RawOp implements Operand { /** @@ -61,8 +67,8 @@ public final class Stack extends RawOp implements Operand { private Output output; - private Stack(Operation operation) { - super(operation); + public Stack(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -140,6 +146,9 @@ public Options axis(Long axis) { } } + @OpInputsMetadata( + outputsClass = Stack.class + ) public static class Inputs extends RawOpInputs> { /** * Must be of same shape and type. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stage.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stage.java index adc05358314..0275d51c1d3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stage.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stage.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; @@ -35,6 +37,10 @@ * The basic functionality of this Op is similar to a queue with many * fewer capabilities and options. This Op is optimized for performance. 
*/ +@OpMetadata( + opType = Stage.OP_NAME, + inputsClass = Stage.Inputs.class +) @Operator public final class Stage extends RawOp { /** @@ -42,8 +48,8 @@ public final class Stage extends RawOp { */ public static final String OP_NAME = "Stage"; - private Stage(Operation operation) { - super(operation); + public Stage(Operation operation) { + super(operation, OP_NAME); } /** @@ -186,6 +192,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = Stage.class + ) public static class Inputs extends RawOpInputs { /** * a list of tensors diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StageClear.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StageClear.java index 9431054d8a7..d549df79ab0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StageClear.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StageClear.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -34,6 +36,10 @@ /** * Op removes all elements in the underlying container. */ +@OpMetadata( + opType = StageClear.OP_NAME, + inputsClass = StageClear.Inputs.class +) @Operator public final class StageClear extends RawOp { /** @@ -41,8 +47,8 @@ public final class StageClear extends RawOp { */ public static final String OP_NAME = "StageClear"; - private StageClear(Operation operation) { - super(operation); + public StageClear(Operation operation) { + super(operation, OP_NAME); } /** @@ -179,6 +185,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = StageClear.class + ) public static class Inputs extends RawOpInputs { /** * The capacity attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StagePeek.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StagePeek.java index dab381005af..cd8b4d3995a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StagePeek.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StagePeek.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -41,6 +43,10 @@ * this op will block until it does. This Op is optimized for * performance. 
*/ +@OpMetadata( + opType = StagePeek.OP_NAME, + inputsClass = StagePeek.Inputs.class +) @Operator public final class StagePeek extends RawOp implements Iterable> { /** @@ -51,8 +57,8 @@ public final class StagePeek extends RawOp implements Iterable> { private List> values; @SuppressWarnings("unchecked") - private StagePeek(Operation operation) { - super(operation); + public StagePeek(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int valuesLength = operation.outputListLength("values"); values = Arrays.asList(operation.outputList(outputIdx, valuesLength)); @@ -210,6 +216,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = StagePeek.class + ) public static class Inputs extends RawOpInputs { /** * The index input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StageSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StageSize.java index 609a40599a3..f107766a24b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StageSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StageSize.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -37,6 +39,10 @@ /** * Op returns the number of elements in the underlying container. */ +@OpMetadata( + opType = StageSize.OP_NAME, + inputsClass = StageSize.Inputs.class +) @Operator public final class StageSize extends RawOp implements Operand { /** @@ -46,8 +52,8 @@ public final class StageSize extends RawOp implements Operand { private Output output; - private StageSize(Operation operation) { - super(operation); + public StageSize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -200,6 +206,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = StageSize.class + ) public static class Inputs extends RawOpInputs { /** * The capacity attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatefulCase.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatefulCase.java index a040d5ccd26..a965157f255 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatefulCase.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatefulCase.java @@ -32,6 +32,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -58,6 +60,10 @@ * ``` * */ +@OpMetadata( + opType = StatefulCase.OP_NAME, + inputsClass = StatefulCase.Inputs.class +) @Operator public final class StatefulCase extends RawOp implements Case { /** @@ -68,8 +74,8 @@ public final class StatefulCase extends RawOp implements Case { private List> output; @SuppressWarnings("unchecked") - private StatefulCase(Operation operation) { - super(operation); + public 
StatefulCase(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputLength = operation.outputListLength("output"); output = Arrays.asList(operation.outputList(outputIdx, outputLength)); @@ -135,6 +141,9 @@ public Iterator> iterator() { return (Iterator) output.iterator(); } + @OpInputsMetadata( + outputsClass = StatefulCase.class + ) public static class Inputs extends RawOpInputs { /** * The branch selector, an int32 Tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatefulIf.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatefulIf.java index 98dfb290406..61dbfffa2db 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatefulIf.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatefulIf.java @@ -32,6 +32,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ /** * output = cond ? then_branch(input) : else_branch(input) */ +@OpMetadata( + opType = StatefulIf.OP_NAME, + inputsClass = StatefulIf.Inputs.class +) @Operator public final class StatefulIf extends RawOp implements If { /** @@ -49,8 +55,8 @@ public final class StatefulIf extends RawOp implements If { private List> output; @SuppressWarnings("unchecked") - private StatefulIf(Operation operation) { - super(operation); + public StatefulIf(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputLength = operation.outputListLength("output"); output = Arrays.asList(operation.outputList(outputIdx, outputLength)); @@ -124,6 +130,9 @@ public Iterator> iterator() { return (Iterator) output.iterator(); } + @OpInputsMetadata( + outputsClass = StatefulIf.class + ) public static class Inputs extends RawOpInputs { /** *

diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatefulPartitionedCall.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatefulPartitionedCall.java
index 00572804fb3..15d25454f16 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatefulPartitionedCall.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatefulPartitionedCall.java
@@ -31,6 +31,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.family.TType;
@@ -38,6 +40,10 @@
 /**
  * returns {@code f(inputs)}, where {@code f}'s body is placed and partitioned.
  */
+@OpMetadata(
+    opType = StatefulPartitionedCall.OP_NAME,
+    inputsClass = StatefulPartitionedCall.Inputs.class
+)
 @Operator
 public final class StatefulPartitionedCall extends RawOp implements PartitionedCall {
   /**
@@ -48,8 +54,8 @@ public final class StatefulPartitionedCall extends RawOp implements PartitionedC
   private List> output;
 
   @SuppressWarnings("unchecked")
-  private StatefulPartitionedCall(Operation operation) {
-    super(operation);
+  public StatefulPartitionedCall(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     int outputLength = operation.outputListLength("output");
     output = Arrays.asList(operation.outputList(outputIdx, outputLength));
@@ -113,6 +119,9 @@ public Iterator> iterator() {
     return (Iterator) output.iterator();
   }
 
+  @OpInputsMetadata(
+      outputsClass = StatefulPartitionedCall.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      * A list of input tensors.
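The hunks above show the pattern applied throughout these generated files: the op class gains an @OpMetadata annotation naming its OP_NAME and Inputs class, the nested Inputs class gains a matching @OpInputsMetadata back-reference, and the Operation-taking constructor becomes public and forwards OP_NAME to the RawOp super constructor. A minimal sketch of what the now-public constructor allows, assuming a Graph that already contains a StatefulPartitionedCall node (the node name "pcall" below is made up for the example):

    // Illustrative only. Assumes: Graph graph = ...; containing a
    // StatefulPartitionedCall node named "pcall".
    // GraphOperation implements Operation, so it can be handed straight
    // to the now-public wrapper constructor.
    GraphOperation op = graph.operation("pcall");
    StatefulPartitionedCall call = new StatefulPartitionedCall(op);

The OP_NAME argument passed to super(operation, OP_NAME) is presumably there so the base class can associate the wrapper with its registered op type, meaning wrapping an unrelated node should fail fast rather than silently misbehave.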
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatefulWhile.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatefulWhile.java
index 0297bdd06ea..5021c2402d3 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatefulWhile.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatefulWhile.java
@@ -32,6 +32,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.family.TType;
@@ -39,6 +41,10 @@
 /**
  * output = input; While (Cond(output)) { output = Body(output) }
  */
+@OpMetadata(
+    opType = StatefulWhile.OP_NAME,
+    inputsClass = StatefulWhile.Inputs.class
+)
 @Operator
 public final class StatefulWhile extends RawOp implements While {
   /**
@@ -49,8 +55,8 @@ public final class StatefulWhile extends RawOp implements While {
   private List> output;
 
   @SuppressWarnings("unchecked")
-  private StatefulWhile(Operation operation) {
-    super(operation);
+  public StatefulWhile(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     int outputLength = operation.outputListLength("output");
     output = Arrays.asList(operation.outputList(outputIdx, outputLength));
@@ -121,6 +127,9 @@ public Iterator> iterator() {
     return (Iterator) output.iterator();
   }
 
+  @OpInputsMetadata(
+      outputsClass = StatefulWhile.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      * A list of input tensors whose types are T.
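The one-line javadoc above, output = input; While (Cond(output)) { output = Body(output) }, reads naturally as an ordinary loop. A plain-Java sketch of the documented semantics, where cond and body are ordinary Java functions standing in for the op's attached graph functions:

    // Illustrative only: mirrors the documented StatefulWhile semantics in plain Java.
    java.util.function.Predicate<int[]> cond = state -> state[0] < 10;        // Cond(output)
    java.util.function.UnaryOperator<int[]> body = s -> new int[] {s[0] + 1}; // Body(output)

    int[] output = {0};             // output = input
    while (cond.test(output)) {     // While (Cond(output))
      output = body.apply(output);  //   output = Body(output)
    }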
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatelessCase.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatelessCase.java
index 3b9da5431de..6e0b000d3a3 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatelessCase.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatelessCase.java
@@ -32,6 +32,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.TInt32;
 import org.tensorflow.types.family.TType;
@@ -59,6 +61,10 @@
  * This should only be used when the none of branches has stateful ops.
  * 
*/ +@OpMetadata( + opType = StatelessCase.OP_NAME, + inputsClass = StatelessCase.Inputs.class +) public final class StatelessCase extends RawOp implements Case { /** * The name of this op, as known by TensorFlow core engine @@ -68,8 +74,8 @@ public final class StatelessCase extends RawOp implements Case { private List> output; @SuppressWarnings("unchecked") - private StatelessCase(Operation operation) { - super(operation); + public StatelessCase(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputLength = operation.outputListLength("output"); output = Arrays.asList(operation.outputList(outputIdx, outputLength)); @@ -135,6 +141,9 @@ public Iterator> iterator() { return (Iterator) output.iterator(); } + @OpInputsMetadata( + outputsClass = StatelessCase.class + ) public static class Inputs extends RawOpInputs { /** * The branch selector, an int32 Tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatelessIf.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatelessIf.java index e4d4a60e693..2e9d3b4a12a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatelessIf.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatelessIf.java @@ -32,6 +32,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ /** * output = cond ? then_branch(input) : else_branch(input) */ +@OpMetadata( + opType = StatelessIf.OP_NAME, + inputsClass = StatelessIf.Inputs.class +) @Operator public final class StatelessIf extends RawOp implements If { /** @@ -49,8 +55,8 @@ public final class StatelessIf extends RawOp implements If { private List> output; @SuppressWarnings("unchecked") - private StatelessIf(Operation operation) { - super(operation); + public StatelessIf(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputLength = operation.outputListLength("output"); output = Arrays.asList(operation.outputList(outputIdx, outputLength)); @@ -127,6 +133,9 @@ public Iterator> iterator() { return (Iterator) output.iterator(); } + @OpInputsMetadata( + outputsClass = StatelessIf.class + ) public static class Inputs extends RawOpInputs { /** *
 */
+@OpMetadata(
+    opType = StatelessCase.OP_NAME,
+    inputsClass = StatelessCase.Inputs.class
+)
 public final class StatelessCase extends RawOp implements Case {
   /**
    * The name of this op, as known by TensorFlow core engine
@@ -68,8 +74,8 @@ public final class StatelessCase extends RawOp implements Case {
   private List> output;
 
   @SuppressWarnings("unchecked")
-  private StatelessCase(Operation operation) {
-    super(operation);
+  public StatelessCase(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     int outputLength = operation.outputListLength("output");
     output = Arrays.asList(operation.outputList(outputIdx, outputLength));
@@ -135,6 +141,9 @@ public Iterator> iterator() {
     return (Iterator) output.iterator();
   }
 
+  @OpInputsMetadata(
+      outputsClass = StatelessCase.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      * The branch selector, an int32 Tensor.
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatelessIf.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatelessIf.java
index e4d4a60e693..2e9d3b4a12a 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatelessIf.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatelessIf.java
@@ -32,6 +32,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.family.TType;
@@ -39,6 +41,10 @@
 /**
  * output = cond ? then_branch(input) : else_branch(input)
  */
+@OpMetadata(
+    opType = StatelessIf.OP_NAME,
+    inputsClass = StatelessIf.Inputs.class
+)
 @Operator
 public final class StatelessIf extends RawOp implements If {
   /**
@@ -49,8 +55,8 @@ public final class StatelessIf extends RawOp implements If {
   private List> output;
 
   @SuppressWarnings("unchecked")
-  private StatelessIf(Operation operation) {
-    super(operation);
+  public StatelessIf(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     int outputLength = operation.outputListLength("output");
     output = Arrays.asList(operation.outputList(outputIdx, outputLength));
@@ -127,6 +133,9 @@ public Iterator> iterator() {
     return (Iterator) output.iterator();
   }
 
+  @OpInputsMetadata(
+      outputsClass = StatelessIf.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      *
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatelessPartitionedCall.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatelessPartitionedCall.java
index f819677d6e7..6a0494a59d3 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatelessPartitionedCall.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatelessPartitionedCall.java
@@ -31,6 +31,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.family.TType;
@@ -41,6 +43,10 @@
  * within a single process. The kernel places and partitions a given function's
  * underlying graph, and executes each of the partitioned subgraphs as a function.
  */
+@OpMetadata(
+    opType = StatelessPartitionedCall.OP_NAME,
+    inputsClass = StatelessPartitionedCall.Inputs.class
+)
 @Operator
 public final class StatelessPartitionedCall extends RawOp implements PartitionedCall {
   /**
@@ -51,8 +57,8 @@ public final class StatelessPartitionedCall extends RawOp implements Partitioned
   private List> output;
 
   @SuppressWarnings("unchecked")
-  private StatelessPartitionedCall(Operation operation) {
-    super(operation);
+  public StatelessPartitionedCall(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     int outputLength = operation.outputListLength("output");
     output = Arrays.asList(operation.outputList(outputIdx, outputLength));
@@ -115,6 +121,9 @@ public Iterator> iterator() {
     return (Iterator) output.iterator();
   }
 
+  @OpInputsMetadata(
+      outputsClass = StatelessPartitionedCall.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      * A list of input tensors.
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatelessWhile.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatelessWhile.java
index da5e3146ec7..cae67f4082e 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatelessWhile.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StatelessWhile.java
@@ -32,6 +32,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.family.TType;
@@ -39,6 +41,10 @@
 /**
  * output = input; While (Cond(output)) { output = Body(output) }
  */
+@OpMetadata(
+    opType = StatelessWhile.OP_NAME,
+    inputsClass = StatelessWhile.Inputs.class
+)
 @Operator
 public final class StatelessWhile extends RawOp implements While {
   /**
@@ -49,8 +55,8 @@ public final class StatelessWhile extends RawOp implements While {
   private List> output;
 
   @SuppressWarnings("unchecked")
-  private StatelessWhile(Operation operation) {
-    super(operation);
+  public StatelessWhile(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     int outputLength = operation.outputListLength("output");
     output = Arrays.asList(operation.outputList(outputIdx, outputLength));
@@ -124,6 +130,9 @@ public Iterator> iterator() {
     return (Iterator) output.iterator();
   }
 
+  @OpInputsMetadata(
+      outputsClass = StatelessWhile.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      * A list of input tensors whose types are T.
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StopGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StopGradient.java
index 861c19e863d..5cedbd6fe57 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StopGradient.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StopGradient.java
@@ -27,6 +27,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.family.TType;
@@ -86,6 +88,10 @@
  *
  * @param  data type for {@code output} output
  */
+@OpMetadata(
+    opType = StopGradient.OP_NAME,
+    inputsClass = StopGradient.Inputs.class
+)
 @Operator
 public final class StopGradient extends RawOp implements Operand {
   /**
@@ -95,8 +101,8 @@ public final class StopGradient extends RawOp implements Operan
 
   private Output output;
 
-  private StopGradient(Operation operation) {
-    super(operation);
+  public StopGradient(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     output = operation.output(outputIdx++);
   }
@@ -132,6 +138,9 @@ public Output asOutput() {
     return output;
   }
 
+  @OpInputsMetadata(
+      outputsClass = StopGradient.class
+  )
   public static class Inputs extends RawOpInputs> {
     /**
      * The input input
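StopGradient is an identity in the forward direction whose only job is to block backpropagation at that point in the graph. A minimal, self-contained usage sketch against the generated tf.stopGradient endpoint (the constant values are arbitrary):

    import org.tensorflow.Graph;
    import org.tensorflow.op.Ops;
    import org.tensorflow.op.core.Constant;
    import org.tensorflow.op.core.StopGradient;
    import org.tensorflow.types.TFloat32;

    class StopGradientExample {
      public static void main(String[] args) {
        try (Graph g = new Graph()) {
          Ops tf = Ops.create(g);
          Constant<TFloat32> x = tf.constant(new float[] {1f, 2f, 3f});
          // Forward value is identical to x; gradients are blocked at this node.
          StopGradient<TFloat32> frozen = tf.stopGradient(x);
        }
      }
    }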
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSlice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSlice.java
index 3eeabb0160b..bc77fdb4639 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSlice.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSlice.java
@@ -27,6 +27,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.family.TNumber;
@@ -134,6 +136,10 @@
  *
  * @param  data type for {@code output} output
  */
+@OpMetadata(
+    opType = StridedSlice.OP_NAME,
+    inputsClass = StridedSlice.Inputs.class
+)
 @Operator
 public final class StridedSlice extends RawOp implements Operand {
   /**
@@ -143,8 +149,8 @@ public final class StridedSlice extends RawOp implements Operan
 
   private Output output;
 
-  private StridedSlice(Operation operation) {
-    super(operation);
+  public StridedSlice(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     output = operation.output(outputIdx++);
   }
@@ -370,6 +376,9 @@ public Options shrinkAxisMask(Long shrinkAxisMask) {
     }
   }
 
+  @OpInputsMetadata(
+      outputsClass = StridedSlice.class
+  )
   public static class Inputs extends RawOpInputs> {
     /**
      * The input input
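StridedSlice extracts the slice described by begin, end and strides tensors, with the *Mask options (beginMask, endMask, shrinkAxisMask, ...) controlling how each dimension is interpreted. A small sketch using only the plain begin/end/strides form, assuming tf is an Ops instance bound to a Graph:

    // Take elements 1 and 2 of a 1-D constant: roughly x[1:3:1] in Python notation.
    Constant<TFloat32> x = tf.constant(new float[] {10f, 20f, 30f, 40f});
    StridedSlice<TFloat32> slice = tf.stridedSlice(
        x,
        tf.constant(new int[] {1}),   // begin
        tf.constant(new int[] {3}),   // end (exclusive)
        tf.constant(new int[] {1}));  // strides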
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceAssign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceAssign.java
index 644018b6e74..1ebe8c6fdc4 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceAssign.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceAssign.java
@@ -27,6 +27,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.family.TNumber;
@@ -42,6 +44,10 @@
  *
  * @param  data type for {@code output_ref} output
  */
+@OpMetadata(
+    opType = StridedSliceAssign.OP_NAME,
+    inputsClass = StridedSliceAssign.Inputs.class
+)
 @Operator
 public final class StridedSliceAssign extends RawOp implements Operand {
   /**
@@ -51,8 +57,8 @@ public final class StridedSliceAssign extends RawOp implements
 
   private Output outputRef;
 
-  private StridedSliceAssign(Operation operation) {
-    super(operation);
+  public StridedSliceAssign(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     outputRef = operation.output(outputIdx++);
   }
@@ -242,6 +248,9 @@ public Options shrinkAxisMask(Long shrinkAxisMask) {
     }
   }
 
+  @OpInputsMetadata(
+      outputsClass = StridedSliceAssign.class
+  )
   public static class Inputs extends RawOpInputs> {
     /**
      * The ref input
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceGrad.java
index 07947acab0a..6588d96158d 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceGrad.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceGrad.java
@@ -27,6 +27,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.family.TNumber;
@@ -44,6 +46,10 @@
  *
  * @param  data type for {@code output} output
  */
+@OpMetadata(
+    opType = StridedSliceGrad.OP_NAME,
+    inputsClass = StridedSliceGrad.Inputs.class
+)
 @Operator
 public final class StridedSliceGrad extends RawOp implements Operand {
   /**
@@ -53,8 +59,8 @@ public final class StridedSliceGrad extends RawOp implements Op
 
   private Output output;
 
-  private StridedSliceGrad(Operation operation) {
-    super(operation);
+  public StridedSliceGrad(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     output = operation.output(outputIdx++);
   }
@@ -244,6 +250,9 @@ public Options shrinkAxisMask(Long shrinkAxisMask) {
     }
   }
 
+  @OpInputsMetadata(
+      outputsClass = StridedSliceGrad.class
+  )
   public static class Inputs extends RawOpInputs> {
     /**
      * The shape input
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Sum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Sum.java
index 5df5a25d98e..579be3dc3ff 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Sum.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Sum.java
@@ -27,6 +27,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.family.TNumber;
@@ -41,6 +43,10 @@
  *
  * @param  data type for {@code output} output
  */
+@OpMetadata(
+    opType = Sum.OP_NAME,
+    inputsClass = Sum.Inputs.class
+)
 @Operator
 public final class Sum extends RawOp implements Operand {
   /**
@@ -50,8 +56,8 @@ public final class Sum extends RawOp implements Operand {
 
   private Output output;
 
-  private Sum(Operation operation) {
-    super(operation);
+  public Sum(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     output = operation.output(outputIdx++);
   }
@@ -130,6 +136,9 @@ public Options keepDims(Boolean keepDims) {
     }
   }
 
+  @OpInputsMetadata(
+      outputsClass = Sum.class
+  )
   public static class Inputs extends RawOpInputs> {
     /**
      * The tensor to reduce.
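Sum reduces its input along the given axes, and the keepDims option shown above controls whether the reduced dimensions are kept with length 1 or dropped. A short sketch, assuming tf is an Ops instance bound to a Graph:

    // Column sums of a 2x2 constant; keepDims(true) keeps the result shaped [1, 2].
    Constant<TFloat32> m = tf.constant(new float[][] {{1f, 2f}, {3f, 4f}});
    Sum<TFloat32> colSums = tf.sum(m, tf.constant(new int[] {0}), Sum.keepDims(true));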
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SwitchCond.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SwitchCond.java
index 00f21eb34bc..26c17fc6187 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SwitchCond.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SwitchCond.java
@@ -27,6 +27,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.TBool;
@@ -40,6 +42,10 @@
  *
  * @param  data type for {@code output_false} output
  */
+@OpMetadata(
+    opType = SwitchCond.OP_NAME,
+    inputsClass = SwitchCond.Inputs.class
+)
 @Operator
 public final class SwitchCond extends RawOp {
   /**
@@ -51,8 +57,8 @@ public final class SwitchCond extends RawOp {
 
   private Output outputTrue;
 
-  private SwitchCond(Operation operation) {
-    super(operation);
+  public SwitchCond(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     outputFalse = operation.output(outputIdx++);
     outputTrue = operation.output(outputIdx++);
@@ -96,6 +102,9 @@ public Output outputTrue() {
     return outputTrue;
   }
 
+  @OpInputsMetadata(
+      outputsClass = SwitchCond.class
+  )
   public static class Inputs extends RawOpInputs> {
     /**
      * The tensor to be forwarded to the appropriate output.
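SwitchCond forwards its data input to exactly one of its two outputs depending on a boolean predicate; it is the primitive that graph-level conditionals are built from. A small sketch, assuming tf is an Ops instance bound to a Graph:

    Constant<TBool> pred = tf.constant(true);
    Constant<TFloat32> data = tf.constant(new float[] {1f, 2f});
    SwitchCond<TFloat32> sw = tf.switchCond(data, pred);
    Output<TFloat32> takenIfTrue = sw.outputTrue();    // receives data when pred is true
    Output<TFloat32> takenIfFalse = sw.outputFalse();  // receives data when pred is false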
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TemporaryVariable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TemporaryVariable.java
index a16282ea459..6e6c0d6b835 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TemporaryVariable.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TemporaryVariable.java
@@ -29,6 +29,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.family.TType;
@@ -49,6 +51,10 @@
  *
  * @param  data type for {@code ref} output
  */
+@OpMetadata(
+    opType = TemporaryVariable.OP_NAME,
+    inputsClass = TemporaryVariable.Inputs.class
+)
 @Operator
 public final class TemporaryVariable extends RawOp implements Operand {
   /**
@@ -58,8 +64,8 @@ public final class TemporaryVariable extends RawOp implements O
 
   private Output ref;
 
-  private TemporaryVariable(Operation operation) {
-    super(operation);
+  public TemporaryVariable(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     ref = operation.output(outputIdx++);
   }
@@ -139,6 +145,9 @@ public Options varName(String varName) {
     }
   }
 
+  @OpInputsMetadata(
+      outputsClass = TemporaryVariable.class
+  )
   public static class Inputs extends RawOpInputs> {
     /**
      * The shape of the variable tensor.
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArray.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArray.java
index 34ca0e49eab..63ed41619b2 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArray.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArray.java
@@ -29,6 +29,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.TFloat32;
@@ -39,6 +41,10 @@
  * An array of Tensors of given size.
  * Write data via Write and read via Read or Pack.
  */
+@OpMetadata(
+    opType = TensorArray.OP_NAME,
+    inputsClass = TensorArray.Inputs.class
+)
 @Operator
 public final class TensorArray extends RawOp {
   /**
@@ -51,8 +57,8 @@ public final class TensorArray extends RawOp {
   private Output flow;
 
   @SuppressWarnings("unchecked")
-  private TensorArray(Operation operation) {
-    super(operation);
+  public TensorArray(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     handle = operation.output(outputIdx++);
     flow = operation.output(outputIdx++);
@@ -263,6 +269,9 @@ public Options tensorArrayName(String tensorArrayName) {
     }
   }
 
+  @OpInputsMetadata(
+      outputsClass = TensorArray.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      * The size of the array.
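As the javadoc above says, a TensorArray is a per-step array of tensors: the handle output identifies the array, and the scalar flow output threads writes and reads into a dependency chain. A sketch of one write followed by a read, assuming tf is an Ops instance bound to a Graph; the exact generated signatures may differ slightly between versions:

    // Create a 3-element float TensorArray, write index 0, then read it back.
    TensorArray ta = tf.tensorArray(tf.constant(3), TFloat32.class);
    TensorArrayWrite write = tf.tensorArrayWrite(
        ta.handle(), tf.constant(0), tf.constant(new float[] {1f, 2f}), ta.flow());
    TensorArrayRead<TFloat32> read = tf.tensorArrayRead(
        ta.handle(), tf.constant(0), write.flowOut(), TFloat32.class);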
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayClose.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayClose.java
index df1db0abdba..03b585921e4 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayClose.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayClose.java
@@ -26,6 +26,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.types.family.TType;
 
@@ -34,6 +36,10 @@
  * This enables the user to close and release the resource in the middle
  * of a step/run.
  */
+@OpMetadata(
+    opType = TensorArrayClose.OP_NAME,
+    inputsClass = TensorArrayClose.Inputs.class
+)
 @Operator
 public final class TensorArrayClose extends RawOp {
   /**
@@ -41,8 +47,8 @@ public final class TensorArrayClose extends RawOp {
    */
   public static final String OP_NAME = "TensorArrayCloseV3";
 
-  private TensorArrayClose(Operation operation) {
-    super(operation);
+  public TensorArrayClose(Operation operation) {
+    super(operation, OP_NAME);
   }
 
   /**
@@ -61,6 +67,9 @@ public static TensorArrayClose create(Scope scope, Operand hand
     return new TensorArrayClose(opBuilder.build());
   }
 
+  @OpInputsMetadata(
+      outputsClass = TensorArrayClose.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      * The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayConcat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayConcat.java
index decac97c386..70212b57cf1 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayConcat.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayConcat.java
@@ -29,6 +29,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.TFloat32;
@@ -47,6 +49,10 @@
  *
  * @param  data type for {@code value} output
  */
+@OpMetadata(
+    opType = TensorArrayConcat.OP_NAME,
+    inputsClass = TensorArrayConcat.Inputs.class
+)
 @Operator
 public final class TensorArrayConcat extends RawOp {
   /**
@@ -58,8 +64,8 @@ public final class TensorArrayConcat extends RawOp {
 
   private Output lengths;
 
-  private TensorArrayConcat(Operation operation) {
-    super(operation);
+  public TensorArrayConcat(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     value = operation.output(outputIdx++);
     lengths = operation.output(outputIdx++);
@@ -154,6 +160,9 @@ public Options elementShapeExcept0(Shape elementShapeExcept0) {
     }
   }
 
+  @OpInputsMetadata(
+      outputsClass = TensorArrayConcat.class
+  )
   public static class Inputs extends RawOpInputs> {
     /**
      * The handle to a TensorArray.
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGather.java
index e21dd4eea21..2cd86b61c22 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGather.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGather.java
@@ -29,6 +29,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.TFloat32;
@@ -41,6 +43,10 @@
  *
  * @param  data type for {@code value} output
  */
+@OpMetadata(
+    opType = TensorArrayGather.OP_NAME,
+    inputsClass = TensorArrayGather.Inputs.class
+)
 @Operator
 public final class TensorArrayGather extends RawOp implements Operand {
   /**
@@ -50,8 +56,8 @@ public final class TensorArrayGather extends RawOp implements O
 
   private Output value;
 
-  private TensorArrayGather(Operation operation) {
-    super(operation);
+  public TensorArrayGather(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     value = operation.output(outputIdx++);
   }
@@ -139,6 +145,9 @@ public Options elementShape(Shape elementShape) {
     }
   }
 
+  @OpInputsMetadata(
+      outputsClass = TensorArrayGather.class
+  )
   public static class Inputs extends RawOpInputs> {
     /**
      * The handle to a TensorArray.
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGrad.java
index 2b42e5dfe78..adccb742ebf 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGrad.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGrad.java
@@ -27,6 +27,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.types.TFloat32;
 import org.tensorflow.types.family.TType;
@@ -62,6 +64,10 @@
  * name when performing the creation / lookup, so that each separate gradient
  * calculation gets its own TensorArray accumulator.
  */
+@OpMetadata(
+    opType = TensorArrayGrad.OP_NAME,
+    inputsClass = TensorArrayGrad.Inputs.class
+)
 @Operator
 public final class TensorArrayGrad extends RawOp {
   /**
@@ -74,8 +80,8 @@ public final class TensorArrayGrad extends RawOp {
   private Output flowOut;
 
   @SuppressWarnings("unchecked")
-  private TensorArrayGrad(Operation operation) {
-    super(operation);
+  public TensorArrayGrad(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     gradHandle = operation.output(outputIdx++);
     flowOut = operation.output(outputIdx++);
@@ -121,6 +127,9 @@ public Output flowOut() {
     return flowOut;
   }
 
+  @OpInputsMetadata(
+      outputsClass = TensorArrayGrad.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      * The handle to the forward TensorArray.
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGradWithShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGradWithShape.java
index 163dec99fc4..68777024253 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGradWithShape.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGradWithShape.java
@@ -27,6 +27,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.types.TFloat32;
 import org.tensorflow.types.TInt32;
@@ -39,6 +41,10 @@
  * computed. This enables multiple gradients for the same TensorArray to be
  * calculated using the same accumulator.
  */
+@OpMetadata(
+    opType = TensorArrayGradWithShape.OP_NAME,
+    inputsClass = TensorArrayGradWithShape.Inputs.class
+)
 @Operator
 public final class TensorArrayGradWithShape extends RawOp {
   /**
@@ -51,8 +57,8 @@ public final class TensorArrayGradWithShape extends RawOp {
   private Output flowOut;
 
   @SuppressWarnings("unchecked")
-  private TensorArrayGradWithShape(Operation operation) {
-    super(operation);
+  public TensorArrayGradWithShape(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     gradHandle = operation.output(outputIdx++);
     flowOut = operation.output(outputIdx++);
@@ -102,6 +108,9 @@ public Output flowOut() {
     return flowOut;
   }
 
+  @OpInputsMetadata(
+      outputsClass = TensorArrayGradWithShape.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      * The handle to the forward TensorArray.
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayPack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayPack.java
index 41fe676090e..c277435d413 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayPack.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayPack.java
@@ -29,6 +29,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.TFloat32;
@@ -40,6 +42,10 @@
  *
  * @param  data type for {@code value} output
  */
+@OpMetadata(
+    opType = TensorArrayPack.OP_NAME,
+    inputsClass = TensorArrayPack.Inputs.class
+)
 @Operator
 public final class TensorArrayPack extends RawOp implements Operand {
   /**
@@ -49,8 +55,8 @@ public final class TensorArrayPack extends RawOp implements Ope
 
   private Output value;
 
-  private TensorArrayPack(Operation operation) {
-    super(operation);
+  public TensorArrayPack(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     value = operation.output(outputIdx++);
   }
@@ -130,6 +136,9 @@ public Options elementShape(Shape elementShape) {
     }
   }
 
+  @OpInputsMetadata(
+      outputsClass = TensorArrayPack.class
+  )
   public static class Inputs extends RawOpInputs> {
     /**
      * The handle input
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayRead.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayRead.java
index e34c494fa0d..dc0f921e8bc 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayRead.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayRead.java
@@ -28,6 +28,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.TFloat32;
@@ -39,6 +41,10 @@
  *
  * @param  data type for {@code value} output
  */
+@OpMetadata(
+    opType = TensorArrayRead.OP_NAME,
+    inputsClass = TensorArrayRead.Inputs.class
+)
 @Operator
 public final class TensorArrayRead extends RawOp implements Operand {
   /**
@@ -48,8 +54,8 @@ public final class TensorArrayRead extends RawOp implements Ope
 
   private Output value;
 
-  private TensorArrayRead(Operation operation) {
-    super(operation);
+  public TensorArrayRead(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     value = operation.output(outputIdx++);
   }
@@ -93,6 +99,9 @@ public Output asOutput() {
     return value;
   }
 
+  @OpInputsMetadata(
+      outputsClass = TensorArrayRead.class
+  )
   public static class Inputs extends RawOpInputs> {
     /**
      * The handle to a TensorArray.
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayScatter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayScatter.java
index af15fe9187e..3bd2ec548dd 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayScatter.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayScatter.java
@@ -27,6 +27,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.TFloat32;
@@ -37,6 +39,10 @@
  * Scatter the data from the input value into specific TensorArray elements.
  * {@code indices} must be a vector, its length must match the first dim of {@code value}.
  */
+@OpMetadata(
+    opType = TensorArrayScatter.OP_NAME,
+    inputsClass = TensorArrayScatter.Inputs.class
+)
 @Operator
 public final class TensorArrayScatter extends RawOp implements Operand {
   /**
@@ -46,8 +52,8 @@ public final class TensorArrayScatter extends RawOp implements Operand
 
   private Output flowOut;
 
-  private TensorArrayScatter(Operation operation) {
-    super(operation);
+  public TensorArrayScatter(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     flowOut = operation.output(outputIdx++);
   }
@@ -89,6 +95,9 @@ public Output asOutput() {
     return flowOut;
   }
 
+  @OpInputsMetadata(
+      outputsClass = TensorArrayScatter.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      * The handle to a TensorArray.
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArraySize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArraySize.java
index 6bfc5e291b1..82fb8c2be4c 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArraySize.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArraySize.java
@@ -27,6 +27,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.types.TFloat32;
 import org.tensorflow.types.TInt32;
@@ -35,6 +37,10 @@
 /**
  * Get the current size of the TensorArray.
  */
+@OpMetadata(
+    opType = TensorArraySize.OP_NAME,
+    inputsClass = TensorArraySize.Inputs.class
+)
 @Operator
 public final class TensorArraySize extends RawOp implements Operand {
   /**
@@ -44,8 +50,8 @@ public final class TensorArraySize extends RawOp implements Operand {
 
   private Output output;
 
-  private TensorArraySize(Operation operation) {
-    super(operation);
+  public TensorArraySize(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     output = operation.output(outputIdx++);
   }
@@ -83,6 +89,9 @@ public Output asOutput() {
     return output;
   }
 
+  @OpInputsMetadata(
+      outputsClass = TensorArraySize.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      * The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArraySplit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArraySplit.java
index 3ce1424ae9c..bc0818bbc75 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArraySplit.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArraySplit.java
@@ -27,6 +27,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.op.annotation.Operator;
 import org.tensorflow.proto.framework.DataType;
 import org.tensorflow.types.TFloat32;
@@ -45,6 +47,10 @@
 * <p>and having size
 * <pre>
{@code nt x d0 x d1 x ...} */ +@OpMetadata( + opType = TensorArraySplit.OP_NAME, + inputsClass = TensorArraySplit.Inputs.class +) @Operator public final class TensorArraySplit extends RawOp implements Operand { /** @@ -54,8 +60,8 @@ public final class TensorArraySplit extends RawOp implements Operand { private Output flowOut; - private TensorArraySplit(Operation operation) { - super(operation); + public TensorArraySplit(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; flowOut = operation.output(outputIdx++); } @@ -98,6 +104,9 @@ public Output asOutput() { return flowOut; } + @OpInputsMetadata( + outputsClass = TensorArraySplit.class + ) public static class Inputs extends RawOpInputs { /** * The handle to a TensorArray. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayUnpack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayUnpack.java index ce65c26492a..fb7040c25b2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayUnpack.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayUnpack.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -36,6 +38,10 @@ /** * The TensorArrayUnpack operation */ +@OpMetadata( + opType = TensorArrayUnpack.OP_NAME, + inputsClass = TensorArrayUnpack.Inputs.class +) @Operator public final class TensorArrayUnpack extends RawOp implements Operand { /** @@ -45,8 +51,8 @@ public final class TensorArrayUnpack extends RawOp implements Operand private Output flowOut; - private TensorArrayUnpack(Operation operation) { - super(operation); + public TensorArrayUnpack(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; flowOut = operation.output(outputIdx++); } @@ -86,6 +92,9 @@ public Output asOutput() { return flowOut; } + @OpInputsMetadata( + outputsClass = TensorArrayUnpack.class + ) public static class Inputs extends RawOpInputs { /** * The handle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayWrite.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayWrite.java index e44e81a294d..8e18aa00dea 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayWrite.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayWrite.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -36,6 +38,10 @@ /** * Push an element onto the tensor_array. 
*/ +@OpMetadata( + opType = TensorArrayWrite.OP_NAME, + inputsClass = TensorArrayWrite.Inputs.class +) @Operator public final class TensorArrayWrite extends RawOp implements Operand { /** @@ -45,8 +51,8 @@ public final class TensorArrayWrite extends RawOp implements Operand { private Output flowOut; - private TensorArrayWrite(Operation operation) { - super(operation); + public TensorArrayWrite(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; flowOut = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return flowOut; } + @OpInputsMetadata( + outputsClass = TensorArrayWrite.class + ) public static class Inputs extends RawOpInputs { /** * The handle to a TensorArray. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListConcat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListConcat.java index 7b77b4870be..13766f6df52 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListConcat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListConcat.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -49,6 +51,10 @@ * * @param data type for {@code tensor} output */ +@OpMetadata( + opType = TensorListConcat.OP_NAME, + inputsClass = TensorListConcat.Inputs.class +) @Operator public final class TensorListConcat extends RawOp { /** @@ -60,8 +66,8 @@ public final class TensorListConcat extends RawOp { private Output lengths; - private TensorListConcat(Operation operation) { - super(operation); + public TensorListConcat(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; tensor = operation.output(outputIdx++); lengths = operation.output(outputIdx++); @@ -110,6 +116,9 @@ public Output lengths() { return lengths; } + @OpInputsMetadata( + outputsClass = TensorListConcat.class + ) public static class Inputs extends RawOpInputs> { /** * The inputHandle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListConcatLists.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListConcatLists.java index 1d1fc91e00d..617bec84468 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListConcatLists.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListConcatLists.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -35,6 +37,10 @@ /** * The TensorListConcatLists operation */ +@OpMetadata( + opType = TensorListConcatLists.OP_NAME, + inputsClass = TensorListConcatLists.Inputs.class +) @Operator public final class TensorListConcatLists extends RawOp implements Operand { /** @@ -45,8 +51,8 @@ public final class TensorListConcatLists extends RawOp implements Operand private Output output; 
@SuppressWarnings("unchecked") - private TensorListConcatLists(Operation operation) { - super(operation); + public TensorListConcatLists(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return (Output) output; } + @OpInputsMetadata( + outputsClass = TensorListConcatLists.class + ) public static class Inputs extends RawOpInputs { /** * The inputA input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListElementShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListElementShape.java index dbc4eb91ec5..472d8072db3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListElementShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListElementShape.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -40,6 +42,10 @@ * * @param data type for {@code element_shape} output */ +@OpMetadata( + opType = TensorListElementShape.OP_NAME, + inputsClass = TensorListElementShape.Inputs.class +) @Operator public final class TensorListElementShape extends RawOp implements Operand { /** @@ -49,8 +55,8 @@ public final class TensorListElementShape extends RawOp imple private Output elementShape; - private TensorListElementShape(Operation operation) { - super(operation); + public TensorListElementShape(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; elementShape = operation.output(outputIdx++); } @@ -89,6 +95,9 @@ public Output asOutput() { return elementShape; } + @OpInputsMetadata( + outputsClass = TensorListElementShape.class + ) public static class Inputs extends RawOpInputs> { /** * The inputHandle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListFromTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListFromTensor.java index f193b8c7f13..cb090a2f34a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListFromTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListFromTensor.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ *

tensor: The input tensor. * output_handle: The list. */ +@OpMetadata( + opType = TensorListFromTensor.OP_NAME, + inputsClass = TensorListFromTensor.Inputs.class +) @Operator public final class TensorListFromTensor extends RawOp implements Operand { /** @@ -48,8 +54,8 @@ public final class TensorListFromTensor extends RawOp implements Operand private Output outputHandle; @SuppressWarnings("unchecked") - private TensorListFromTensor(Operation operation) { - super(operation); + public TensorListFromTensor(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputHandle = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return (Output) outputHandle; } + @OpInputsMetadata( + outputsClass = TensorListFromTensor.class + ) public static class Inputs extends RawOpInputs { /** * The tensor input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGather.java index d4e08702d4d..cd0732e2bf4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGather.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -43,6 +45,10 @@ * * @param data type for {@code values} output */ +@OpMetadata( + opType = TensorListGather.OP_NAME, + inputsClass = TensorListGather.Inputs.class +) @Operator public final class TensorListGather extends RawOp implements Operand { /** @@ -52,8 +58,8 @@ public final class TensorListGather extends RawOp implements Op private Output values; - private TensorListGather(Operation operation) { - super(operation); + public TensorListGather(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; values = operation.output(outputIdx++); } @@ -97,6 +103,9 @@ public Output asOutput() { return values; } + @OpInputsMetadata( + outputsClass = TensorListGather.class + ) public static class Inputs extends RawOpInputs> { /** * The inputHandle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGetItem.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGetItem.java index c1841854a1e..f39b65da026 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGetItem.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGetItem.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -38,6 +40,10 @@ * * @param data type for {@code item} output */ +@OpMetadata( + opType = TensorListGetItem.OP_NAME, + inputsClass = TensorListGetItem.Inputs.class +) @Operator public final class TensorListGetItem extends RawOp implements Operand { /** @@ -47,8 +53,8 @@ public final class TensorListGetItem 
extends RawOp implements O private Output item; - private TensorListGetItem(Operation operation) { - super(operation); + public TensorListGetItem(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; item = operation.output(outputIdx++); } @@ -92,6 +98,9 @@ public Output asOutput() { return item; } + @OpInputsMetadata( + outputsClass = TensorListGetItem.class + ) public static class Inputs extends RawOpInputs> { /** * The inputHandle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListLength.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListLength.java index d3b70223fe1..390b4a4b5ff 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListLength.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListLength.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * input_handle: the input list * length: the number of tensors in the list */ +@OpMetadata( + opType = TensorListLength.OP_NAME, + inputsClass = TensorListLength.Inputs.class +) @Operator public final class TensorListLength extends RawOp implements Operand { /** @@ -45,8 +51,8 @@ public final class TensorListLength extends RawOp implements Operand { private Output length; - private TensorListLength(Operation operation) { - super(operation); + public TensorListLength(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; length = operation.output(outputIdx++); } @@ -81,6 +87,9 @@ public Output asOutput() { return length; } + @OpInputsMetadata( + outputsClass = TensorListLength.class + ) public static class Inputs extends RawOpInputs { /** * The inputHandle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPopBack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPopBack.java index 7656c8f71b8..8d89bb811e1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPopBack.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPopBack.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -43,6 +45,10 @@ * * @param data type for {@code tensor} output */ +@OpMetadata( + opType = TensorListPopBack.OP_NAME, + inputsClass = TensorListPopBack.Inputs.class +) @Operator public final class TensorListPopBack extends RawOp { /** @@ -55,8 +61,8 @@ public final class TensorListPopBack extends RawOp { private Output tensor; @SuppressWarnings("unchecked") - private TensorListPopBack(Operation operation) { - super(operation); + public TensorListPopBack(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputHandle = operation.output(outputIdx++); tensor = operation.output(outputIdx++); @@ -102,6 +108,9 @@ public Output tensor() { return 
tensor; } + @OpInputsMetadata( + outputsClass = TensorListPopBack.class + ) public static class Inputs extends RawOpInputs> { /** * The inputHandle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPushBack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPushBack.java index d5d4b471c56..4349d023f08 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPushBack.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPushBack.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * element_dtype: the type of elements in the list. * element_shape: a shape compatible with that of elements in the list. */ +@OpMetadata( + opType = TensorListPushBack.OP_NAME, + inputsClass = TensorListPushBack.Inputs.class +) @Operator public final class TensorListPushBack extends RawOp implements Operand { /** @@ -49,8 +55,8 @@ public final class TensorListPushBack extends RawOp implements Operand { private Output outputHandle; @SuppressWarnings("unchecked") - private TensorListPushBack(Operation operation) { - super(operation); + public TensorListPushBack(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputHandle = operation.output(outputIdx++); } @@ -89,6 +95,9 @@ public Output asOutput() { return (Output) outputHandle; } + @OpInputsMetadata( + outputsClass = TensorListPushBack.class + ) public static class Inputs extends RawOpInputs { /** * The inputHandle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPushBackBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPushBackBatch.java index 694cc698ca3..1e011f256ba 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPushBackBatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPushBackBatch.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -34,6 +36,10 @@ /** * The TensorListPushBackBatch operation */ +@OpMetadata( + opType = TensorListPushBackBatch.OP_NAME, + inputsClass = TensorListPushBackBatch.Inputs.class +) @Operator public final class TensorListPushBackBatch extends RawOp implements Operand { /** @@ -44,8 +50,8 @@ public final class TensorListPushBackBatch extends RawOp implements Operand outputHandles; @SuppressWarnings("unchecked") - private TensorListPushBackBatch(Operation operation) { - super(operation); + public TensorListPushBackBatch(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputHandles = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return (Output) outputHandles; } + @OpInputsMetadata( + outputsClass = TensorListPushBackBatch.class + ) public static 
class Inputs extends RawOpInputs { /** * The inputHandles input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListReserve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListReserve.java index b539e5114a7..ff029d62d9d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListReserve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListReserve.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -41,6 +43,10 @@ * handle: the output list * element_dtype: the desired type of elements in the list. */ +@OpMetadata( + opType = TensorListReserve.OP_NAME, + inputsClass = TensorListReserve.Inputs.class +) @Operator public final class TensorListReserve extends RawOp implements Operand { /** @@ -51,8 +57,8 @@ public final class TensorListReserve extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private TensorListReserve(Operation operation) { - super(operation); + public TensorListReserve(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -94,6 +100,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = TensorListReserve.class + ) public static class Inputs extends RawOpInputs { /** * The elementShape input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListResize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListResize.java index 445f0177118..489e619b61a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListResize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListResize.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * input_handle: the input list * size: size of the output list */ +@OpMetadata( + opType = TensorListResize.OP_NAME, + inputsClass = TensorListResize.Inputs.class +) @Operator public final class TensorListResize extends RawOp implements Operand { /** @@ -46,8 +52,8 @@ public final class TensorListResize extends RawOp implements Operand { private Output outputHandle; @SuppressWarnings("unchecked") - private TensorListResize(Operation operation) { - super(operation); + public TensorListResize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputHandle = operation.output(outputIdx++); } @@ -86,6 +92,9 @@ public Output asOutput() { return (Output) outputHandle; } + @OpInputsMetadata( + outputsClass = TensorListResize.class + ) public static class Inputs extends RawOpInputs { /** * The inputHandle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListScatter.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListScatter.java index c321d7f227a..4d4098ad8b8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListScatter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListScatter.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -46,6 +48,10 @@ * the largest index in indices. * output_handle: The TensorList. */ +@OpMetadata( + opType = TensorListScatter.OP_NAME, + inputsClass = TensorListScatter.Inputs.class +) @Operator public final class TensorListScatter extends RawOp implements Operand { /** @@ -56,8 +62,8 @@ public final class TensorListScatter extends RawOp implements Operand { private Output outputHandle; @SuppressWarnings("unchecked") - private TensorListScatter(Operation operation) { - super(operation); + public TensorListScatter(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputHandle = operation.output(outputIdx++); } @@ -101,6 +107,9 @@ public Output asOutput() { return (Output) outputHandle; } + @OpInputsMetadata( + outputsClass = TensorListScatter.class + ) public static class Inputs extends RawOpInputs { /** * The tensor input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListScatterIntoExistingList.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListScatterIntoExistingList.java index 15c38200e5d..5dc8e59b2e4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListScatterIntoExistingList.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListScatterIntoExistingList.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -41,6 +43,10 @@ * indices: The indices used to index into the list. * output_handle: The TensorList. 
*/ +@OpMetadata( + opType = TensorListScatterIntoExistingList.OP_NAME, + inputsClass = TensorListScatterIntoExistingList.Inputs.class +) @Operator public final class TensorListScatterIntoExistingList extends RawOp implements Operand { /** @@ -51,8 +57,8 @@ public final class TensorListScatterIntoExistingList extends RawOp implements Op private Output outputHandle; @SuppressWarnings("unchecked") - private TensorListScatterIntoExistingList(Operation operation) { - super(operation); + public TensorListScatterIntoExistingList(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputHandle = operation.output(outputIdx++); } @@ -94,6 +100,9 @@ public Output asOutput() { return (Output) outputHandle; } + @OpInputsMetadata( + outputsClass = TensorListScatterIntoExistingList.class + ) public static class Inputs extends RawOpInputs { /** * The inputHandle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListSetItem.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListSetItem.java index d8010760cdb..8615c3073e5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListSetItem.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListSetItem.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -35,6 +37,10 @@ /** * The TensorListSetItem operation */ +@OpMetadata( + opType = TensorListSetItem.OP_NAME, + inputsClass = TensorListSetItem.Inputs.class +) @Operator public final class TensorListSetItem extends RawOp implements Operand { /** @@ -45,8 +51,8 @@ public final class TensorListSetItem extends RawOp implements Operand { private Output outputHandle; @SuppressWarnings("unchecked") - private TensorListSetItem(Operation operation) { - super(operation); + public TensorListSetItem(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputHandle = operation.output(outputIdx++); } @@ -87,6 +93,9 @@ public Output asOutput() { return (Output) outputHandle; } + @OpInputsMetadata( + outputsClass = TensorListSetItem.class + ) public static class Inputs extends RawOpInputs { /** * The inputHandle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListSplit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListSplit.java index 27121f34aff..aa2903b13f3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListSplit.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListSplit.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -42,6 +44,10 @@ * lengths: Vector of sizes of the 0th dimension of tensors in the list. * output_handle: The list. 
*/ +@OpMetadata( + opType = TensorListSplit.OP_NAME, + inputsClass = TensorListSplit.Inputs.class +) @Operator public final class TensorListSplit extends RawOp implements Operand { /** @@ -52,8 +58,8 @@ public final class TensorListSplit extends RawOp implements Operand { private Output outputHandle; @SuppressWarnings("unchecked") - private TensorListSplit(Operation operation) { - super(operation); + public TensorListSplit(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputHandle = operation.output(outputIdx++); } @@ -94,6 +100,9 @@ public Output asOutput() { return (Output) outputHandle; } + @OpInputsMetadata( + outputsClass = TensorListSplit.class + ) public static class Inputs extends RawOpInputs { /** * The tensor input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListStack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListStack.java index 1f0a2364c20..eb2a1e197e9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListStack.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListStack.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -42,6 +44,10 @@ * * @param data type for {@code tensor} output */ +@OpMetadata( + opType = TensorListStack.OP_NAME, + inputsClass = TensorListStack.Inputs.class +) @Operator public final class TensorListStack extends RawOp implements Operand { /** @@ -51,8 +57,8 @@ public final class TensorListStack extends RawOp implements Ope private Output tensor; - private TensorListStack(Operation operation) { - super(operation); + public TensorListStack(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; tensor = operation.output(outputIdx++); } @@ -133,6 +139,9 @@ public Options numElements(Long numElements) { } } + @OpInputsMetadata( + outputsClass = TensorListStack.class + ) public static class Inputs extends RawOpInputs> { /** * The inputHandle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapErase.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapErase.java index 93d15ad6fcf..d4271c4918e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapErase.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapErase.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * output_handle: the map with value from given key removed * key: the key of the value to be erased */ +@OpMetadata( + opType = TensorMapErase.OP_NAME, + inputsClass = TensorMapErase.Inputs.class +) @Operator public final class TensorMapErase extends RawOp implements Operand { /** @@ -48,8 +54,8 @@ public final class TensorMapErase extends RawOp implements Operand { private Output 
outputHandle; @SuppressWarnings("unchecked") - private TensorMapErase(Operation operation) { - super(operation); + public TensorMapErase(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputHandle = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return (Output) outputHandle; } + @OpInputsMetadata( + outputsClass = TensorMapErase.class + ) public static class Inputs extends RawOpInputs { /** * The inputHandle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapHasKey.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapHasKey.java index 1d4659c3e24..d8807e475e4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapHasKey.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapHasKey.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -38,6 +40,10 @@ * key: the key to check * has_key: whether the key is already in the map or not */ +@OpMetadata( + opType = TensorMapHasKey.OP_NAME, + inputsClass = TensorMapHasKey.Inputs.class +) @Operator public final class TensorMapHasKey extends RawOp implements Operand { /** @@ -47,8 +53,8 @@ public final class TensorMapHasKey extends RawOp implements Operand { private Output hasKey; - private TensorMapHasKey(Operation operation) { - super(operation); + public TensorMapHasKey(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; hasKey = operation.output(outputIdx++); } @@ -86,6 +92,9 @@ public Output asOutput() { return hasKey; } + @OpInputsMetadata( + outputsClass = TensorMapHasKey.class + ) public static class Inputs extends RawOpInputs { /** * The inputHandle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapInsert.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapInsert.java index 7c4c494fe22..cc08f9340c8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapInsert.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapInsert.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * key: the key to be inserted * value: the value to be inserted */ +@OpMetadata( + opType = TensorMapInsert.OP_NAME, + inputsClass = TensorMapInsert.Inputs.class +) @Operator public final class TensorMapInsert extends RawOp implements Operand { /** @@ -48,8 +54,8 @@ public final class TensorMapInsert extends RawOp implements Operand { private Output outputHandle; @SuppressWarnings("unchecked") - private TensorMapInsert(Operation operation) { - super(operation); + public TensorMapInsert(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputHandle = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output 
asOutput() { return (Output) outputHandle; } + @OpInputsMetadata( + outputsClass = TensorMapInsert.class + ) public static class Inputs extends RawOpInputs { /** * The inputHandle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapLookup.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapLookup.java index 203fd5ea752..49765bdb9d7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapLookup.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapLookup.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -40,6 +42,10 @@ * * @param data type for {@code value} output */ +@OpMetadata( + opType = TensorMapLookup.OP_NAME, + inputsClass = TensorMapLookup.Inputs.class +) @Operator public final class TensorMapLookup extends RawOp implements Operand { /** @@ -49,8 +55,8 @@ public final class TensorMapLookup extends RawOp implements Ope private Output value; - private TensorMapLookup(Operation operation) { - super(operation); + public TensorMapLookup(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; value = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return value; } + @OpInputsMetadata( + outputsClass = TensorMapLookup.class + ) public static class Inputs extends RawOpInputs> { /** * The inputHandle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapSize.java index 8efc6ef987b..34af3944003 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapSize.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * input_handle: the input map * size: the number of tensors in the map */ +@OpMetadata( + opType = TensorMapSize.OP_NAME, + inputsClass = TensorMapSize.Inputs.class +) @Operator public final class TensorMapSize extends RawOp implements Operand { /** @@ -45,8 +51,8 @@ public final class TensorMapSize extends RawOp implements Operand { private Output output; - private TensorMapSize(Operation operation) { - super(operation); + public TensorMapSize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -81,6 +87,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = TensorMapSize.class + ) public static class Inputs extends RawOpInputs { /** * The inputHandle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapStackKeys.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapStackKeys.java index 
3badf5126d8..0e7c23f2ce5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapStackKeys.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapStackKeys.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * * @param data type for {@code keys} output */ +@OpMetadata( + opType = TensorMapStackKeys.OP_NAME, + inputsClass = TensorMapStackKeys.Inputs.class +) @Operator public final class TensorMapStackKeys extends RawOp implements Operand { /** @@ -48,8 +54,8 @@ public final class TensorMapStackKeys extends RawOp implements private Output keys; - private TensorMapStackKeys(Operation operation) { - super(operation); + public TensorMapStackKeys(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; keys = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return keys; } + @OpInputsMetadata( + outputsClass = TensorMapStackKeys.class + ) public static class Inputs extends RawOpInputs> { /** * The inputHandle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdAdd.java index 2e676d0c7f2..29db1a5a709 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdAdd.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -93,6 +95,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = TensorScatterNdAdd.OP_NAME, + inputsClass = TensorScatterNdAdd.Inputs.class +) @Operator public final class TensorScatterNdAdd extends RawOp implements Operand { /** @@ -102,8 +108,8 @@ public final class TensorScatterNdAdd extends RawOp implements private Output output; - private TensorScatterNdAdd(Operation operation) { - super(operation); + public TensorScatterNdAdd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -144,6 +150,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = TensorScatterNdAdd.class + ) public static class Inputs extends RawOpInputs> { /** * Tensor to copy/update. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMax.java index 8693ee3efd1..07de1b38c81 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMax.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = TensorScatterNdMax.OP_NAME, + inputsClass = TensorScatterNdMax.Inputs.class +) @Operator public final class TensorScatterNdMax extends RawOp implements Operand { /** @@ -46,8 +52,8 @@ public final class TensorScatterNdMax extends RawOp implements private Output output; - private TensorScatterNdMax(Operation operation) { - super(operation); + public TensorScatterNdMax(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = TensorScatterNdMax.class + ) public static class Inputs extends RawOpInputs> { /** * Tensor to update. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMin.java index 6826e3c035d..35287015c16 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMin.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = TensorScatterNdMin.OP_NAME, + inputsClass = TensorScatterNdMin.Inputs.class +) @Operator public final class TensorScatterNdMin extends RawOp implements Operand { /** @@ -46,8 +52,8 @@ public final class TensorScatterNdMin extends RawOp implements private Output output; - private TensorScatterNdMin(Operation operation) { - super(operation); + public TensorScatterNdMin(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = TensorScatterNdMin.class + ) public static class Inputs extends RawOpInputs> { /** * Tensor to update. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdSub.java index a03f78e1f59..3188033d09c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdSub.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -92,6 +94,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = TensorScatterNdSub.OP_NAME, + inputsClass = TensorScatterNdSub.Inputs.class +) @Operator public final class TensorScatterNdSub extends RawOp implements Operand { /** @@ -101,8 +107,8 @@ public final class TensorScatterNdSub extends RawOp implements private Output output; - private TensorScatterNdSub(Operation operation) { - super(operation); + public TensorScatterNdSub(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -143,6 +149,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = TensorScatterNdSub.class + ) public static class Inputs extends RawOpInputs> { /** * Tensor to copy/update. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdUpdate.java index 3cfec508b4b..d71f06861eb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdUpdate.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -66,6 +68,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = TensorScatterNdUpdate.OP_NAME, + inputsClass = TensorScatterNdUpdate.Inputs.class +) @Operator public final class TensorScatterNdUpdate extends RawOp implements Operand { /** @@ -75,8 +81,8 @@ public final class TensorScatterNdUpdate extends RawOp implemen private Output output; - private TensorScatterNdUpdate(Operation operation) { - super(operation); + public TensorScatterNdUpdate(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -118,6 +124,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = TensorScatterNdUpdate.class + ) public static class Inputs extends RawOpInputs> { /** * Tensor to copy/update. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorStridedSliceUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorStridedSliceUpdate.java index f2a68cb155a..8e95464edb1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorStridedSliceUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorStridedSliceUpdate.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -42,6 +44,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = TensorStridedSliceUpdate.OP_NAME, + inputsClass = TensorStridedSliceUpdate.Inputs.class +) @Operator public final class TensorStridedSliceUpdate extends RawOp implements Operand { /** @@ -51,8 +57,8 @@ public final class TensorStridedSliceUpdate extends RawOp imple private Output output; - private TensorStridedSliceUpdate(Operation operation) { - super(operation); + public TensorStridedSliceUpdate(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -242,6 +248,9 @@ public Options shrinkAxisMask(Long shrinkAxisMask) { } } + @OpInputsMetadata( + outputsClass = TensorStridedSliceUpdate.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Tile.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Tile.java index 4dbd1ef3a64..a13733f9c51 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Tile.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Tile.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -68,6 +70,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Tile.OP_NAME, + inputsClass = Tile.Inputs.class +) @Operator public final class Tile extends RawOp implements Operand { /** @@ -77,8 +83,8 @@ public final class Tile extends RawOp implements Operand { private Output output; - private Tile(Operation operation) { - super(operation); + public Tile(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -117,6 +123,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Tile.class + ) public static class Inputs extends RawOpInputs> { /** * 1-D or higher. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Timestamp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Timestamp.java index e77de2d52ee..a19809b89a8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Timestamp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Timestamp.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat64; @@ -36,6 +38,10 @@ *

Note: the timestamp is computed when the op is executed, not when it is added * to the graph. */ +@OpMetadata( + opType = Timestamp.OP_NAME, + inputsClass = Timestamp.Inputs.class +) @Operator public final class Timestamp extends RawOp implements Operand { /** @@ -45,8 +51,8 @@ public final class Timestamp extends RawOp implements Operand { private Output ts; - private Timestamp(Operation operation) { - super(operation); + public Timestamp(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; ts = operation.output(outputIdx++); } @@ -79,6 +85,9 @@ public Output asOutput() { return ts; } + @OpInputsMetadata( + outputsClass = Timestamp.class + ) public static class Inputs extends RawOpInputs { public Inputs(GraphOperation op) { super(new Timestamp(op), op, Arrays.asList()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TopKUnique.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TopKUnique.java index 081529475b0..0d9a2444692 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TopKUnique.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TopKUnique.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -46,6 +48,10 @@ * padding value will be returned. The semantics are not the same as * kth_order_statistic. */ +@OpMetadata( + opType = TopKUnique.OP_NAME, + inputsClass = TopKUnique.Inputs.class +) @Operator public final class TopKUnique extends RawOp { /** @@ -57,8 +63,8 @@ public final class TopKUnique extends RawOp { private Output topkIndices; - private TopKUnique(Operation operation) { - super(operation); + public TopKUnique(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; topk = operation.output(outputIdx++); topkIndices = operation.output(outputIdx++); @@ -100,6 +106,9 @@ public Output topkIndices() { return topkIndices; } + @OpInputsMetadata( + outputsClass = TopKUnique.class + ) public static class Inputs extends RawOpInputs { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TopKWithUnique.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TopKWithUnique.java index 856b03835ec..997bff2858d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TopKWithUnique.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TopKWithUnique.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -39,6 +41,10 @@ * of K and the input size. NaNs are never returned. Subnormal numbers are flushed * to zero. 
*/ +@OpMetadata( + opType = TopKWithUnique.OP_NAME, + inputsClass = TopKWithUnique.Inputs.class +) @Operator public final class TopKWithUnique extends RawOp { /** @@ -50,8 +56,8 @@ public final class TopKWithUnique extends RawOp { private Output topkIndices; - private TopKWithUnique(Operation operation) { - super(operation); + public TopKWithUnique(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; topk = operation.output(outputIdx++); topkIndices = operation.output(outputIdx++); @@ -93,6 +99,9 @@ public Output topkIndices() { return topkIndices; } + @OpInputsMetadata( + outputsClass = TopKWithUnique.class + ) public static class Inputs extends RawOpInputs { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unbatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unbatch.java index f39e219d59a..2ee6f6d8edd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unbatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unbatch.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -54,6 +56,10 @@ * * @param data type for {@code unbatched_tensor} output */ +@OpMetadata( + opType = Unbatch.OP_NAME, + inputsClass = Unbatch.Inputs.class +) @Operator public final class Unbatch extends RawOp implements Operand { /** @@ -63,8 +69,8 @@ public final class Unbatch extends RawOp implements Operand private Output unbatchedTensor; - private Unbatch(Operation operation) { - super(operation); + public Unbatch(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; unbatchedTensor = operation.output(outputIdx++); } @@ -172,6 +178,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = Unbatch.class + ) public static class Inputs extends RawOpInputs> { /** * The batchedTensor input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnbatchGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnbatchGrad.java index 71687f10ad6..c81c07ee827 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnbatchGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnbatchGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -50,6 +52,10 @@ * * @param data type for {@code batched_grad} output */ +@OpMetadata( + opType = UnbatchGrad.OP_NAME, + inputsClass = UnbatchGrad.Inputs.class +) @Operator public final class UnbatchGrad extends RawOp implements Operand { /** @@ -59,8 +65,8 @@ public final class UnbatchGrad extends RawOp implements Operand private Output batchedGrad; - private UnbatchGrad(Operation operation) { - super(operation); + public UnbatchGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; batchedGrad = 
operation.output(outputIdx++); } @@ -168,6 +174,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = UnbatchGrad.class + ) public static class Inputs extends RawOpInputs> { /** * The originalInput input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unique.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unique.java index e08cc3c6309..3b6be95c9a4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unique.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unique.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -77,6 +79,10 @@ * * @param data type for {@code idx} output */ +@OpMetadata( + opType = Unique.OP_NAME, + inputsClass = Unique.Inputs.class +) @Operator public final class Unique extends RawOp { /** @@ -88,8 +94,8 @@ public final class Unique extends RawOp { private Output idx; - private Unique(Operation operation) { - super(operation); + public Unique(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); idx = operation.output(outputIdx++); @@ -156,6 +162,9 @@ public Output idx() { return idx; } + @OpInputsMetadata( + outputsClass = Unique.class + ) public static class Inputs extends RawOpInputs> { /** * A {@code Tensor}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniqueWithCounts.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniqueWithCounts.java index 0fa3499fab6..3b3e441e50d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniqueWithCounts.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniqueWithCounts.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -81,6 +83,10 @@ * * @param data type for {@code idx} output */ +@OpMetadata( + opType = UniqueWithCounts.OP_NAME, + inputsClass = UniqueWithCounts.Inputs.class +) @Operator public final class UniqueWithCounts extends RawOp { /** @@ -94,8 +100,8 @@ public final class UniqueWithCounts extends private Output count; - private UniqueWithCounts(Operation operation) { - super(operation); + public UniqueWithCounts(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); idx = operation.output(outputIdx++); @@ -172,6 +178,9 @@ public Output count() { return count; } + @OpInputsMetadata( + outputsClass = UniqueWithCounts.class + ) public static class Inputs extends RawOpInputs> { /** * A {@code Tensor}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnravelIndex.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnravelIndex.java index 17ca0f50647..b472efa7892 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnravelIndex.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnravelIndex.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -53,6 +55,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = UnravelIndex.OP_NAME, + inputsClass = UnravelIndex.Inputs.class +) @Operator public final class UnravelIndex extends RawOp implements Operand { /** @@ -62,8 +68,8 @@ public final class UnravelIndex extends RawOp implements Oper private Output output; - private UnravelIndex(Operation operation) { - super(operation); + public UnravelIndex(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -105,6 +111,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = UnravelIndex.class + ) public static class Inputs extends RawOpInputs> { /** * An 0-D or 1-D {@code int} Tensor whose elements are indices into the diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unstack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unstack.java index 0038c66838c..f82a894db5d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unstack.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unstack.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -47,6 +49,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Unstack.OP_NAME, + inputsClass = Unstack.Inputs.class +) @Operator public final class Unstack extends RawOp implements Iterable> { /** @@ -57,8 +63,8 @@ public final class Unstack extends RawOp implements Iterable> output; @SuppressWarnings("unchecked") - private Unstack(Operation operation) { - super(operation); + public Unstack(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputLength = operation.outputListLength("output"); output = Arrays.asList((Output[]) operation.outputList(outputIdx, outputLength)); @@ -141,6 +147,9 @@ public Options axis(Long axis) { } } + @OpInputsMetadata( + outputsClass = Unstack.class + ) public static class Inputs extends RawOpInputs> { /** * 1-D or higher, with {@code axis} dimension size equal to {@code num}. 
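The hunks above repeat one mechanical change across every generated op class. Condensed, the post-patch shape of such a class is sketched below. ExampleOp is a hypothetical single-output op used only for illustration; the OpMetadata/OpInputsMetadata elements, the public (Operation) constructor that forwards OP_NAME to the RawOp super constructor, and the Inputs(GraphOperation) constructor mirror the Timestamp, Unbatch and Unique hunks in this patch, while the concrete generics of any real op should be read from its own file.

import java.util.Arrays;
import org.tensorflow.GraphOperation;
import org.tensorflow.Operand;
import org.tensorflow.Operation;
import org.tensorflow.Output;
import org.tensorflow.op.RawOp;
import org.tensorflow.op.RawOpInputs;
import org.tensorflow.op.annotation.OpInputsMetadata;
import org.tensorflow.op.annotation.OpMetadata;
import org.tensorflow.op.annotation.Operator;
import org.tensorflow.types.TFloat32;

// Hypothetical op class; only the annotation/constructor pattern is taken from the patch.
@OpMetadata(
    opType = ExampleOp.OP_NAME,           // registered op type this class wraps
    inputsClass = ExampleOp.Inputs.class  // typed inputs holder for this op
)
@Operator
public final class ExampleOp extends RawOp implements Operand<TFloat32> {

  /** The name of this op, as known by TensorFlow core engine */
  public static final String OP_NAME = "ExampleOp";

  private Output<TFloat32> output;

  // Public (previously private), so code outside this package can re-wrap an
  // existing Operation; OP_NAME is now forwarded to the RawOp super constructor.
  public ExampleOp(Operation operation) {
    super(operation, OP_NAME);
    int outputIdx = 0;
    output = operation.output(outputIdx++);
  }

  @Override
  public Output<TFloat32> asOutput() {
    return output;
  }

  @OpInputsMetadata(
      outputsClass = ExampleOp.class      // back-reference from the inputs view to the op class
  )
  public static class Inputs extends RawOpInputs<ExampleOp> {
    public Inputs(GraphOperation op) {
      // This hypothetical op has no attributes, hence the empty attribute list.
      super(new ExampleOp(op), op, Arrays.asList());
    }
  }
}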
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unstage.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unstage.java index def9253c00c..e551c044389 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unstage.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unstage.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * The basic functionality is similar to dequeue with many fewer * capabilities and options. This Op is optimized for performance. */ +@OpMetadata( + opType = Unstage.OP_NAME, + inputsClass = Unstage.Inputs.class +) @Operator public final class Unstage extends RawOp implements Iterable> { /** @@ -49,8 +55,8 @@ public final class Unstage extends RawOp implements Iterable> { private List> values; @SuppressWarnings("unchecked") - private Unstage(Operation operation) { - super(operation); + public Unstage(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int valuesLength = operation.outputListLength("values"); values = Arrays.asList(operation.outputList(outputIdx, valuesLength)); @@ -206,6 +212,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = Unstage.class + ) public static class Inputs extends RawOpInputs { /** * The capacity attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UpperBound.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UpperBound.java index f7a13f62d04..e9b79bbeaaf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UpperBound.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UpperBound.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -51,6 +53,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = UpperBound.OP_NAME, + inputsClass = UpperBound.Inputs.class +) public final class UpperBound extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -59,8 +65,8 @@ public final class UpperBound extends RawOp implements Operan private Output output; - private UpperBound(Operation operation) { - super(operation); + public UpperBound(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -123,6 +129,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = UpperBound.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D Tensor where each row is ordered. 
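One practical effect of the visibility change is that an operation already present in a graph can be re-wrapped in its typed generated class, and in its typed Inputs view, from outside the org.tensorflow.op packages. A minimal sketch follows, using the Timestamp op patched earlier in this file; the wrapping calls match the patch, while the surrounding graph setup is illustrative.

import org.tensorflow.Graph;
import org.tensorflow.GraphOperation;
import org.tensorflow.Output;
import org.tensorflow.op.Ops;
import org.tensorflow.op.core.Timestamp;
import org.tensorflow.types.TFloat64;

public final class RewrapSketch {
  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);
      Timestamp built = tf.timestamp();                    // add the op to the graph
      GraphOperation op = (GraphOperation) built.op();     // its underlying graph operation

      Timestamp rewrapped = new Timestamp(op);             // constructor was private before this patch
      Timestamp.Inputs inputs = new Timestamp.Inputs(op);  // typed (here: empty) input view
      Output<TFloat64> ts = rewrapped.asOutput();
      System.out.println(ts.shape());                      // static shape of the scalar timestamp output
    }
  }
}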
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VarHandleOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VarHandleOp.java index 67e1454d37c..3bec2d4b16a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VarHandleOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VarHandleOp.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * Creates a handle to a Variable resource. */ +@OpMetadata( + opType = VarHandleOp.OP_NAME, + inputsClass = VarHandleOp.Inputs.class +) @Operator public final class VarHandleOp extends RawOp implements Operand { /** @@ -47,8 +53,8 @@ public final class VarHandleOp extends RawOp implements Operand { private Output resource; @SuppressWarnings("unchecked") - private VarHandleOp(Operation operation) { - super(operation); + public VarHandleOp(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; resource = operation.output(outputIdx++); } @@ -209,6 +215,9 @@ public Options allowedDevices(String... allowedDevices) { } } + @OpInputsMetadata( + outputsClass = VarHandleOp.class + ) public static class Inputs extends RawOpInputs { /** * the container this variable is placed in. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VarIsInitializedOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VarIsInitializedOp.java index 25156efd4ec..aad05c3037f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VarIsInitializedOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VarIsInitializedOp.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TBool; import org.tensorflow.types.family.TType; @@ -34,6 +36,10 @@ /** * Checks whether a resource handle-based variable has been initialized. */ +@OpMetadata( + opType = VarIsInitializedOp.OP_NAME, + inputsClass = VarIsInitializedOp.Inputs.class +) @Operator public final class VarIsInitializedOp extends RawOp implements Operand { /** @@ -43,8 +49,8 @@ public final class VarIsInitializedOp extends RawOp implements Operand { private Output isInitialized; - private VarIsInitializedOp(Operation operation) { - super(operation); + public VarIsInitializedOp(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; isInitialized = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return isInitialized; } + @OpInputsMetadata( + outputsClass = VarIsInitializedOp.class + ) public static class Inputs extends RawOpInputs { /** * the input resource handle. 
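The annotations also make the op-type, inputs and outputs relationships discoverable from the classes themselves. The sketch below reads them back with plain Java reflection and uses VarHandleOp from the hunk above; it assumes OpMetadata and OpInputsMetadata are retained at runtime, which this part of the patch does not show, so treat it as a possible consumer rather than as how the library itself uses them.

import org.tensorflow.op.annotation.OpInputsMetadata;
import org.tensorflow.op.annotation.OpMetadata;
import org.tensorflow.op.core.VarHandleOp;

public final class MetadataLookupSketch {
  public static void main(String[] args) {
    // Op class -> registered op type and typed inputs holder.
    OpMetadata meta = VarHandleOp.class.getAnnotation(OpMetadata.class);
    if (meta != null) {                                            // null unless runtime-retained
      System.out.println("opType      = " + meta.opType());       // same value as VarHandleOp.OP_NAME
      System.out.println("inputsClass = " + meta.inputsClass());
    }

    // Inputs class -> the op class whose outputs it describes.
    OpInputsMetadata inputsMeta =
        VarHandleOp.Inputs.class.getAnnotation(OpInputsMetadata.class);
    if (inputsMeta != null) {
      System.out.println("outputsClass = " + inputsMeta.outputsClass());
    }
  }
}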
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Variable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Variable.java index 12eebf8189c..0ed775841cb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Variable.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Variable.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -41,6 +43,10 @@ * * @param data type for {@code ref} output */ +@OpMetadata( + opType = Variable.OP_NAME, + inputsClass = Variable.Inputs.class +) @Operator public final class Variable extends RawOp implements Operand { /** @@ -50,8 +56,8 @@ public final class Variable extends RawOp implements Operand private Output ref; - private Variable(Operation operation) { - super(operation); + public Variable(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; ref = operation.output(outputIdx++); } @@ -159,6 +165,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = Variable.class + ) public static class Inputs extends RawOpInputs> { /** * The shape of the variable tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VariableShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VariableShape.java index f7d5a59a1b0..f5a90fb5896 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VariableShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VariableShape.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -45,6 +47,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = VariableShape.OP_NAME, + inputsClass = VariableShape.Inputs.class +) @Operator public final class VariableShape extends RawOp implements Operand { /** @@ -54,8 +60,8 @@ public final class VariableShape extends RawOp implements Ope private Output output; - private VariableShape(Operation operation) { - super(operation); + public VariableShape(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -108,6 +114,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = VariableShape.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Where.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Where.java index 4fc22c56aed..d10d3c5561c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Where.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Where.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import 
org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -92,6 +94,10 @@ * [2, 1, 1]] *

*/ +@OpMetadata( + opType = Where.OP_NAME, + inputsClass = Where.Inputs.class +) @Operator public final class Where extends RawOp implements Operand { /** @@ -101,8 +107,8 @@ public final class Where extends RawOp implements Operand { private Output index; - private Where(Operation operation) { - super(operation); + public Where(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; index = operation.output(outputIdx++); } @@ -137,6 +143,9 @@ public Output asOutput() { return index; } + @OpInputsMetadata( + outputsClass = Where.class + ) public static class Inputs extends RawOpInputs { /** * The condition input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Window.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Window.java index a363a2eab9e..ab241b515ae 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Window.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Window.java @@ -30,12 +30,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; /** * The Window operation */ +@OpMetadata( + opType = Window.OP_NAME, + inputsClass = Window.Inputs.class +) public final class Window extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class Window extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private Window(Operation operation) { - super(operation); + public Window(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = Window.class + ) public static class Inputs extends RawOpInputs { /** * The inputs input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ZerosLike.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ZerosLike.java index ece2adac5ce..ebac3a1d700 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ZerosLike.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ZerosLike.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = ZerosLike.OP_NAME, + inputsClass = ZerosLike.Inputs.class +) @Operator public final class ZerosLike extends RawOp implements Operand { /** @@ -45,8 +51,8 @@ public final class ZerosLike extends RawOp implements Operand y; - private ZerosLike(Operation operation) { - super(operation); + public ZerosLike(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = ZerosLike.class + ) public static class Inputs 
extends RawOpInputs> { /** * a tensor of type T. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousIterator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousIterator.java index 010fdceba22..5ecb7359a44 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousIterator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousIterator.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ /** * A container for an iterator resource. */ +@OpMetadata( + opType = AnonymousIterator.OP_NAME, + inputsClass = AnonymousIterator.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class AnonymousIterator extends RawOp { private Output deleter; @SuppressWarnings("unchecked") - private AnonymousIterator(Operation operation) { - super(operation); + public AnonymousIterator(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); deleter = operation.output(outputIdx++); @@ -101,6 +107,9 @@ public Output deleter() { return deleter; } + @OpInputsMetadata( + outputsClass = AnonymousIterator.class + ) public static class Inputs extends RawOpInputs { /** * The outputTypes attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousMemoryCache.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousMemoryCache.java index 1ab01851c85..9194702ad24 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousMemoryCache.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousMemoryCache.java @@ -26,11 +26,17 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.family.TType; /** * The AnonymousMemoryCache operation */ +@OpMetadata( + opType = AnonymousMemoryCache.OP_NAME, + inputsClass = AnonymousMemoryCache.Inputs.class +) public final class AnonymousMemoryCache extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -42,8 +48,8 @@ public final class AnonymousMemoryCache extends RawOp { private Output deleter; @SuppressWarnings("unchecked") - private AnonymousMemoryCache(Operation operation) { - super(operation); + public AnonymousMemoryCache(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); deleter = operation.output(outputIdx++); @@ -81,6 +87,9 @@ public Output deleter() { return deleter; } + @OpInputsMetadata( + outputsClass = AnonymousMemoryCache.class + ) public static class Inputs extends RawOpInputs { public Inputs(GraphOperation op) { super(new AnonymousMemoryCache(op), op, Arrays.asList()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousMultiDeviceIterator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousMultiDeviceIterator.java index 
19f0ae37083..cce27d9053a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousMultiDeviceIterator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousMultiDeviceIterator.java @@ -29,12 +29,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; /** * A container for a multi device iterator resource. */ +@OpMetadata( + opType = AnonymousMultiDeviceIterator.OP_NAME, + inputsClass = AnonymousMultiDeviceIterator.Inputs.class +) public final class AnonymousMultiDeviceIterator extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class AnonymousMultiDeviceIterator extends RawOp { private Output deleter; @SuppressWarnings("unchecked") - private AnonymousMultiDeviceIterator(Operation operation) { - super(operation); + public AnonymousMultiDeviceIterator(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); deleter = operation.output(outputIdx++); @@ -103,6 +109,9 @@ public Output deleter() { return deleter; } + @OpInputsMetadata( + outputsClass = AnonymousMultiDeviceIterator.class + ) public static class Inputs extends RawOpInputs { /** * The devices attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AssertCardinalityDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AssertCardinalityDataset.java index 004b64d085e..344fbead34c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AssertCardinalityDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AssertCardinalityDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -38,6 +40,10 @@ /** * The AssertCardinalityDataset operation */ +@OpMetadata( + opType = AssertCardinalityDataset.OP_NAME, + inputsClass = AssertCardinalityDataset.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class AssertCardinalityDataset extends RawOp implements Operand handle; @SuppressWarnings("unchecked") - private AssertCardinalityDataset(Operation operation) { - super(operation); + public AssertCardinalityDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -99,6 +105,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = AssertCardinalityDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AssertNextDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AssertNextDataset.java index 957589aa4c7..183b8ac9ddb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AssertNextDataset.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AssertNextDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -45,6 +47,10 @@ * means that the check happens after any static optimizations are applied * to the dataset graph. */ +@OpMetadata( + opType = AssertNextDataset.OP_NAME, + inputsClass = AssertNextDataset.Inputs.class +) @Operator( group = "data" ) @@ -57,8 +63,8 @@ public final class AssertNextDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private AssertNextDataset(Operation operation) { - super(operation); + public AssertNextDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -108,6 +114,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = AssertNextDataset.class + ) public static class Inputs extends RawOpInputs { /** * A variant tensor representing the input dataset. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AutoShardDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AutoShardDataset.java index 46150ca633f..b54f548540c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AutoShardDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AutoShardDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -44,6 +46,10 @@ *

This dataset will throw a NotFound error if we cannot shard the dataset * automatically. */ +@OpMetadata( + opType = AutoShardDataset.OP_NAME, + inputsClass = AutoShardDataset.Inputs.class +) @Operator( group = "data" ) @@ -56,8 +62,8 @@ public final class AutoShardDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private AutoShardDataset(Operation operation) { - super(operation); + public AutoShardDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -172,6 +178,9 @@ public Options numReplicas(Long numReplicas) { } } + @OpInputsMetadata( + outputsClass = AutoShardDataset.class + ) public static class Inputs extends RawOpInputs { /** * A variant tensor representing the input dataset. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/BatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/BatchDataset.java index aaf79b9104d..fbd817a8f20 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/BatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/BatchDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -39,6 +41,10 @@ /** * Creates a dataset that batches {@code batch_size} elements from {@code input_dataset}. */ +@OpMetadata( + opType = BatchDataset.OP_NAME, + inputsClass = BatchDataset.Inputs.class +) @Operator( group = "data" ) @@ -51,8 +57,8 @@ public final class BatchDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private BatchDataset(Operation operation) { - super(operation); + public BatchDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -142,6 +148,9 @@ public Options parallelCopy(Boolean parallelCopy) { } } + @OpInputsMetadata( + outputsClass = BatchDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/BytesProducedStatsDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/BytesProducedStatsDataset.java index ead0ae179b3..19eb682960b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/BytesProducedStatsDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/BytesProducedStatsDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -38,6 +40,10 @@ /** * Records the bytes size of each element of {@code input_dataset} in a StatsAggregator. 
*/ +@OpMetadata( + opType = BytesProducedStatsDataset.OP_NAME, + inputsClass = BytesProducedStatsDataset.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class BytesProducedStatsDataset extends RawOp implements Operand handle; @SuppressWarnings("unchecked") - private BytesProducedStatsDataset(Operation operation) { - super(operation); + public BytesProducedStatsDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -98,6 +104,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = BytesProducedStatsDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CSVDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CSVDataset.java index 5865a5bbfff..07b39ecd081 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CSVDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CSVDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -40,6 +42,10 @@ /** * The CSVDatasetV2 operation */ +@OpMetadata( + opType = CSVDataset.OP_NAME, + inputsClass = CSVDataset.Inputs.class +) @Operator( group = "data" ) @@ -52,8 +58,8 @@ public final class CSVDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private CSVDataset(Operation operation) { - super(operation); + public CSVDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -117,6 +123,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = CSVDataset.class + ) public static class Inputs extends RawOpInputs { /** * The filenames input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CacheDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CacheDataset.java index 8634e504fa2..842112ec012 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CacheDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CacheDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -38,6 +40,10 @@ /** * The CacheDatasetV2 operation */ +@OpMetadata( + opType = CacheDataset.OP_NAME, + inputsClass = CacheDataset.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class CacheDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private CacheDataset(Operation operation) { - super(operation); + public CacheDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -101,6 
+107,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = CacheDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ChooseFastestBranchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ChooseFastestBranchDataset.java index ce7bc509f4e..75295649425 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ChooseFastestBranchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ChooseFastestBranchDataset.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -39,6 +41,10 @@ /** * The ChooseFastestBranchDataset operation */ +@OpMetadata( + opType = ChooseFastestBranchDataset.OP_NAME, + inputsClass = ChooseFastestBranchDataset.Inputs.class +) @Operator( group = "data" ) @@ -51,8 +57,8 @@ public final class ChooseFastestBranchDataset extends RawOp implements Operand handle; @SuppressWarnings("unchecked") - private ChooseFastestBranchDataset(Operation operation) { - super(operation); + public ChooseFastestBranchDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -120,6 +126,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = ChooseFastestBranchDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ChooseFastestDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ChooseFastestDataset.java index a617ab45fa3..8550528ae05 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ChooseFastestDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ChooseFastestDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * The ChooseFastestDataset operation */ +@OpMetadata( + opType = ChooseFastestDataset.OP_NAME, + inputsClass = ChooseFastestDataset.Inputs.class +) @Operator( group = "data" ) @@ -49,8 +55,8 @@ public final class ChooseFastestDataset extends RawOp implements Operand private Output handle; @SuppressWarnings("unchecked") - private ChooseFastestDataset(Operation operation) { - super(operation); + public ChooseFastestDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -98,6 +104,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = ChooseFastestDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDatasets input diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CompressElement.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CompressElement.java index 6fc99b577b4..6edd206187f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CompressElement.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CompressElement.java @@ -28,12 +28,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; /** * Compresses a dataset element. */ +@OpMetadata( + opType = CompressElement.OP_NAME, + inputsClass = CompressElement.Inputs.class +) public final class CompressElement extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class CompressElement extends RawOp implements Operand { private Output compressed; @SuppressWarnings("unchecked") - private CompressElement(Operation operation) { - super(operation); + public CompressElement(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; compressed = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return (Output) compressed; } + @OpInputsMetadata( + outputsClass = CompressElement.class + ) public static class Inputs extends RawOpInputs { /** * The components input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ConcatenateDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ConcatenateDataset.java index b5f7cb8c11a..1a58173cecd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ConcatenateDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ConcatenateDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * Creates a dataset that concatenates {@code input_dataset} with {@code another_dataset}. 
*/ +@OpMetadata( + opType = ConcatenateDataset.OP_NAME, + inputsClass = ConcatenateDataset.Inputs.class +) @Operator( group = "data" ) @@ -49,8 +55,8 @@ public final class ConcatenateDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private ConcatenateDataset(Operation operation) { - super(operation); + public ConcatenateDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -98,6 +104,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = ConcatenateDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DataServiceDatasetV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DataServiceDatasetV2.java index 20cf98e1806..0574b406440 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DataServiceDatasetV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DataServiceDatasetV2.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -39,6 +41,10 @@ /** * Creates a dataset that reads data from the tf.data service. */ +@OpMetadata( + opType = DataServiceDatasetV2.OP_NAME, + inputsClass = DataServiceDatasetV2.Inputs.class +) @Operator( group = "data" ) @@ -51,8 +57,8 @@ public final class DataServiceDatasetV2 extends RawOp implements Operand private Output handle; @SuppressWarnings("unchecked") - private DataServiceDatasetV2(Operation operation) { - super(operation); + public DataServiceDatasetV2(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -207,6 +213,9 @@ public Options targetWorkers(String targetWorkers) { } } + @OpInputsMetadata( + outputsClass = DataServiceDatasetV2.class + ) public static class Inputs extends RawOpInputs { /** * The datasetId input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetCardinality.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetCardinality.java index 4d16a0c6556..1465873848f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetCardinality.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetCardinality.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -35,6 +37,10 @@ * Returns the cardinality of {@code input_dataset}. * Returns the cardinality of {@code input_dataset}. 
*/ +@OpMetadata( + opType = DatasetCardinality.OP_NAME, + inputsClass = DatasetCardinality.Inputs.class +) @Operator( group = "data" ) @@ -46,8 +52,8 @@ public final class DatasetCardinality extends RawOp implements Operand { private Output cardinality; - private DatasetCardinality(Operation operation) { - super(operation); + public DatasetCardinality(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; cardinality = operation.output(outputIdx++); } @@ -83,6 +89,9 @@ public Output asOutput() { return cardinality; } + @OpInputsMetadata( + outputsClass = DatasetCardinality.class + ) public static class Inputs extends RawOpInputs { /** * A variant tensor representing the dataset to return cardinality for. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetFromGraph.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetFromGraph.java index b1e0f690462..52927c5281a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetFromGraph.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetFromGraph.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -35,6 +37,10 @@ * Creates a dataset from the given {@code graph_def}. * Creates a dataset from the provided {@code graph_def}. */ +@OpMetadata( + opType = DatasetFromGraph.OP_NAME, + inputsClass = DatasetFromGraph.Inputs.class +) @Operator( group = "data" ) @@ -47,8 +53,8 @@ public final class DatasetFromGraph extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private DatasetFromGraph(Operation operation) { - super(operation); + public DatasetFromGraph(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = DatasetFromGraph.class + ) public static class Inputs extends RawOpInputs { /** * The graph representation of the dataset (as serialized GraphDef). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToGraph.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToGraph.java index b8c92cd4c61..94b2149b76f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToGraph.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToGraph.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -35,6 +37,10 @@ * Returns a serialized GraphDef representing {@code input_dataset}. * Returns a graph representation for {@code input_dataset}. 
*/ +@OpMetadata( + opType = DatasetToGraph.OP_NAME, + inputsClass = DatasetToGraph.Inputs.class +) @Operator( group = "data" ) @@ -46,8 +52,8 @@ public final class DatasetToGraph extends RawOp implements Operand { private Output graph; - private DatasetToGraph(Operation operation) { - super(operation); + public DatasetToGraph(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; graph = operation.output(outputIdx++); } @@ -148,6 +154,9 @@ public Options stripDeviceAssignment(Boolean stripDeviceAssignment) { } } + @OpInputsMetadata( + outputsClass = DatasetToGraph.class + ) public static class Inputs extends RawOpInputs { /** * A variant tensor representing the dataset to return the graph representation for. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToSingleElement.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToSingleElement.java index 1ebef27faa2..bf038dd2493 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToSingleElement.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToSingleElement.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ /** * Outputs the single element from the given dataset. */ +@OpMetadata( + opType = DatasetToSingleElement.OP_NAME, + inputsClass = DatasetToSingleElement.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class DatasetToSingleElement extends RawOp implements Iterable> components; @SuppressWarnings("unchecked") - private DatasetToSingleElement(Operation operation) { - super(operation); + public DatasetToSingleElement(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int componentsLength = operation.outputListLength("components"); components = Arrays.asList(operation.outputList(outputIdx, componentsLength)); @@ -98,6 +104,9 @@ public Iterator> iterator() { return (Iterator) components.iterator(); } + @OpInputsMetadata( + outputsClass = DatasetToSingleElement.class + ) public static class Inputs extends RawOpInputs { /** * A handle to a dataset that contains a single element. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToTfRecord.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToTfRecord.java index 02a9153d092..f52a044474b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToTfRecord.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToTfRecord.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -33,6 +35,10 @@ /** * Writes the given dataset to the given file using the TFRecord format. 
*/ +@OpMetadata( + opType = DatasetToTfRecord.OP_NAME, + inputsClass = DatasetToTfRecord.Inputs.class +) @Operator( group = "data" ) @@ -42,8 +48,8 @@ public final class DatasetToTfRecord extends RawOp { */ public static final String OP_NAME = "DatasetToTFRecord"; - private DatasetToTfRecord(Operation operation) { - super(operation); + public DatasetToTfRecord(Operation operation) { + super(operation, OP_NAME); } /** @@ -68,6 +74,9 @@ public static DatasetToTfRecord create(Scope scope, Operand inp return new DatasetToTfRecord(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = DatasetToTfRecord.class + ) public static class Inputs extends RawOpInputs { /** * A variant tensor representing the dataset to write. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteIterator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteIterator.java index 6888dfa07ca..b7577b7b537 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteIterator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteIterator.java @@ -26,12 +26,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** * A container for an iterator resource. */ +@OpMetadata( + opType = DeleteIterator.OP_NAME, + inputsClass = DeleteIterator.Inputs.class +) @Operator( group = "data" ) @@ -41,8 +47,8 @@ public final class DeleteIterator extends RawOp { */ public static final String OP_NAME = "DeleteIterator"; - private DeleteIterator(Operation operation) { - super(operation); + public DeleteIterator(Operation operation) { + super(operation, OP_NAME); } /** @@ -64,6 +70,9 @@ public static DeleteIterator create(Scope scope, Operand handle return new DeleteIterator(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = DeleteIterator.class + ) public static class Inputs extends RawOpInputs { /** * A handle to the iterator to delete. 
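The iterator-resource ops here come in produce/consume pairs: AnonymousIterator (patched earlier in this file) emits a handle plus a deleter tensor, and DeleteIterator takes both back. A minimal lifecycle sketch follows; the tf.data.anonymousIterator and tf.data.deleteIterator endpoint names and parameter shapes are assumed from the usual generated-Ops conventions rather than quoted from this patch.

import java.util.Arrays;
import org.tensorflow.Graph;
import org.tensorflow.ndarray.Shape;
import org.tensorflow.op.Ops;
import org.tensorflow.op.data.AnonymousIterator;
import org.tensorflow.types.TInt64;

public final class IteratorLifecycleSketch {
  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);

      // Create the iterator resource; the generated class exposes handle() and deleter().
      AnonymousIterator it = tf.data.anonymousIterator(
          Arrays.asList(TInt64.class),      // element output types
          Arrays.asList(Shape.scalar()));   // element output shapes

      // ... initialize the iterator and pull elements from it here ...

      // Release the resource by handing both outputs back to DeleteIterator.
      tf.data.deleteIterator(it.handle(), it.deleter());
    }
  }
}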
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteMemoryCache.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteMemoryCache.java index d342a701ddf..a7925564c1a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteMemoryCache.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteMemoryCache.java @@ -26,19 +26,25 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.family.TType; /** * The DeleteMemoryCache operation */ +@OpMetadata( + opType = DeleteMemoryCache.OP_NAME, + inputsClass = DeleteMemoryCache.Inputs.class +) public final class DeleteMemoryCache extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "DeleteMemoryCache"; - private DeleteMemoryCache(Operation operation) { - super(operation); + public DeleteMemoryCache(Operation operation) { + super(operation, OP_NAME); } /** @@ -60,6 +66,9 @@ public static DeleteMemoryCache create(Scope scope, Operand han return new DeleteMemoryCache(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = DeleteMemoryCache.class + ) public static class Inputs extends RawOpInputs { /** * The handle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteMultiDeviceIterator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteMultiDeviceIterator.java index cc1239be199..1f3e1f6f3d2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteMultiDeviceIterator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeleteMultiDeviceIterator.java @@ -27,19 +27,25 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.family.TType; /** * A container for an iterator resource. */ +@OpMetadata( + opType = DeleteMultiDeviceIterator.OP_NAME, + inputsClass = DeleteMultiDeviceIterator.Inputs.class +) public final class DeleteMultiDeviceIterator extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "DeleteMultiDeviceIterator"; - private DeleteMultiDeviceIterator(Operation operation) { - super(operation); + public DeleteMultiDeviceIterator(Operation operation) { + super(operation, OP_NAME); } /** @@ -64,6 +70,9 @@ public static DeleteMultiDeviceIterator create(Scope scope, return new DeleteMultiDeviceIterator(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = DeleteMultiDeviceIterator.class + ) public static class Inputs extends RawOpInputs { /** * A handle to the multi device iterator to delete. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DenseToSparseBatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DenseToSparseBatchDataset.java index 69815a21d5c..d28aea1ce70 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DenseToSparseBatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DenseToSparseBatchDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -38,6 +40,10 @@ /** * Creates a dataset that batches input elements into a SparseTensor. */ +@OpMetadata( + opType = DenseToSparseBatchDataset.OP_NAME, + inputsClass = DenseToSparseBatchDataset.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class DenseToSparseBatchDataset extends RawOp implements Operand handle; @SuppressWarnings("unchecked") - private DenseToSparseBatchDataset(Operation operation) { - super(operation); + public DenseToSparseBatchDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -104,6 +110,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = DenseToSparseBatchDataset.class + ) public static class Inputs extends RawOpInputs { /** * A handle to an input dataset. Must have a single component. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeserializeIterator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeserializeIterator.java index 0492d8e3a5b..9b994548e1e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeserializeIterator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DeserializeIterator.java @@ -26,12 +26,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** * Converts the given variant tensor to an iterator and stores it in the given resource. */ +@OpMetadata( + opType = DeserializeIterator.OP_NAME, + inputsClass = DeserializeIterator.Inputs.class +) @Operator( group = "data" ) @@ -41,8 +47,8 @@ public final class DeserializeIterator extends RawOp { */ public static final String OP_NAME = "DeserializeIterator"; - private DeserializeIterator(Operation operation) { - super(operation); + public DeserializeIterator(Operation operation) { + super(operation, OP_NAME); } /** @@ -65,6 +71,9 @@ public static DeserializeIterator create(Scope scope, Operand r return new DeserializeIterator(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = DeserializeIterator.class + ) public static class Inputs extends RawOpInputs { /** * A handle to an iterator resource. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DirectedInterleaveDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DirectedInterleaveDataset.java index 9f2b7602cc6..5b8bf250501 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DirectedInterleaveDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DirectedInterleaveDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * A substitute for {@code InterleaveDataset} on a fixed list of {@code N} datasets. */ +@OpMetadata( + opType = DirectedInterleaveDataset.OP_NAME, + inputsClass = DirectedInterleaveDataset.Inputs.class +) @Operator( group = "data" ) @@ -49,8 +55,8 @@ public final class DirectedInterleaveDataset extends RawOp implements Operand handle; @SuppressWarnings("unchecked") - private DirectedInterleaveDataset(Operation operation) { - super(operation); + public DirectedInterleaveDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -140,6 +146,9 @@ public Options stopOnEmptyDataset(Boolean stopOnEmptyDataset) { } } + @OpInputsMetadata( + outputsClass = DirectedInterleaveDataset.class + ) public static class Inputs extends RawOpInputs { /** * A dataset of scalar {@code DT_INT64} elements that determines which of the diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DummyIterationCounter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DummyIterationCounter.java index 10c0ad3c957..2223010b220 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DummyIterationCounter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DummyIterationCounter.java @@ -27,11 +27,17 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.family.TType; /** * The DummyIterationCounter operation */ +@OpMetadata( + opType = DummyIterationCounter.OP_NAME, + inputsClass = DummyIterationCounter.Inputs.class +) public final class DummyIterationCounter extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -41,8 +47,8 @@ public final class DummyIterationCounter extends RawOp implements Operand private Output handle; @SuppressWarnings("unchecked") - private DummyIterationCounter(Operation operation) { - super(operation); + public DummyIterationCounter(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -76,6 +82,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = DummyIterationCounter.class + ) public static class Inputs extends RawOpInputs { public Inputs(GraphOperation op) { super(new DummyIterationCounter(op), op, Arrays.asList()); diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FilterByLastComponentDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FilterByLastComponentDataset.java index 20474ebe360..3f8d67c1891 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FilterByLastComponentDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FilterByLastComponentDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * Creates a dataset containing elements of first component of {@code input_dataset} having true in the last component. */ +@OpMetadata( + opType = FilterByLastComponentDataset.OP_NAME, + inputsClass = FilterByLastComponentDataset.Inputs.class +) @Operator( group = "data" ) @@ -49,8 +55,8 @@ public final class FilterByLastComponentDataset extends RawOp implements Operand private Output output; @SuppressWarnings("unchecked") - private FilterByLastComponentDataset(Operation operation) { - super(operation); + public FilterByLastComponentDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -96,6 +102,9 @@ public Output asOutput() { return (Output) output; } + @OpInputsMetadata( + outputsClass = FilterByLastComponentDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FilterDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FilterDataset.java index 46b5c47e762..473ffa03ea3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FilterDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FilterDataset.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -44,6 +46,10 @@ *

* One tensor for each value in {@code other_arguments}.
  • * */ +@OpMetadata( + opType = FilterDataset.OP_NAME, + inputsClass = FilterDataset.Inputs.class +) @Operator( group = "data" ) @@ -56,8 +62,8 @@ public final class FilterDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private FilterDataset(Operation operation) { - super(operation); + public FilterDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -108,6 +114,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = FilterDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FinalizeDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FinalizeDataset.java index a020f21fddf..4f36eee9dab 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FinalizeDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FinalizeDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * Creates a dataset by applying {@code tf.data.Options} to {@code input_dataset}. */ +@OpMetadata( + opType = FinalizeDataset.OP_NAME, + inputsClass = FinalizeDataset.Inputs.class +) @Operator( group = "data" ) @@ -49,8 +55,8 @@ public final class FinalizeDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private FinalizeDataset(Operation operation) { - super(operation); + public FinalizeDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -134,6 +140,9 @@ public Options hasCapturedRef(Boolean hasCapturedRef) { } } + @OpInputsMetadata( + outputsClass = FinalizeDataset.class + ) public static class Inputs extends RawOpInputs { /** * A variant tensor representing the input dataset. 
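The other half of the repeated pattern is the constructor change from private to public. That allows a typed wrapper to be rebuilt around an operation that already exists in the graph, which is what the generated Inputs(GraphOperation) constructor in the DummyIterationCounter hunk above relies on. The helper below is a hypothetical sketch of the same re-wrapping from user code; the class and method names are illustrative, and it assumes the operation handed in is already known to be of the matching op type.

    import org.tensorflow.GraphOperation;
    import org.tensorflow.Output;
    import org.tensorflow.op.data.DummyIterationCounter;
    import org.tensorflow.types.family.TType;

    // Hypothetical helper: re-wraps an existing graph node in its typed classes.
    public final class RewrapSketch {
      // Assumes {@code raw} is known to be a "DummyIterationCounter" node; before this
      // patch the constructor was private, so code outside the generated class could
      // not perform this re-wrapping.
      static Output<TType> handleOf(GraphOperation raw) {
        DummyIterationCounter typed = new DummyIterationCounter(raw);
        // The typed inputs view is built the same way (left unused here; shown for the pattern only).
        DummyIterationCounter.Inputs inputs = new DummyIterationCounter.Inputs(raw);
        return typed.asOutput();
      }
    }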
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FixedLengthRecordDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FixedLengthRecordDataset.java index 7f0b41e25b9..5613dfd373c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FixedLengthRecordDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FixedLengthRecordDataset.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -35,6 +37,10 @@ /** * The FixedLengthRecordDatasetV2 operation */ +@OpMetadata( + opType = FixedLengthRecordDataset.OP_NAME, + inputsClass = FixedLengthRecordDataset.Inputs.class +) @Operator( group = "data" ) @@ -47,8 +53,8 @@ public final class FixedLengthRecordDataset extends RawOp implements Operand handle; @SuppressWarnings("unchecked") - private FixedLengthRecordDataset(Operation operation) { - super(operation); + public FixedLengthRecordDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -96,6 +102,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = FixedLengthRecordDataset.class + ) public static class Inputs extends RawOpInputs { /** * The filenames input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FlatMapDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FlatMapDataset.java index 2a084772e7a..02fda22a75d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FlatMapDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FlatMapDataset.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -41,6 +43,10 @@ * Dataset variant, and FlatMapDataset will flatten successive results * into a single Dataset. 
*/ +@OpMetadata( + opType = FlatMapDataset.OP_NAME, + inputsClass = FlatMapDataset.Inputs.class +) @Operator( group = "data" ) @@ -53,8 +59,8 @@ public final class FlatMapDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private FlatMapDataset(Operation operation) { - super(operation); + public FlatMapDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -106,6 +112,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = FlatMapDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/GeneratorDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/GeneratorDataset.java index fa2b2c66bc6..3b5d55c07b9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/GeneratorDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/GeneratorDataset.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ /** * Creates a dataset that invokes a function to generate elements. */ +@OpMetadata( + opType = GeneratorDataset.OP_NAME, + inputsClass = GeneratorDataset.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class GeneratorDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private GeneratorDataset(Operation operation) { - super(operation); + public GeneratorDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -108,6 +114,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = GeneratorDataset.class + ) public static class Inputs extends RawOpInputs { /** * The initFuncOtherArgs input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/GroupByReducerDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/GroupByReducerDataset.java index 92c3b2023f8..71b10569545 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/GroupByReducerDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/GroupByReducerDataset.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * Creates a dataset that computes a group-by on {@code input_dataset}. * Creates a dataset that computes a group-by on {@code input_dataset}. 
*/ +@OpMetadata( + opType = GroupByReducerDataset.OP_NAME, + inputsClass = GroupByReducerDataset.Inputs.class +) @Operator( group = "data" ) @@ -51,8 +57,8 @@ public final class GroupByReducerDataset extends RawOp implements Operand private Output handle; @SuppressWarnings("unchecked") - private GroupByReducerDataset(Operation operation) { - super(operation); + public GroupByReducerDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -124,6 +130,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = GroupByReducerDataset.class + ) public static class Inputs extends RawOpInputs { /** * A variant tensor representing the input dataset. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/GroupByWindowDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/GroupByWindowDataset.java index 539d3cecf14..13065f77464 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/GroupByWindowDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/GroupByWindowDataset.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * Creates a dataset that computes a windowed group-by on {@code input_dataset}. * // TODO(mrry): Support non-int64 keys. */ +@OpMetadata( + opType = GroupByWindowDataset.OP_NAME, + inputsClass = GroupByWindowDataset.Inputs.class +) @Operator( group = "data" ) @@ -51,8 +57,8 @@ public final class GroupByWindowDataset extends RawOp implements Operand private Output handle; @SuppressWarnings("unchecked") - private GroupByWindowDataset(Operation operation) { - super(operation); + public GroupByWindowDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -113,6 +119,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = GroupByWindowDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IgnoreErrorsDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IgnoreErrorsDataset.java index 60216480497..e53d9b15825 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IgnoreErrorsDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IgnoreErrorsDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * Creates a dataset that contains the elements of {@code input_dataset} ignoring errors. 
*/ +@OpMetadata( + opType = IgnoreErrorsDataset.OP_NAME, + inputsClass = IgnoreErrorsDataset.Inputs.class +) @Operator( group = "data" ) @@ -49,8 +55,8 @@ public final class IgnoreErrorsDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private IgnoreErrorsDataset(Operation operation) { - super(operation); + public IgnoreErrorsDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -134,6 +140,9 @@ public Options logWarning(Boolean logWarning) { } } + @OpInputsMetadata( + outputsClass = IgnoreErrorsDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/InitializeTableFromDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/InitializeTableFromDataset.java index dba5858d2ef..16fcaa2e8c4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/InitializeTableFromDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/InitializeTableFromDataset.java @@ -26,12 +26,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** * The InitializeTableFromDataset operation */ +@OpMetadata( + opType = InitializeTableFromDataset.OP_NAME, + inputsClass = InitializeTableFromDataset.Inputs.class +) @Operator( group = "data" ) @@ -41,8 +47,8 @@ public final class InitializeTableFromDataset extends RawOp { */ public static final String OP_NAME = "InitializeTableFromDataset"; - private InitializeTableFromDataset(Operation operation) { - super(operation); + public InitializeTableFromDataset(Operation operation) { + super(operation, OP_NAME); } /** @@ -64,6 +70,9 @@ public static InitializeTableFromDataset create(Scope scope, Operand { /** * The tableHandle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/InterleaveDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/InterleaveDataset.java index 25685e80ba5..4cc5b8d7fdf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/InterleaveDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/InterleaveDataset.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -44,6 +46,10 @@ * InterleaveDataset will interleave sequences of up to {@code block_length} * consecutive elements from {@code cycle_length} input elements. 
*/ +@OpMetadata( + opType = InterleaveDataset.OP_NAME, + inputsClass = InterleaveDataset.Inputs.class +) @Operator( group = "data" ) @@ -56,8 +62,8 @@ public final class InterleaveDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private InterleaveDataset(Operation operation) { - super(operation); + public InterleaveDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -113,6 +119,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = InterleaveDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/Iterator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/Iterator.java index 35b4da85be7..beb7563bd2f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/Iterator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/Iterator.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * The IteratorV2 operation */ +@OpMetadata( + opType = Iterator.OP_NAME, + inputsClass = Iterator.Inputs.class +) @Operator( group = "data" ) @@ -49,8 +55,8 @@ public final class Iterator extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private Iterator(Operation operation) { - super(operation); + public Iterator(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -97,6 +103,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = Iterator.class + ) public static class Inputs extends RawOpInputs { /** * The sharedName attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorFromStringHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorFromStringHandle.java index 26695a9c0bf..9988643201b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorFromStringHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorFromStringHandle.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * The IteratorFromStringHandleV2 operation */ +@OpMetadata( + opType = IteratorFromStringHandle.OP_NAME, + inputsClass = IteratorFromStringHandle.Inputs.class +) public final class IteratorFromStringHandle extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class IteratorFromStringHandle extends RawOp implements Operand resourceHandle; @SuppressWarnings("unchecked") - private IteratorFromStringHandle(Operation operation) { 
- super(operation); + public IteratorFromStringHandle(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; resourceHandle = operation.output(outputIdx++); } @@ -150,6 +156,9 @@ public Options outputShapes(Shape... outputShapes) { } } + @OpInputsMetadata( + outputsClass = IteratorFromStringHandle.class + ) public static class Inputs extends RawOpInputs { /** * The stringHandle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetDevice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetDevice.java index 922c13d28f2..48793131cab 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetDevice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetDevice.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; /** * Returns the name of the device on which {@code resource} has been placed. */ +@OpMetadata( + opType = IteratorGetDevice.OP_NAME, + inputsClass = IteratorGetDevice.Inputs.class +) public final class IteratorGetDevice extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -41,8 +47,8 @@ public final class IteratorGetDevice extends RawOp implements Operand { private Output device; - private IteratorGetDevice(Operation operation) { - super(operation); + public IteratorGetDevice(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; device = operation.output(outputIdx++); } @@ -77,6 +83,9 @@ public Output asOutput() { return device; } + @OpInputsMetadata( + outputsClass = IteratorGetDevice.class + ) public static class Inputs extends RawOpInputs { /** * The resource input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetNext.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetNext.java index 9d48aeb39a5..f8055319926 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetNext.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetNext.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ /** * Gets the next output from the given iterator . 
*/ +@OpMetadata( + opType = IteratorGetNext.OP_NAME, + inputsClass = IteratorGetNext.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class IteratorGetNext extends RawOp implements Iterable> components; @SuppressWarnings("unchecked") - private IteratorGetNext(Operation operation) { - super(operation); + public IteratorGetNext(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int componentsLength = operation.outputListLength("components"); components = Arrays.asList(operation.outputList(outputIdx, componentsLength)); @@ -98,6 +104,9 @@ public Iterator> iterator() { return (Iterator) components.iterator(); } + @OpInputsMetadata( + outputsClass = IteratorGetNext.class + ) public static class Inputs extends RawOpInputs { /** * The iterator input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetNextAsOptional.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetNextAsOptional.java index c679cdbae31..6693787d360 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetNextAsOptional.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetNextAsOptional.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * Gets the next output from the given iterator as an Optional variant. */ +@OpMetadata( + opType = IteratorGetNextAsOptional.OP_NAME, + inputsClass = IteratorGetNextAsOptional.Inputs.class +) @Operator( group = "data" ) @@ -49,8 +55,8 @@ public final class IteratorGetNextAsOptional extends RawOp implements Operand optional; @SuppressWarnings("unchecked") - private IteratorGetNextAsOptional(Operation operation) { - super(operation); + public IteratorGetNextAsOptional(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; optional = operation.output(outputIdx++); } @@ -95,6 +101,9 @@ public Output asOutput() { return (Output) optional; } + @OpInputsMetadata( + outputsClass = IteratorGetNextAsOptional.class + ) public static class Inputs extends RawOpInputs { /** * The iterator input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetNextSync.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetNextSync.java index 1de3a7c0131..c0212523eb7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetNextSync.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorGetNextSync.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +44,10 @@ * the calling thread is not a member of the thread pool used to execute parallel * operations (e.g. in eager mode). 
*/ +@OpMetadata( + opType = IteratorGetNextSync.OP_NAME, + inputsClass = IteratorGetNextSync.Inputs.class +) @Operator( group = "data" ) @@ -54,8 +60,8 @@ public final class IteratorGetNextSync extends RawOp implements Iterable> components; @SuppressWarnings("unchecked") - private IteratorGetNextSync(Operation operation) { - super(operation); + public IteratorGetNextSync(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int componentsLength = operation.outputListLength("components"); components = Arrays.asList(operation.outputList(outputIdx, componentsLength)); @@ -102,6 +108,9 @@ public Iterator> iterator() { return (Iterator) components.iterator(); } + @OpInputsMetadata( + outputsClass = IteratorGetNextSync.class + ) public static class Inputs extends RawOpInputs { /** * The iterator input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorToStringHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorToStringHandle.java index 6854c2574c7..c89371833b6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorToStringHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/IteratorToStringHandle.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -34,6 +36,10 @@ /** * Converts the given {@code resource_handle} representing an iterator to a string. */ +@OpMetadata( + opType = IteratorToStringHandle.OP_NAME, + inputsClass = IteratorToStringHandle.Inputs.class +) @Operator( group = "data" ) @@ -45,8 +51,8 @@ public final class IteratorToStringHandle extends RawOp implements Operand stringHandle; - private IteratorToStringHandle(Operation operation) { - super(operation); + public IteratorToStringHandle(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; stringHandle = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return stringHandle; } + @OpInputsMetadata( + outputsClass = IteratorToStringHandle.class + ) public static class Inputs extends RawOpInputs { /** * A handle to an iterator resource. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LMDBDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LMDBDataset.java index f65b0c6a2ab..cd2f25f50f7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LMDBDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LMDBDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -46,6 +48,10 @@ *

    LMDB uses different file formats on big- and little-endian machines. * {@code data.LMDBDataset} can only read files in the format of the host machine. */ +@OpMetadata( + opType = LMDBDataset.OP_NAME, + inputsClass = LMDBDataset.Inputs.class +) @Operator( group = "data" ) @@ -58,8 +64,8 @@ public final class LMDBDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private LMDBDataset(Operation operation) { - super(operation); + public LMDBDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -105,6 +111,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = LMDBDataset.class + ) public static class Inputs extends RawOpInputs { /** * A scalar or a vector containing the name(s) of the binary file(s) to be diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LatencyStatsDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LatencyStatsDataset.java index bc8c24e59ff..23caca44faf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LatencyStatsDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LatencyStatsDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -38,6 +40,10 @@ /** * Records the latency of producing {@code input_dataset} elements in a StatsAggregator. 
*/ +@OpMetadata( + opType = LatencyStatsDataset.OP_NAME, + inputsClass = LatencyStatsDataset.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class LatencyStatsDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private LatencyStatsDataset(Operation operation) { - super(operation); + public LatencyStatsDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -98,6 +104,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = LatencyStatsDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LeakyReluGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LeakyReluGrad.java index b30117b7c13..f7aab374ff6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LeakyReluGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LeakyReluGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code backprops} output */ +@OpMetadata( + opType = LeakyReluGrad.OP_NAME, + inputsClass = LeakyReluGrad.Inputs.class +) public final class LeakyReluGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class LeakyReluGrad extends RawOp implements Ope private Output backprops; - private LeakyReluGrad(Operation operation) { - super(operation); + public LeakyReluGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; backprops = operation.output(outputIdx++); } @@ -123,6 +129,9 @@ public Options alpha(Float alpha) { } } + @OpInputsMetadata( + outputsClass = LeakyReluGrad.class + ) public static class Inputs extends RawOpInputs> { /** * The backpropagated gradients to the corresponding LeakyRelu operation. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LegacyParallelInterleaveDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LegacyParallelInterleaveDataset.java index 94c8f08e22c..e80cc1d2319 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LegacyParallelInterleaveDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LegacyParallelInterleaveDataset.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -45,6 +47,10 @@ * allows the training step to proceed so long as some data is available. *

    !! WARNING !! This dataset is not deterministic! */ +@OpMetadata( + opType = LegacyParallelInterleaveDataset.OP_NAME, + inputsClass = LegacyParallelInterleaveDataset.Inputs.class +) @Operator( group = "data" ) @@ -57,8 +63,8 @@ public final class LegacyParallelInterleaveDataset extends RawOp implements Oper private Output handle; @SuppressWarnings("unchecked") - private LegacyParallelInterleaveDataset(Operation operation) { - super(operation); + public LegacyParallelInterleaveDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -160,6 +166,9 @@ public Options deterministic(String deterministic) { } } + @OpInputsMetadata( + outputsClass = LegacyParallelInterleaveDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LoadDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LoadDataset.java index be67a1a67a3..08bc5254788 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LoadDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LoadDataset.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -39,6 +41,10 @@ /** * The LoadDataset operation */ +@OpMetadata( + opType = LoadDataset.OP_NAME, + inputsClass = LoadDataset.Inputs.class +) @Operator( group = "data" ) @@ -51,8 +57,8 @@ public final class LoadDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private LoadDataset(Operation operation) { - super(operation); + public LoadDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -141,6 +147,9 @@ public Options compression(String compression) { } } + @OpInputsMetadata( + outputsClass = LoadDataset.class + ) public static class Inputs extends RawOpInputs { /** * The path input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MakeIterator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MakeIterator.java index 93cca862b96..fb2eb9a4ebf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MakeIterator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MakeIterator.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; @@ -34,6 +36,10 @@ * This operation may be executed multiple times. Each execution will reset the * iterator in {@code iterator} to the first element of {@code dataset}. 
*/ +@OpMetadata( + opType = MakeIterator.OP_NAME, + inputsClass = MakeIterator.Inputs.class +) @Operator( group = "data" ) @@ -43,8 +49,8 @@ public final class MakeIterator extends RawOp { */ public static final String OP_NAME = "MakeIterator"; - private MakeIterator(Operation operation) { - super(operation); + public MakeIterator(Operation operation) { + super(operation, OP_NAME); } /** @@ -66,6 +72,9 @@ public static MakeIterator create(Scope scope, Operand dataset, return new MakeIterator(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = MakeIterator.class + ) public static class Inputs extends RawOpInputs { /** * The dataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MapAndBatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MapAndBatchDataset.java index b1bbacd6622..6d3f2f0cb84 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MapAndBatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MapAndBatchDataset.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -44,6 +46,10 @@ *

    Unlike a "MapDataset", which applies {@code f} sequentially, this dataset invokes up * to {@code batch_size * num_parallel_batches} copies of {@code f} in parallel. */ +@OpMetadata( + opType = MapAndBatchDataset.OP_NAME, + inputsClass = MapAndBatchDataset.Inputs.class +) @Operator( group = "data" ) @@ -56,8 +62,8 @@ public final class MapAndBatchDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private MapAndBatchDataset(Operation operation) { - super(operation); + public MapAndBatchDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -159,6 +165,9 @@ public Options preserveCardinality(Boolean preserveCardinality) { } } + @OpInputsMetadata( + outputsClass = MapAndBatchDataset.class + ) public static class Inputs extends RawOpInputs { /** * A variant tensor representing the input dataset. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MapDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MapDataset.java index 81c569034d3..41ed07c9da3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MapDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MapDataset.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ /** * Creates a dataset that applies {@code f} to the outputs of {@code input_dataset}. 
*/ +@OpMetadata( + opType = MapDataset.OP_NAME, + inputsClass = MapDataset.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class MapDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private MapDataset(Operation operation) { - super(operation); + public MapDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -166,6 +172,9 @@ public Options preserveCardinality(Boolean preserveCardinality) { } } + @OpInputsMetadata( + outputsClass = MapDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MatchingFilesDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MatchingFilesDataset.java index c8d0af517a3..015f28e5bdb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MatchingFilesDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MatchingFilesDataset.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -34,6 +36,10 @@ /** * The MatchingFilesDataset operation */ +@OpMetadata( + opType = MatchingFilesDataset.OP_NAME, + inputsClass = MatchingFilesDataset.Inputs.class +) @Operator( group = "data" ) @@ -46,8 +52,8 @@ public final class MatchingFilesDataset extends RawOp implements Operand private Output handle; @SuppressWarnings("unchecked") - private MatchingFilesDataset(Operation operation) { - super(operation); + public MatchingFilesDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -83,6 +89,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = MatchingFilesDataset.class + ) public static class Inputs extends RawOpInputs { /** * The patterns input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MaxIntraOpParallelismDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MaxIntraOpParallelismDataset.java index 883390d70de..1106758abe9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MaxIntraOpParallelismDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MaxIntraOpParallelismDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -38,6 +40,10 @@ /** * Creates a dataset that overrides the maximum intra-op parallelism. 
*/ +@OpMetadata( + opType = MaxIntraOpParallelismDataset.OP_NAME, + inputsClass = MaxIntraOpParallelismDataset.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class MaxIntraOpParallelismDataset extends RawOp implements Operand private Output handle; @SuppressWarnings("unchecked") - private MaxIntraOpParallelismDataset(Operation operation) { - super(operation); + public MaxIntraOpParallelismDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -99,6 +105,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = MaxIntraOpParallelismDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ModelDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ModelDataset.java index b600addbc87..4545f2aee1b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ModelDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ModelDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * Identity transformation that models performance. * Identity transformation that models performance. */ +@OpMetadata( + opType = ModelDataset.OP_NAME, + inputsClass = ModelDataset.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class ModelDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private ModelDataset(Operation operation) { - super(operation); + public ModelDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -187,6 +193,9 @@ public Options ramBudget(Long ramBudget) { } } + @OpInputsMetadata( + outputsClass = ModelDataset.class + ) public static class Inputs extends RawOpInputs { /** * A variant tensor representing the input dataset. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIterator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIterator.java index 60589923320..0fa7f6b0aea 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIterator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIterator.java @@ -30,12 +30,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; /** * Creates a MultiDeviceIterator resource. 
*/ +@OpMetadata( + opType = MultiDeviceIterator.OP_NAME, + inputsClass = MultiDeviceIterator.Inputs.class +) public final class MultiDeviceIterator extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class MultiDeviceIterator extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private MultiDeviceIterator(Operation operation) { - super(operation); + public MultiDeviceIterator(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -101,6 +107,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = MultiDeviceIterator.class + ) public static class Inputs extends RawOpInputs { /** * A list of devices the iterator works across. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorFromStringHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorFromStringHandle.java index ce3b30d17f7..5f0bea11def 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorFromStringHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorFromStringHandle.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * Generates a MultiDeviceIterator resource from its provided string handle. */ +@OpMetadata( + opType = MultiDeviceIteratorFromStringHandle.OP_NAME, + inputsClass = MultiDeviceIteratorFromStringHandle.Inputs.class +) public final class MultiDeviceIteratorFromStringHandle extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class MultiDeviceIteratorFromStringHandle extends RawOp implements private Output multiDeviceIterator; @SuppressWarnings("unchecked") - private MultiDeviceIteratorFromStringHandle(Operation operation) { - super(operation); + public MultiDeviceIteratorFromStringHandle(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; multiDeviceIterator = operation.output(outputIdx++); } @@ -150,6 +156,9 @@ public Options outputShapes(Shape... outputShapes) { } } + @OpInputsMetadata( + outputsClass = MultiDeviceIteratorFromStringHandle.class + ) public static class Inputs extends RawOpInputs { /** * String representing the resource. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorGetNextFromShard.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorGetNextFromShard.java index 778bf06b10b..6c1edc53a1c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorGetNextFromShard.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorGetNextFromShard.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; @@ -39,6 +41,10 @@ /** * Gets next element for the provided shard number. */ +@OpMetadata( + opType = MultiDeviceIteratorGetNextFromShard.OP_NAME, + inputsClass = MultiDeviceIteratorGetNextFromShard.Inputs.class +) public final class MultiDeviceIteratorGetNextFromShard extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine @@ -48,8 +54,8 @@ public final class MultiDeviceIteratorGetNextFromShard extends RawOp implements private List> components; @SuppressWarnings("unchecked") - private MultiDeviceIteratorGetNextFromShard(Operation operation) { - super(operation); + public MultiDeviceIteratorGetNextFromShard(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int componentsLength = operation.outputListLength("components"); components = Arrays.asList(operation.outputList(outputIdx, componentsLength)); @@ -102,6 +108,9 @@ public Iterator> iterator() { return (Iterator) components.iterator(); } + @OpInputsMetadata( + outputsClass = MultiDeviceIteratorGetNextFromShard.class + ) public static class Inputs extends RawOpInputs { /** * A MultiDeviceIterator resource. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorInit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorInit.java index 107b2c1cb5f..a6879702e46 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorInit.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorInit.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; /** * Initializes the multi device iterator with the given dataset. 
*/ +@OpMetadata( + opType = MultiDeviceIteratorInit.OP_NAME, + inputsClass = MultiDeviceIteratorInit.Inputs.class +) public final class MultiDeviceIteratorInit extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -41,8 +47,8 @@ public final class MultiDeviceIteratorInit extends RawOp implements Operand incarnationId; - private MultiDeviceIteratorInit(Operation operation) { - super(operation); + public MultiDeviceIteratorInit(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; incarnationId = operation.output(outputIdx++); } @@ -83,6 +89,9 @@ public Output asOutput() { return incarnationId; } + @OpInputsMetadata( + outputsClass = MultiDeviceIteratorInit.class + ) public static class Inputs extends RawOpInputs { /** * Dataset to be iterated upon. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorToStringHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorToStringHandle.java index d65385e4f31..bf4763765c7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorToStringHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MultiDeviceIteratorToStringHandle.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; /** * Produces a string handle for the given MultiDeviceIterator. */ +@OpMetadata( + opType = MultiDeviceIteratorToStringHandle.OP_NAME, + inputsClass = MultiDeviceIteratorToStringHandle.Inputs.class +) public final class MultiDeviceIteratorToStringHandle extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -41,8 +47,8 @@ public final class MultiDeviceIteratorToStringHandle extends RawOp implements Op private Output stringHandle; - private MultiDeviceIteratorToStringHandle(Operation operation) { - super(operation); + public MultiDeviceIteratorToStringHandle(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; stringHandle = operation.output(outputIdx++); } @@ -78,6 +84,9 @@ public Output asOutput() { return stringHandle; } + @OpInputsMetadata( + outputsClass = MultiDeviceIteratorToStringHandle.class + ) public static class Inputs extends RawOpInputs { /** * A MultiDeviceIterator resource. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/NonSerializableDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/NonSerializableDataset.java index a57c2fc3513..b7ce803ce39 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/NonSerializableDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/NonSerializableDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * The NonSerializableDataset operation */ +@OpMetadata( + opType = NonSerializableDataset.OP_NAME, + inputsClass = NonSerializableDataset.Inputs.class +) @Operator( group = "data" ) @@ -49,8 +55,8 @@ public final class NonSerializableDataset extends RawOp implements Operand handle; @SuppressWarnings("unchecked") - private NonSerializableDataset(Operation operation) { - super(operation); + public NonSerializableDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -95,6 +101,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = NonSerializableDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OneShotIterator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OneShotIterator.java index a2744d8bacb..f022e05c941 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OneShotIterator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OneShotIterator.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -54,6 +56,10 @@ * (including fed values) as parameters, and which may be reset multiple * times by rerunning "MakeIterator". 
*/ +@OpMetadata( + opType = OneShotIterator.OP_NAME, + inputsClass = OneShotIterator.Inputs.class +) @Operator( group = "data" ) @@ -66,8 +72,8 @@ public final class OneShotIterator extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private OneShotIterator(Operation operation) { - super(operation); + public OneShotIterator(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -179,6 +185,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = OneShotIterator.class + ) public static class Inputs extends RawOpInputs { /** * The outputTypes attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptimizeDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptimizeDataset.java index 17b48b79f36..a30c7587cdc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptimizeDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptimizeDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -39,6 +41,10 @@ * Creates a dataset by applying related optimizations to {@code input_dataset}. * Creates a dataset by applying related optimizations to {@code input_dataset}. */ +@OpMetadata( + opType = OptimizeDataset.OP_NAME, + inputsClass = OptimizeDataset.Inputs.class +) @Operator( group = "data" ) @@ -51,8 +57,8 @@ public final class OptimizeDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private OptimizeDataset(Operation operation) { - super(operation); + public OptimizeDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -169,6 +175,9 @@ public Options optimizationConfigs(String... optimizationConfigs) { } } + @OpInputsMetadata( + outputsClass = OptimizeDataset.class + ) public static class Inputs extends RawOpInputs { /** * A variant tensor representing the input dataset. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalFromValue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalFromValue.java index 87d6a090953..1d8c245fb7f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalFromValue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalFromValue.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -35,6 +37,10 @@ /** * Constructs an Optional variant from a tuple of tensors. 
*/ +@OpMetadata( + opType = OptionalFromValue.OP_NAME, + inputsClass = OptionalFromValue.Inputs.class +) @Operator( group = "data" ) @@ -47,8 +53,8 @@ public final class OptionalFromValue extends RawOp implements Operand { private Output optional; @SuppressWarnings("unchecked") - private OptionalFromValue(Operation operation) { - super(operation); + public OptionalFromValue(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; optional = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return (Output) optional; } + @OpInputsMetadata( + outputsClass = OptionalFromValue.class + ) public static class Inputs extends RawOpInputs { /** * The components input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalGetValue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalGetValue.java index aa3320ef50e..4293d28a907 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalGetValue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalGetValue.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ /** * Returns the value stored in an Optional variant or raises an error if none exists. */ +@OpMetadata( + opType = OptionalGetValue.OP_NAME, + inputsClass = OptionalGetValue.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class OptionalGetValue extends RawOp implements Iterable> components; @SuppressWarnings("unchecked") - private OptionalGetValue(Operation operation) { - super(operation); + public OptionalGetValue(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int componentsLength = operation.outputListLength("components"); components = Arrays.asList(operation.outputList(outputIdx, componentsLength)); @@ -98,6 +104,9 @@ public Iterator> iterator() { return (Iterator) components.iterator(); } + @OpInputsMetadata( + outputsClass = OptionalGetValue.class + ) public static class Inputs extends RawOpInputs { /** * The optional input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalHasValue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalHasValue.java index 53dac930b5a..930f9e7725e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalHasValue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalHasValue.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TBool; import org.tensorflow.types.family.TType; @@ -34,6 +36,10 @@ /** * Returns true if and only if the given Optional variant has a value. 
*/ +@OpMetadata( + opType = OptionalHasValue.OP_NAME, + inputsClass = OptionalHasValue.Inputs.class +) @Operator( group = "data" ) @@ -45,8 +51,8 @@ public final class OptionalHasValue extends RawOp implements Operand { private Output hasValue; - private OptionalHasValue(Operation operation) { - super(operation); + public OptionalHasValue(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; hasValue = operation.output(outputIdx++); } @@ -81,6 +87,9 @@ public Output asOutput() { return hasValue; } + @OpInputsMetadata( + outputsClass = OptionalHasValue.class + ) public static class Inputs extends RawOpInputs { /** * The optional input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalNone.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalNone.java index f051ca0c79b..5b2ca755908 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalNone.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionalNone.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** * Creates an Optional variant with no value. */ +@OpMetadata( + opType = OptionalNone.OP_NAME, + inputsClass = OptionalNone.Inputs.class +) @Operator( group = "data" ) @@ -45,8 +51,8 @@ public final class OptionalNone extends RawOp implements Operand { private Output optional; @SuppressWarnings("unchecked") - private OptionalNone(Operation operation) { - super(operation); + public OptionalNone(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; optional = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return (Output) optional; } + @OpInputsMetadata( + outputsClass = OptionalNone.class + ) public static class Inputs extends RawOpInputs { public Inputs(GraphOperation op) { super(new OptionalNone(op), op, Arrays.asList()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionsDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionsDataset.java index c62b67b68c4..63b202eb184 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionsDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionsDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * Creates a dataset by attaching tf.data.Options to {@code input_dataset}. 
*/ +@OpMetadata( + opType = OptionsDataset.OP_NAME, + inputsClass = OptionsDataset.Inputs.class +) @Operator( group = "data" ) @@ -49,8 +55,8 @@ public final class OptionsDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private OptionsDataset(Operation operation) { - super(operation); + public OptionsDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -98,6 +104,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = OptionsDataset.class + ) public static class Inputs extends RawOpInputs { /** * A variant tensor representing the input dataset. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PaddedBatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PaddedBatchDataset.java index fe1e629e8cd..5941ac19a16 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PaddedBatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PaddedBatchDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -39,6 +41,10 @@ /** * Creates a dataset that batches and pads {@code batch_size} elements from the input. */ +@OpMetadata( + opType = PaddedBatchDataset.OP_NAME, + inputsClass = PaddedBatchDataset.Inputs.class +) @Operator( group = "data" ) @@ -51,8 +57,8 @@ public final class PaddedBatchDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private PaddedBatchDataset(Operation operation) { - super(operation); + public PaddedBatchDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -150,6 +156,9 @@ public Options parallelCopy(Boolean parallelCopy) { } } + @OpInputsMetadata( + outputsClass = PaddedBatchDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelBatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelBatchDataset.java index 15805d15774..08e3928b4b6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelBatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelBatchDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -39,6 +41,10 @@ /** * The ParallelBatchDataset operation */ +@OpMetadata( + opType = ParallelBatchDataset.OP_NAME, + inputsClass = ParallelBatchDataset.Inputs.class +) @Operator( group = "data" ) @@ -51,8 +57,8 @@ public final class ParallelBatchDataset extends RawOp implements Operand private Output handle; @SuppressWarnings("unchecked") - private 
ParallelBatchDataset(Operation operation) { - super(operation); + public ParallelBatchDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -169,6 +175,9 @@ public Options deterministic(String deterministic) { } } + @OpInputsMetadata( + outputsClass = ParallelBatchDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelInterleaveDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelInterleaveDataset.java index c5e17ed7388..edbe27c6f73 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelInterleaveDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelInterleaveDataset.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -50,6 +52,10 @@ * {@code experimental_deterministic} parameter of {@code tf.data.Options} to {@code False}. * This can improve performance at the expense of non-determinism. */ +@OpMetadata( + opType = ParallelInterleaveDataset.OP_NAME, + inputsClass = ParallelInterleaveDataset.Inputs.class +) @Operator( group = "data" ) @@ -62,8 +68,8 @@ public final class ParallelInterleaveDataset extends RawOp implements Operand handle; @SuppressWarnings("unchecked") - private ParallelInterleaveDataset(Operation operation) { - super(operation); + public ParallelInterleaveDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -182,6 +188,9 @@ public Options deterministic(String deterministic) { } } + @OpInputsMetadata( + outputsClass = ParallelInterleaveDataset.class + ) public static class Inputs extends RawOpInputs { /** * Dataset that produces a stream of arguments for the function {@code f}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelMapDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelMapDataset.java index 455c6b600e9..1768a7c488d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelMapDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelMapDataset.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -41,6 +43,10 @@ * Unlike a "MapDataset", which applies {@code f} sequentially, this dataset invokes up * to {@code num_parallel_calls} copies of {@code f} in parallel. 
*/ +@OpMetadata( + opType = ParallelMapDataset.OP_NAME, + inputsClass = ParallelMapDataset.Inputs.class +) @Operator( group = "data" ) @@ -53,8 +59,8 @@ public final class ParallelMapDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private ParallelMapDataset(Operation operation) { - super(operation); + public ParallelMapDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -198,6 +204,9 @@ public Options preserveCardinality(Boolean preserveCardinality) { } } + @OpInputsMetadata( + outputsClass = ParallelMapDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParseExampleDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParseExampleDataset.java index 501716524f8..1fae1337fc4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParseExampleDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParseExampleDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -39,6 +41,10 @@ /** * Transforms {@code input_dataset} containing {@code Example} protos as vectors of DT_STRING into a dataset of {@code Tensor} or {@code SparseTensor} objects representing the parsed features. */ +@OpMetadata( + opType = ParseExampleDataset.OP_NAME, + inputsClass = ParseExampleDataset.Inputs.class +) @Operator( group = "data" ) @@ -51,8 +57,8 @@ public final class ParseExampleDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private ParseExampleDataset(Operation operation) { - super(operation); + public ParseExampleDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -240,6 +246,9 @@ public Options raggedKeys(String... raggedKeys) { } } + @OpInputsMetadata( + outputsClass = ParseExampleDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PrefetchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PrefetchDataset.java index b27ce7b56ef..8334e1aa890 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PrefetchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PrefetchDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -38,6 +40,10 @@ /** * Creates a dataset that asynchronously prefetches elements from {@code input_dataset}. 
*/ +@OpMetadata( + opType = PrefetchDataset.OP_NAME, + inputsClass = PrefetchDataset.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class PrefetchDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private PrefetchDataset(Operation operation) { - super(operation); + public PrefetchDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -191,6 +197,9 @@ public Options bufferSizeMin(Long bufferSizeMin) { } } + @OpInputsMetadata( + outputsClass = PrefetchDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PrivateThreadPoolDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PrivateThreadPoolDataset.java index 637436c7385..307eef240ec 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PrivateThreadPoolDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PrivateThreadPoolDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -38,6 +40,10 @@ /** * Creates a dataset that uses a custom thread pool to compute {@code input_dataset}. */ +@OpMetadata( + opType = PrivateThreadPoolDataset.OP_NAME, + inputsClass = PrivateThreadPoolDataset.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class PrivateThreadPoolDataset extends RawOp implements Operand handle; @SuppressWarnings("unchecked") - private PrivateThreadPoolDataset(Operation operation) { - super(operation); + public PrivateThreadPoolDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -99,6 +105,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = PrivateThreadPoolDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RandomDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RandomDataset.java index c02dc5045d1..9bab5a5f5b3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RandomDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RandomDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -46,6 +48,10 @@ * performed is determined by the {@code experimental_optimization.hoist_random_uniform} * option of {@code tf.data.Options}. 
*/ +@OpMetadata( + opType = RandomDataset.OP_NAME, + inputsClass = RandomDataset.Inputs.class +) @Operator( group = "data" ) @@ -58,8 +64,8 @@ public final class RandomDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private RandomDataset(Operation operation) { - super(operation); + public RandomDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -108,6 +114,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = RandomDataset.class + ) public static class Inputs extends RawOpInputs { /** * A scalar seed for the random number generator. If either seed or diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RangeDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RangeDataset.java index a77dbc0faa1..fe3b9afedfe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RangeDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RangeDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -38,6 +40,10 @@ /** * Creates a dataset with a range of values. Corresponds to python's xrange. */ +@OpMetadata( + opType = RangeDataset.OP_NAME, + inputsClass = RangeDataset.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class RangeDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private RangeDataset(Operation operation) { - super(operation); + public RangeDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -100,6 +106,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = RangeDataset.class + ) public static class Inputs extends RawOpInputs { /** * corresponds to start in python's xrange(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RebatchDatasetV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RebatchDatasetV2.java index f2f29dfa889..1d83f3b9a49 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RebatchDatasetV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RebatchDatasetV2.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -41,6 +43,10 @@ * Creates a dataset that rebatches elements from {@code input_dataset} into new batch * sizes. 
*/ +@OpMetadata( + opType = RebatchDatasetV2.OP_NAME, + inputsClass = RebatchDatasetV2.Inputs.class +) @Operator( group = "data" ) @@ -53,8 +59,8 @@ public final class RebatchDatasetV2 extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private RebatchDatasetV2(Operation operation) { - super(operation); + public RebatchDatasetV2(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -105,6 +111,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = RebatchDatasetV2.class + ) public static class Inputs extends RawOpInputs { /** * A variant tensor representing the input dataset. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ReduceDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ReduceDataset.java index 5c5933a4ecc..26d4cdb1210 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ReduceDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ReduceDataset.java @@ -32,6 +32,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ /** * Reduces the input dataset to a singleton using a reduce function. */ +@OpMetadata( + opType = ReduceDataset.OP_NAME, + inputsClass = ReduceDataset.Inputs.class +) @Operator( group = "data" ) @@ -51,8 +57,8 @@ public final class ReduceDataset extends RawOp implements Iterable> components; @SuppressWarnings("unchecked") - private ReduceDataset(Operation operation) { - super(operation); + public ReduceDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int componentsLength = operation.outputListLength("components"); components = Arrays.asList(operation.outputList(outputIdx, componentsLength)); @@ -148,6 +154,9 @@ public Options useInterOpParallelism(Boolean useInterOpParallelism) { } } + @OpInputsMetadata( + outputsClass = ReduceDataset.class + ) public static class Inputs extends RawOpInputs { /** * A variant tensor representing the input dataset. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RegisterDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RegisterDataset.java index faad87061c8..73adca2a8a1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RegisterDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RegisterDataset.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -35,6 +37,10 @@ /** * Registers a dataset with the tf.data service. 
*/ +@OpMetadata( + opType = RegisterDataset.OP_NAME, + inputsClass = RegisterDataset.Inputs.class +) @Operator( group = "data" ) @@ -46,8 +52,8 @@ public final class RegisterDataset extends RawOp implements Operand { private Output datasetId; - private RegisterDataset(Operation operation) { - super(operation); + public RegisterDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; datasetId = operation.output(outputIdx++); } @@ -89,6 +95,9 @@ public Output asOutput() { return datasetId; } + @OpInputsMetadata( + outputsClass = RegisterDataset.class + ) public static class Inputs extends RawOpInputs { /** * The dataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RepeatDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RepeatDataset.java index be3bf8d47fe..5139087b24f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RepeatDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RepeatDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -38,6 +40,10 @@ /** * Creates a dataset that emits the outputs of {@code input_dataset} {@code count} times. */ +@OpMetadata( + opType = RepeatDataset.OP_NAME, + inputsClass = RepeatDataset.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class RepeatDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private RepeatDataset(Operation operation) { - super(operation); + public RepeatDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -99,6 +105,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = RepeatDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SamplingDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SamplingDataset.java index f114ab2c440..8f1658d5034 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SamplingDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SamplingDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -44,6 +46,10 @@ * {@code experimental_optimization.filter_with_random_uniform_fusion} option of * {@code tf.data.Options}. 
*/ +@OpMetadata( + opType = SamplingDataset.OP_NAME, + inputsClass = SamplingDataset.Inputs.class +) @Operator( group = "data" ) @@ -56,8 +62,8 @@ public final class SamplingDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private SamplingDataset(Operation operation) { - super(operation); + public SamplingDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -110,6 +116,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = SamplingDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SaveDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SaveDataset.java index b74ce0f5c33..709cf5a7037 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SaveDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SaveDataset.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -36,6 +38,10 @@ /** * The SaveDataset operation */ +@OpMetadata( + opType = SaveDataset.OP_NAME, + inputsClass = SaveDataset.Inputs.class +) @Operator( group = "data" ) @@ -45,8 +51,8 @@ public final class SaveDataset extends RawOp { */ public static final String OP_NAME = "SaveDataset"; - private SaveDataset(Operation operation) { - super(operation); + public SaveDataset(Operation operation) { + super(operation, OP_NAME); } /** @@ -138,6 +144,9 @@ public Options useShardFunc(Boolean useShardFunc) { } } + @OpInputsMetadata( + outputsClass = SaveDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ScanDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ScanDataset.java index 3507f938ad7..dcb6a1d8309 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ScanDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ScanDataset.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ /** * Creates a dataset successively reduces {@code f} over the elements of {@code input_dataset}. 
*/ +@OpMetadata( + opType = ScanDataset.OP_NAME, + inputsClass = ScanDataset.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class ScanDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private ScanDataset(Operation operation) { - super(operation); + public ScanDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -168,6 +174,9 @@ public Options useDefaultDevice(Boolean useDefaultDevice) { } } + @OpInputsMetadata( + outputsClass = ScanDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SerializeIterator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SerializeIterator.java index 9afe58663a0..34466b422f5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SerializeIterator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SerializeIterator.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** * Converts the given {@code resource_handle} representing an iterator to a variant tensor. */ +@OpMetadata( + opType = SerializeIterator.OP_NAME, + inputsClass = SerializeIterator.Inputs.class +) @Operator( group = "data" ) @@ -45,8 +51,8 @@ public final class SerializeIterator extends RawOp implements Operand { private Output serialized; @SuppressWarnings("unchecked") - private SerializeIterator(Operation operation) { - super(operation); + public SerializeIterator(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; serialized = operation.output(outputIdx++); } @@ -123,6 +129,9 @@ public Options externalStatePolicy(Long externalStatePolicy) { } } + @OpInputsMetadata( + outputsClass = SerializeIterator.class + ) public static class Inputs extends RawOpInputs { /** * A handle to an iterator resource. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SetStatsAggregatorDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SetStatsAggregatorDataset.java index 7a3fbe5988e..59208610c19 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SetStatsAggregatorDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SetStatsAggregatorDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -38,6 +40,10 @@ /** * The SetStatsAggregatorDataset operation */ +@OpMetadata( + opType = SetStatsAggregatorDataset.OP_NAME, + inputsClass = SetStatsAggregatorDataset.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class SetStatsAggregatorDataset extends RawOp implements Operand handle; @SuppressWarnings("unchecked") - private SetStatsAggregatorDataset(Operation operation) { - super(operation); + public SetStatsAggregatorDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -104,6 +110,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = SetStatsAggregatorDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShardDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShardDataset.java index e380b54d962..951878e72d4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShardDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShardDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -38,6 +40,10 @@ /** * Creates a {@code Dataset} that includes only 1/{@code num_shards} of this dataset. 
*/ +@OpMetadata( + opType = ShardDataset.OP_NAME, + inputsClass = ShardDataset.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class ShardDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private ShardDataset(Operation operation) { - super(operation); + public ShardDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -140,6 +146,9 @@ public Options requireNonEmpty(Boolean requireNonEmpty) { } } + @OpInputsMetadata( + outputsClass = ShardDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShuffleAndRepeatDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShuffleAndRepeatDataset.java index 47d1782ba34..318f305bd8b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShuffleAndRepeatDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShuffleAndRepeatDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -38,6 +40,10 @@ /** * The ShuffleAndRepeatDatasetV2 operation */ +@OpMetadata( + opType = ShuffleAndRepeatDataset.OP_NAME, + inputsClass = ShuffleAndRepeatDataset.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class ShuffleAndRepeatDataset extends RawOp implements Operand handle; @SuppressWarnings("unchecked") - private ShuffleAndRepeatDataset(Operation operation) { - super(operation); + public ShuffleAndRepeatDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -147,6 +153,9 @@ public Options reshuffleEachIteration(Boolean reshuffleEachIteration) { } } + @OpInputsMetadata( + outputsClass = ShuffleAndRepeatDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShuffleDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShuffleDataset.java index dc222f3c559..92cf83d4c4d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShuffleDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShuffleDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -38,6 +40,10 @@ /** * The ShuffleDatasetV3 operation */ +@OpMetadata( + opType = ShuffleDataset.OP_NAME, + inputsClass = ShuffleDataset.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class ShuffleDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private ShuffleDataset(Operation operation) { - super(operation); + public 
ShuffleDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -145,6 +151,9 @@ public Options reshuffleEachIteration(Boolean reshuffleEachIteration) { } } + @OpInputsMetadata( + outputsClass = ShuffleDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SkipDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SkipDataset.java index 366b29b17d2..2734d6ff08d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SkipDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SkipDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -38,6 +40,10 @@ /** * Creates a dataset that skips {@code count} elements from the {@code input_dataset}. */ +@OpMetadata( + opType = SkipDataset.OP_NAME, + inputsClass = SkipDataset.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class SkipDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private SkipDataset(Operation operation) { - super(operation); + public SkipDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -99,6 +105,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = SkipDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SleepDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SleepDataset.java index 9d687853a45..f337c5d3fbb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SleepDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SleepDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -38,6 +40,10 @@ /** * The SleepDataset operation */ +@OpMetadata( + opType = SleepDataset.OP_NAME, + inputsClass = SleepDataset.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class SleepDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private SleepDataset(Operation operation) { - super(operation); + public SleepDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -99,6 +105,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = SleepDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SlidingWindowDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SlidingWindowDataset.java index 4d46ce5a558..673e100adc6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SlidingWindowDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SlidingWindowDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -38,6 +40,10 @@ /** * Creates a dataset that passes a sliding window over {@code input_dataset}. */ +@OpMetadata( + opType = SlidingWindowDataset.OP_NAME, + inputsClass = SlidingWindowDataset.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class SlidingWindowDataset extends RawOp implements Operand private Output handle; @SuppressWarnings("unchecked") - private SlidingWindowDataset(Operation operation) { - super(operation); + public SlidingWindowDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -106,6 +112,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = SlidingWindowDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotDataset.java index 45001f8e541..323fa9514b3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotDataset.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -43,6 +45,10 @@ * If not, it will run the preprocessing pipeline as usual, and write out a * snapshot of the data processed for future use. */ +@OpMetadata( + opType = SnapshotDataset.OP_NAME, + inputsClass = SnapshotDataset.Inputs.class +) @Operator( group = "data" ) @@ -55,8 +61,8 @@ public final class SnapshotDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private SnapshotDataset(Operation operation) { - super(operation); + public SnapshotDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -257,6 +263,9 @@ public Options hash(Long hash) { } } + @OpInputsMetadata( + outputsClass = SnapshotDataset.class + ) public static class Inputs extends RawOpInputs { /** * A variant tensor representing the input dataset. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotDatasetReader.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotDatasetReader.java index 0d07f5021ea..b41f6dbf768 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotDatasetReader.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotDatasetReader.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -38,6 +40,10 @@ /** * The SnapshotDatasetReader operation */ +@OpMetadata( + opType = SnapshotDatasetReader.OP_NAME, + inputsClass = SnapshotDatasetReader.Inputs.class +) public final class SnapshotDatasetReader extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -47,8 +53,8 @@ public final class SnapshotDatasetReader extends RawOp implements Operand private Output handle; @SuppressWarnings("unchecked") - private SnapshotDatasetReader(Operation operation) { - super(operation); + public SnapshotDatasetReader(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -137,6 +143,9 @@ public Options compression(String compression) { } } + @OpInputsMetadata( + outputsClass = SnapshotDatasetReader.class + ) public static class Inputs extends RawOpInputs { /** * The shardDir input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotNestedDatasetReader.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotNestedDatasetReader.java index 8fa6cd57f37..8167304faba 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotNestedDatasetReader.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotNestedDatasetReader.java @@ -30,12 +30,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; /** * The SnapshotNestedDatasetReader operation */ +@OpMetadata( + opType = SnapshotNestedDatasetReader.OP_NAME, + inputsClass = SnapshotNestedDatasetReader.Inputs.class +) public final class SnapshotNestedDatasetReader extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class SnapshotNestedDatasetReader extends RawOp implements Operand< private Output handle; @SuppressWarnings("unchecked") - private SnapshotNestedDatasetReader(Operation operation) { - super(operation); + public SnapshotNestedDatasetReader(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -92,6 +98,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = SnapshotNestedDatasetReader.class + ) public static class Inputs extends RawOpInputs { /** * The inputs input diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SparseTensorSliceDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SparseTensorSliceDataset.java index ba357f78d13..6bea76cd06e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SparseTensorSliceDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SparseTensorSliceDataset.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -35,6 +37,10 @@ /** * Creates a dataset that splits a SparseTensor into elements row-wise. */ +@OpMetadata( + opType = SparseTensorSliceDataset.OP_NAME, + inputsClass = SparseTensorSliceDataset.Inputs.class +) @Operator( group = "data" ) @@ -47,8 +53,8 @@ public final class SparseTensorSliceDataset extends RawOp implements Operand handle; @SuppressWarnings("unchecked") - private SparseTensorSliceDataset(Operation operation) { - super(operation); + public SparseTensorSliceDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -89,6 +95,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = SparseTensorSliceDataset.class + ) public static class Inputs extends RawOpInputs { /** * The indices input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SqlDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SqlDataset.java index 7e63d4a1117..81ecba9af64 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SqlDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SqlDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -38,6 +40,10 @@ /** * Creates a dataset that executes a SQL query and emits rows of the result set. */ +@OpMetadata( + opType = SqlDataset.OP_NAME, + inputsClass = SqlDataset.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class SqlDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private SqlDataset(Operation operation) { - super(operation); + public SqlDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -101,6 +107,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = SqlDataset.class + ) public static class Inputs extends RawOpInputs { /** * The database type. Currently, the only supported type is 'sqlite'. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/StatsAggregatorHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/StatsAggregatorHandle.java index 5e3f2df0d94..d27939fd91b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/StatsAggregatorHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/StatsAggregatorHandle.java @@ -27,11 +27,17 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.family.TType; /** * The StatsAggregatorHandleV2 operation */ +@OpMetadata( + opType = StatsAggregatorHandle.OP_NAME, + inputsClass = StatsAggregatorHandle.Inputs.class +) public final class StatsAggregatorHandle extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -41,8 +47,8 @@ public final class StatsAggregatorHandle extends RawOp implements Operand private Output handle; @SuppressWarnings("unchecked") - private StatsAggregatorHandle(Operation operation) { - super(operation); + public StatsAggregatorHandle(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -141,6 +147,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = StatsAggregatorHandle.class + ) public static class Inputs extends RawOpInputs { /** * The container attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/StatsAggregatorSetSummaryWriter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/StatsAggregatorSetSummaryWriter.java index 5df60be9489..ce220e627d1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/StatsAggregatorSetSummaryWriter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/StatsAggregatorSetSummaryWriter.java @@ -26,19 +26,25 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.family.TType; /** * Set a summary_writer_interface to record statistics using given stats_aggregator. 
*/ +@OpMetadata( + opType = StatsAggregatorSetSummaryWriter.OP_NAME, + inputsClass = StatsAggregatorSetSummaryWriter.Inputs.class +) public final class StatsAggregatorSetSummaryWriter extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "StatsAggregatorSetSummaryWriter"; - private StatsAggregatorSetSummaryWriter(Operation operation) { - super(operation); + public StatsAggregatorSetSummaryWriter(Operation operation) { + super(operation, OP_NAME); } /** @@ -60,6 +66,9 @@ public static StatsAggregatorSetSummaryWriter create(Scope scope, return new StatsAggregatorSetSummaryWriter(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = StatsAggregatorSetSummaryWriter.class + ) public static class Inputs extends RawOpInputs { /** * The statsAggregator input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TakeDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TakeDataset.java index dcd201e8938..83cdbb86ee1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TakeDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TakeDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -38,6 +40,10 @@ /** * Creates a dataset that contains {@code count} elements from the {@code input_dataset}. */ +@OpMetadata( + opType = TakeDataset.OP_NAME, + inputsClass = TakeDataset.Inputs.class +) @Operator( group = "data" ) @@ -50,8 +56,8 @@ public final class TakeDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private TakeDataset(Operation operation) { - super(operation); + public TakeDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -100,6 +106,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = TakeDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TakeWhileDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TakeWhileDataset.java index e1fdb783b61..372da7e0af7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TakeWhileDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TakeWhileDataset.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -44,6 +46,10 @@ *

 * <li>One tensor for each value in {@code other_arguments}.</li>
  • * */ +@OpMetadata( + opType = TakeWhileDataset.OP_NAME, + inputsClass = TakeWhileDataset.Inputs.class +) @Operator( group = "data" ) @@ -56,8 +62,8 @@ public final class TakeWhileDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private TakeWhileDataset(Operation operation) { - super(operation); + public TakeWhileDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -108,6 +114,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = TakeWhileDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TensorDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TensorDataset.java index 96406ec3243..59f597498a6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TensorDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TensorDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * Creates a dataset that emits {@code components} as a tuple of tensors once. */ +@OpMetadata( + opType = TensorDataset.OP_NAME, + inputsClass = TensorDataset.Inputs.class +) @Operator( group = "data" ) @@ -49,8 +55,8 @@ public final class TensorDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private TensorDataset(Operation operation) { - super(operation); + public TensorDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -93,6 +99,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = TensorDataset.class + ) public static class Inputs extends RawOpInputs { /** * The components input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TensorSliceDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TensorSliceDataset.java index cffdcc7140b..1e74a18e2b7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TensorSliceDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TensorSliceDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * Creates a dataset that emits each dim-0 slice of {@code components} once. 
*/ +@OpMetadata( + opType = TensorSliceDataset.OP_NAME, + inputsClass = TensorSliceDataset.Inputs.class +) @Operator( group = "data" ) @@ -49,8 +55,8 @@ public final class TensorSliceDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private TensorSliceDataset(Operation operation) { - super(operation); + public TensorSliceDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -93,6 +99,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = TensorSliceDataset.class + ) public static class Inputs extends RawOpInputs { /** * The components input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TextLineDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TextLineDataset.java index 1c64b7a5913..e58a0eaad0a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TextLineDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TextLineDataset.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -35,6 +37,10 @@ /** * Creates a dataset that emits the lines of one or more text files. */ +@OpMetadata( + opType = TextLineDataset.OP_NAME, + inputsClass = TextLineDataset.Inputs.class +) @Operator( group = "data" ) @@ -47,8 +53,8 @@ public final class TextLineDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private TextLineDataset(Operation operation) { - super(operation); + public TextLineDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = TextLineDataset.class + ) public static class Inputs extends RawOpInputs { /** * A scalar or a vector containing the name(s) of the file(s) to be diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TfRecordDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TfRecordDataset.java index 87345662d72..d46b73555f4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TfRecordDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TfRecordDataset.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -35,6 +37,10 @@ /** * Creates a dataset that emits the records from one or more TFRecord files. 
*/ +@OpMetadata( + opType = TfRecordDataset.OP_NAME, + inputsClass = TfRecordDataset.Inputs.class +) @Operator( group = "data" ) @@ -47,8 +53,8 @@ public final class TfRecordDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private TfRecordDataset(Operation operation) { - super(operation); + public TfRecordDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -92,6 +98,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = TfRecordDataset.class + ) public static class Inputs extends RawOpInputs { /** * A scalar or vector containing the name(s) of the file(s) to be diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ThreadPoolDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ThreadPoolDataset.java index c9ff2b00969..08e8e93d318 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ThreadPoolDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ThreadPoolDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * Creates a dataset that uses a custom thread pool to compute {@code input_dataset}. */ +@OpMetadata( + opType = ThreadPoolDataset.OP_NAME, + inputsClass = ThreadPoolDataset.Inputs.class +) @Operator( group = "data" ) @@ -49,8 +55,8 @@ public final class ThreadPoolDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private ThreadPoolDataset(Operation operation) { - super(operation); + public ThreadPoolDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -98,6 +104,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = ThreadPoolDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ThreadPoolHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ThreadPoolHandle.java index 17d867e8b8a..ebb41671d3f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ThreadPoolHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ThreadPoolHandle.java @@ -27,11 +27,17 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.family.TType; /** * Creates a dataset that uses a custom thread pool to compute {@code input_dataset}. 
*/ +@OpMetadata( + opType = ThreadPoolHandle.OP_NAME, + inputsClass = ThreadPoolHandle.Inputs.class +) public final class ThreadPoolHandle extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -41,8 +47,8 @@ public final class ThreadPoolHandle extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private ThreadPoolHandle(Operation operation) { - super(operation); + public ThreadPoolHandle(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -177,6 +183,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = ThreadPoolHandle.class + ) public static class Inputs extends RawOpInputs { /** * The number of threads in the thread pool. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UnbatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UnbatchDataset.java index 0abf0d19313..5256a74b953 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UnbatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UnbatchDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * A dataset that splits the elements of its input into multiple elements. */ +@OpMetadata( + opType = UnbatchDataset.OP_NAME, + inputsClass = UnbatchDataset.Inputs.class +) @Operator( group = "data" ) @@ -49,8 +55,8 @@ public final class UnbatchDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private UnbatchDataset(Operation operation) { - super(operation); + public UnbatchDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -95,6 +101,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = UnbatchDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UncompressElement.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UncompressElement.java index a48c055083a..1036da5df67 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UncompressElement.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UncompressElement.java @@ -31,12 +31,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; /** * Uncompresses a compressed dataset element. 
*/ +@OpMetadata( + opType = UncompressElement.OP_NAME, + inputsClass = UncompressElement.Inputs.class +) public final class UncompressElement extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class UncompressElement extends RawOp implements Iterable> components; @SuppressWarnings("unchecked") - private UncompressElement(Operation operation) { - super(operation); + public UncompressElement(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int componentsLength = operation.outputListLength("components"); components = Arrays.asList(operation.outputList(outputIdx, componentsLength)); @@ -94,6 +100,9 @@ public Iterator> iterator() { return (Iterator) components.iterator(); } + @OpInputsMetadata( + outputsClass = UncompressElement.class + ) public static class Inputs extends RawOpInputs { /** * The compressed input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UniqueDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UniqueDataset.java index 86feaca406d..61cb6ac4538 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UniqueDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UniqueDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * Creates a dataset that contains the unique elements of {@code input_dataset}. 
*/ +@OpMetadata( + opType = UniqueDataset.OP_NAME, + inputsClass = UniqueDataset.Inputs.class +) @Operator( group = "data" ) @@ -49,8 +55,8 @@ public final class UniqueDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private UniqueDataset(Operation operation) { - super(operation); + public UniqueDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -95,6 +101,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = UniqueDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UnwrapDatasetVariant.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UnwrapDatasetVariant.java index eb4ed977823..f2e837a4f8e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UnwrapDatasetVariant.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UnwrapDatasetVariant.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** * The UnwrapDatasetVariant operation */ +@OpMetadata( + opType = UnwrapDatasetVariant.OP_NAME, + inputsClass = UnwrapDatasetVariant.Inputs.class +) @Operator( group = "data" ) @@ -45,8 +51,8 @@ public final class UnwrapDatasetVariant extends RawOp implements Operand private Output outputHandle; @SuppressWarnings("unchecked") - private UnwrapDatasetVariant(Operation operation) { - super(operation); + public UnwrapDatasetVariant(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputHandle = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return (Output) outputHandle; } + @OpInputsMetadata( + outputsClass = UnwrapDatasetVariant.class + ) public static class Inputs extends RawOpInputs { /** * The inputHandle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WindowDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WindowDataset.java index adce99206cc..384bcf89c8e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WindowDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WindowDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -74,6 +76,10 @@ * produces {@code {{"a": {0, 1}}, {"a": {2, 3}}}} * */ +@OpMetadata( + opType = WindowDataset.OP_NAME, + inputsClass = WindowDataset.Inputs.class +) @Operator( group = "data" ) @@ -86,8 +92,8 @@ public final class WindowDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private WindowDataset(Operation operation) { - super(operation); + public WindowDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = 
operation.output(outputIdx++); } @@ -148,6 +154,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = WindowDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WrapDatasetVariant.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WrapDatasetVariant.java index 5a52404b85e..36ade21b269 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WrapDatasetVariant.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WrapDatasetVariant.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** * The WrapDatasetVariant operation */ +@OpMetadata( + opType = WrapDatasetVariant.OP_NAME, + inputsClass = WrapDatasetVariant.Inputs.class +) @Operator( group = "data" ) @@ -45,8 +51,8 @@ public final class WrapDatasetVariant extends RawOp implements Operand { private Output outputHandle; @SuppressWarnings("unchecked") - private WrapDatasetVariant(Operation operation) { - super(operation); + public WrapDatasetVariant(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputHandle = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return (Output) outputHandle; } + @OpInputsMetadata( + outputsClass = WrapDatasetVariant.class + ) public static class Inputs extends RawOpInputs { /** * The inputHandle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ZipDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ZipDataset.java index 874b24717a5..89192eb8559 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ZipDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ZipDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -41,6 +43,10 @@ *

    The size of the resulting dataset will match the size of the smallest input * dataset, and no error will be raised if input datasets have different sizes. */ +@OpMetadata( + opType = ZipDataset.OP_NAME, + inputsClass = ZipDataset.Inputs.class +) @Operator( group = "data" ) @@ -53,8 +59,8 @@ public final class ZipDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private ZipDataset(Operation operation) { - super(operation); + public ZipDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -99,6 +105,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = ZipDataset.class + ) public static class Inputs extends RawOpInputs { /** * List of {@code N} variant Tensors representing datasets to be zipped together. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AssertNextDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AssertNextDataset.java index f2c7acf738f..facd34343cb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AssertNextDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AssertNextDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * The ExperimentalAssertNextDataset operation */ +@OpMetadata( + opType = AssertNextDataset.OP_NAME, + inputsClass = AssertNextDataset.Inputs.class +) public final class AssertNextDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class AssertNextDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private AssertNextDataset(Operation operation) { - super(operation); + public AssertNextDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -95,6 +101,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = AssertNextDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AutoShardDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AutoShardDataset.java index 69e5e5a2ba2..419a5c57ee3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AutoShardDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/AutoShardDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -43,6 +45,10 @@ *

    This dataset will throw a NotFound error if we cannot shard the dataset * automatically. */ +@OpMetadata( + opType = AutoShardDataset.OP_NAME, + inputsClass = AutoShardDataset.Inputs.class +) public final class AutoShardDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -52,8 +58,8 @@ public final class AutoShardDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private AutoShardDataset(Operation operation) { - super(operation); + public AutoShardDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -142,6 +148,9 @@ public Options autoShardPolicy(Long autoShardPolicy) { } } + @OpInputsMetadata( + outputsClass = AutoShardDataset.class + ) public static class Inputs extends RawOpInputs { /** * A variant tensor representing the input dataset. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/BytesProducedStatsDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/BytesProducedStatsDataset.java index 13fb1165700..5c2ec41eaad 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/BytesProducedStatsDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/BytesProducedStatsDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * Records the bytes size of each element of {@code input_dataset} in a StatsAggregator. 
*/ +@OpMetadata( + opType = BytesProducedStatsDataset.OP_NAME, + inputsClass = BytesProducedStatsDataset.Inputs.class +) public final class BytesProducedStatsDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class BytesProducedStatsDataset extends RawOp implements Operand handle; @SuppressWarnings("unchecked") - private BytesProducedStatsDataset(Operation operation) { - super(operation); + public BytesProducedStatsDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -94,6 +100,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = BytesProducedStatsDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/CSVDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/CSVDataset.java index 996e8d5744e..373ffb23b73 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/CSVDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/CSVDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; import org.tensorflow.types.TInt64; @@ -39,6 +41,10 @@ /** * The ExperimentalCSVDataset operation */ +@OpMetadata( + opType = CSVDataset.OP_NAME, + inputsClass = CSVDataset.Inputs.class +) public final class CSVDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -48,8 +54,8 @@ public final class CSVDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private CSVDataset(Operation operation) { - super(operation); + public CSVDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -110,6 +116,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = CSVDataset.class + ) public static class Inputs extends RawOpInputs { /** * The filenames input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ChooseFastestDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ChooseFastestDataset.java index a6d593bda51..cc1d2bb52ec 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ChooseFastestDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ChooseFastestDataset.java @@ -30,12 +30,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; /** * The ExperimentalChooseFastestDataset operation */ +@OpMetadata( + opType = ChooseFastestDataset.OP_NAME, + inputsClass = ChooseFastestDataset.Inputs.class +) public final class ChooseFastestDataset extends RawOp 
implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class ChooseFastestDataset extends RawOp implements Operand private Output handle; @SuppressWarnings("unchecked") - private ChooseFastestDataset(Operation operation) { - super(operation); + public ChooseFastestDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -94,6 +100,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = ChooseFastestDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDatasets input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DatasetCardinality.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DatasetCardinality.java index d1c6050a755..2c2c35fd6e3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DatasetCardinality.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DatasetCardinality.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -34,6 +36,10 @@ * Returns the cardinality of {@code input_dataset}. * Returns the cardinality of {@code input_dataset}. */ +@OpMetadata( + opType = DatasetCardinality.OP_NAME, + inputsClass = DatasetCardinality.Inputs.class +) public final class DatasetCardinality extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -42,8 +48,8 @@ public final class DatasetCardinality extends RawOp implements Operand { private Output cardinality; - private DatasetCardinality(Operation operation) { - super(operation); + public DatasetCardinality(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; cardinality = operation.output(outputIdx++); } @@ -79,6 +85,9 @@ public Output asOutput() { return cardinality; } + @OpInputsMetadata( + outputsClass = DatasetCardinality.class + ) public static class Inputs extends RawOpInputs { /** * A variant tensor representing the dataset to return cardinality for. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DatasetToTFRecord.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DatasetToTFRecord.java index 6ad2bd5ec12..e110a9c7722 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DatasetToTFRecord.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DatasetToTFRecord.java @@ -26,20 +26,26 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; /** * Writes the given dataset to the given file using the TFRecord format. 
*/ +@OpMetadata( + opType = DatasetToTFRecord.OP_NAME, + inputsClass = DatasetToTFRecord.Inputs.class +) public final class DatasetToTFRecord extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "ExperimentalDatasetToTFRecord"; - private DatasetToTFRecord(Operation operation) { - super(operation); + public DatasetToTFRecord(Operation operation) { + super(operation, OP_NAME); } /** @@ -64,6 +70,9 @@ public static DatasetToTFRecord create(Scope scope, Operand inp return new DatasetToTFRecord(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = DatasetToTFRecord.class + ) public static class Inputs extends RawOpInputs { /** * A variant tensor representing the dataset to write. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DenseToSparseBatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DenseToSparseBatchDataset.java index ae25c651a41..92b8f88135e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DenseToSparseBatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DenseToSparseBatchDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * Creates a dataset that batches input elements into a SparseTensor. */ +@OpMetadata( + opType = DenseToSparseBatchDataset.OP_NAME, + inputsClass = DenseToSparseBatchDataset.Inputs.class +) public final class DenseToSparseBatchDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class DenseToSparseBatchDataset extends RawOp implements Operand handle; @SuppressWarnings("unchecked") - private DenseToSparseBatchDataset(Operation operation) { - super(operation); + public DenseToSparseBatchDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -100,6 +106,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = DenseToSparseBatchDataset.class + ) public static class Inputs extends RawOpInputs { /** * A handle to an input dataset. Must have a single component. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DirectedInterleaveDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DirectedInterleaveDataset.java index 04d9e0bad91..74c7d32e932 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DirectedInterleaveDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/DirectedInterleaveDataset.java @@ -30,12 +30,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; /** * A substitute for {@code InterleaveDataset} on a fixed list of {@code N} datasets. */ +@OpMetadata( + opType = DirectedInterleaveDataset.OP_NAME, + inputsClass = DirectedInterleaveDataset.Inputs.class +) public final class DirectedInterleaveDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class DirectedInterleaveDataset extends RawOp implements Operand handle; @SuppressWarnings("unchecked") - private DirectedInterleaveDataset(Operation operation) { - super(operation); + public DirectedInterleaveDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -97,6 +103,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = DirectedInterleaveDataset.class + ) public static class Inputs extends RawOpInputs { /** * A dataset of scalar {@code DT_INT64} elements that determines which of the diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/GroupByReducerDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/GroupByReducerDataset.java index 735a080bd78..b933563c4b1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/GroupByReducerDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/GroupByReducerDataset.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * Creates a dataset that computes a group-by on {@code input_dataset}. * Creates a dataset that computes a group-by on {@code input_dataset}. 
*/ +@OpMetadata( + opType = GroupByReducerDataset.OP_NAME, + inputsClass = GroupByReducerDataset.Inputs.class +) public final class GroupByReducerDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -47,8 +53,8 @@ public final class GroupByReducerDataset extends RawOp implements Operand private Output handle; @SuppressWarnings("unchecked") - private GroupByReducerDataset(Operation operation) { - super(operation); + public GroupByReducerDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -120,6 +126,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = GroupByReducerDataset.class + ) public static class Inputs extends RawOpInputs { /** * A variant tensor representing the input dataset. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/GroupByWindowDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/GroupByWindowDataset.java index 7b4bd4e1333..4acd2c56896 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/GroupByWindowDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/GroupByWindowDataset.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * Creates a dataset that computes a windowed group-by on {@code input_dataset}. * // TODO(mrry): Support non-int64 keys. 
*/ +@OpMetadata( + opType = GroupByWindowDataset.OP_NAME, + inputsClass = GroupByWindowDataset.Inputs.class +) public final class GroupByWindowDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -47,8 +53,8 @@ public final class GroupByWindowDataset extends RawOp implements Operand private Output handle; @SuppressWarnings("unchecked") - private GroupByWindowDataset(Operation operation) { - super(operation); + public GroupByWindowDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -109,6 +115,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = GroupByWindowDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/IgnoreErrorsDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/IgnoreErrorsDataset.java index 995d5514c4a..fa94ae55315 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/IgnoreErrorsDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/IgnoreErrorsDataset.java @@ -30,12 +30,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; /** * Creates a dataset that contains the elements of {@code input_dataset} ignoring errors. */ +@OpMetadata( + opType = IgnoreErrorsDataset.OP_NAME, + inputsClass = IgnoreErrorsDataset.Inputs.class +) public final class IgnoreErrorsDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class IgnoreErrorsDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private IgnoreErrorsDataset(Operation operation) { - super(operation); + public IgnoreErrorsDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -130,6 +136,9 @@ public Options logWarning(Boolean logWarning) { } } + @OpInputsMetadata( + outputsClass = IgnoreErrorsDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/IteratorGetDevice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/IteratorGetDevice.java index d8232ba0743..fd4d5181956 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/IteratorGetDevice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/IteratorGetDevice.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; /** * Returns the name of the device on which {@code resource} has been placed. 
*/ +@OpMetadata( + opType = IteratorGetDevice.OP_NAME, + inputsClass = IteratorGetDevice.Inputs.class +) public final class IteratorGetDevice extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -41,8 +47,8 @@ public final class IteratorGetDevice extends RawOp implements Operand { private Output device; - private IteratorGetDevice(Operation operation) { - super(operation); + public IteratorGetDevice(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; device = operation.output(outputIdx++); } @@ -77,6 +83,9 @@ public Output asOutput() { return device; } + @OpInputsMetadata( + outputsClass = IteratorGetDevice.class + ) public static class Inputs extends RawOpInputs { /** * The resource input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/LatencyStatsDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/LatencyStatsDataset.java index 5a772ac68c6..d8658c4843f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/LatencyStatsDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/LatencyStatsDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * Records the latency of producing {@code input_dataset} elements in a StatsAggregator. */ +@OpMetadata( + opType = LatencyStatsDataset.OP_NAME, + inputsClass = LatencyStatsDataset.Inputs.class +) public final class LatencyStatsDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class LatencyStatsDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private LatencyStatsDataset(Operation operation) { - super(operation); + public LatencyStatsDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -94,6 +100,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = LatencyStatsDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/LmdbDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/LmdbDataset.java index a19e2f1dc41..07e1bec2735 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/LmdbDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/LmdbDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * The ExperimentalLMDBDataset operation */ +@OpMetadata( + opType = LmdbDataset.OP_NAME, + inputsClass = 
LmdbDataset.Inputs.class +) public final class LmdbDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class LmdbDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private LmdbDataset(Operation operation) { - super(operation); + public LmdbDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -92,6 +98,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = LmdbDataset.class + ) public static class Inputs extends RawOpInputs { /** * The filenames input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MapAndBatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MapAndBatchDataset.java index 81165535555..6b22e495b2d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MapAndBatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MapAndBatchDataset.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; import org.tensorflow.types.TInt64; @@ -43,6 +45,10 @@ *

    Unlike a "MapDataset", which applies {@code f} sequentially, this dataset invokes up * to {@code batch_size * num_parallel_batches} copies of {@code f} in parallel. */ +@OpMetadata( + opType = MapAndBatchDataset.OP_NAME, + inputsClass = MapAndBatchDataset.Inputs.class +) public final class MapAndBatchDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -52,8 +58,8 @@ public final class MapAndBatchDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private MapAndBatchDataset(Operation operation) { - super(operation); + public MapAndBatchDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -155,6 +161,9 @@ public Options preserveCardinality(Boolean preserveCardinality) { } } + @OpInputsMetadata( + outputsClass = MapAndBatchDataset.class + ) public static class Inputs extends RawOpInputs { /** * A variant tensor representing the input dataset. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MapDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MapDataset.java index dac1b4bbc4b..64d41ecb3b9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MapDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MapDataset.java @@ -31,12 +31,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; /** * Creates a dataset that applies {@code f} to the outputs of {@code input_dataset}. 
*/ +@OpMetadata( + opType = MapDataset.OP_NAME, + inputsClass = MapDataset.Inputs.class +) public final class MapDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class MapDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private MapDataset(Operation operation) { - super(operation); + public MapDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -162,6 +168,9 @@ public Options preserveCardinality(Boolean preserveCardinality) { } } + @OpInputsMetadata( + outputsClass = MapDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MatchingFilesDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MatchingFilesDataset.java index d56e2d11450..cf3fb557d72 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MatchingFilesDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MatchingFilesDataset.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; /** * The ExperimentalMatchingFilesDataset operation */ +@OpMetadata( + opType = MatchingFilesDataset.OP_NAME, + inputsClass = MatchingFilesDataset.Inputs.class +) public final class MatchingFilesDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -42,8 +48,8 @@ public final class MatchingFilesDataset extends RawOp implements Operand private Output handle; @SuppressWarnings("unchecked") - private MatchingFilesDataset(Operation operation) { - super(operation); + public MatchingFilesDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -79,6 +85,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = MatchingFilesDataset.class + ) public static class Inputs extends RawOpInputs { /** * The patterns input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MaxIntraOpParallelismDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MaxIntraOpParallelismDataset.java index a934b3641f2..bd1e4db285d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MaxIntraOpParallelismDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/MaxIntraOpParallelismDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * Creates a dataset that overrides the maximum intra-op parallelism. 
*/ +@OpMetadata( + opType = MaxIntraOpParallelismDataset.OP_NAME, + inputsClass = MaxIntraOpParallelismDataset.Inputs.class +) public final class MaxIntraOpParallelismDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class MaxIntraOpParallelismDataset extends RawOp implements Operand private Output handle; @SuppressWarnings("unchecked") - private MaxIntraOpParallelismDataset(Operation operation) { - super(operation); + public MaxIntraOpParallelismDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -95,6 +101,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = MaxIntraOpParallelismDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/NonSerializableDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/NonSerializableDataset.java index a8098c45132..91d544a288d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/NonSerializableDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/NonSerializableDataset.java @@ -30,12 +30,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; /** * The ExperimentalNonSerializableDataset operation */ +@OpMetadata( + opType = NonSerializableDataset.OP_NAME, + inputsClass = NonSerializableDataset.Inputs.class +) public final class NonSerializableDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class NonSerializableDataset extends RawOp implements Operand handle; @SuppressWarnings("unchecked") - private NonSerializableDataset(Operation operation) { - super(operation); + public NonSerializableDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = NonSerializableDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ParallelInterleaveDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ParallelInterleaveDataset.java index 67dfc418202..17f05939fa5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ParallelInterleaveDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ParallelInterleaveDataset.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; import org.tensorflow.types.TInt64; @@ -45,6 +47,10 @@ * allows the training step to 
proceed so long as some data is available. *

    !! WARNING !! This dataset is not deterministic! */ +@OpMetadata( + opType = ParallelInterleaveDataset.OP_NAME, + inputsClass = ParallelInterleaveDataset.Inputs.class +) public final class ParallelInterleaveDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -54,8 +60,8 @@ public final class ParallelInterleaveDataset extends RawOp implements Operand handle; @SuppressWarnings("unchecked") - private ParallelInterleaveDataset(Operation operation) { - super(operation); + public ParallelInterleaveDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -119,6 +125,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = ParallelInterleaveDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ParseExampleDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ParseExampleDataset.java index 8805d0ebf5e..cc1bcb7934e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ParseExampleDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ParseExampleDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * Transforms {@code input_dataset} containing {@code Example} protos as vectors of DT_STRING into a dataset of {@code Tensor} or {@code SparseTensor} objects representing the parsed features. 
*/ +@OpMetadata( + opType = ParseExampleDataset.OP_NAME, + inputsClass = ParseExampleDataset.Inputs.class +) public final class ParseExampleDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class ParseExampleDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private ParseExampleDataset(Operation operation) { - super(operation); + public ParseExampleDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -170,6 +176,9 @@ public Options sloppy(Boolean sloppy) { } } + @OpInputsMetadata( + outputsClass = ParseExampleDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/PrivateThreadPoolDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/PrivateThreadPoolDataset.java index 36aafe3272e..e5ace0daa57 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/PrivateThreadPoolDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/PrivateThreadPoolDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * Creates a dataset that uses a custom thread pool to compute {@code input_dataset}. 
*/ +@OpMetadata( + opType = PrivateThreadPoolDataset.OP_NAME, + inputsClass = PrivateThreadPoolDataset.Inputs.class +) public final class PrivateThreadPoolDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class PrivateThreadPoolDataset extends RawOp implements Operand handle; @SuppressWarnings("unchecked") - private PrivateThreadPoolDataset(Operation operation) { - super(operation); + public PrivateThreadPoolDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -95,6 +101,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = PrivateThreadPoolDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/RandomDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/RandomDataset.java index 416f4ce7d62..d1423c9ba75 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/RandomDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/RandomDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * Creates a Dataset that returns pseudorandom numbers. */ +@OpMetadata( + opType = RandomDataset.OP_NAME, + inputsClass = RandomDataset.Inputs.class +) public final class RandomDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class RandomDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private RandomDataset(Operation operation) { - super(operation); + public RandomDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -96,6 +102,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = RandomDataset.class + ) public static class Inputs extends RawOpInputs { /** * A scalar seed for the random number generator. 
If either seed or diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/RebatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/RebatchDataset.java index 1ae694d9914..04f8c393f71 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/RebatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/RebatchDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * Creates a dataset that changes the batch size of the dataset to current batch * size // num_replicas. */ +@OpMetadata( + opType = RebatchDataset.OP_NAME, + inputsClass = RebatchDataset.Inputs.class +) public final class RebatchDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -48,8 +54,8 @@ public final class RebatchDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private RebatchDataset(Operation operation) { - super(operation); + public RebatchDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -138,6 +144,9 @@ public Options useFallback(Boolean useFallback) { } } + @OpInputsMetadata( + outputsClass = RebatchDataset.class + ) public static class Inputs extends RawOpInputs { /** * A variant tensor representing the input dataset. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ScanDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ScanDataset.java index b02550f116a..c3e468cf121 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ScanDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ScanDataset.java @@ -31,12 +31,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; /** * Creates a dataset successively reduces {@code f} over the elements of {@code input_dataset}. 
*/ +@OpMetadata( + opType = ScanDataset.OP_NAME, + inputsClass = ScanDataset.Inputs.class +) public final class ScanDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class ScanDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private ScanDataset(Operation operation) { - super(operation); + public ScanDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -138,6 +144,9 @@ public Options preserveCardinality(Boolean preserveCardinality) { } } + @OpInputsMetadata( + outputsClass = ScanDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SetStatsAggregatorDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SetStatsAggregatorDataset.java index 6bcf9d87e29..7ea20feef54 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SetStatsAggregatorDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SetStatsAggregatorDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * The ExperimentalSetStatsAggregatorDataset operation */ +@OpMetadata( + opType = SetStatsAggregatorDataset.OP_NAME, + inputsClass = SetStatsAggregatorDataset.Inputs.class +) public final class SetStatsAggregatorDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class SetStatsAggregatorDataset extends RawOp implements Operand handle; @SuppressWarnings("unchecked") - private SetStatsAggregatorDataset(Operation operation) { - super(operation); + public SetStatsAggregatorDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -100,6 +106,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = SetStatsAggregatorDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SleepDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SleepDataset.java index ae879e0fbf4..2327119a60e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SleepDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SleepDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * The ExperimentalSleepDataset operation */ +@OpMetadata( + opType = 
SleepDataset.OP_NAME, + inputsClass = SleepDataset.Inputs.class +) public final class SleepDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class SleepDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private SleepDataset(Operation operation) { - super(operation); + public SleepDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -95,6 +101,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = SleepDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SlidingWindowDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SlidingWindowDataset.java index 7939710ff4a..922a9b8ad0b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SlidingWindowDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SlidingWindowDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * Creates a dataset that passes a sliding window over {@code input_dataset}. */ +@OpMetadata( + opType = SlidingWindowDataset.OP_NAME, + inputsClass = SlidingWindowDataset.Inputs.class +) public final class SlidingWindowDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class SlidingWindowDataset extends RawOp implements Operand private Output handle; @SuppressWarnings("unchecked") - private SlidingWindowDataset(Operation operation) { - super(operation); + public SlidingWindowDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -102,6 +108,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = SlidingWindowDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SqlDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SqlDataset.java index 3a62b4c17bd..23f7b31ef02 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SqlDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/SqlDataset.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * Creates a dataset that executes a SQL query and emits rows of the result set. 
*/ +@OpMetadata( + opType = SqlDataset.OP_NAME, + inputsClass = SqlDataset.Inputs.class +) public final class SqlDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class SqlDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private SqlDataset(Operation operation) { - super(operation); + public SqlDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -97,6 +103,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = SqlDataset.class + ) public static class Inputs extends RawOpInputs { /** * The database type. Currently, the only supported type is 'sqlite'. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorHandle.java index ce6bc4bd54b..be406a52156 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorHandle.java @@ -27,11 +27,17 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.family.TType; /** * Creates a statistics manager resource. */ +@OpMetadata( + opType = StatsAggregatorHandle.OP_NAME, + inputsClass = StatsAggregatorHandle.Inputs.class +) public final class StatsAggregatorHandle extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -41,8 +47,8 @@ public final class StatsAggregatorHandle extends RawOp implements Operand private Output handle; @SuppressWarnings("unchecked") - private StatsAggregatorHandle(Operation operation) { - super(operation); + public StatsAggregatorHandle(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -141,6 +147,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = StatsAggregatorHandle.class + ) public static class Inputs extends RawOpInputs { /** * The container attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorSummary.java index 61aeec7600b..02899c3457e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/StatsAggregatorSummary.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; /** * Produces a summary of any statistics recorded by the given statistics manager. 
*/ +@OpMetadata( + opType = StatsAggregatorSummary.OP_NAME, + inputsClass = StatsAggregatorSummary.Inputs.class +) public final class StatsAggregatorSummary extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -41,8 +47,8 @@ public final class StatsAggregatorSummary extends RawOp implements Operand summary; - private StatsAggregatorSummary(Operation operation) { - super(operation); + public StatsAggregatorSummary(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; summary = operation.output(outputIdx++); } @@ -77,6 +83,9 @@ public Output asOutput() { return summary; } + @OpInputsMetadata( + outputsClass = StatsAggregatorSummary.class + ) public static class Inputs extends RawOpInputs { /** * The iterator input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/TakeWhileDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/TakeWhileDataset.java index f11aa6444dd..f14151ab235 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/TakeWhileDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/TakeWhileDataset.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -43,6 +45,10 @@ *

  • One tensor for each value in {@code other_arguments}.
  • * */ +@OpMetadata( + opType = TakeWhileDataset.OP_NAME, + inputsClass = TakeWhileDataset.Inputs.class +) public final class TakeWhileDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -52,8 +58,8 @@ public final class TakeWhileDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private TakeWhileDataset(Operation operation) { - super(operation); + public TakeWhileDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -104,6 +110,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = TakeWhileDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ThreadPoolDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ThreadPoolDataset.java index 379c0a245a3..6a4eedd6649 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ThreadPoolDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ThreadPoolDataset.java @@ -30,12 +30,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; /** * Creates a dataset that uses a custom thread pool to compute {@code input_dataset}. */ +@OpMetadata( + opType = ThreadPoolDataset.OP_NAME, + inputsClass = ThreadPoolDataset.Inputs.class +) public final class ThreadPoolDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class ThreadPoolDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private ThreadPoolDataset(Operation operation) { - super(operation); + public ThreadPoolDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -94,6 +100,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = ThreadPoolDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ThreadPoolHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ThreadPoolHandle.java index 27a66b8df1a..98d8efa2836 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ThreadPoolHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/ThreadPoolHandle.java @@ -27,11 +27,17 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.family.TType; /** * Creates a dataset that uses a custom thread pool to compute {@code input_dataset}. 
*/ +@OpMetadata( + opType = ThreadPoolHandle.OP_NAME, + inputsClass = ThreadPoolHandle.Inputs.class +) public final class ThreadPoolHandle extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -41,8 +47,8 @@ public final class ThreadPoolHandle extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private ThreadPoolHandle(Operation operation) { - super(operation); + public ThreadPoolHandle(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -177,6 +183,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = ThreadPoolHandle.class + ) public static class Inputs extends RawOpInputs { /** * The number of threads in the thread pool. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UnbatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UnbatchDataset.java index b427d4e576b..e4aba42d75b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UnbatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UnbatchDataset.java @@ -30,12 +30,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; /** * A dataset that splits the elements of its input into multiple elements. */ +@OpMetadata( + opType = UnbatchDataset.OP_NAME, + inputsClass = UnbatchDataset.Inputs.class +) public final class UnbatchDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class UnbatchDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private UnbatchDataset(Operation operation) { - super(operation); + public UnbatchDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = UnbatchDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UniqueDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UniqueDataset.java index 6d81f2ab1a3..c2b0e001679 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UniqueDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/experimental/UniqueDataset.java @@ -30,12 +30,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; /** * Creates a dataset that contains the unique elements of {@code input_dataset}. 
*/ +@OpMetadata( + opType = UniqueDataset.OP_NAME, + inputsClass = UniqueDataset.Inputs.class +) public final class UniqueDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class UniqueDataset extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private UniqueDataset(Operation operation) { - super(operation); + public UniqueDataset(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = UniqueDataset.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/CheckNumerics.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/CheckNumerics.java index 644bc1091de..b1db34d171e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/CheckNumerics.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/CheckNumerics.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = CheckNumerics.OP_NAME, + inputsClass = CheckNumerics.Inputs.class +) public final class CheckNumerics extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -47,8 +53,8 @@ public final class CheckNumerics extends RawOp implements Ope private Output output; - private CheckNumerics(Operation operation) { - super(operation); + public CheckNumerics(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -87,6 +93,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = CheckNumerics.class + ) public static class Inputs extends RawOpInputs> { /** * The tensor input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientIdentity.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientIdentity.java index e72c3442a7d..568577a0dcc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientIdentity.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientIdentity.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = DebugGradientIdentity.OP_NAME, + inputsClass = DebugGradientIdentity.Inputs.class +) public final class DebugGradientIdentity extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class DebugGradientIdentity extends RawOp 
implemen private Output output; - private DebugGradientIdentity(Operation operation) { - super(operation); + public DebugGradientIdentity(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -83,6 +89,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = DebugGradientIdentity.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientRefIdentity.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientRefIdentity.java index 1546bc11128..e8ce9ccbd17 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientRefIdentity.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientRefIdentity.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = DebugGradientRefIdentity.OP_NAME, + inputsClass = DebugGradientRefIdentity.Inputs.class +) public final class DebugGradientRefIdentity extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class DebugGradientRefIdentity extends RawOp imple private Output output; - private DebugGradientRefIdentity(Operation operation) { - super(operation); + public DebugGradientRefIdentity(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = DebugGradientRefIdentity.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugIdentity.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugIdentity.java index 01167c395e6..5036e9d28af 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugIdentity.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugIdentity.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -44,6 +46,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = DebugIdentity.OP_NAME, + inputsClass = DebugIdentity.Inputs.class +) public final class DebugIdentity extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -52,8 +58,8 @@ public final class DebugIdentity extends RawOp implements Opera private Output output; - private DebugIdentity(Operation operation) { - super(operation); + public DebugIdentity(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -316,6 +322,9 @@ public Options 
tfdbgRunId(String tfdbgRunId) { } } + @OpInputsMetadata( + outputsClass = DebugIdentity.class + ) public static class Inputs extends RawOpInputs> { /** * Input tensor, non-Reference type diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNanCount.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNanCount.java index 9925d851593..59969c5f9f6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNanCount.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNanCount.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * Debug NaN Value Counter Op. * Counts number of NaNs in the input tensor, for debugging. */ +@OpMetadata( + opType = DebugNanCount.OP_NAME, + inputsClass = DebugNanCount.Inputs.class +) public final class DebugNanCount extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class DebugNanCount extends RawOp implements Operand { private Output output; - private DebugNanCount(Operation operation) { - super(operation); + public DebugNanCount(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -237,6 +243,9 @@ public Options gatedGrpc(Boolean gatedGrpc) { } } + @OpInputsMetadata( + outputsClass = DebugNanCount.class + ) public static class Inputs extends RawOpInputs { /** * Input tensor, non-Reference type. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNumericsSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNumericsSummary.java index 6dd55b043fc..181e399efbf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNumericsSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNumericsSummary.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -41,6 +43,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = DebugNumericsSummary.OP_NAME, + inputsClass = DebugNumericsSummary.Inputs.class +) public final class DebugNumericsSummary extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -49,8 +55,8 @@ public final class DebugNumericsSummary extends RawOp impleme private Output output; - private DebugNumericsSummary(Operation operation) { - super(operation); + public DebugNumericsSummary(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -264,6 +270,9 @@ public Options tensorId(Long tensorId) { } } + @OpInputsMetadata( + outputsClass = DebugNumericsSummary.class + ) public static class Inputs extends RawOpInputs> { /** * Input tensor, to be summarized by the op. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclAllReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclAllReduce.java index fe38870830d..2cbb04ff1a9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclAllReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclAllReduce.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -45,6 +47,10 @@ * * @param data type for {@code data} output */ +@OpMetadata( + opType = NcclAllReduce.OP_NAME, + inputsClass = NcclAllReduce.Inputs.class +) public final class NcclAllReduce extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -53,8 +59,8 @@ public final class NcclAllReduce extends RawOp implements Ope private Output data; - private NcclAllReduce(Operation operation) { - super(operation); + public NcclAllReduce(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; data = operation.output(outputIdx++); } @@ -97,6 +103,9 @@ public Output asOutput() { return data; } + @OpInputsMetadata( + outputsClass = NcclAllReduce.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclBroadcast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclBroadcast.java index 79c330beaa1..bd70773b6f1 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclBroadcast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclBroadcast.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -42,6 +44,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = NcclBroadcast.OP_NAME, + inputsClass = NcclBroadcast.Inputs.class +) public final class NcclBroadcast extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class NcclBroadcast extends RawOp implements Ope private Output output; - private NcclBroadcast(Operation operation) { - super(operation); + public NcclBroadcast(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = NcclBroadcast.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclReduce.java index 759c6ea5a92..94d0e2abdb2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclReduce.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -42,6 +44,10 @@ * * @param data type for {@code data} output */ +@OpMetadata( + opType = NcclReduce.OP_NAME, + inputsClass = NcclReduce.Inputs.class +) public final class NcclReduce extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class NcclReduce extends RawOp implements Operan private Output data; - private NcclReduce(Operation operation) { - super(operation); + public NcclReduce(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; data = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return data; } + @OpInputsMetadata( + outputsClass = NcclReduce.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/AsString.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/AsString.java index a7de0ccc4e0..8de902fee91 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/AsString.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/AsString.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import 
org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -50,6 +52,10 @@ * * */ +@OpMetadata( + opType = AsString.OP_NAME, + inputsClass = AsString.Inputs.class +) @Operator( group = "dtypes" ) @@ -61,8 +67,8 @@ public final class AsString extends RawOp implements Operand { private Output output; - private AsString(Operation operation) { - super(operation); + public AsString(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -250,6 +256,9 @@ public Options fill(String fill) { } } + @OpInputsMetadata( + outputsClass = AsString.class + ) public static class Inputs extends RawOpInputs { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Cast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Cast.java index cc9cbb10ec2..556ba9cb99c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Cast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Cast.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Cast.OP_NAME, + inputsClass = Cast.Inputs.class +) @Operator( group = "dtypes" ) @@ -48,8 +54,8 @@ public final class Cast extends RawOp implements Operand { private Output y; - private Cast(Operation operation) { - super(operation); + public Cast(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -127,6 +133,9 @@ public Options Truncate(Boolean Truncate) { } } + @OpInputsMetadata( + outputsClass = Cast.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Complex.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Complex.java index 86d216c50c0..bcd586c8046 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Complex.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Complex.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -49,6 +51,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = Complex.OP_NAME, + inputsClass = Complex.Inputs.class +) @Operator( group = "dtypes" ) @@ -60,8 +66,8 @@ public final class Complex extends RawOp implements Operand private Output out; - private Complex(Operation operation) { - super(operation); + public Complex(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -103,6 +109,9 @@ public Output asOutput() { return out; } + @OpInputsMetadata( + outputsClass = Complex.class + ) public static class Inputs 
extends RawOpInputs> { /** * The real input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/ToBool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/ToBool.java index 44ac418d3d2..fb8c3dc3a3e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/ToBool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/ToBool.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; import org.tensorflow.types.family.TType; @@ -49,6 +51,10 @@ *

    This matches the behavior of If and While for determining if a tensor counts * as true/false for a branch condition. */ +@OpMetadata( + opType = ToBool.OP_NAME, + inputsClass = ToBool.Inputs.class +) public final class ToBool extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -57,8 +63,8 @@ public final class ToBool extends RawOp implements Operand { private Output output; - private ToBool(Operation operation) { - super(operation); + public ToBool(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -93,6 +99,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = ToBool.class + ) public static class Inputs extends RawOpInputs { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesAggregateStats.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesAggregateStats.java index 7b0529beffe..011d1c7793e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesAggregateStats.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesAggregateStats.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -34,6 +36,10 @@ * Aggregates the summary of accumulated stats for the batch. * The summary stats contains gradients and hessians accumulated for each node, feature dimension id and bucket. */ +@OpMetadata( + opType = BoostedTreesAggregateStats.OP_NAME, + inputsClass = BoostedTreesAggregateStats.Inputs.class +) public final class BoostedTreesAggregateStats extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -42,8 +48,8 @@ public final class BoostedTreesAggregateStats extends RawOp implements Operand statsSummary; - private BoostedTreesAggregateStats(Operation operation) { - super(operation); + public BoostedTreesAggregateStats(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; statsSummary = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return statsSummary; } + @OpInputsMetadata( + outputsClass = BoostedTreesAggregateStats.class + ) public static class Inputs extends RawOpInputs { /** * int32; Rank 1 Tensor containing node ids for each example, shape [batch_size]. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesBucketize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesBucketize.java index c9e18b2a292..3d2cf540ff5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesBucketize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesBucketize.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -38,6 +40,10 @@ * An op that returns a list of float tensors, where each tensor represents the * bucketized values for a single feature. */ +@OpMetadata( + opType = BoostedTreesBucketize.OP_NAME, + inputsClass = BoostedTreesBucketize.Inputs.class +) public final class BoostedTreesBucketize extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine @@ -47,8 +53,8 @@ public final class BoostedTreesBucketize extends RawOp implements Iterable> buckets; @SuppressWarnings("unchecked") - private BoostedTreesBucketize(Operation operation) { - super(operation); + public BoostedTreesBucketize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int bucketsLength = operation.outputListLength("buckets"); buckets = Arrays.asList((Output[]) operation.outputList(outputIdx, bucketsLength)); @@ -90,6 +96,9 @@ public Iterator> iterator() { return (Iterator) buckets.iterator(); } + @OpInputsMetadata( + outputsClass = BoostedTreesBucketize.class + ) public static class Inputs extends RawOpInputs { /** * float; List of Rank 1 Tensor each containing float values for a single feature. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestFeatureSplit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestFeatureSplit.java index 8979b37a37c..6157ee489e5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestFeatureSplit.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestFeatureSplit.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; import org.tensorflow.types.TString; @@ -38,6 +40,10 @@ *

    In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features). *

    The output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature. */ +@OpMetadata( + opType = BoostedTreesCalculateBestFeatureSplit.OP_NAME, + inputsClass = BoostedTreesCalculateBestFeatureSplit.Inputs.class +) public final class BoostedTreesCalculateBestFeatureSplit extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -58,8 +64,8 @@ public final class BoostedTreesCalculateBestFeatureSplit extends RawOp { private Output splitWithDefaultDirections; - private BoostedTreesCalculateBestFeatureSplit(Operation operation) { - super(operation); + public BoostedTreesCalculateBestFeatureSplit(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; nodeIds = operation.output(outputIdx++); gains = operation.output(outputIdx++); @@ -205,6 +211,9 @@ public Options splitType(String splitType) { } } + @OpInputsMetadata( + outputsClass = BoostedTreesCalculateBestFeatureSplit.class + ) public static class Inputs extends RawOpInputs { /** * A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within {@code stats_summary_list}. The nodes are iterated between the two nodes specified by the tensor, as like {@code for node_id in range(node_id_range[0], node_id_range[1])} (Note that the last index node_id_range[1] is exclusive). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestFeatureSplitV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestFeatureSplitV2.java index d733c5f82bc..32e5e7b99ec 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestFeatureSplitV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestFeatureSplitV2.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; import org.tensorflow.types.TString; @@ -39,6 +41,10 @@ *

    In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features). *

    The output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature. */ +@OpMetadata( + opType = BoostedTreesCalculateBestFeatureSplitV2.OP_NAME, + inputsClass = BoostedTreesCalculateBestFeatureSplitV2.Inputs.class +) public final class BoostedTreesCalculateBestFeatureSplitV2 extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -61,8 +67,8 @@ public final class BoostedTreesCalculateBestFeatureSplitV2 extends RawOp { private Output splitWithDefaultDirections; - private BoostedTreesCalculateBestFeatureSplitV2(Operation operation) { - super(operation); + public BoostedTreesCalculateBestFeatureSplitV2(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; nodeIds = operation.output(outputIdx++); gains = operation.output(outputIdx++); @@ -184,6 +190,9 @@ public Output splitWithDefaultDirections() { return splitWithDefaultDirections; } + @OpInputsMetadata( + outputsClass = BoostedTreesCalculateBestFeatureSplitV2.class + ) public static class Inputs extends RawOpInputs { /** * A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within {@code stats_summary_list}. The nodes are iterated between the two nodes specified by the tensor, as like {@code for node_id in range(node_id_range[0], node_id_range[1])} (Note that the last index node_id_range[1] is exclusive). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestGainsPerFeature.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestGainsPerFeature.java index 4d77413901b..524fce9e204 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestGainsPerFeature.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCalculateBestGainsPerFeature.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -40,6 +42,10 @@ *

    The length of output lists are all of the same length, {@code num_features}. * The output shapes are compatible in a way that the first dimension of all tensors of all lists are the same and equal to the number of possible split nodes for each feature. */ +@OpMetadata( + opType = BoostedTreesCalculateBestGainsPerFeature.OP_NAME, + inputsClass = BoostedTreesCalculateBestGainsPerFeature.Inputs.class +) public final class BoostedTreesCalculateBestGainsPerFeature extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -57,8 +63,8 @@ public final class BoostedTreesCalculateBestGainsPerFeature extends RawOp { private List> rightNodeContribsList; @SuppressWarnings("unchecked") - private BoostedTreesCalculateBestGainsPerFeature(Operation operation) { - super(operation); + public BoostedTreesCalculateBestGainsPerFeature(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int nodeIdsListLength = operation.outputListLength("node_ids_list"); nodeIdsList = Arrays.asList((Output[]) operation.outputList(outputIdx, nodeIdsListLength)); @@ -153,6 +159,9 @@ public List> rightNodeContribsList() { return rightNodeContribsList; } + @OpInputsMetadata( + outputsClass = BoostedTreesCalculateBestGainsPerFeature.class + ) public static class Inputs extends RawOpInputs { /** * A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within {@code stats_summary_list}. The nodes are iterated between the two nodes specified by the tensor, as like {@code for node_id in range(node_id_range[0], node_id_range[1])} (Note that the last index node_id_range[1] is exclusive). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCenterBias.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCenterBias.java index 320bebfbc4c..d1f0c32bc1c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCenterBias.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCenterBias.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TBool; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TType; @@ -34,6 +36,10 @@ /** * Calculates the prior from the training data (the bias) and fills in the first node with the logits' prior. Returns a boolean indicating whether to continue centering. */ +@OpMetadata( + opType = BoostedTreesCenterBias.OP_NAME, + inputsClass = BoostedTreesCenterBias.Inputs.class +) public final class BoostedTreesCenterBias extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -42,8 +48,8 @@ public final class BoostedTreesCenterBias extends RawOp implements Operand continueCentering; - private BoostedTreesCenterBias(Operation operation) { - super(operation); + public BoostedTreesCenterBias(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; continueCentering = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return continueCentering; } + @OpInputsMetadata( + outputsClass = BoostedTreesCenterBias.class + ) public static class Inputs extends RawOpInputs { /** * Handle to the tree ensemble. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCreateEnsemble.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCreateEnsemble.java index 31df4690605..154efbd1298 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCreateEnsemble.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCreateEnsemble.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -33,14 +35,18 @@ /** * Creates a tree ensemble model and returns a handle to it. */ +@OpMetadata( + opType = BoostedTreesCreateEnsemble.OP_NAME, + inputsClass = BoostedTreesCreateEnsemble.Inputs.class +) public final class BoostedTreesCreateEnsemble extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "BoostedTreesCreateEnsemble"; - private BoostedTreesCreateEnsemble(Operation operation) { - super(operation); + public BoostedTreesCreateEnsemble(Operation operation) { + super(operation, OP_NAME); } /** @@ -65,6 +71,9 @@ public static BoostedTreesCreateEnsemble create(Scope scope, return new BoostedTreesCreateEnsemble(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = BoostedTreesCreateEnsemble.class + ) public static class Inputs extends RawOpInputs { /** * Handle to the tree ensemble resource to be created. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCreateQuantileStreamResource.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCreateQuantileStreamResource.java index 9d5a68ea32d..fae66e5afe4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCreateQuantileStreamResource.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesCreateQuantileStreamResource.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -33,14 +35,18 @@ /** * Create the Resource for Quantile Streams. 
*/ +@OpMetadata( + opType = BoostedTreesCreateQuantileStreamResource.OP_NAME, + inputsClass = BoostedTreesCreateQuantileStreamResource.Inputs.class +) public final class BoostedTreesCreateQuantileStreamResource extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "BoostedTreesCreateQuantileStreamResource"; - private BoostedTreesCreateQuantileStreamResource(Operation operation) { - super(operation); + public BoostedTreesCreateQuantileStreamResource(Operation operation) { + super(operation, OP_NAME); } /** @@ -104,6 +110,9 @@ public Options maxElements(Long maxElements) { } } + @OpInputsMetadata( + outputsClass = BoostedTreesCreateQuantileStreamResource.class + ) public static class Inputs extends RawOpInputs { /** * resource; Handle to quantile stream resource. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesDeserializeEnsemble.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesDeserializeEnsemble.java index fbeb4604ca7..6b1feb1f49d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesDeserializeEnsemble.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesDeserializeEnsemble.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -34,14 +36,18 @@ * Deserializes a serialized tree ensemble config and replaces current tree * ensemble. */ +@OpMetadata( + opType = BoostedTreesDeserializeEnsemble.OP_NAME, + inputsClass = BoostedTreesDeserializeEnsemble.Inputs.class +) public final class BoostedTreesDeserializeEnsemble extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "BoostedTreesDeserializeEnsemble"; - private BoostedTreesDeserializeEnsemble(Operation operation) { - super(operation); + public BoostedTreesDeserializeEnsemble(Operation operation) { + super(operation, OP_NAME); } /** @@ -66,6 +72,9 @@ public static BoostedTreesDeserializeEnsemble create(Scope scope, return new BoostedTreesDeserializeEnsemble(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = BoostedTreesDeserializeEnsemble.class + ) public static class Inputs extends RawOpInputs { /** * Handle to the tree ensemble. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesEnsembleResourceHandleOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesEnsembleResourceHandleOp.java index 8fffb3d912a..2108d30c45f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesEnsembleResourceHandleOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesEnsembleResourceHandleOp.java @@ -27,11 +27,17 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.family.TType; /** * Creates a handle to a BoostedTreesEnsembleResource */ +@OpMetadata( + opType = BoostedTreesEnsembleResourceHandleOp.OP_NAME, + inputsClass = BoostedTreesEnsembleResourceHandleOp.Inputs.class +) public final class BoostedTreesEnsembleResourceHandleOp extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -41,8 +47,8 @@ public final class BoostedTreesEnsembleResourceHandleOp extends RawOp implements private Output resource; @SuppressWarnings("unchecked") - private BoostedTreesEnsembleResourceHandleOp(Operation operation) { - super(operation); + public BoostedTreesEnsembleResourceHandleOp(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; resource = operation.output(outputIdx++); } @@ -141,6 +147,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = BoostedTreesEnsembleResourceHandleOp.class + ) public static class Inputs extends RawOpInputs { /** * The container attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesExampleDebugOutputs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesExampleDebugOutputs.java index 0a8a5b87ca1..4fcba1a9e4a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesExampleDebugOutputs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesExampleDebugOutputs.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TInt32; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * such as getting split feature ids and logits after each split along the decision * path used to compute directional feature contributions. 
*/ +@OpMetadata( + opType = BoostedTreesExampleDebugOutputs.OP_NAME, + inputsClass = BoostedTreesExampleDebugOutputs.Inputs.class +) public final class BoostedTreesExampleDebugOutputs extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class BoostedTreesExampleDebugOutputs extends RawOp implements Oper private Output examplesDebugOutputsSerialized; - private BoostedTreesExampleDebugOutputs(Operation operation) { - super(operation); + public BoostedTreesExampleDebugOutputs(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; examplesDebugOutputsSerialized = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return examplesDebugOutputsSerialized; } + @OpInputsMetadata( + outputsClass = BoostedTreesExampleDebugOutputs.class + ) public static class Inputs extends RawOpInputs { /** * The treeEnsembleHandle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesFlushQuantileSummaries.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesFlushQuantileSummaries.java index 04ff2d43bd4..c5af68bc7c8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesFlushQuantileSummaries.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesFlushQuantileSummaries.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * Each summary Tensor is rank 2, containing summaries (value, weight, min_rank, * max_rank) for a single feature. */ +@OpMetadata( + opType = BoostedTreesFlushQuantileSummaries.OP_NAME, + inputsClass = BoostedTreesFlushQuantileSummaries.Inputs.class +) public final class BoostedTreesFlushQuantileSummaries extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine @@ -47,8 +53,8 @@ public final class BoostedTreesFlushQuantileSummaries extends RawOp implements I private List> summaries; @SuppressWarnings("unchecked") - private BoostedTreesFlushQuantileSummaries(Operation operation) { - super(operation); + public BoostedTreesFlushQuantileSummaries(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int summariesLength = operation.outputListLength("summaries"); summaries = Arrays.asList((Output[]) operation.outputList(outputIdx, summariesLength)); @@ -89,6 +95,9 @@ public Iterator> iterator() { return (Iterator) summaries.iterator(); } + @OpInputsMetadata( + outputsClass = BoostedTreesFlushQuantileSummaries.class + ) public static class Inputs extends RawOpInputs { /** * resource handle referring to a QuantileStreamResource. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesGetEnsembleStates.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesGetEnsembleStates.java index 47da16b400c..5359f45b999 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesGetEnsembleStates.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesGetEnsembleStates.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -34,6 +36,10 @@ /** * Retrieves the tree ensemble resource stamp token, number of trees and growing statistics. */ +@OpMetadata( + opType = BoostedTreesGetEnsembleStates.OP_NAME, + inputsClass = BoostedTreesGetEnsembleStates.Inputs.class +) public final class BoostedTreesGetEnsembleStates extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class BoostedTreesGetEnsembleStates extends RawOp { private Output lastLayerNodesRange; - private BoostedTreesGetEnsembleStates(Operation operation) { - super(operation); + public BoostedTreesGetEnsembleStates(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; stampToken = operation.output(outputIdx++); numTrees = operation.output(outputIdx++); @@ -123,6 +129,9 @@ public Output lastLayerNodesRange() { return lastLayerNodesRange; } + @OpInputsMetadata( + outputsClass = BoostedTreesGetEnsembleStates.class + ) public static class Inputs extends RawOpInputs { /** * Handle to the tree ensemble. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesMakeQuantileSummaries.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesMakeQuantileSummaries.java index b94831dbbe8..51875c0a714 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesMakeQuantileSummaries.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesMakeQuantileSummaries.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -37,6 +39,10 @@ * An op that takes a list of tensors (one tensor per feature) and outputs the * quantile summaries for each tensor. 
*/ +@OpMetadata( + opType = BoostedTreesMakeQuantileSummaries.OP_NAME, + inputsClass = BoostedTreesMakeQuantileSummaries.Inputs.class +) public final class BoostedTreesMakeQuantileSummaries extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class BoostedTreesMakeQuantileSummaries extends RawOp implements It private List> summaries; @SuppressWarnings("unchecked") - private BoostedTreesMakeQuantileSummaries(Operation operation) { - super(operation); + public BoostedTreesMakeQuantileSummaries(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int summariesLength = operation.outputListLength("summaries"); summaries = Arrays.asList((Output[]) operation.outputList(outputIdx, summariesLength)); @@ -92,6 +98,9 @@ public Iterator> iterator() { return (Iterator) summaries.iterator(); } + @OpInputsMetadata( + outputsClass = BoostedTreesMakeQuantileSummaries.class + ) public static class Inputs extends RawOpInputs { /** * float; List of Rank 1 Tensors each containing values for a single feature. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesMakeStatsSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesMakeStatsSummary.java index 62268ff047f..9bf16045532 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesMakeStatsSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesMakeStatsSummary.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -35,6 +37,10 @@ * Makes the summary of accumulated stats for the batch. * The summary stats contains gradients and hessians accumulated into the corresponding node and bucket for each example. */ +@OpMetadata( + opType = BoostedTreesMakeStatsSummary.OP_NAME, + inputsClass = BoostedTreesMakeStatsSummary.Inputs.class +) public final class BoostedTreesMakeStatsSummary extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class BoostedTreesMakeStatsSummary extends RawOp implements Operand private Output statsSummary; - private BoostedTreesMakeStatsSummary(Operation operation) { - super(operation); + public BoostedTreesMakeStatsSummary(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; statsSummary = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return statsSummary; } + @OpInputsMetadata( + outputsClass = BoostedTreesMakeStatsSummary.class + ) public static class Inputs extends RawOpInputs { /** * int32 Rank 1 Tensor containing node ids, which each example falls into for the requested layer. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesPredict.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesPredict.java index 292fbb00e52..52bf9539c72 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesPredict.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesPredict.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * computes the logits. It is designed to be used during prediction. * It traverses all the trees and calculates the final score for each instance. */ +@OpMetadata( + opType = BoostedTreesPredict.OP_NAME, + inputsClass = BoostedTreesPredict.Inputs.class +) public final class BoostedTreesPredict extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class BoostedTreesPredict extends RawOp implements Operand logits; - private BoostedTreesPredict(Operation operation) { - super(operation); + public BoostedTreesPredict(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; logits = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return logits; } + @OpInputsMetadata( + outputsClass = BoostedTreesPredict.class + ) public static class Inputs extends RawOpInputs { /** * The treeEnsembleHandle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceAddSummaries.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceAddSummaries.java index ccc5869c5ea..567bbc4e077 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceAddSummaries.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceAddSummaries.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TType; @@ -36,14 +38,18 @@ * summary Tensor is rank 2, containing summaries (value, weight, min_rank, max_rank) * for a single feature. 
*/ +@OpMetadata( + opType = BoostedTreesQuantileStreamResourceAddSummaries.OP_NAME, + inputsClass = BoostedTreesQuantileStreamResourceAddSummaries.Inputs.class +) public final class BoostedTreesQuantileStreamResourceAddSummaries extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "BoostedTreesQuantileStreamResourceAddSummaries"; - private BoostedTreesQuantileStreamResourceAddSummaries(Operation operation) { - super(operation); + public BoostedTreesQuantileStreamResourceAddSummaries(Operation operation) { + super(operation, OP_NAME); } /** @@ -66,6 +72,9 @@ public static BoostedTreesQuantileStreamResourceAddSummaries create(Scope scope, return new BoostedTreesQuantileStreamResourceAddSummaries(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = BoostedTreesQuantileStreamResourceAddSummaries.class + ) public static class Inputs extends RawOpInputs { /** * resource handle referring to a QuantileStreamResource. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceDeserialize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceDeserialize.java index 264d083247c..0cc54ad8f71 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceDeserialize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceDeserialize.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TType; @@ -34,14 +36,18 @@ * Deserialize bucket boundaries and ready flag into current QuantileAccumulator. * An op that deserializes bucket boundaries and are boundaries ready flag into current QuantileAccumulator. */ +@OpMetadata( + opType = BoostedTreesQuantileStreamResourceDeserialize.OP_NAME, + inputsClass = BoostedTreesQuantileStreamResourceDeserialize.Inputs.class +) public final class BoostedTreesQuantileStreamResourceDeserialize extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "BoostedTreesQuantileStreamResourceDeserialize"; - private BoostedTreesQuantileStreamResourceDeserialize(Operation operation) { - super(operation); + public BoostedTreesQuantileStreamResourceDeserialize(Operation operation) { + super(operation, OP_NAME); } /** @@ -64,6 +70,9 @@ public static BoostedTreesQuantileStreamResourceDeserialize create(Scope scope, return new BoostedTreesQuantileStreamResourceDeserialize(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = BoostedTreesQuantileStreamResourceDeserialize.class + ) public static class Inputs extends RawOpInputs { /** * resource handle referring to a QuantileStreamResource. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceFlush.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceFlush.java index cdad72ec124..a3cef6232f6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceFlush.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceFlush.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -33,14 +35,18 @@ * Flush the summaries for a quantile stream resource. * An op that flushes the summaries for a quantile stream resource. */ +@OpMetadata( + opType = BoostedTreesQuantileStreamResourceFlush.OP_NAME, + inputsClass = BoostedTreesQuantileStreamResourceFlush.Inputs.class +) public final class BoostedTreesQuantileStreamResourceFlush extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "BoostedTreesQuantileStreamResourceFlush"; - private BoostedTreesQuantileStreamResourceFlush(Operation operation) { - super(operation); + public BoostedTreesQuantileStreamResourceFlush(Operation operation) { + super(operation, OP_NAME); } /** @@ -112,6 +118,9 @@ public Options generateQuantiles(Boolean generateQuantiles) { } } + @OpInputsMetadata( + outputsClass = BoostedTreesQuantileStreamResourceFlush.class + ) public static class Inputs extends RawOpInputs { /** * resource handle referring to a QuantileStreamResource. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceGetBucketBoundaries.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceGetBucketBoundaries.java index 90146726c58..8f8e096c69e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceGetBucketBoundaries.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceGetBucketBoundaries.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * An op that returns a list of float tensors for a quantile stream resource. Each * tensor is Rank 1 containing bucket boundaries for a single feature. 
*/ +@OpMetadata( + opType = BoostedTreesQuantileStreamResourceGetBucketBoundaries.OP_NAME, + inputsClass = BoostedTreesQuantileStreamResourceGetBucketBoundaries.Inputs.class +) public final class BoostedTreesQuantileStreamResourceGetBucketBoundaries extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class BoostedTreesQuantileStreamResourceGetBucketBoundaries extends private List> bucketBoundaries; @SuppressWarnings("unchecked") - private BoostedTreesQuantileStreamResourceGetBucketBoundaries(Operation operation) { - super(operation); + public BoostedTreesQuantileStreamResourceGetBucketBoundaries(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int bucketBoundariesLength = operation.outputListLength("bucket_boundaries"); bucketBoundaries = Arrays.asList((Output[]) operation.outputList(outputIdx, bucketBoundariesLength)); @@ -88,6 +94,9 @@ public Iterator> iterator() { return (Iterator) bucketBoundaries.iterator(); } + @OpInputsMetadata( + outputsClass = BoostedTreesQuantileStreamResourceGetBucketBoundaries.class + ) public static class Inputs extends RawOpInputs { /** * resource handle referring to a QuantileStreamResource. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceHandleOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceHandleOp.java index 2d9cc1649f7..4d6c0fdd948 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceHandleOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesQuantileStreamResourceHandleOp.java @@ -27,11 +27,17 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.family.TType; /** * Creates a handle to a BoostedTreesQuantileStreamResource. 
*/ +@OpMetadata( + opType = BoostedTreesQuantileStreamResourceHandleOp.OP_NAME, + inputsClass = BoostedTreesQuantileStreamResourceHandleOp.Inputs.class +) public final class BoostedTreesQuantileStreamResourceHandleOp extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -41,8 +47,8 @@ public final class BoostedTreesQuantileStreamResourceHandleOp extends RawOp impl private Output resource; @SuppressWarnings("unchecked") - private BoostedTreesQuantileStreamResourceHandleOp(Operation operation) { - super(operation); + public BoostedTreesQuantileStreamResourceHandleOp(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; resource = operation.output(outputIdx++); } @@ -141,6 +147,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = BoostedTreesQuantileStreamResourceHandleOp.class + ) public static class Inputs extends RawOpInputs { /** * The container attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSerializeEnsemble.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSerializeEnsemble.java index bcec982dda1..5d2c38e2c24 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSerializeEnsemble.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSerializeEnsemble.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -34,6 +36,10 @@ /** * Serializes the tree ensemble to a proto. */ +@OpMetadata( + opType = BoostedTreesSerializeEnsemble.OP_NAME, + inputsClass = BoostedTreesSerializeEnsemble.Inputs.class +) public final class BoostedTreesSerializeEnsemble extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class BoostedTreesSerializeEnsemble extends RawOp { private Output treeEnsembleSerialized; - private BoostedTreesSerializeEnsemble(Operation operation) { - super(operation); + public BoostedTreesSerializeEnsemble(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; stampToken = operation.output(outputIdx++); treeEnsembleSerialized = operation.output(outputIdx++); @@ -86,6 +92,9 @@ public Output treeEnsembleSerialized() { return treeEnsembleSerialized; } + @OpInputsMetadata( + outputsClass = BoostedTreesSerializeEnsemble.class + ) public static class Inputs extends RawOpInputs { /** * Handle to the tree ensemble. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSparseAggregateStats.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSparseAggregateStats.java index 1a0ae09deb0..42788fc3b7d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSparseAggregateStats.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSparseAggregateStats.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -34,6 +36,10 @@ * Aggregates the summary of accumulated stats for the batch. * The summary stats contains gradients and hessians accumulated for each node, bucket and dimension id. */ +@OpMetadata( + opType = BoostedTreesSparseAggregateStats.OP_NAME, + inputsClass = BoostedTreesSparseAggregateStats.Inputs.class +) public final class BoostedTreesSparseAggregateStats extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class BoostedTreesSparseAggregateStats extends RawOp { private Output statsSummaryShape; - private BoostedTreesSparseAggregateStats(Operation operation) { - super(operation); + public BoostedTreesSparseAggregateStats(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; statsSummaryIndices = operation.output(outputIdx++); statsSummaryValues = operation.output(outputIdx++); @@ -127,6 +133,9 @@ public Output statsSummaryShape() { return statsSummaryShape; } + @OpInputsMetadata( + outputsClass = BoostedTreesSparseAggregateStats.class + ) public static class Inputs extends RawOpInputs { /** * int32; Rank 1 Tensor containing node ids for each example, shape [batch_size]. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSparseCalculateBestFeatureSplit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSparseCalculateBestFeatureSplit.java index a05b6234b12..235c28b624b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSparseCalculateBestFeatureSplit.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesSparseCalculateBestFeatureSplit.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; import org.tensorflow.types.TString; @@ -38,6 +40,10 @@ *

    In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features). *

    The output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature. */ +@OpMetadata( + opType = BoostedTreesSparseCalculateBestFeatureSplit.OP_NAME, + inputsClass = BoostedTreesSparseCalculateBestFeatureSplit.Inputs.class +) public final class BoostedTreesSparseCalculateBestFeatureSplit extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -58,8 +64,8 @@ public final class BoostedTreesSparseCalculateBestFeatureSplit extends RawOp { private Output splitWithDefaultDirections; - private BoostedTreesSparseCalculateBestFeatureSplit(Operation operation) { - super(operation); + public BoostedTreesSparseCalculateBestFeatureSplit(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; nodeIds = operation.output(outputIdx++); gains = operation.output(outputIdx++); @@ -211,6 +217,9 @@ public Options splitType(String splitType) { } } + @OpInputsMetadata( + outputsClass = BoostedTreesSparseCalculateBestFeatureSplit.class + ) public static class Inputs extends RawOpInputs { /** * A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within {@code stats_summary_list}. The nodes are iterated between the two nodes specified by the tensor, as like {@code for node_id in range(node_id_range[0], node_id_range[1])} (Note that the last index node_id_range[1] is exclusive). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesTrainingPredict.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesTrainingPredict.java index e3114aacc16..9a9199ce224 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesTrainingPredict.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesTrainingPredict.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * It traverses the trees starting from cached tree id and cached node id and * calculates the updates to be pushed to the cache. 
*/ +@OpMetadata( + opType = BoostedTreesTrainingPredict.OP_NAME, + inputsClass = BoostedTreesTrainingPredict.Inputs.class +) public final class BoostedTreesTrainingPredict extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class BoostedTreesTrainingPredict extends RawOp { private Output nodeIds; - private BoostedTreesTrainingPredict(Operation operation) { - super(operation); + public BoostedTreesTrainingPredict(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; partialLogits = operation.output(outputIdx++); treeIds = operation.output(outputIdx++); @@ -117,6 +123,9 @@ public Output nodeIds() { return nodeIds; } + @OpInputsMetadata( + outputsClass = BoostedTreesTrainingPredict.class + ) public static class Inputs extends RawOpInputs { /** * The treeEnsembleHandle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesUpdateEnsemble.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesUpdateEnsemble.java index f4e436613e2..bc41e4ee4c7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesUpdateEnsemble.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesUpdateEnsemble.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -35,14 +37,18 @@ * Updates the tree ensemble by either adding a layer to the last tree being grown * or by starting a new tree. */ +@OpMetadata( + opType = BoostedTreesUpdateEnsemble.OP_NAME, + inputsClass = BoostedTreesUpdateEnsemble.Inputs.class +) public final class BoostedTreesUpdateEnsemble extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "BoostedTreesUpdateEnsemble"; - private BoostedTreesUpdateEnsemble(Operation operation) { - super(operation); + public BoostedTreesUpdateEnsemble(Operation operation) { + super(operation, OP_NAME); } /** @@ -92,6 +98,9 @@ public static BoostedTreesUpdateEnsemble create(Scope scope, return new BoostedTreesUpdateEnsemble(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = BoostedTreesUpdateEnsemble.class + ) public static class Inputs extends RawOpInputs { /** * Handle to the ensemble variable. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesUpdateEnsembleV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesUpdateEnsembleV2.java
index ba73b86fac6..0542c89a28b 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesUpdateEnsembleV2.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/BoostedTreesUpdateEnsembleV2.java
@@ -27,6 +27,8 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.types.TFloat32;
 import org.tensorflow.types.TInt32;
 import org.tensorflow.types.TString;
@@ -36,14 +38,18 @@
  * Updates the tree ensemble by adding a layer to the last tree being grown
  * or by starting a new tree.
  */
+@OpMetadata(
+    opType = BoostedTreesUpdateEnsembleV2.OP_NAME,
+    inputsClass = BoostedTreesUpdateEnsembleV2.Inputs.class
+)
 public final class BoostedTreesUpdateEnsembleV2 extends RawOp {
   /**
    * The name of this op, as known by TensorFlow core engine
    */
   public static final String OP_NAME = "BoostedTreesUpdateEnsembleV2";
 
-  private BoostedTreesUpdateEnsembleV2(Operation operation) {
-    super(operation);
+  public BoostedTreesUpdateEnsembleV2(Operation operation) {
+    super(operation, OP_NAME);
   }
 
   /**
@@ -167,6 +173,9 @@ public Options numGroups(Long numGroups) {
     }
   }
 
+  @OpInputsMetadata(
+      outputsClass = BoostedTreesUpdateEnsembleV2.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      * Handle to the ensemble variable.
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/IsBoostedTreesEnsembleInitialized.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/IsBoostedTreesEnsembleInitialized.java
index 24e8f90f713..68964abb892 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/IsBoostedTreesEnsembleInitialized.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/IsBoostedTreesEnsembleInitialized.java
@@ -27,12 +27,18 @@
 import org.tensorflow.op.RawOpInputs;
 import org.tensorflow.op.Scope;
 import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
 import org.tensorflow.types.TBool;
 import org.tensorflow.types.family.TType;
 
 /**
  * Checks whether a tree ensemble has been initialized.
  */
+@OpMetadata(
+    opType = IsBoostedTreesEnsembleInitialized.OP_NAME,
+    inputsClass = IsBoostedTreesEnsembleInitialized.Inputs.class
+)
 public final class IsBoostedTreesEnsembleInitialized extends RawOp implements Operand {
   /**
    * The name of this op, as known by TensorFlow core engine
@@ -41,8 +47,8 @@ public final class IsBoostedTreesEnsembleInitialized extends RawOp implements Op
 
   private Output isInitialized;
 
-  private IsBoostedTreesEnsembleInitialized(Operation operation) {
-    super(operation);
+  public IsBoostedTreesEnsembleInitialized(Operation operation) {
+    super(operation, OP_NAME);
     int outputIdx = 0;
     isInitialized = operation.output(outputIdx++);
   }
@@ -78,6 +84,9 @@ public Output asOutput() {
     return isInitialized;
   }
 
+  @OpInputsMetadata(
+      outputsClass = IsBoostedTreesEnsembleInitialized.class
+  )
   public static class Inputs extends RawOpInputs {
     /**
      * Handle to the tree ensemble resource.
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/IsBoostedTreesQuantileStreamResourceInitialized.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/IsBoostedTreesQuantileStreamResourceInitialized.java index 9a15583349a..21db9a35455 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/IsBoostedTreesQuantileStreamResourceInitialized.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/estimator/IsBoostedTreesQuantileStreamResourceInitialized.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TBool; import org.tensorflow.types.family.TType; @@ -34,6 +36,10 @@ * Checks whether a quantile stream has been initialized. * An Op that checks if quantile stream resource is initialized. */ +@OpMetadata( + opType = IsBoostedTreesQuantileStreamResourceInitialized.OP_NAME, + inputsClass = IsBoostedTreesQuantileStreamResourceInitialized.Inputs.class +) public final class IsBoostedTreesQuantileStreamResourceInitialized extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -42,8 +48,8 @@ public final class IsBoostedTreesQuantileStreamResourceInitialized extends RawOp private Output isInitialized; - private IsBoostedTreesQuantileStreamResourceInitialized(Operation operation) { - super(operation); + public IsBoostedTreesQuantileStreamResourceInitialized(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; isInitialized = operation.output(outputIdx++); } @@ -79,6 +85,9 @@ public Output asOutput() { return isInitialized; } + @OpInputsMetadata( + outputsClass = IsBoostedTreesQuantileStreamResourceInitialized.class + ) public static class Inputs extends RawOpInputs { /** * resource; The reference to quantile stream resource handle. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustContrast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustContrast.java index 3e234b8c767..091153bfd77 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustContrast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustContrast.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -44,6 +46,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = AdjustContrast.OP_NAME, + inputsClass = AdjustContrast.Inputs.class +) @Operator( group = "image" ) @@ -55,8 +61,8 @@ public final class AdjustContrast extends RawOp implements Op private Output output; - private AdjustContrast(Operation operation) { - super(operation); + public AdjustContrast(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -95,6 +101,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = AdjustContrast.class + ) public static class Inputs extends RawOpInputs> { /** * Images to adjust. At least 3-D. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustHue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustHue.java index 99a4e8a28f6..b30f864511e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustHue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustHue.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -42,6 +44,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = AdjustHue.OP_NAME, + inputsClass = AdjustHue.Inputs.class +) @Operator( group = "image" ) @@ -53,8 +59,8 @@ public final class AdjustHue extends RawOp implements Operand private Output output; - private AdjustHue(Operation operation) { - super(operation); + public AdjustHue(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -93,6 +99,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = AdjustHue.class + ) public static class Inputs extends RawOpInputs> { /** * Images to adjust. At least 3-D. 
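/*
 * The hunks above (and throughout this patch) apply one recurring change to every generated
 * wrapper: the (Operation) constructor is widened from private to public and now forwards
 * OP_NAME to the RawOp super constructor. A minimal usage sketch of what that makes possible,
 * assuming a graph built through the Ops API; the class name, variable names, and the toy
 * constant values below are illustrative only and are not part of the patch:
 */
import org.tensorflow.Graph;
import org.tensorflow.Operation;
import org.tensorflow.Output;
import org.tensorflow.op.Ops;
import org.tensorflow.op.image.AdjustContrast;
import org.tensorflow.types.TFloat32;

public final class RebuildTypedWrapper {
  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);

      // Build a forward op the usual way.
      AdjustContrast<TFloat32> adjusted = tf.image.adjustContrast(
          tf.constant(new float[][][][] {{{{1f, 2f, 3f}, {4f, 5f, 6f}}}}),
          tf.constant(0.5f));

      // Because the (Operation) constructor is now public, the underlying graph operation
      // can be re-wrapped into its typed class, e.g. by framework code that only holds the
      // raw Operation.
      Operation raw = adjusted.asOutput().op();
      AdjustContrast<TFloat32> rebuilt = new AdjustContrast<>(raw);
      Output<TFloat32> out = rebuilt.asOutput();
      System.out.println(out.shape());
    }
  }
}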
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustSaturation.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustSaturation.java index 42949c86350..1bc2f095dcf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustSaturation.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustSaturation.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -42,6 +44,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = AdjustSaturation.OP_NAME, + inputsClass = AdjustSaturation.Inputs.class +) @Operator( group = "image" ) @@ -53,8 +59,8 @@ public final class AdjustSaturation extends RawOp implements private Output output; - private AdjustSaturation(Operation operation) { - super(operation); + public AdjustSaturation(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -93,6 +99,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = AdjustSaturation.class + ) public static class Inputs extends RawOpInputs> { /** * Images to adjust. At least 3-D. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CombinedNonMaxSuppression.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CombinedNonMaxSuppression.java index ffe6982c45b..ea56fa18101 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CombinedNonMaxSuppression.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CombinedNonMaxSuppression.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -47,6 +49,10 @@ * The output of this operation is the final boxes, scores and classes tensor * returned after performing non_max_suppression. */ +@OpMetadata( + opType = CombinedNonMaxSuppression.OP_NAME, + inputsClass = CombinedNonMaxSuppression.Inputs.class +) @Operator( group = "image" ) @@ -64,8 +70,8 @@ public final class CombinedNonMaxSuppression extends RawOp { private Output validDetections; - private CombinedNonMaxSuppression(Operation operation) { - super(operation); + public CombinedNonMaxSuppression(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; nmsedBoxes = operation.output(outputIdx++); nmsedScores = operation.output(outputIdx++); @@ -228,6 +234,9 @@ public Options clipBoxes(Boolean clipBoxes) { } } + @OpInputsMetadata( + outputsClass = CombinedNonMaxSuppression.class + ) public static class Inputs extends RawOpInputs { /** * A 4-D float tensor of shape {@code [batch_size, num_boxes, q, 4]}. 
If {@code q} is 1 then diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResize.java index bafdeda6193..7ab253d6b3a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResize.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -50,6 +52,10 @@ * {@code tf.image.resize_nearest_neighbor()}(depends on the {@code method} argument) with * {@code align_corners=True}. */ +@OpMetadata( + opType = CropAndResize.OP_NAME, + inputsClass = CropAndResize.Inputs.class +) @Operator( group = "image" ) @@ -61,8 +67,8 @@ public final class CropAndResize extends RawOp implements Operand { private Output crops; - private CropAndResize(Operation operation) { - super(operation); + public CropAndResize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; crops = operation.output(outputIdx++); } @@ -188,6 +194,9 @@ public Options extrapolationValue(Float extrapolationValue) { } } + @OpInputsMetadata( + outputsClass = CropAndResize.class + ) public static class Inputs extends RawOpInputs { /** * A 4-D tensor of shape {@code [batch, image_height, image_width, depth]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResizeGradBoxes.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResizeGradBoxes.java index fb4da6130e8..69bda4c6326 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResizeGradBoxes.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResizeGradBoxes.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -36,6 +38,10 @@ /** * Computes the gradient of the crop_and_resize op wrt the input boxes tensor. */ +@OpMetadata( + opType = CropAndResizeGradBoxes.OP_NAME, + inputsClass = CropAndResizeGradBoxes.Inputs.class +) @Operator( group = "image" ) @@ -47,8 +53,8 @@ public final class CropAndResizeGradBoxes extends RawOp implements Operand output; - private CropAndResizeGradBoxes(Operation operation) { - super(operation); + public CropAndResizeGradBoxes(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -139,6 +145,9 @@ public Options method(String method) { } } + @OpInputsMetadata( + outputsClass = CropAndResizeGradBoxes.class + ) public static class Inputs extends RawOpInputs { /** * A 4-D tensor of shape {@code [num_boxes, crop_height, crop_width, depth]}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResizeGradImage.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResizeGradImage.java index 0f664e36fad..b754d75bc6e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResizeGradImage.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResizeGradImage.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = CropAndResizeGradImage.OP_NAME, + inputsClass = CropAndResizeGradImage.Inputs.class +) @Operator( group = "image" ) @@ -50,8 +56,8 @@ public final class CropAndResizeGradImage extends RawOp imple private Output output; - private CropAndResizeGradImage(Operation operation) { - super(operation); + public CropAndResizeGradImage(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -146,6 +152,9 @@ public Options method(String method) { } } + @OpInputsMetadata( + outputsClass = CropAndResizeGradImage.class + ) public static class Inputs extends RawOpInputs> { /** * A 4-D tensor of shape {@code [num_boxes, crop_height, crop_width, depth]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeAndCropJpeg.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeAndCropJpeg.java index 3ddd568feb4..35a36990a63 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeAndCropJpeg.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeAndCropJpeg.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; import org.tensorflow.types.TString; @@ -50,6 +52,10 @@ *

    It is equivalent to a combination of decode and crop, but much faster by only * decoding partial jpeg image. */ +@OpMetadata( + opType = DecodeAndCropJpeg.OP_NAME, + inputsClass = DecodeAndCropJpeg.Inputs.class +) @Operator( group = "image" ) @@ -61,8 +67,8 @@ public final class DecodeAndCropJpeg extends RawOp implements Operand { private Output image; - private DecodeAndCropJpeg(Operation operation) { - super(operation); + public DecodeAndCropJpeg(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; image = operation.output(outputIdx++); } @@ -283,6 +289,9 @@ public Options dctMethod(String dctMethod) { } } + @OpInputsMetadata( + outputsClass = DecodeAndCropJpeg.class + ) public static class Inputs extends RawOpInputs { /** * 0-D. The JPEG-encoded image. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeBmp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeBmp.java index 12995a44d7d..404059fba22 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeBmp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeBmp.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.TUint8; @@ -42,6 +44,10 @@ *

* <li>4: output an RGBA image.</li>
  • * */ +@OpMetadata( + opType = DecodeBmp.OP_NAME, + inputsClass = DecodeBmp.Inputs.class +) @Operator( group = "image" ) @@ -53,8 +59,8 @@ public final class DecodeBmp extends RawOp implements Operand { private Output image; - private DecodeBmp(Operation operation) { - super(operation); + public DecodeBmp(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; image = operation.output(outputIdx++); } @@ -128,6 +134,9 @@ public Options channels(Long channels) { } } + @OpInputsMetadata( + outputsClass = DecodeBmp.class + ) public static class Inputs extends RawOpInputs { /** * 0-D. The BMP-encoded image. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeGif.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeGif.java index f9ea2e04703..27d250f6e18 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeGif.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeGif.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.TUint8; @@ -42,6 +44,10 @@ *

    This op also supports decoding JPEGs and PNGs, though it is cleaner to use * {@code tf.io.decode_image}. */ +@OpMetadata( + opType = DecodeGif.OP_NAME, + inputsClass = DecodeGif.Inputs.class +) @Operator( group = "image" ) @@ -53,8 +59,8 @@ public final class DecodeGif extends RawOp implements Operand { private Output image; - private DecodeGif(Operation operation) { - super(operation); + public DecodeGif(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; image = operation.output(outputIdx++); } @@ -89,6 +95,9 @@ public Output asOutput() { return image; } + @OpInputsMetadata( + outputsClass = DecodeGif.class + ) public static class Inputs extends RawOpInputs { /** * 0-D. The GIF-encoded image. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeImage.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeImage.java index 6a1720d52bf..a1eb37a26f1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeImage.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeImage.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -54,6 +56,10 @@ * * @param data type for {@code image} output */ +@OpMetadata( + opType = DecodeImage.OP_NAME, + inputsClass = DecodeImage.Inputs.class +) @Operator( group = "image" ) @@ -65,8 +71,8 @@ public final class DecodeImage extends RawOp implements Opera private Output image; - private DecodeImage(Operation operation) { - super(operation); + public DecodeImage(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; image = operation.output(outputIdx++); } @@ -193,6 +199,9 @@ public Options expandAnimations(Boolean expandAnimations) { } } + @OpInputsMetadata( + outputsClass = DecodeImage.class + ) public static class Inputs extends RawOpInputs> { /** * 0-D. The encoded image bytes. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeJpeg.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeJpeg.java index ecf649a14c7..674522acfa3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeJpeg.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeJpeg.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.TUint8; @@ -49,6 +51,10 @@ *

    This op also supports decoding PNGs and non-animated GIFs since the interface is * the same, though it is cleaner to use {@code tf.io.decode_image}. */ +@OpMetadata( + opType = DecodeJpeg.OP_NAME, + inputsClass = DecodeJpeg.Inputs.class +) @Operator( group = "image" ) @@ -60,8 +66,8 @@ public final class DecodeJpeg extends RawOp implements Operand { private Output image; - private DecodeJpeg(Operation operation) { - super(operation); + public DecodeJpeg(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; image = operation.output(outputIdx++); } @@ -279,6 +285,9 @@ public Options dctMethod(String dctMethod) { } } + @OpInputsMetadata( + outputsClass = DecodeJpeg.class + ) public static class Inputs extends RawOpInputs { /** * 0-D. The JPEG-encoded image. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodePng.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodePng.java index d0e58936125..bf75085ca17 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodePng.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodePng.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -52,6 +54,10 @@ * * @param data type for {@code image} output */ +@OpMetadata( + opType = DecodePng.OP_NAME, + inputsClass = DecodePng.Inputs.class +) @Operator( group = "image" ) @@ -63,8 +69,8 @@ public final class DecodePng extends RawOp implements Operand private Output image; - private DecodePng(Operation operation) { - super(operation); + public DecodePng(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; image = operation.output(outputIdx++); } @@ -158,6 +164,9 @@ public Options channels(Long channels) { } } + @OpInputsMetadata( + outputsClass = DecodePng.class + ) public static class Inputs extends RawOpInputs> { /** * 0-D. The PNG-encoded image. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DrawBoundingBoxes.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DrawBoundingBoxes.java index 44bc9156c06..2621dd4f6e6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DrawBoundingBoxes.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DrawBoundingBoxes.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -46,6 +48,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = DrawBoundingBoxes.OP_NAME, + inputsClass = DrawBoundingBoxes.Inputs.class +) @Operator( group = "image" ) @@ -57,8 +63,8 @@ public final class DrawBoundingBoxes extends RawOp implements private Output output; - private DrawBoundingBoxes(Operation operation) { - super(operation); + public DrawBoundingBoxes(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -101,6 +107,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = DrawBoundingBoxes.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D with shape {@code [batch, height, width, depth]}. A batch of images. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodeJpeg.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodeJpeg.java index dd8f13b3189..45ffd3227c3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodeJpeg.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodeJpeg.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.TUint8; @@ -50,6 +52,10 @@ *

* <li>3: Output an RGB image.</li>
  • * */ +@OpMetadata( + opType = EncodeJpeg.OP_NAME, + inputsClass = EncodeJpeg.Inputs.class +) @Operator( group = "image" ) @@ -61,8 +67,8 @@ public final class EncodeJpeg extends RawOp implements Operand { private Output contents; - private EncodeJpeg(Operation operation) { - super(operation); + public EncodeJpeg(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; contents = operation.output(outputIdx++); } @@ -346,6 +352,9 @@ public Options xmpMetadata(String xmpMetadata) { } } + @OpInputsMetadata( + outputsClass = EncodeJpeg.class + ) public static class Inputs extends RawOpInputs { /** * 3-D with shape {@code [height, width, channels]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodeJpegVariableQuality.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodeJpegVariableQuality.java index 7b2b8faef21..0800236434d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodeJpegVariableQuality.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodeJpegVariableQuality.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; import org.tensorflow.types.TString; @@ -37,6 +39,10 @@ * {@code image} is a 3-D uint8 Tensor of shape {@code [height, width, channels]}. * {@code quality} is an int32 jpeg compression quality value between 0 and 100. */ +@OpMetadata( + opType = EncodeJpegVariableQuality.OP_NAME, + inputsClass = EncodeJpegVariableQuality.Inputs.class +) @Operator( group = "image" ) @@ -48,8 +54,8 @@ public final class EncodeJpegVariableQuality extends RawOp implements Operand contents; - private EncodeJpegVariableQuality(Operation operation) { - super(operation); + public EncodeJpegVariableQuality(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; contents = operation.output(outputIdx++); } @@ -87,6 +93,9 @@ public Output asOutput() { return contents; } + @OpInputsMetadata( + outputsClass = EncodeJpegVariableQuality.class + ) public static class Inputs extends RawOpInputs { /** * Images to adjust. At least 3-D. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodePng.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodePng.java index 81d56bc23a5..810617e128f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodePng.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/EncodePng.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -46,6 +48,10 @@ * default or a value from 0 to 9. 9 is the highest compression level, generating * the smallest output, but is slower. 
*/ +@OpMetadata( + opType = EncodePng.OP_NAME, + inputsClass = EncodePng.Inputs.class +) @Operator( group = "image" ) @@ -57,8 +63,8 @@ public final class EncodePng extends RawOp implements Operand { private Output contents; - private EncodePng(Operation operation) { - super(operation); + public EncodePng(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; contents = operation.output(outputIdx++); } @@ -133,6 +139,9 @@ public Options compression(Long compression) { } } + @OpInputsMetadata( + outputsClass = EncodePng.class + ) public static class Inputs extends RawOpInputs { /** * 3-D with shape {@code [height, width, channels]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractGlimpse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractGlimpse.java index 45677ff83f1..fa8eba3e65b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractGlimpse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractGlimpse.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -52,6 +54,10 @@ * numbers of pixels. * */ +@OpMetadata( + opType = ExtractGlimpse.OP_NAME, + inputsClass = ExtractGlimpse.Inputs.class +) public final class ExtractGlimpse extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -60,8 +66,8 @@ public final class ExtractGlimpse extends RawOp implements Operand { private Output glimpse; - private ExtractGlimpse(Operation operation) { - super(operation); + public ExtractGlimpse(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; glimpse = operation.output(outputIdx++); } @@ -233,6 +239,9 @@ public Options noise(String noise) { } } + @OpInputsMetadata( + outputsClass = ExtractGlimpse.class + ) public static class Inputs extends RawOpInputs { /** * A 4-D float tensor of shape {@code [batch_size, height, width, channels]}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractImagePatches.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractImagePatches.java index 80353b0ab42..3c168cc0e87 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractImagePatches.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractImagePatches.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * * @param data type for {@code patches} output */ +@OpMetadata( + opType = ExtractImagePatches.OP_NAME, + inputsClass = ExtractImagePatches.Inputs.class +) @Operator( group = "image" ) @@ -48,8 +54,8 @@ public final class ExtractImagePatches extends RawOp implements private Output patches; - private ExtractImagePatches(Operation operation) { - super(operation); + public ExtractImagePatches(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; patches = operation.output(outputIdx++); } @@ -114,6 +120,9 @@ public Output asOutput() { return patches; } + @OpInputsMetadata( + outputsClass = ExtractImagePatches.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D Tensor with shape {@code [batch, in_rows, in_cols, depth]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractJpegShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractJpegShape.java index d5c7f09060c..15b168d66ad 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractJpegShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractJpegShape.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -40,6 +42,10 @@ * * @param data type for {@code image_shape} output */ +@OpMetadata( + opType = ExtractJpegShape.OP_NAME, + inputsClass = ExtractJpegShape.Inputs.class +) @Operator( group = "image" ) @@ -51,8 +57,8 @@ public final class ExtractJpegShape extends RawOp implements private Output imageShape; - private ExtractJpegShape(Operation operation) { - super(operation); + public ExtractJpegShape(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; imageShape = operation.output(outputIdx++); } @@ -106,6 +112,9 @@ public Output asOutput() { return imageShape; } + @OpInputsMetadata( + outputsClass = ExtractJpegShape.class + ) public static class Inputs extends RawOpInputs> { /** * 0-D. The JPEG-encoded image. 
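/*
 * Each file in this patch also gains the @OpMetadata / @OpInputsMetadata pair, which ties an op
 * wrapper to its nested Inputs class and back again. A small sketch of reading that link
 * reflectively; it assumes the annotations are retained at runtime, and it is only an
 * illustration of the cross-link, not the mechanism the library itself uses:
 */
import org.tensorflow.op.annotation.OpInputsMetadata;
import org.tensorflow.op.annotation.OpMetadata;
import org.tensorflow.op.image.AdjustContrast;

public final class OpMetadataProbe {
  public static void main(String[] args) {
    OpMetadata meta = AdjustContrast.class.getAnnotation(OpMetadata.class);
    if (meta == null) {
      System.out.println("OpMetadata is not visible at runtime in this build");
      return;
    }
    System.out.println("op type:       " + meta.opType());       // the registered op name
    System.out.println("inputs class:  " + meta.inputsClass());  // AdjustContrast.Inputs

    OpInputsMetadata inputsMeta = meta.inputsClass().getAnnotation(OpInputsMetadata.class);
    if (inputsMeta != null) {
      System.out.println("outputs class: " + inputsMeta.outputsClass()); // back to AdjustContrast
    }
  }
}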
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/GenerateBoundingBoxProposals.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/GenerateBoundingBoxProposals.java index d0548098263..57643c3603a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/GenerateBoundingBoxProposals.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/GenerateBoundingBoxProposals.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -46,6 +48,10 @@ * `roi_probabilities`: probability scores of each roi in 'rois', a 2D tensor of shape [Batch,post_nms_topn], padded with 0 if needed, sorted by scores. * */ +@OpMetadata( + opType = GenerateBoundingBoxProposals.OP_NAME, + inputsClass = GenerateBoundingBoxProposals.Inputs.class +) public final class GenerateBoundingBoxProposals extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -56,8 +62,8 @@ public final class GenerateBoundingBoxProposals extends RawOp { private Output roiProbabilities; - private GenerateBoundingBoxProposals(Operation operation) { - super(operation); + public GenerateBoundingBoxProposals(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; rois = operation.output(outputIdx++); roiProbabilities = operation.output(outputIdx++); @@ -154,6 +160,9 @@ public Options postNmsTopn(Long postNmsTopn) { } } + @OpInputsMetadata( + outputsClass = GenerateBoundingBoxProposals.class + ) public static class Inputs extends RawOpInputs { /** * A 4-D float tensor of shape {@code [num_images, height, width, num_achors]} containing scores of the boxes for given anchors, can be unsorted. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/HsvToRgb.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/HsvToRgb.java index 8bbb40efb55..5d2dcea1595 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/HsvToRgb.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/HsvToRgb.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -40,6 +42,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = HsvToRgb.OP_NAME, + inputsClass = HsvToRgb.Inputs.class +) @Operator( group = "image" ) @@ -51,8 +57,8 @@ public final class HsvToRgb extends RawOp implements Operand< private Output output; - private HsvToRgb(Operation operation) { - super(operation); + public HsvToRgb(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = HsvToRgb.class + ) public static class Inputs extends RawOpInputs> { /** * 1-D or higher rank. HSV data to convert. Last dimension must be size 3. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV2.java index 9312eaed59f..99f87eb44dd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV2.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -42,6 +44,10 @@ * * @param data type for {@code transformed_images} output */ +@OpMetadata( + opType = ImageProjectiveTransformV2.OP_NAME, + inputsClass = ImageProjectiveTransformV2.Inputs.class +) public final class ImageProjectiveTransformV2 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class ImageProjectiveTransformV2 extends RawOp i private Output transformedImages; - private ImageProjectiveTransformV2(Operation operation) { - super(operation); + public ImageProjectiveTransformV2(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; transformedImages = operation.output(outputIdx++); } @@ -137,6 +143,9 @@ public Options fillMode(String fillMode) { } } + @OpInputsMetadata( + outputsClass = ImageProjectiveTransformV2.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D with shape {@code [batch, height, width, channels]}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV3.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV3.java index 229c0eaf656..0704a533e4d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV3.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV3.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -42,6 +44,10 @@ * * @param data type for {@code transformed_images} output */ +@OpMetadata( + opType = ImageProjectiveTransformV3.OP_NAME, + inputsClass = ImageProjectiveTransformV3.Inputs.class +) public final class ImageProjectiveTransformV3 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class ImageProjectiveTransformV3 extends RawOp i private Output transformedImages; - private ImageProjectiveTransformV3(Operation operation) { - super(operation); + public ImageProjectiveTransformV3(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; transformedImages = operation.output(outputIdx++); } @@ -139,6 +145,9 @@ public Options fillMode(String fillMode) { } } + @OpInputsMetadata( + outputsClass = ImageProjectiveTransformV3.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D with shape {@code [batch, height, width, channels]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NearestNeighbors.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NearestNeighbors.java index d8b061bc045..8c6cf3b47fc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NearestNeighbors.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NearestNeighbors.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; @@ -36,6 +38,10 @@ * the list of candidate centers. For each point, the k centers that have least L2 * distance to it are computed. */ +@OpMetadata( + opType = NearestNeighbors.OP_NAME, + inputsClass = NearestNeighbors.Inputs.class +) public final class NearestNeighbors extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class NearestNeighbors extends RawOp { private Output nearestCenterDistances; - private NearestNeighbors(Operation operation) { - super(operation); + public NearestNeighbors(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; nearestCenterIndices = operation.output(outputIdx++); nearestCenterDistances = operation.output(outputIdx++); @@ -95,6 +101,9 @@ public Output nearestCenterDistances() { return nearestCenterDistances; } + @OpInputsMetadata( + outputsClass = NearestNeighbors.class + ) public static class Inputs extends RawOpInputs { /** * Matrix of shape (n, d). 
Rows are assumed to be input points. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NonMaxSuppression.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NonMaxSuppression.java index 910c2048d7f..704e99ea7df 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NonMaxSuppression.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NonMaxSuppression.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -59,6 +61,10 @@ * * @param data type for {@code selected_scores} output */ +@OpMetadata( + opType = NonMaxSuppression.OP_NAME, + inputsClass = NonMaxSuppression.Inputs.class +) @Operator( group = "image" ) @@ -74,8 +80,8 @@ public final class NonMaxSuppression extends RawOp { private Output validOutputs; - private NonMaxSuppression(Operation operation) { - super(operation); + public NonMaxSuppression(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; selectedIndices = operation.output(outputIdx++); selectedScores = operation.output(outputIdx++); @@ -190,6 +196,9 @@ public Options padToMaxOutputSize(Boolean padToMaxOutputSize) { } } + @OpInputsMetadata( + outputsClass = NonMaxSuppression.class + ) public static class Inputs extends RawOpInputs> { /** * A 2-D float tensor of shape {@code [num_boxes, 4]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NonMaxSuppressionWithOverlaps.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NonMaxSuppressionWithOverlaps.java index f12b180716f..9325d24b151 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NonMaxSuppressionWithOverlaps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NonMaxSuppressionWithOverlaps.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -46,6 +48,10 @@ * overlaps, scores, max_output_size, overlap_threshold, score_threshold) * selected_boxes = tf.gather(boxes, selected_indices) */ +@OpMetadata( + opType = NonMaxSuppressionWithOverlaps.OP_NAME, + inputsClass = NonMaxSuppressionWithOverlaps.Inputs.class +) @Operator( group = "image" ) @@ -57,8 +63,8 @@ public final class NonMaxSuppressionWithOverlaps extends RawOp implements Operan private Output selectedIndices; - private NonMaxSuppressionWithOverlaps(Operation operation) { - super(operation); + public NonMaxSuppressionWithOverlaps(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; selectedIndices = operation.output(outputIdx++); } @@ -109,6 +115,9 @@ public Output asOutput() { return selectedIndices; } + @OpInputsMetadata( + outputsClass = NonMaxSuppressionWithOverlaps.class + ) public static class Inputs extends RawOpInputs { /** * A 2-D float tensor of shape {@code [num_boxes, num_boxes]} representing diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/QuantizedResizeBilinear.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/QuantizedResizeBilinear.java index ef8936425db..5d0d7ba5c59 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/QuantizedResizeBilinear.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/QuantizedResizeBilinear.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -39,6 +41,10 @@ * * @param data type for {@code resized_images} output */ +@OpMetadata( + opType = QuantizedResizeBilinear.OP_NAME, + inputsClass = QuantizedResizeBilinear.Inputs.class +) @Operator( group = "image" ) @@ -54,8 +60,8 @@ public final class QuantizedResizeBilinear extends RawOp { private Output outMax; - private QuantizedResizeBilinear(Operation operation) { - super(operation); + public QuantizedResizeBilinear(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; resizedImages = operation.output(outputIdx++); outMin = operation.output(outputIdx++); @@ -183,6 +189,9 @@ public Options halfPixelCenters(Boolean halfPixelCenters) { } } + @OpInputsMetadata( + outputsClass = QuantizedResizeBilinear.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D with shape {@code [batch, height, width, channels]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RandomCrop.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RandomCrop.java index 580d7a893a1..1fb7066fe16 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RandomCrop.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RandomCrop.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -42,6 +44,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RandomCrop.OP_NAME, + inputsClass = RandomCrop.Inputs.class +) @Operator( group = "image" ) @@ -53,8 +59,8 @@ public final class RandomCrop extends RawOp implements Operan private Output output; - private RandomCrop(Operation operation) { - super(operation); + public RandomCrop(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -162,6 +168,9 @@ public Options seed2(Long seed2) { } } + @OpInputsMetadata( + outputsClass = RandomCrop.class + ) public static class Inputs extends RawOpInputs> { /** * 3-D of shape {@code [height, width, channels]}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeArea.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeArea.java index fde981548d8..15f34355739 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeArea.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeArea.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -45,6 +47,10 @@ * input pixel's contribution to the average is weighted by the fraction of its * area that intersects the footprint. This is the same as OpenCV's INTER_AREA. */ +@OpMetadata( + opType = ResizeArea.OP_NAME, + inputsClass = ResizeArea.Inputs.class +) @Operator( group = "image" ) @@ -56,8 +62,8 @@ public final class ResizeArea extends RawOp implements Operand { private Output resizedImages; - private ResizeArea(Operation operation) { - super(operation); + public ResizeArea(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; resizedImages = operation.output(outputIdx++); } @@ -138,6 +144,9 @@ public Options alignCorners(Boolean alignCorners) { } } + @OpInputsMetadata( + outputsClass = ResizeArea.class + ) public static class Inputs extends RawOpInputs { /** * 4-D with shape {@code [batch, height, width, channels]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBicubic.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBicubic.java index df9a6dd7977..3a4668dee5b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBicubic.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBicubic.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -37,6 +39,10 @@ * Resize {@code images} to {@code size} using bicubic interpolation. * Input images can be of different types but output images are always float. */ +@OpMetadata( + opType = ResizeBicubic.OP_NAME, + inputsClass = ResizeBicubic.Inputs.class +) @Operator( group = "image" ) @@ -48,8 +54,8 @@ public final class ResizeBicubic extends RawOp implements Operand { private Output resizedImages; - private ResizeBicubic(Operation operation) { - super(operation); + public ResizeBicubic(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; resizedImages = operation.output(outputIdx++); } @@ -156,6 +162,9 @@ public Options halfPixelCenters(Boolean halfPixelCenters) { } } + @OpInputsMetadata( + outputsClass = ResizeBicubic.class + ) public static class Inputs extends RawOpInputs { /** * 4-D with shape {@code [batch, height, width, channels]}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBicubicGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBicubicGrad.java index 1e9aabf70d3..efa4aa46714 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBicubicGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBicubicGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ResizeBicubicGrad.OP_NAME, + inputsClass = ResizeBicubicGrad.Inputs.class +) public final class ResizeBicubicGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class ResizeBicubicGrad extends RawOp implements private Output output; - private ResizeBicubicGrad(Operation operation) { - super(operation); + public ResizeBicubicGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -154,6 +160,9 @@ public Options halfPixelCenters(Boolean halfPixelCenters) { } } + @OpInputsMetadata( + outputsClass = ResizeBicubicGrad.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D with shape {@code [batch, height, width, channels]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBilinear.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBilinear.java index d42617f0aaa..761bb72ae82 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBilinear.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBilinear.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -37,6 +39,10 @@ * Resize {@code images} to {@code size} using bilinear interpolation. * Input images can be of different types but output images are always float. */ +@OpMetadata( + opType = ResizeBilinear.OP_NAME, + inputsClass = ResizeBilinear.Inputs.class +) @Operator( group = "image" ) @@ -48,8 +54,8 @@ public final class ResizeBilinear extends RawOp implements Operand { private Output resizedImages; - private ResizeBilinear(Operation operation) { - super(operation); + public ResizeBilinear(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; resizedImages = operation.output(outputIdx++); } @@ -156,6 +162,9 @@ public Options halfPixelCenters(Boolean halfPixelCenters) { } } + @OpInputsMetadata( + outputsClass = ResizeBilinear.class + ) public static class Inputs extends RawOpInputs { /** * 4-D with shape {@code [batch, height, width, channels]}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBilinearGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBilinearGrad.java index 25312aaccc1..c49e71c5998 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBilinearGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBilinearGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ResizeBilinearGrad.OP_NAME, + inputsClass = ResizeBilinearGrad.Inputs.class +) public final class ResizeBilinearGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class ResizeBilinearGrad extends RawOp implement private Output output; - private ResizeBilinearGrad(Operation operation) { - super(operation); + public ResizeBilinearGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -154,6 +160,9 @@ public Options halfPixelCenters(Boolean halfPixelCenters) { } } + @OpInputsMetadata( + outputsClass = ResizeBilinearGrad.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D with shape {@code [batch, height, width, channels]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighbor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighbor.java index cc81189c0d6..0966a28bf36 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighbor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighbor.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -37,6 +39,10 @@ * * @param data type for {@code resized_images} output */ +@OpMetadata( + opType = ResizeNearestNeighbor.OP_NAME, + inputsClass = ResizeNearestNeighbor.Inputs.class +) @Operator( group = "image" ) @@ -48,8 +54,8 @@ public final class ResizeNearestNeighbor extends RawOp implem private Output resizedImages; - private ResizeNearestNeighbor(Operation operation) { - super(operation); + public ResizeNearestNeighbor(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; resizedImages = operation.output(outputIdx++); } @@ -157,6 +163,9 @@ public Options halfPixelCenters(Boolean halfPixelCenters) { } } + @OpInputsMetadata( + outputsClass = ResizeNearestNeighbor.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D with shape {@code [batch, height, width, channels]}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighborGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighborGrad.java index 0aafc8d0f5a..71d8dd28257 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighborGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighborGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ResizeNearestNeighborGrad.OP_NAME, + inputsClass = ResizeNearestNeighborGrad.Inputs.class +) public final class ResizeNearestNeighborGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class ResizeNearestNeighborGrad extends RawOp im private Output output; - private ResizeNearestNeighborGrad(Operation operation) { - super(operation); + public ResizeNearestNeighborGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -153,6 +159,9 @@ public Options halfPixelCenters(Boolean halfPixelCenters) { } } + @OpInputsMetadata( + outputsClass = ResizeNearestNeighborGrad.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D with shape {@code [batch, height, width, channels]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RgbToHsv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RgbToHsv.java index f06112cddb1..af5fe0fcbe4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RgbToHsv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RgbToHsv.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -57,6 +59,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RgbToHsv.OP_NAME, + inputsClass = RgbToHsv.Inputs.class +) @Operator( group = "image" ) @@ -68,8 +74,8 @@ public final class RgbToHsv extends RawOp implements Operand< private Output output; - private RgbToHsv(Operation operation) { - super(operation); + public RgbToHsv(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -105,6 +111,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = RgbToHsv.class + ) public static class Inputs extends RawOpInputs> { /** * 1-D or higher rank. RGB data to convert. Last dimension must be size 3. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/SampleDistortedBoundingBox.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/SampleDistortedBoundingBox.java index bee6684766e..0d85ae63003 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/SampleDistortedBoundingBox.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/SampleDistortedBoundingBox.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -71,6 +73,10 @@ * * @param data type for {@code begin} output */ +@OpMetadata( + opType = SampleDistortedBoundingBox.OP_NAME, + inputsClass = SampleDistortedBoundingBox.Inputs.class +) @Operator( group = "image" ) @@ -86,8 +92,8 @@ public final class SampleDistortedBoundingBox extends RawOp { private Output bboxes; - private SampleDistortedBoundingBox(Operation operation) { - super(operation); + public SampleDistortedBoundingBox(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; begin = operation.output(outputIdx++); sizeOutput = operation.output(outputIdx++); @@ -390,6 +396,9 @@ public Options useImageIfNoBoundingBoxes(Boolean useImageIfNoBoundingBoxes) { } } + @OpInputsMetadata( + outputsClass = SampleDistortedBoundingBox.class + ) public static class Inputs extends RawOpInputs> { /** * 1-D, containing {@code [height, width, channels]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ScaleAndTranslate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ScaleAndTranslate.java index 5fd3d8f194c..3c1fe9bacf2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ScaleAndTranslate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ScaleAndTranslate.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -36,6 +38,10 @@ /** * The ScaleAndTranslate operation */ +@OpMetadata( + opType = ScaleAndTranslate.OP_NAME, + inputsClass = ScaleAndTranslate.Inputs.class +) @Operator( group = "image" ) @@ -47,8 +53,8 @@ public final class ScaleAndTranslate extends RawOp implements Operand private Output resizedImages; - private ScaleAndTranslate(Operation operation) { - super(operation); + public ScaleAndTranslate(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; resizedImages = operation.output(outputIdx++); } @@ -156,6 +162,9 @@ public Options antialias(Boolean antialias) { } } + @OpInputsMetadata( + outputsClass = ScaleAndTranslate.class + ) public static class Inputs extends RawOpInputs { /** * The images input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ScaleAndTranslateGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ScaleAndTranslateGrad.java index ad62f12fabe..b817b6640b5 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ScaleAndTranslateGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ScaleAndTranslateGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ScaleAndTranslateGrad.OP_NAME, + inputsClass = ScaleAndTranslateGrad.Inputs.class +) public final class ScaleAndTranslateGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class ScaleAndTranslateGrad extends RawOp implem private Output output; - private ScaleAndTranslateGrad(Operation operation) { - super(operation); + public ScaleAndTranslateGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -154,6 +160,9 @@ public Options antialias(Boolean antialias) { } } + @OpInputsMetadata( + outputsClass = ScaleAndTranslateGrad.class + ) public static class Inputs extends RawOpInputs> { /** * The grads input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/StatelessSampleDistortedBoundingBox.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/StatelessSampleDistortedBoundingBox.java index 9bdb6969f7d..d2a89901edc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/StatelessSampleDistortedBoundingBox.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/StatelessSampleDistortedBoundingBox.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -96,6 +98,10 @@ * * @param data type for {@code begin} output */ +@OpMetadata( + opType = StatelessSampleDistortedBoundingBox.OP_NAME, + inputsClass = StatelessSampleDistortedBoundingBox.Inputs.class +) @Operator( group = "image" ) @@ -111,8 +117,8 @@ public final class StatelessSampleDistortedBoundingBox extend private Output bboxes; - private StatelessSampleDistortedBoundingBox(Operation operation) { - super(operation); + public StatelessSampleDistortedBoundingBox(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; begin = operation.output(outputIdx++); sizeOutput = operation.output(outputIdx++); @@ -362,6 +368,9 @@ public Options useImageIfNoBoundingBoxes(Boolean useImageIfNoBoundingBoxes) { } } + @OpInputsMetadata( + outputsClass = StatelessSampleDistortedBoundingBox.class + ) public static class Inputs extends RawOpInputs> { /** * 1-D, containing {@code [height, width, channels]}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeBase64.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeBase64.java index 4ebc7750b91..473246f9290 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeBase64.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeBase64.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; @@ -35,6 +37,10 @@ * Input may or may not have padding at the end. See EncodeBase64 for padding. * Web-safe means that input must use - and _ instead of + and /. */ +@OpMetadata( + opType = DecodeBase64.OP_NAME, + inputsClass = DecodeBase64.Inputs.class +) @Operator( group = "io" ) @@ -46,8 +52,8 @@ public final class DecodeBase64 extends RawOp implements Operand { private Output output; - private DecodeBase64(Operation operation) { - super(operation); + public DecodeBase64(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = DecodeBase64.class + ) public static class Inputs extends RawOpInputs { /** * Base64 strings to decode. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeCompressed.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeCompressed.java index f732ce500eb..a194b362695 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeCompressed.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeCompressed.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; @@ -38,6 +40,10 @@ * each element containing the decompressed data from the corresponding * element in {@code bytes}. */ +@OpMetadata( + opType = DecodeCompressed.OP_NAME, + inputsClass = DecodeCompressed.Inputs.class +) @Operator( group = "io" ) @@ -49,8 +55,8 @@ public final class DecodeCompressed extends RawOp implements Operand { private Output output; - private DecodeCompressed(Operation operation) { - super(operation); + public DecodeCompressed(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -127,6 +133,9 @@ public Options compressionType(String compressionType) { } } + @OpInputsMetadata( + outputsClass = DecodeCompressed.class + ) public static class Inputs extends RawOpInputs { /** * A Tensor of string which is compressed. 
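Making the constructors public is what allows a typed wrapper to be re-created around an operation that already exists in a graph, rather than only through the Ops factories. A minimal, hypothetical sketch using the DecodeBase64 wrapper patched above:

import org.tensorflow.Graph;
import org.tensorflow.op.Ops;
import org.tensorflow.op.io.DecodeBase64;

public final class DecodeBase64Sketch {
  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);
      // Built through the factory, as before the patch.
      DecodeBase64 decoded = tf.io.decodeBase64(tf.constant("aGVsbG8gd29ybGQ"));

      // New in this patch: the constructor is public, so the same graph operation
      // can be re-wrapped in its typed class, e.g. by code that walks an existing
      // graph (such as the custom gradient support this PR presumably adds it for).
      DecodeBase64 rewrapped = new DecodeBase64(decoded.op());
      System.out.println(rewrapped.asOutput().shape());
    }
  }
}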
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeCsv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeCsv.java index 20073756dc6..3e3db5ec2d5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeCsv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeCsv.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -41,6 +43,10 @@ * (https://tools.ietf.org/html/rfc4180) * Note that we allow leading and trailing spaces with int or float field. */ +@OpMetadata( + opType = DecodeCsv.OP_NAME, + inputsClass = DecodeCsv.Inputs.class +) @Operator( group = "io" ) @@ -53,8 +59,8 @@ public final class DecodeCsv extends RawOp implements Iterable> { private List> output; @SuppressWarnings("unchecked") - private DecodeCsv(Operation operation) { - super(operation); + public DecodeCsv(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputLength = operation.outputListLength("output"); output = Arrays.asList(operation.outputList(outputIdx, outputLength)); @@ -244,6 +250,9 @@ public Options selectCols(Long... selectCols) { } } + @OpInputsMetadata( + outputsClass = DecodeCsv.class + ) public static class Inputs extends RawOpInputs { /** * Each string is a record/row in the csv and all records should have diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeJsonExample.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeJsonExample.java index e27ec07fce8..84e4eb4ad97 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeJsonExample.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeJsonExample.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; @@ -40,6 +42,10 @@ * {@code Example.SerializeToString()}) suitable for conversion to tensors with * {@code tf.io.parse_example}. 
*/ +@OpMetadata( + opType = DecodeJsonExample.OP_NAME, + inputsClass = DecodeJsonExample.Inputs.class +) @Operator( group = "io" ) @@ -51,8 +57,8 @@ public final class DecodeJsonExample extends RawOp implements Operand { private Output binaryExamples; - private DecodeJsonExample(Operation operation) { - super(operation); + public DecodeJsonExample(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; binaryExamples = operation.output(outputIdx++); } @@ -89,6 +95,9 @@ public Output asOutput() { return binaryExamples; } + @OpInputsMetadata( + outputsClass = DecodeJsonExample.class + ) public static class Inputs extends RawOpInputs { /** * Each string is a JSON object serialized according to the JSON diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodePaddedRaw.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodePaddedRaw.java index 693527476d2..35570618220 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodePaddedRaw.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodePaddedRaw.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = DecodePaddedRaw.OP_NAME, + inputsClass = DecodePaddedRaw.Inputs.class +) @Operator( group = "io" ) @@ -50,8 +56,8 @@ public final class DecodePaddedRaw extends RawOp implements O private Output output; - private DecodePaddedRaw(Operation operation) { - super(operation); + public DecodePaddedRaw(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -137,6 +143,9 @@ public Options littleEndian(Boolean littleEndian) { } } + @OpInputsMetadata( + outputsClass = DecodePaddedRaw.class + ) public static class Inputs extends RawOpInputs> { /** * Tensor of string to be decoded. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeRaw.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeRaw.java index 393cd50e94b..56f2c093b0b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeRaw.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeRaw.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = DecodeRaw.OP_NAME, + inputsClass = DecodeRaw.Inputs.class +) @Operator( group = "io" ) @@ -49,8 +55,8 @@ public final class DecodeRaw extends RawOp implements Operand output; - private DecodeRaw(Operation operation) { - super(operation); + public DecodeRaw(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -134,6 +140,9 @@ public Options littleEndian(Boolean littleEndian) { } } + @OpInputsMetadata( + outputsClass = DecodeRaw.class + ) public static class Inputs extends RawOpInputs> { /** * All the elements must have the same length. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DeserializeManySparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DeserializeManySparse.java index 9c900322d81..ae3949d3284 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DeserializeManySparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DeserializeManySparse.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -78,6 +80,10 @@ * * @param data type for {@code sparse_values} output */ +@OpMetadata( + opType = DeserializeManySparse.OP_NAME, + inputsClass = DeserializeManySparse.Inputs.class +) @Operator( group = "io" ) @@ -93,8 +99,8 @@ public final class DeserializeManySparse extends RawOp { private Output sparseShape; - private DeserializeManySparse(Operation operation) { - super(operation); + public DeserializeManySparse(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; sparseIndices = operation.output(outputIdx++); sparseValues = operation.output(outputIdx++); @@ -149,6 +155,9 @@ public Output sparseShape() { return sparseShape; } + @OpInputsMetadata( + outputsClass = DeserializeManySparse.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D, The {@code N} serialized {@code SparseTensor} objects. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/EncodeBase64.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/EncodeBase64.java index 2f632b33937..daddfee308a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/EncodeBase64.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/EncodeBase64.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; @@ -38,6 +40,10 @@ * link above. *

    Web-safe means that the encoder uses - and _ instead of + and /. */ +@OpMetadata( + opType = EncodeBase64.OP_NAME, + inputsClass = EncodeBase64.Inputs.class +) @Operator( group = "io" ) @@ -49,8 +55,8 @@ public final class EncodeBase64 extends RawOp implements Operand { private Output output; - private EncodeBase64(Operation operation) { - super(operation); + public EncodeBase64(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -124,6 +130,9 @@ public Options pad(Boolean pad) { } } + @OpInputsMetadata( + outputsClass = EncodeBase64.class + ) public static class Inputs extends RawOpInputs { /** * Strings to be encoded. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/FifoQueue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/FifoQueue.java index 0a2a8f21957..36cff78458e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/FifoQueue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/FifoQueue.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * A queue that produces elements in first-in first-out order. */ +@OpMetadata( + opType = FifoQueue.OP_NAME, + inputsClass = FifoQueue.Inputs.class +) @Operator( group = "io" ) @@ -49,8 +55,8 @@ public final class FifoQueue extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private FifoQueue(Operation operation) { - super(operation); + public FifoQueue(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -247,6 +253,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = FifoQueue.class + ) public static class Inputs extends RawOpInputs { /** * The type of each component in a value. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/FixedLengthRecordReader.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/FixedLengthRecordReader.java index 105ab72ea3d..ad16397019e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/FixedLengthRecordReader.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/FixedLengthRecordReader.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** * A Reader that outputs fixed-length records from a file. 
*/ +@OpMetadata( + opType = FixedLengthRecordReader.OP_NAME, + inputsClass = FixedLengthRecordReader.Inputs.class +) @Operator( group = "io" ) @@ -45,8 +51,8 @@ public final class FixedLengthRecordReader extends RawOp implements Operand readerHandle; @SuppressWarnings("unchecked") - private FixedLengthRecordReader(Operation operation) { - super(operation); + public FixedLengthRecordReader(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; readerHandle = operation.output(outputIdx++); } @@ -259,6 +265,9 @@ public Options encoding(String encoding) { } } + @OpInputsMetadata( + outputsClass = FixedLengthRecordReader.class + ) public static class Inputs extends RawOpInputs { /** * Number of bytes in the header, defaults to 0. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/IdentityReader.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/IdentityReader.java index a58cc80a838..453a17b7ccf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/IdentityReader.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/IdentityReader.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; @@ -35,6 +37,10 @@ * To use, enqueue strings in a Queue. ReaderRead will take the front * work string and output (work, work). */ +@OpMetadata( + opType = IdentityReader.OP_NAME, + inputsClass = IdentityReader.Inputs.class +) @Operator( group = "io" ) @@ -47,8 +53,8 @@ public final class IdentityReader extends RawOp implements Operand { private Output readerHandle; @SuppressWarnings("unchecked") - private IdentityReader(Operation operation) { - super(operation); + public IdentityReader(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; readerHandle = operation.output(outputIdx++); } @@ -151,6 +157,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = IdentityReader.class + ) public static class Inputs extends RawOpInputs { /** * If non-empty, this reader is placed in the given container. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/LmdbReader.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/LmdbReader.java index f90a320cf2b..9be472413d0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/LmdbReader.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/LmdbReader.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; /** * A Reader that outputs the records from a LMDB file. 
*/ +@OpMetadata( + opType = LmdbReader.OP_NAME, + inputsClass = LmdbReader.Inputs.class +) @Operator( group = "io" ) @@ -44,8 +50,8 @@ public final class LmdbReader extends RawOp implements Operand { private Output readerHandle; - private LmdbReader(Operation operation) { - super(operation); + public LmdbReader(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; readerHandle = operation.output(outputIdx++); } @@ -147,6 +153,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = LmdbReader.class + ) public static class Inputs extends RawOpInputs { /** * If non-empty, this reader is placed in the given container. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/MatchingFiles.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/MatchingFiles.java index 8c522a5ede2..217f5a80629 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/MatchingFiles.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/MatchingFiles.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; @@ -36,6 +38,10 @@ * basename portion of the pattern, not in the directory portion. * Note also that the order of filenames returned is deterministic. */ +@OpMetadata( + opType = MatchingFiles.OP_NAME, + inputsClass = MatchingFiles.Inputs.class +) @Operator( group = "io" ) @@ -47,8 +53,8 @@ public final class MatchingFiles extends RawOp implements Operand { private Output filenames; - private MatchingFiles(Operation operation) { - super(operation); + public MatchingFiles(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; filenames = operation.output(outputIdx++); } @@ -83,6 +89,9 @@ public Output asOutput() { return filenames; } + @OpInputsMetadata( + outputsClass = MatchingFiles.class + ) public static class Inputs extends RawOpInputs { /** * Shell wildcard pattern(s). Scalar or vector of type string. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/PaddingFifoQueue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/PaddingFifoQueue.java index 82846004272..f07f71eb1d3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/PaddingFifoQueue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/PaddingFifoQueue.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -40,6 +42,10 @@ * to 0 in the shape attr. In this case DequeueMany will pad up to the maximum * size of any given element in the minibatch. See below for details. 
*/ +@OpMetadata( + opType = PaddingFifoQueue.OP_NAME, + inputsClass = PaddingFifoQueue.Inputs.class +) @Operator( group = "io" ) @@ -52,8 +58,8 @@ public final class PaddingFifoQueue extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private PaddingFifoQueue(Operation operation) { - super(operation); + public PaddingFifoQueue(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -266,6 +272,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = PaddingFifoQueue.class + ) public static class Inputs extends RawOpInputs { /** * The type of each component in a value. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseExample.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseExample.java index e35ba099a79..6cd578647d6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseExample.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseExample.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -40,6 +42,10 @@ /** * Transforms a vector of tf.Example protos (as strings) into typed tensors. */ +@OpMetadata( + opType = ParseExample.OP_NAME, + inputsClass = ParseExample.Inputs.class +) @Operator( group = "io" ) @@ -62,8 +68,8 @@ public final class ParseExample extends RawOp { private List> raggedRowSplits; @SuppressWarnings("unchecked") - private ParseExample(Operation operation) { - super(operation); + public ParseExample(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int sparseIndicesLength = operation.outputListLength("sparse_indices"); sparseIndices = Arrays.asList((Output[]) operation.outputList(outputIdx, sparseIndicesLength)); @@ -222,6 +228,9 @@ public List> raggedRowSplits() { return raggedRowSplits; } + @OpInputsMetadata( + outputsClass = ParseExample.class + ) public static class Inputs extends RawOpInputs { /** * A scalar or vector containing binary serialized Example protos. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSequenceExample.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSequenceExample.java index 27bd0d243d2..c01ca5f36b1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSequenceExample.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSequenceExample.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -42,6 +44,10 @@ * Transforms a vector of tf.io.SequenceExample protos (as strings) into * typed tensors. 
*/ +@OpMetadata( + opType = ParseSequenceExample.OP_NAME, + inputsClass = ParseSequenceExample.Inputs.class +) @Operator( group = "io" ) @@ -80,8 +86,8 @@ public final class ParseSequenceExample extends RawOp { private List> featureListRaggedInnerSplits; @SuppressWarnings("unchecked") - private ParseSequenceExample(Operation operation) { - super(operation); + public ParseSequenceExample(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int contextSparseIndicesLength = operation.outputListLength("context_sparse_indices"); contextSparseIndices = Arrays.asList((Output[]) operation.outputList(outputIdx, contextSparseIndicesLength)); @@ -560,6 +566,9 @@ public Options featureListDenseShapes(Shape... featureListDenseShapes) { } } + @OpInputsMetadata( + outputsClass = ParseSequenceExample.class + ) public static class Inputs extends RawOpInputs { /** * A scalar or vector containing binary serialized SequenceExample protos. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSingleExample.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSingleExample.java index 2c0b92b6acb..c316dd66768 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSingleExample.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSingleExample.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -39,6 +41,10 @@ /** * Transforms a tf.Example proto (as a string) into typed tensors. */ +@OpMetadata( + opType = ParseSingleExample.OP_NAME, + inputsClass = ParseSingleExample.Inputs.class +) @Operator( group = "io" ) @@ -57,8 +63,8 @@ public final class ParseSingleExample extends RawOp { private List> denseValues; @SuppressWarnings("unchecked") - private ParseSingleExample(Operation operation) { - super(operation); + public ParseSingleExample(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int sparseIndicesLength = operation.outputListLength("sparse_indices"); sparseIndices = Arrays.asList((Output[]) operation.outputList(outputIdx, sparseIndicesLength)); @@ -175,6 +181,9 @@ public List> denseValues() { return denseValues; } + @OpInputsMetadata( + outputsClass = ParseSingleExample.class + ) public static class Inputs extends RawOpInputs { /** * A vector containing a batch of binary serialized Example protos. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSingleSequenceExample.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSingleSequenceExample.java index c5d98799d5a..884bf9f6b49 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSingleSequenceExample.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseSingleSequenceExample.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -39,6 +41,10 @@ /** * Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors. */ +@OpMetadata( + opType = ParseSingleSequenceExample.OP_NAME, + inputsClass = ParseSingleSequenceExample.Inputs.class +) @Operator( group = "io" ) @@ -65,8 +71,8 @@ public final class ParseSingleSequenceExample extends RawOp { private List> featureListDenseValues; @SuppressWarnings("unchecked") - private ParseSingleSequenceExample(Operation operation) { - super(operation); + public ParseSingleSequenceExample(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int contextSparseIndicesLength = operation.outputListLength("context_sparse_indices"); contextSparseIndices = Arrays.asList((Output[]) operation.outputList(outputIdx, contextSparseIndicesLength)); @@ -488,6 +494,9 @@ public Options featureListDenseShapes(Shape... featureListDenseShapes) { } } + @OpInputsMetadata( + outputsClass = ParseSingleSequenceExample.class + ) public static class Inputs extends RawOpInputs { /** * A scalar containing a binary serialized SequenceExample proto. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseTensor.java index cf71f17624d..e9819f92af6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseTensor.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ParseTensor.OP_NAME, + inputsClass = ParseTensor.Inputs.class +) @Operator( group = "io" ) @@ -49,8 +55,8 @@ public final class ParseTensor extends RawOp implements Operand private Output output; - private ParseTensor(Operation operation) { - super(operation); + public ParseTensor(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = ParseTensor.class + ) public static class Inputs extends RawOpInputs> { /** * A scalar string containing a serialized TensorProto proto. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/PriorityQueue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/PriorityQueue.java index 3908003de08..a486a2c9983 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/PriorityQueue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/PriorityQueue.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +44,10 @@ * and DequeueMany) on a PriorityQueue will all require (resp. output) one extra * entry in their input (resp. output) lists. */ +@OpMetadata( + opType = PriorityQueue.OP_NAME, + inputsClass = PriorityQueue.Inputs.class +) @Operator( group = "io" ) @@ -54,8 +60,8 @@ public final class PriorityQueue extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private PriorityQueue(Operation operation) { - super(operation); + public PriorityQueue(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -198,6 +204,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = PriorityQueue.class + ) public static class Inputs extends RawOpInputs { /** * The type of each component in a value. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueClose.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueClose.java index bc2bfa3fff4..6ad758b2def 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueClose.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueClose.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * sufficient elements remain in the queue. Subsequent Dequeue(Many) * operations that would block will fail immediately. */ +@OpMetadata( + opType = QueueClose.OP_NAME, + inputsClass = QueueClose.Inputs.class +) @Operator( group = "io" ) @@ -46,8 +52,8 @@ public final class QueueClose extends RawOp { */ public static final String OP_NAME = "QueueCloseV2"; - private QueueClose(Operation operation) { - super(operation); + public QueueClose(Operation operation) { + super(operation, OP_NAME); } /** @@ -108,6 +114,9 @@ public Options cancelPendingEnqueues(Boolean cancelPendingEnqueues) { } } + @OpInputsMetadata( + outputsClass = QueueClose.class + ) public static class Inputs extends RawOpInputs { /** * The handle to a queue. 
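As with the other io ops above, ParseTensor keeps its factory-based usage; only its metadata and constructor change. A minimal round-trip sketch, assuming a tf.io.serializeTensor factory from the same generated io group (it does not appear in these hunks):

import org.tensorflow.Graph;
import org.tensorflow.op.Ops;
import org.tensorflow.op.io.ParseTensor;
import org.tensorflow.types.TFloat32;

public final class ParseTensorSketch {
  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);
      // Serialize a small float tensor to its TensorProto form, then parse it back.
      ParseTensor<TFloat32> parsed = tf.io.parseTensor(
          tf.io.serializeTensor(tf.constant(new float[] {1f, 2f, 3f})),
          TFloat32.class);
      // parsed.asOutput() has the dtype requested by the outType argument.
    }
  }
}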
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueDequeue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueDequeue.java index 469de83d736..92f3d42bea7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueDequeue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueDequeue.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +44,10 @@ *

    N.B. If the queue is empty, this operation will block until an element * has been dequeued (or 'timeout_ms' elapses, if specified). */ +@OpMetadata( + opType = QueueDequeue.OP_NAME, + inputsClass = QueueDequeue.Inputs.class +) @Operator( group = "io" ) @@ -54,8 +60,8 @@ public final class QueueDequeue extends RawOp implements Iterable private List> components; @SuppressWarnings("unchecked") - private QueueDequeue(Operation operation) { - super(operation); + public QueueDequeue(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int componentsLength = operation.outputListLength("components"); components = Arrays.asList(operation.outputList(outputIdx, componentsLength)); @@ -139,6 +145,9 @@ public Options timeoutMs(Long timeoutMs) { } } + @OpInputsMetadata( + outputsClass = QueueDequeue.class + ) public static class Inputs extends RawOpInputs { /** * The handle to a queue. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueDequeueMany.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueDequeueMany.java index ed61ce786c2..658875387f2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueDequeueMany.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueDequeueMany.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -48,6 +50,10 @@ *

    N.B. If the queue is empty, this operation will block until {@code n} elements * have been dequeued (or 'timeout_ms' elapses, if specified). */ +@OpMetadata( + opType = QueueDequeueMany.OP_NAME, + inputsClass = QueueDequeueMany.Inputs.class +) @Operator( group = "io" ) @@ -60,8 +66,8 @@ public final class QueueDequeueMany extends RawOp implements Iterable> components; @SuppressWarnings("unchecked") - private QueueDequeueMany(Operation operation) { - super(operation); + public QueueDequeueMany(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int componentsLength = operation.outputListLength("components"); components = Arrays.asList(operation.outputList(outputIdx, componentsLength)); @@ -147,6 +153,9 @@ public Options timeoutMs(Long timeoutMs) { } } + @OpInputsMetadata( + outputsClass = QueueDequeueMany.class + ) public static class Inputs extends RawOpInputs { /** * The handle to a queue. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueDequeueUpTo.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueDequeueUpTo.java index 91bec450c7b..8073d83c6f8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueDequeueUpTo.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueDequeueUpTo.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -52,6 +54,10 @@ * the tuples stored in the given queue, and output {@code i} is the ith * component of the dequeued tuple. */ +@OpMetadata( + opType = QueueDequeueUpTo.OP_NAME, + inputsClass = QueueDequeueUpTo.Inputs.class +) @Operator( group = "io" ) @@ -64,8 +70,8 @@ public final class QueueDequeueUpTo extends RawOp implements Iterable> components; @SuppressWarnings("unchecked") - private QueueDequeueUpTo(Operation operation) { - super(operation); + public QueueDequeueUpTo(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int componentsLength = operation.outputListLength("components"); components = Arrays.asList(operation.outputList(outputIdx, componentsLength)); @@ -151,6 +157,9 @@ public Options timeoutMs(Long timeoutMs) { } } + @OpInputsMetadata( + outputsClass = QueueDequeueUpTo.class + ) public static class Inputs extends RawOpInputs { /** * The handle to a queue. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueEnqueue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueEnqueue.java index 1872c43e0a9..e712a9db04c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueEnqueue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueEnqueue.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ *

    N.B. If the queue is full, this operation will block until the given * element has been enqueued (or 'timeout_ms' elapses, if specified). */ +@OpMetadata( + opType = QueueEnqueue.OP_NAME, + inputsClass = QueueEnqueue.Inputs.class +) @Operator( group = "io" ) @@ -47,8 +53,8 @@ public final class QueueEnqueue extends RawOp { */ public static final String OP_NAME = "QueueEnqueueV2"; - private QueueEnqueue(Operation operation) { - super(operation); + public QueueEnqueue(Operation operation) { + super(operation, OP_NAME); } /** @@ -113,6 +119,9 @@ public Options timeoutMs(Long timeoutMs) { } } + @OpInputsMetadata( + outputsClass = QueueEnqueue.class + ) public static class Inputs extends RawOpInputs { /** * The handle to a queue. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueEnqueueMany.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueEnqueueMany.java index d0c17369fca..c93c37ca288 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueEnqueueMany.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueEnqueueMany.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -41,6 +43,10 @@ *

    N.B. If the queue is full, this operation will block until the given * elements have been enqueued (or 'timeout_ms' elapses, if specified). */ +@OpMetadata( + opType = QueueEnqueueMany.OP_NAME, + inputsClass = QueueEnqueueMany.Inputs.class +) @Operator( group = "io" ) @@ -50,8 +56,8 @@ public final class QueueEnqueueMany extends RawOp { */ public static final String OP_NAME = "QueueEnqueueManyV2"; - private QueueEnqueueMany(Operation operation) { - super(operation); + public QueueEnqueueMany(Operation operation) { + super(operation, OP_NAME); } /** @@ -117,6 +123,9 @@ public Options timeoutMs(Long timeoutMs) { } } + @OpInputsMetadata( + outputsClass = QueueEnqueueMany.class + ) public static class Inputs extends RawOpInputs { /** * The handle to a queue. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueIsClosed.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueIsClosed.java index 29fdf801889..422e3a0cea1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueIsClosed.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueIsClosed.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TBool; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * This operation returns true if the queue is closed and false if the queue * is open. */ +@OpMetadata( + opType = QueueIsClosed.OP_NAME, + inputsClass = QueueIsClosed.Inputs.class +) @Operator( group = "io" ) @@ -47,8 +53,8 @@ public final class QueueIsClosed extends RawOp implements Operand { private Output isClosed; - private QueueIsClosed(Operation operation) { - super(operation); + public QueueIsClosed(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; isClosed = operation.output(outputIdx++); } @@ -83,6 +89,9 @@ public Output asOutput() { return isClosed; } + @OpInputsMetadata( + outputsClass = QueueIsClosed.class + ) public static class Inputs extends RawOpInputs { /** * The handle to a queue. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueSize.java index 2505096d056..b668f011af8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/QueueSize.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -34,6 +36,10 @@ /** * Computes the number of elements in the given queue. 
*/ +@OpMetadata( + opType = QueueSize.OP_NAME, + inputsClass = QueueSize.Inputs.class +) @Operator( group = "io" ) @@ -45,8 +51,8 @@ public final class QueueSize extends RawOp implements Operand { private Output output; - private QueueSize(Operation operation) { - super(operation); + public QueueSize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -81,6 +87,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = QueueSize.class + ) public static class Inputs extends RawOpInputs { /** * The handle to a queue. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/RandomShuffleQueue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/RandomShuffleQueue.java index c0f90c4cd81..ca4714bb4f2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/RandomShuffleQueue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/RandomShuffleQueue.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ /** * A queue that randomizes the order of elements. */ +@OpMetadata( + opType = RandomShuffleQueue.OP_NAME, + inputsClass = RandomShuffleQueue.Inputs.class +) @Operator( group = "io" ) @@ -49,8 +55,8 @@ public final class RandomShuffleQueue extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private RandomShuffleQueue(Operation operation) { - super(operation); + public RandomShuffleQueue(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -331,6 +337,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = RandomShuffleQueue.class + ) public static class Inputs extends RawOpInputs { /** * The type of each component in a value. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReadFile.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReadFile.java index f7d467d0cb7..ce25b5aedf8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReadFile.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReadFile.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; /** * Reads and outputs the entire contents of the input filename. 
*/ +@OpMetadata( + opType = ReadFile.OP_NAME, + inputsClass = ReadFile.Inputs.class +) @Operator( group = "io" ) @@ -44,8 +50,8 @@ public final class ReadFile extends RawOp implements Operand { private Output contents; - private ReadFile(Operation operation) { - super(operation); + public ReadFile(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; contents = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return contents; } + @OpInputsMetadata( + outputsClass = ReadFile.class + ) public static class Inputs extends RawOpInputs { /** * The filename input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderNumRecordsProduced.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderNumRecordsProduced.java index 7f9c067dc03..40971f29991 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderNumRecordsProduced.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderNumRecordsProduced.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * This is the same as the number of ReaderRead executions that have * succeeded. */ +@OpMetadata( + opType = ReaderNumRecordsProduced.OP_NAME, + inputsClass = ReaderNumRecordsProduced.Inputs.class +) @Operator( group = "io" ) @@ -47,8 +53,8 @@ public final class ReaderNumRecordsProduced extends RawOp implements Operand recordsProduced; - private ReaderNumRecordsProduced(Operation operation) { - super(operation); + public ReaderNumRecordsProduced(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; recordsProduced = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return recordsProduced; } + @OpInputsMetadata( + outputsClass = ReaderNumRecordsProduced.class + ) public static class Inputs extends RawOpInputs { /** * Handle to a Reader. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderNumWorkUnitsCompleted.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderNumWorkUnitsCompleted.java index 0271bde551e..fe23ec6b424 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderNumWorkUnitsCompleted.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderNumWorkUnitsCompleted.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -34,6 +36,10 @@ /** * Returns the number of work units this Reader has finished processing. 
*/ +@OpMetadata( + opType = ReaderNumWorkUnitsCompleted.OP_NAME, + inputsClass = ReaderNumWorkUnitsCompleted.Inputs.class +) @Operator( group = "io" ) @@ -45,8 +51,8 @@ public final class ReaderNumWorkUnitsCompleted extends RawOp implements Operand< private Output unitsCompleted; - private ReaderNumWorkUnitsCompleted(Operation operation) { - super(operation); + public ReaderNumWorkUnitsCompleted(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; unitsCompleted = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return unitsCompleted; } + @OpInputsMetadata( + outputsClass = ReaderNumWorkUnitsCompleted.class + ) public static class Inputs extends RawOpInputs { /** * Handle to a Reader. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderRead.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderRead.java index c905794a387..ed71559ff04 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderRead.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderRead.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * Reader needs to start reading from a new file since it has finished * with the previous file). */ +@OpMetadata( + opType = ReaderRead.OP_NAME, + inputsClass = ReaderRead.Inputs.class +) @Operator( group = "io" ) @@ -50,8 +56,8 @@ public final class ReaderRead extends RawOp { private Output value; - private ReaderRead(Operation operation) { - super(operation); + public ReaderRead(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; key = operation.output(outputIdx++); value = operation.output(outputIdx++); @@ -94,6 +100,9 @@ public Output value() { return value; } + @OpInputsMetadata( + outputsClass = ReaderRead.class + ) public static class Inputs extends RawOpInputs { /** * Handle to a Reader. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderReadUpTo.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderReadUpTo.java index 0fa533b15fe..be4b123e682 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderReadUpTo.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderReadUpTo.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -39,6 +41,10 @@ * with the previous file). * It may return less than {@code num_records} even before the last batch. 
*/ +@OpMetadata( + opType = ReaderReadUpTo.OP_NAME, + inputsClass = ReaderReadUpTo.Inputs.class +) @Operator( group = "io" ) @@ -52,8 +58,8 @@ public final class ReaderReadUpTo extends RawOp { private Output values; - private ReaderReadUpTo(Operation operation) { - super(operation); + public ReaderReadUpTo(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; keys = operation.output(outputIdx++); values = operation.output(outputIdx++); @@ -98,6 +104,9 @@ public Output values() { return values; } + @OpInputsMetadata( + outputsClass = ReaderReadUpTo.class + ) public static class Inputs extends RawOpInputs { /** * Handle to a {@code Reader}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderReset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderReset.java index 2523ed24148..29abce1e9bb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderReset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderReset.java @@ -26,12 +26,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** * Restore a Reader to its initial clean state. */ +@OpMetadata( + opType = ReaderReset.OP_NAME, + inputsClass = ReaderReset.Inputs.class +) @Operator( group = "io" ) @@ -41,8 +47,8 @@ public final class ReaderReset extends RawOp { */ public static final String OP_NAME = "ReaderResetV2"; - private ReaderReset(Operation operation) { - super(operation); + public ReaderReset(Operation operation) { + super(operation, OP_NAME); } /** @@ -61,6 +67,9 @@ public static ReaderReset create(Scope scope, Operand readerHan return new ReaderReset(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = ReaderReset.class + ) public static class Inputs extends RawOpInputs { /** * Handle to a Reader. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderRestoreState.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderRestoreState.java index 0d9a421b5f9..ec8cd6b1e3b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderRestoreState.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderRestoreState.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -35,6 +37,10 @@ * Not all Readers support being restored, so this can produce an * Unimplemented error. 
*/ +@OpMetadata( + opType = ReaderRestoreState.OP_NAME, + inputsClass = ReaderRestoreState.Inputs.class +) @Operator( group = "io" ) @@ -44,8 +50,8 @@ public final class ReaderRestoreState extends RawOp { */ public static final String OP_NAME = "ReaderRestoreStateV2"; - private ReaderRestoreState(Operation operation) { - super(operation); + public ReaderRestoreState(Operation operation) { + super(operation, OP_NAME); } /** @@ -68,6 +74,9 @@ public static ReaderRestoreState create(Scope scope, Operand re return new ReaderRestoreState(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = ReaderRestoreState.class + ) public static class Inputs extends RawOpInputs { /** * Handle to a Reader. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderSerializeState.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderSerializeState.java index e4718641bcd..279c9011b5f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderSerializeState.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ReaderSerializeState.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * Not all Readers support being serialized, so this can produce an * Unimplemented error. */ +@OpMetadata( + opType = ReaderSerializeState.OP_NAME, + inputsClass = ReaderSerializeState.Inputs.class +) @Operator( group = "io" ) @@ -47,8 +53,8 @@ public final class ReaderSerializeState extends RawOp implements Operand state; - private ReaderSerializeState(Operation operation) { - super(operation); + public ReaderSerializeState(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; state = operation.output(outputIdx++); } @@ -83,6 +89,9 @@ public Output asOutput() { return state; } + @OpInputsMetadata( + outputsClass = ReaderSerializeState.class + ) public static class Inputs extends RawOpInputs { /** * Handle to a Reader. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeManySparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeManySparse.java index 482eb5554e4..3931de53007 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeManySparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeManySparse.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -45,6 +47,10 @@ * * @param data type for {@code serialized_sparse} output */ +@OpMetadata( + opType = SerializeManySparse.OP_NAME, + inputsClass = SerializeManySparse.Inputs.class +) @Operator( group = "io" ) @@ -56,8 +62,8 @@ public final class SerializeManySparse extends RawOp implements private Output serializedSparse; - private SerializeManySparse(Operation operation) { - super(operation); + public SerializeManySparse(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; serializedSparse = operation.output(outputIdx++); } @@ -119,6 +125,9 @@ public Output asOutput() { return serializedSparse; } + @OpInputsMetadata( + outputsClass = SerializeManySparse.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D. The {@code indices} of the minibatch {@code SparseTensor}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeSparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeSparse.java index b2adf565e6e..30f48d91bce 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeSparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeSparse.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -39,6 +41,10 @@ * * @param data type for {@code serialized_sparse} output */ +@OpMetadata( + opType = SerializeSparse.OP_NAME, + inputsClass = SerializeSparse.Inputs.class +) @Operator( group = "io" ) @@ -50,8 +56,8 @@ public final class SerializeSparse extends RawOp implements Ope private Output serializedSparse; - private SerializeSparse(Operation operation) { - super(operation); + public SerializeSparse(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; serializedSparse = operation.output(outputIdx++); } @@ -113,6 +119,9 @@ public Output asOutput() { return serializedSparse; } + @OpInputsMetadata( + outputsClass = SerializeSparse.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D. The {@code indices} of the {@code SparseTensor}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeTensor.java index 3321749b0b1..cb077bc90f5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeTensor.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -35,6 +37,10 @@ /** * Transforms a Tensor into a serialized TensorProto proto. */ +@OpMetadata( + opType = SerializeTensor.OP_NAME, + inputsClass = SerializeTensor.Inputs.class +) @Operator( group = "io" ) @@ -46,8 +52,8 @@ public final class SerializeTensor extends RawOp implements Operand { private Output serialized; - private SerializeTensor(Operation operation) { - super(operation); + public SerializeTensor(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; serialized = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return serialized; } + @OpInputsMetadata( + outputsClass = SerializeTensor.class + ) public static class Inputs extends RawOpInputs { /** * A Tensor of type {@code T}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ShardedFilename.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ShardedFilename.java index 0693814d39c..48c5d5aa871 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ShardedFilename.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ShardedFilename.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; import org.tensorflow.types.TString; @@ -35,6 +37,10 @@ * Generate a sharded filename. The filename is printf formatted as * %s-%05d-of-%05d, basename, shard, num_shards. 
*/ +@OpMetadata( + opType = ShardedFilename.OP_NAME, + inputsClass = ShardedFilename.Inputs.class +) @Operator( group = "io" ) @@ -46,8 +52,8 @@ public final class ShardedFilename extends RawOp implements Operand { private Output filename; - private ShardedFilename(Operation operation) { - super(operation); + public ShardedFilename(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; filename = operation.output(outputIdx++); } @@ -87,6 +93,9 @@ public Output asOutput() { return filename; } + @OpInputsMetadata( + outputsClass = ShardedFilename.class + ) public static class Inputs extends RawOpInputs { /** * The basename input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ShardedFilespec.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ShardedFilespec.java index f7aec950873..da2d98807b3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ShardedFilespec.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ShardedFilespec.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; import org.tensorflow.types.TString; @@ -34,6 +36,10 @@ /** * Generate a glob pattern matching all sharded file names. */ +@OpMetadata( + opType = ShardedFilespec.OP_NAME, + inputsClass = ShardedFilespec.Inputs.class +) @Operator( group = "io" ) @@ -45,8 +51,8 @@ public final class ShardedFilespec extends RawOp implements Operand { private Output filename; - private ShardedFilespec(Operation operation) { - super(operation); + public ShardedFilespec(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; filename = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return filename; } + @OpInputsMetadata( + outputsClass = ShardedFilespec.class + ) public static class Inputs extends RawOpInputs { /** * The basename input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/TextLineReader.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/TextLineReader.java index 417b34c310c..ba98d273703 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/TextLineReader.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/TextLineReader.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** * A Reader that outputs the lines of a file delimited by '\n'. 
*/ +@OpMetadata( + opType = TextLineReader.OP_NAME, + inputsClass = TextLineReader.Inputs.class +) @Operator( group = "io" ) @@ -45,8 +51,8 @@ public final class TextLineReader extends RawOp implements Operand { private Output readerHandle; @SuppressWarnings("unchecked") - private TextLineReader(Operation operation) { - super(operation); + public TextLineReader(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; readerHandle = operation.output(outputIdx++); } @@ -175,6 +181,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = TextLineReader.class + ) public static class Inputs extends RawOpInputs { /** * Number of lines to skip from the beginning of every file. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/TfRecordReader.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/TfRecordReader.java index 7eea770b3f7..3fa20389961 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/TfRecordReader.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/TfRecordReader.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** * A Reader that outputs the records from a TensorFlow Records file. */ +@OpMetadata( + opType = TfRecordReader.OP_NAME, + inputsClass = TfRecordReader.Inputs.class +) @Operator( group = "io" ) @@ -45,8 +51,8 @@ public final class TfRecordReader extends RawOp implements Operand { private Output readerHandle; @SuppressWarnings("unchecked") - private TfRecordReader(Operation operation) { - super(operation); + public TfRecordReader(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; readerHandle = operation.output(outputIdx++); } @@ -175,6 +181,9 @@ public Options compressionType(String compressionType) { } } + @OpInputsMetadata( + outputsClass = TfRecordReader.class + ) public static class Inputs extends RawOpInputs { /** * If non-empty, this reader is placed in the given container. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/WholeFileReader.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/WholeFileReader.java index 919b46c7902..b71d8283cc8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/WholeFileReader.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/WholeFileReader.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; @@ -35,6 +37,10 @@ * To use, enqueue filenames in a Queue. The output of ReaderRead will * be a filename (key) and the contents of that file (value). 
*/ +@OpMetadata( + opType = WholeFileReader.OP_NAME, + inputsClass = WholeFileReader.Inputs.class +) @Operator( group = "io" ) @@ -47,8 +53,8 @@ public final class WholeFileReader extends RawOp implements Operand { private Output readerHandle; @SuppressWarnings("unchecked") - private WholeFileReader(Operation operation) { - super(operation); + public WholeFileReader(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; readerHandle = operation.output(outputIdx++); } @@ -151,6 +157,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = WholeFileReader.class + ) public static class Inputs extends RawOpInputs { /** * If non-empty, this reader is placed in the given container. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/WriteFile.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/WriteFile.java index 7143f27744a..a2d86a01565 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/WriteFile.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/WriteFile.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; @@ -33,6 +35,10 @@ * Writes contents to the file at input filename. Creates file and recursively * creates directory if not existing. */ +@OpMetadata( + opType = WriteFile.OP_NAME, + inputsClass = WriteFile.Inputs.class +) @Operator( group = "io" ) @@ -42,8 +48,8 @@ public final class WriteFile extends RawOp { */ public static final String OP_NAME = "WriteFile"; - private WriteFile(Operation operation) { - super(operation); + public WriteFile(Operation operation) { + super(operation, OP_NAME); } /** @@ -65,6 +71,9 @@ public static WriteFile create(Scope scope, Operand filename, return new WriteFile(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = WriteFile.class + ) public static class Inputs extends RawOpInputs { /** * scalar. The name of the file to which we write the contents. 
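Every hunk above and below applies the same three-part change to a generated op wrapper: an `@OpMetadata` annotation is added to the op class, the `(Operation)` constructor is changed from private to public and now forwards `OP_NAME` to the `RawOp` super constructor, and the nested `Inputs` class gains an `@OpInputsMetadata` annotation pointing back at the op class. The sketch below condenses that shape into a single hypothetical class (`ExampleOp` is not a file touched by this patch); the generated input fields and the `Inputs` constructor are omitted, so it shows the wiring rather than serving as compilable generated code.

```java
import org.tensorflow.Operation;
import org.tensorflow.Output;
import org.tensorflow.op.RawOp;
import org.tensorflow.op.RawOpInputs;
import org.tensorflow.op.annotation.OpInputsMetadata;
import org.tensorflow.op.annotation.OpMetadata;
import org.tensorflow.types.TString;

// Hypothetical op, used only to show the annotation/constructor pattern applied throughout this patch.
@OpMetadata(
    opType = ExampleOp.OP_NAME,           // the registered op type this wrapper corresponds to
    inputsClass = ExampleOp.Inputs.class  // the metadata class describing the op's inputs
)
public final class ExampleOp extends RawOp {

  /** The name of this op, as known by TensorFlow core engine */
  public static final String OP_NAME = "ExampleOp";

  private Output<TString> output;

  // Previously: private ExampleOp(Operation operation) { super(operation); }
  // Now public, and OP_NAME is passed through to the RawOp super constructor.
  public ExampleOp(Operation operation) {
    super(operation, OP_NAME);
    int outputIdx = 0;
    output = operation.output(outputIdx++);
  }

  @OpInputsMetadata(
      outputsClass = ExampleOp.class      // reverse link from the inputs metadata back to the op class
  )
  public static class Inputs extends RawOpInputs<ExampleOp> {
    // Generated input fields and the Inputs(GraphOperation) constructor are omitted from this sketch.
  }
}
```

Mechanically, the only behavioral change per file is the constructor visibility and the extra `OP_NAME` argument; the two annotations are additive metadata linking each op class and its `Inputs` class to one another.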
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandPart.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandPart.java index ac43eb9df3d..c8fa49a410b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandPart.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandPart.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -66,6 +68,10 @@ * * @param data type for {@code band} output */ +@OpMetadata( + opType = BandPart.OP_NAME, + inputsClass = BandPart.Inputs.class +) @Operator( group = "linalg" ) @@ -77,8 +83,8 @@ public final class BandPart extends RawOp implements Operand private Output band; - private BandPart(Operation operation) { - super(operation); + public BandPart(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; band = operation.output(outputIdx++); } @@ -122,6 +128,9 @@ public Output asOutput() { return band; } + @OpInputsMetadata( + outputsClass = BandPart.class + ) public static class Inputs extends RawOpInputs> { /** * Rank {@code k} tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandedTriangularSolve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandedTriangularSolve.java index 64f76d4ee42..17b00d7b7d8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandedTriangularSolve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandedTriangularSolve.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -35,6 +37,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = BandedTriangularSolve.OP_NAME, + inputsClass = BandedTriangularSolve.Inputs.class +) public final class BandedTriangularSolve extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class BandedTriangularSolve extends RawOp implemen private Output output; - private BandedTriangularSolve(Operation operation) { - super(operation); + public BandedTriangularSolve(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -148,6 +154,9 @@ public Options adjoint(Boolean adjoint) { } } + @OpInputsMetadata( + outputsClass = BandedTriangularSolve.class + ) public static class Inputs extends RawOpInputs> { /** * The matrix input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholesky.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholesky.java index d7751ef99fb..6fa5ec46905 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholesky.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholesky.java @@ 
-27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = BatchCholesky.OP_NAME, + inputsClass = BatchCholesky.Inputs.class +) @Operator( group = "linalg" ) @@ -47,8 +53,8 @@ public final class BatchCholesky extends RawOp implements Ope private Output output; - private BatchCholesky(Operation operation) { - super(operation); + public BatchCholesky(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = BatchCholesky.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholeskyGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholeskyGrad.java index a7671e74726..31d533e3165 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholeskyGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholeskyGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = BatchCholeskyGrad.OP_NAME, + inputsClass = BatchCholeskyGrad.Inputs.class +) @Operator( group = "linalg" ) @@ -47,8 +53,8 @@ public final class BatchCholeskyGrad extends RawOp implements private Output output; - private BatchCholeskyGrad(Operation operation) { - super(operation); + public BatchCholeskyGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -87,6 +93,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = BatchCholeskyGrad.class + ) public static class Inputs extends RawOpInputs> { /** * The l input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixBandPart.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixBandPart.java index f8a440b50b8..c8eee9e13c8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixBandPart.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixBandPart.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -37,6 +39,10 @@ * * @param data type for {@code band} output */ +@OpMetadata( + opType = 
BatchMatrixBandPart.OP_NAME, + inputsClass = BatchMatrixBandPart.Inputs.class +) @Operator( group = "linalg" ) @@ -48,8 +54,8 @@ public final class BatchMatrixBandPart extends RawOp implements private Output band; - private BatchMatrixBandPart(Operation operation) { - super(operation); + public BatchMatrixBandPart(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; band = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return band; } + @OpInputsMetadata( + outputsClass = BatchMatrixBandPart.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDeterminant.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDeterminant.java index b6824b065de..db89c57ff0f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDeterminant.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDeterminant.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = BatchMatrixDeterminant.OP_NAME, + inputsClass = BatchMatrixDeterminant.Inputs.class +) @Operator( group = "linalg" ) @@ -47,8 +53,8 @@ public final class BatchMatrixDeterminant extends RawOp impleme private Output output; - private BatchMatrixDeterminant(Operation operation) { - super(operation); + public BatchMatrixDeterminant(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = BatchMatrixDeterminant.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiag.java index 62ebc20bf8e..94fcdef153d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiag.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = BatchMatrixDiag.OP_NAME, + inputsClass = BatchMatrixDiag.Inputs.class +) @Operator( group = "linalg" ) @@ -47,8 +53,8 @@ public final class BatchMatrixDiag extends RawOp implements Ope private Output output; - private BatchMatrixDiag(Operation operation) { - super(operation); + public BatchMatrixDiag(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -84,6 
+90,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = BatchMatrixDiag.class + ) public static class Inputs extends RawOpInputs> { /** * The diagonal input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiagPart.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiagPart.java index c749fed819d..3bc5e4f137d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiagPart.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiagPart.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code diagonal} output */ +@OpMetadata( + opType = BatchMatrixDiagPart.OP_NAME, + inputsClass = BatchMatrixDiagPart.Inputs.class +) @Operator( group = "linalg" ) @@ -47,8 +53,8 @@ public final class BatchMatrixDiagPart extends RawOp implements private Output diagonal; - private BatchMatrixDiagPart(Operation operation) { - super(operation); + public BatchMatrixDiagPart(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; diagonal = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return diagonal; } + @OpInputsMetadata( + outputsClass = BatchMatrixDiagPart.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixInverse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixInverse.java index 5f9145ff005..0310cfa4d24 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixInverse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixInverse.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = BatchMatrixInverse.OP_NAME, + inputsClass = BatchMatrixInverse.Inputs.class +) @Operator( group = "linalg" ) @@ -47,8 +53,8 @@ public final class BatchMatrixInverse extends RawOp implement private Output output; - private BatchMatrixInverse(Operation operation) { - super(operation); + public BatchMatrixInverse(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -124,6 +130,9 @@ public Options adjoint(Boolean adjoint) { } } + @OpInputsMetadata( + outputsClass = BatchMatrixInverse.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSetDiag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSetDiag.java index 
30711550209..98999b3e89c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSetDiag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSetDiag.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = BatchMatrixSetDiag.OP_NAME, + inputsClass = BatchMatrixSetDiag.Inputs.class +) @Operator( group = "linalg" ) @@ -47,8 +53,8 @@ public final class BatchMatrixSetDiag extends RawOp implements private Output output; - private BatchMatrixSetDiag(Operation operation) { - super(operation); + public BatchMatrixSetDiag(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -87,6 +93,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = BatchMatrixSetDiag.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolve.java index 47af37fb73b..034497a1b7c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolve.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = BatchMatrixSolve.OP_NAME, + inputsClass = BatchMatrixSolve.Inputs.class +) @Operator( group = "linalg" ) @@ -47,8 +53,8 @@ public final class BatchMatrixSolve extends RawOp implements private Output output; - private BatchMatrixSolve(Operation operation) { - super(operation); + public BatchMatrixSolve(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -126,6 +132,9 @@ public Options adjoint(Boolean adjoint) { } } + @OpInputsMetadata( + outputsClass = BatchMatrixSolve.class + ) public static class Inputs extends RawOpInputs> { /** * The matrix input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolveLs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolveLs.java index 537dbc51cda..e27017cdfd8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolveLs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolveLs.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import 
org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat64; @@ -37,6 +39,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = BatchMatrixSolveLs.OP_NAME, + inputsClass = BatchMatrixSolveLs.Inputs.class +) @Operator( group = "linalg" ) @@ -48,8 +54,8 @@ public final class BatchMatrixSolveLs extends RawOp implement private Output output; - private BatchMatrixSolveLs(Operation operation) { - super(operation); + public BatchMatrixSolveLs(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -129,6 +135,9 @@ public Options fast(Boolean fast) { } } + @OpInputsMetadata( + outputsClass = BatchMatrixSolveLs.class + ) public static class Inputs extends RawOpInputs> { /** * The matrix input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixTriangularSolve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixTriangularSolve.java index b1a0fad24a9..c67b572576c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixTriangularSolve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixTriangularSolve.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = BatchMatrixTriangularSolve.OP_NAME, + inputsClass = BatchMatrixTriangularSolve.Inputs.class +) @Operator( group = "linalg" ) @@ -47,8 +53,8 @@ public final class BatchMatrixTriangularSolve extends RawOp i private Output output; - private BatchMatrixTriangularSolve(Operation operation) { - super(operation); + public BatchMatrixTriangularSolve(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -152,6 +158,9 @@ public Options adjoint(Boolean adjoint) { } } + @OpInputsMetadata( + outputsClass = BatchMatrixTriangularSolve.class + ) public static class Inputs extends RawOpInputs> { /** * The matrix input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSelfAdjointEig.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSelfAdjointEig.java index 77cc608d34c..6819006c568 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSelfAdjointEig.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSelfAdjointEig.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code e} output */ +@OpMetadata( + opType = BatchSelfAdjointEig.OP_NAME, + inputsClass = BatchSelfAdjointEig.Inputs.class +) @Operator( group = 
"linalg" ) @@ -49,8 +55,8 @@ public final class BatchSelfAdjointEig extends RawOp { private Output v; - private BatchSelfAdjointEig(Operation operation) { - super(operation); + public BatchSelfAdjointEig(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; e = operation.output(outputIdx++); v = operation.output(outputIdx++); @@ -131,6 +137,9 @@ public Options computeV(Boolean computeV) { } } + @OpInputsMetadata( + outputsClass = BatchSelfAdjointEig.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSvd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSvd.java index 0eed0e8668a..86d30c34858 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSvd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSvd.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code s} output */ +@OpMetadata( + opType = BatchSvd.OP_NAME, + inputsClass = BatchSvd.Inputs.class +) @Operator( group = "linalg" ) @@ -51,8 +57,8 @@ public final class BatchSvd extends RawOp { private Output v; - private BatchSvd(Operation operation) { - super(operation); + public BatchSvd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; s = operation.output(outputIdx++); u = operation.output(outputIdx++); @@ -169,6 +175,9 @@ public Options fullMatrices(Boolean fullMatrices) { } } + @OpInputsMetadata( + outputsClass = BatchSvd.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cholesky.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cholesky.java index 994fe407425..b8859edf2c9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cholesky.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cholesky.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -46,6 +48,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Cholesky.OP_NAME, + inputsClass = Cholesky.Inputs.class +) @Operator( group = "linalg" ) @@ -57,8 +63,8 @@ public final class Cholesky extends RawOp implements Operand private Output output; - private Cholesky(Operation operation) { - super(operation); + public Cholesky(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -94,6 +100,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Cholesky.class + ) public static class Inputs extends RawOpInputs> { /** * Shape is {@code [..., M, M]}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/CholeskyGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/CholeskyGrad.java index b52d99e7c91..ba1d4870f95 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/CholeskyGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/CholeskyGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = CholeskyGrad.OP_NAME, + inputsClass = CholeskyGrad.Inputs.class +) @Operator( group = "linalg" ) @@ -49,8 +55,8 @@ public final class CholeskyGrad extends RawOp implements Oper private Output output; - private CholeskyGrad(Operation operation) { - super(operation); + public CholeskyGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -93,6 +99,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = CholeskyGrad.class + ) public static class Inputs extends RawOpInputs> { /** * Output of batch Cholesky algorithm l = cholesky(A). Shape is {@code [..., M, M]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/ConjugateTranspose.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/ConjugateTranspose.java index 087147d3149..d65f3aa102c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/ConjugateTranspose.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/ConjugateTranspose.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -40,6 +42,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = ConjugateTranspose.OP_NAME, + inputsClass = ConjugateTranspose.Inputs.class +) @Operator( group = "linalg" ) @@ -51,8 +57,8 @@ public final class ConjugateTranspose extends RawOp implements private Output y; - private ConjugateTranspose(Operation operation) { - super(operation); + public ConjugateTranspose(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = ConjugateTranspose.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cross.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cross.java index 719266eeede..8c3a50c3080 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cross.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cross.java @@ -27,6 +27,8 @@ import 
org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -39,6 +41,10 @@ * * @param data type for {@code product} output */ +@OpMetadata( + opType = Cross.OP_NAME, + inputsClass = Cross.Inputs.class +) @Operator( group = "linalg" ) @@ -50,8 +56,8 @@ public final class Cross extends RawOp implements Operand private Output product; - private Cross(Operation operation) { - super(operation); + public Cross(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; product = operation.output(outputIdx++); } @@ -89,6 +95,9 @@ public Output asOutput() { return product; } + @OpInputsMetadata( + outputsClass = Cross.class + ) public static class Inputs extends RawOpInputs> { /** * A tensor containing 3-element vectors. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Det.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Det.java index 32aabbb3182..f92c8910ed5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Det.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Det.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Det.OP_NAME, + inputsClass = Det.Inputs.class +) @Operator( group = "linalg" ) @@ -50,8 +56,8 @@ public final class Det extends RawOp implements Operand { private Output output; - private Det(Operation operation) { - super(operation); + public Det(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -87,6 +93,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Det.class + ) public static class Inputs extends RawOpInputs> { /** * Shape is {@code [..., M, M]}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Eig.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Eig.java index ea31072f390..0c9a90d97d2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Eig.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Eig.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -47,6 +49,10 @@ * * @param data type for {@code e} output */ +@OpMetadata( + opType = Eig.OP_NAME, + inputsClass = Eig.Inputs.class +) @Operator( group = "linalg" ) @@ -60,8 +66,8 @@ public final class Eig extends RawOp { private Output v; - private Eig(Operation operation) { - super(operation); + public Eig(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; e = operation.output(outputIdx++); v = operation.output(outputIdx++); @@ -146,6 +152,9 @@ public Options computeV(Boolean computeV) { } } + @OpInputsMetadata( + outputsClass = Eig.class + ) public static class Inputs extends RawOpInputs> { /** * {@code Tensor} input of shape {@code [N, N]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Einsum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Einsum.java index c273cf2f9f0..c628f4d6205 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Einsum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Einsum.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -100,6 +102,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Einsum.OP_NAME, + inputsClass = Einsum.Inputs.class +) @Operator( group = "linalg" ) @@ -111,8 +117,8 @@ public final class Einsum extends RawOp implements Operand { private Output output; - private Einsum(Operation operation) { - super(operation); + public Einsum(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -151,6 +157,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Einsum.class + ) public static class Inputs extends RawOpInputs> { /** * List of 1 or 2 Tensors. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/EuclideanNorm.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/EuclideanNorm.java index a1fdf41273e..ce6dbf71bd9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/EuclideanNorm.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/EuclideanNorm.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -41,6 +43,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = EuclideanNorm.OP_NAME, + inputsClass = EuclideanNorm.Inputs.class +) @Operator( group = "linalg" ) @@ -52,8 +58,8 @@ public final class EuclideanNorm extends RawOp implements Opera private Output output; - private EuclideanNorm(Operation operation) { - super(operation); + public EuclideanNorm(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -132,6 +138,9 @@ public Options keepDims(Boolean keepDims) { } } + @OpInputsMetadata( + outputsClass = EuclideanNorm.class + ) public static class Inputs extends RawOpInputs> { /** * The tensor to reduce. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Inv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Inv.java index 708bf232915..84a267bc666 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Inv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Inv.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -43,6 +45,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Inv.OP_NAME, + inputsClass = Inv.Inputs.class +) @Operator( group = "linalg" ) @@ -54,8 +60,8 @@ public final class Inv extends RawOp implements Operand { private Output output; - private Inv(Operation operation) { - super(operation); + public Inv(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -133,6 +139,9 @@ public Options adjoint(Boolean adjoint) { } } + @OpInputsMetadata( + outputsClass = Inv.class + ) public static class Inputs extends RawOpInputs> { /** * Shape is {@code [..., M, M]}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/LoadAndRemapMatrix.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/LoadAndRemapMatrix.java index 31232e9c92b..6ad04c77b1a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/LoadAndRemapMatrix.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/LoadAndRemapMatrix.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; @@ -64,6 +66,10 @@ * [w(0, 0), w(0, 2), -0.5], * [0.25, -0.25, 42]] */ +@OpMetadata( + opType = LoadAndRemapMatrix.OP_NAME, + inputsClass = LoadAndRemapMatrix.Inputs.class +) @Operator( group = "linalg" ) @@ -75,8 +81,8 @@ public final class LoadAndRemapMatrix extends RawOp implements Operand private Output outputMatrix; - private LoadAndRemapMatrix(Operation operation) { - super(operation); + public LoadAndRemapMatrix(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputMatrix = operation.output(outputIdx++); } @@ -177,6 +183,9 @@ public Options maxRowsInMemory(Long maxRowsInMemory) { } } + @OpInputsMetadata( + outputsClass = LoadAndRemapMatrix.class + ) public static class Inputs extends RawOpInputs { /** * Path to the TensorFlow checkpoint (version 2, {@code TensorBundle}) from diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/LogMatrixDeterminant.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/LogMatrixDeterminant.java index 05456f98e52..c9aafe2e9f8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/LogMatrixDeterminant.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/LogMatrixDeterminant.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -44,6 +46,10 @@ * * @param data type for {@code sign} output */ +@OpMetadata( + opType = LogMatrixDeterminant.OP_NAME, + inputsClass = LogMatrixDeterminant.Inputs.class +) @Operator( group = "linalg" ) @@ -57,8 +63,8 @@ public final class LogMatrixDeterminant extends RawOp { private Output logAbsDeterminant; - private LogMatrixDeterminant(Operation operation) { - super(operation); + public LogMatrixDeterminant(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; sign = operation.output(outputIdx++); logAbsDeterminant = operation.output(outputIdx++); @@ -100,6 +106,9 @@ public Output logAbsDeterminant() { return logAbsDeterminant; } + @OpInputsMetadata( + outputsClass = LogMatrixDeterminant.class + ) public static class Inputs extends RawOpInputs> { /** * Shape is {@code [N, M, M]}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Lu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Lu.java index e61e8627340..392b184c72d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Lu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Lu.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -54,6 +56,10 @@ * * @param data type for {@code p} output */ +@OpMetadata( + opType = Lu.OP_NAME, + inputsClass = Lu.Inputs.class +) @Operator( group = "linalg" ) @@ -67,8 +73,8 @@ public final class Lu extends RawOp { private Output p; - private Lu(Operation operation) { - super(operation); + public Lu(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; lu = operation.output(outputIdx++); p = operation.output(outputIdx++); @@ -139,6 +145,9 @@ public Output p() { return p; } + @OpInputsMetadata( + outputsClass = Lu.class + ) public static class Inputs extends RawOpInputs> { /** * A tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions form matrices of diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatMul.java index 139cbbb831e..e538d714e41 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatMul.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +44,10 @@ * * @param data type for {@code product} output */ +@OpMetadata( + opType = MatMul.OP_NAME, + inputsClass = MatMul.Inputs.class +) @Operator( group = "linalg" ) @@ -53,8 +59,8 @@ public final class MatMul extends RawOp implements Operand { private Output product; - private MatMul(Operation operation) { - super(operation); + public MatMul(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; product = operation.output(outputIdx++); } @@ -158,6 +164,9 @@ public Options transposeB(Boolean transposeB) { } } + @OpInputsMetadata( + outputsClass = MatMul.class + ) public static class Inputs extends RawOpInputs> { /** * The a input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiag.java index 9c6f94274bd..056b3ec173c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiag.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import 
org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -117,6 +119,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = MatrixDiag.OP_NAME, + inputsClass = MatrixDiag.Inputs.class +) @Operator( group = "linalg" ) @@ -128,8 +134,8 @@ public final class MatrixDiag extends RawOp implements Operand< private Output output; - private MatrixDiag(Operation operation) { - super(operation); + public MatrixDiag(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -183,6 +189,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = MatrixDiag.class + ) public static class Inputs extends RawOpInputs> { /** * Rank {@code r}, where {@code r >= 1} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPart.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPart.java index d98d54862e4..8f82271074a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPart.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPart.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -97,6 +99,10 @@ * * @param data type for {@code diagonal} output */ +@OpMetadata( + opType = MatrixDiagPart.OP_NAME, + inputsClass = MatrixDiagPart.Inputs.class +) @Operator( group = "linalg" ) @@ -108,8 +114,8 @@ public final class MatrixDiagPart extends RawOp implements Oper private Output diagonal; - private MatrixDiagPart(Operation operation) { - super(operation); + public MatrixDiagPart(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; diagonal = operation.output(outputIdx++); } @@ -154,6 +160,9 @@ public Output asOutput() { return diagonal; } + @OpInputsMetadata( + outputsClass = MatrixDiagPart.class + ) public static class Inputs extends RawOpInputs> { /** * Rank {@code r} tensor where {@code r >= 2}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPartV3.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPartV3.java index 038c744a651..62cc8403b8e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPartV3.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPartV3.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -127,6 +129,10 @@ * * @param data type for {@code diagonal} output */ +@OpMetadata( + opType = MatrixDiagPartV3.OP_NAME, + inputsClass = MatrixDiagPartV3.Inputs.class +) @Operator( group = "linalg" ) @@ -138,8 +144,8 @@ public final class MatrixDiagPartV3 extends RawOp implements Op private Output diagonal; - private MatrixDiagPartV3(Operation operation) { - super(operation); + public MatrixDiagPartV3(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; diagonal = operation.output(outputIdx++); } @@ -235,6 +241,9 @@ public Options align(String align) { } } + @OpInputsMetadata( + outputsClass = MatrixDiagPartV3.class + ) public static class Inputs extends RawOpInputs> { /** * Rank {@code r} tensor where {@code r >= 2}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagV3.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagV3.java index 814a25b4058..4c1f9c5649a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagV3.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagV3.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -145,6 +147,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = MatrixDiagV3.OP_NAME, + inputsClass = MatrixDiagV3.Inputs.class +) @Operator( group = "linalg" ) @@ -156,8 +162,8 @@ public final class MatrixDiagV3 extends RawOp implements Operan private Output output; - private MatrixDiagV3(Operation operation) { - super(operation); + public MatrixDiagV3(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -262,6 +268,9 @@ public Options align(String align) { } } + @OpInputsMetadata( + outputsClass = MatrixDiagV3.class + ) public static class Inputs extends RawOpInputs> { /** * Rank {@code r}, where {@code r >= 1} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixLogarithm.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixLogarithm.java index d0c74cd771b..19d87ac7b57 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixLogarithm.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixLogarithm.java @@ -27,6 +27,8 @@ import 
org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -46,6 +48,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = MatrixLogarithm.OP_NAME, + inputsClass = MatrixLogarithm.Inputs.class +) public final class MatrixLogarithm extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -54,8 +60,8 @@ public final class MatrixLogarithm extends RawOp implements Ope private Output output; - private MatrixLogarithm(Operation operation) { - super(operation); + public MatrixLogarithm(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -94,6 +100,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = MatrixLogarithm.class + ) public static class Inputs extends RawOpInputs> { /** * Shape is {@code [..., M, M]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSetDiag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSetDiag.java index 89ebedfe50f..726a383702a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSetDiag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSetDiag.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -133,6 +135,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = MatrixSetDiag.OP_NAME, + inputsClass = MatrixSetDiag.Inputs.class +) @Operator( group = "linalg" ) @@ -144,8 +150,8 @@ public final class MatrixSetDiag extends RawOp implements Opera private Output output; - private MatrixSetDiag(Operation operation) { - super(operation); + public MatrixSetDiag(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -241,6 +247,9 @@ public Options align(String align) { } } + @OpInputsMetadata( + outputsClass = MatrixSetDiag.class + ) public static class Inputs extends RawOpInputs> { /** * Rank {@code r+1}, where {@code r >= 1}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSolveLs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSolveLs.java index a1a0e24aea4..c3e0a51ac2a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSolveLs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSolveLs.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat64; @@ -67,6 +69,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = MatrixSolveLs.OP_NAME, + inputsClass = MatrixSolveLs.Inputs.class +) @Operator( group = "linalg" ) @@ -78,8 +84,8 @@ public final class MatrixSolveLs extends RawOp implements Opera private Output output; - private MatrixSolveLs(Operation operation) { - super(operation); + public MatrixSolveLs(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -162,6 +168,9 @@ public Options fast(Boolean fast) { } } + @OpInputsMetadata( + outputsClass = MatrixSolveLs.class + ) public static class Inputs extends RawOpInputs> { /** * Shape is {@code [..., M, N]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Qr.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Qr.java index 618f33cc405..2c86bec5b9f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Qr.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Qr.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -48,6 +50,10 @@ * * @param data type for {@code q} output */ +@OpMetadata( + opType = Qr.OP_NAME, + inputsClass = Qr.Inputs.class +) @Operator( group = "linalg" ) @@ -61,8 +67,8 @@ public final class Qr extends RawOp { private Output r; - private Qr(Operation operation) { - super(operation); + public Qr(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; q = operation.output(outputIdx++); r = operation.output(outputIdx++); @@ -148,6 +154,9 @@ public Options fullMatrices(Boolean fullMatrices) { } } + @OpInputsMetadata( + outputsClass = Qr.class + ) public static class Inputs extends RawOpInputs> { /** * A tensor of shape {@code [..., M, N]} whose inner-most 2 dimensions diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMul.java index 151f366432c..4eb611641db 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMul.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import 
org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -42,6 +44,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = QuantizedMatMul.OP_NAME, + inputsClass = QuantizedMatMul.Inputs.class +) @Operator( group = "linalg" ) @@ -57,8 +63,8 @@ public final class QuantizedMatMul extends RawOp { private Output maxOut; - private QuantizedMatMul(Operation operation) { - super(operation); + public QuantizedMatMul(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); minOut = operation.output(outputIdx++); @@ -193,6 +199,9 @@ public Options transposeB(Boolean transposeB) { } } + @OpInputsMetadata( + outputsClass = QuantizedMatMul.class + ) public static class Inputs extends RawOpInputs> { /** * Must be a two-dimensional tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBias.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBias.java index e55131173ff..ac098c6bbc0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBias.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBias.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -43,6 +45,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = QuantizedMatMulWithBias.OP_NAME, + inputsClass = QuantizedMatMulWithBias.Inputs.class +) public final class QuantizedMatMulWithBias extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -55,8 +61,8 @@ public final class QuantizedMatMulWithBias extends RawOp { private Output maxOut; - private QuantizedMatMulWithBias(Operation operation) { - super(operation); + public QuantizedMatMulWithBias(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); minOut = operation.output(outputIdx++); @@ -216,6 +222,9 @@ public Options inputQuantMode(String inputQuantMode) { } } + @OpInputsMetadata( + outputsClass = QuantizedMatMulWithBias.class + ) public static class Inputs extends RawOpInputs> { /** * A matrix to be multiplied. Must be a two-dimensional tensor of type {@code quint8}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndRelu.java index 89adf2b6063..4643805d6d2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndRelu.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -44,6 +46,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = QuantizedMatMulWithBiasAndRelu.OP_NAME, + inputsClass = QuantizedMatMulWithBiasAndRelu.Inputs.class +) public final class QuantizedMatMulWithBiasAndRelu extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -56,8 +62,8 @@ public final class QuantizedMatMulWithBiasAndRelu extends Raw private Output maxOut; - private QuantizedMatMulWithBiasAndRelu(Operation operation) { - super(operation); + public QuantizedMatMulWithBiasAndRelu(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); minOut = operation.output(outputIdx++); @@ -217,6 +223,9 @@ public Options inputQuantMode(String inputQuantMode) { } } + @OpInputsMetadata( + outputsClass = QuantizedMatMulWithBiasAndRelu.class + ) public static class Inputs extends RawOpInputs> { /** * A matrix to be multiplied. Must be a two-dimensional tensor of type {@code quint8}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndReluAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndReluAndRequantize.java index adf556ae8d3..deb84874e4d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndReluAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndReluAndRequantize.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -45,6 +47,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = QuantizedMatMulWithBiasAndReluAndRequantize.OP_NAME, + inputsClass = QuantizedMatMulWithBiasAndReluAndRequantize.Inputs.class +) public final class QuantizedMatMulWithBiasAndReluAndRequantize extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -57,8 +63,8 @@ public final class QuantizedMatMulWithBiasAndReluAndRequantize maxOut; - private QuantizedMatMulWithBiasAndReluAndRequantize(Operation operation) { - super(operation); + public QuantizedMatMulWithBiasAndReluAndRequantize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); minOut = operation.output(outputIdx++); @@ -223,6 +229,9 @@ public Options inputQuantMode(String inputQuantMode) { } } + @OpInputsMetadata( + outputsClass = QuantizedMatMulWithBiasAndReluAndRequantize.class + ) public static class Inputs extends RawOpInputs> { /** * A matrix to be multiplied. Must be a two-dimensional tensor of type {@code quint8}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/SelfAdjointEig.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/SelfAdjointEig.java index adde5950814..2f7bba28e22 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/SelfAdjointEig.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/SelfAdjointEig.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -46,6 +48,10 @@ * * @param data type for {@code e} output */ +@OpMetadata( + opType = SelfAdjointEig.OP_NAME, + inputsClass = SelfAdjointEig.Inputs.class +) @Operator( group = "linalg" ) @@ -59,8 +65,8 @@ public final class SelfAdjointEig extends RawOp { private Output v; - private SelfAdjointEig(Operation operation) { - super(operation); + public SelfAdjointEig(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; e = operation.output(outputIdx++); v = operation.output(outputIdx++); @@ -143,6 +149,9 @@ public Options computeV(Boolean computeV) { } } + @OpInputsMetadata( + outputsClass = SelfAdjointEig.class + ) public static class Inputs extends RawOpInputs> { /** * {@code Tensor} input of shape {@code [N, N]}. 
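Besides the annotations, every constructor in these hunks changes from private X(Operation operation) { super(operation); } to public X(Operation operation) { super(operation, OP_NAME); }. A minimal sketch of what the public constructor allows, assuming the usual tensorflow-core-api entry points (Ops.create, the float[][] overload of tf.constant, tf.linalg.matMul, Graph.operation) behave as in released versions: an Operation looked up from the graph can be re-wrapped in its typed op class.

import org.tensorflow.Graph;
import org.tensorflow.Operation;
import org.tensorflow.Output;
import org.tensorflow.op.Ops;
import org.tensorflow.op.linalg.MatMul;
import org.tensorflow.types.TFloat32;

public class RewrapOperation {
  public static void main(String[] args) {
    try (Graph graph = new Graph()) {
      Ops tf = Ops.create(graph);
      MatMul<TFloat32> product = tf.linalg.matMul(
          tf.constant(new float[][] {{1f, 2f}, {3f, 4f}}),
          tf.constant(new float[][] {{5f, 6f}, {7f, 8f}}));

      // Look the node up again by name and re-wrap it; the now-public
      // constructor passes OP_NAME to RawOp, as in the hunks above.
      Operation raw = graph.operation(product.op().name());
      MatMul<TFloat32> rewrapped = new MatMul<>(raw);
      Output<TFloat32> out = rewrapped.asOutput();
      System.out.println(out.shape());
    }
  }
}
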
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Solve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Solve.java index f07de1dcd02..7da5179d0f9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Solve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Solve.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +44,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Solve.OP_NAME, + inputsClass = Solve.Inputs.class +) @Operator( group = "linalg" ) @@ -53,8 +59,8 @@ public final class Solve extends RawOp implements Operand { private Output output; - private Solve(Operation operation) { - super(operation); + public Solve(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -134,6 +140,9 @@ public Options adjoint(Boolean adjoint) { } } + @OpInputsMetadata( + outputsClass = Solve.class + ) public static class Inputs extends RawOpInputs> { /** * Shape is {@code [..., M, M]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Sqrtm.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Sqrtm.java index cb633920385..c426849f57f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Sqrtm.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Sqrtm.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -48,6 +50,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Sqrtm.OP_NAME, + inputsClass = Sqrtm.Inputs.class +) @Operator( group = "linalg" ) @@ -59,8 +65,8 @@ public final class Sqrtm extends RawOp implements Operand { private Output output; - private Sqrtm(Operation operation) { - super(operation); + public Sqrtm(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -99,6 +105,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Sqrtm.class + ) public static class Inputs extends RawOpInputs> { /** * Shape is {@code [..., M, M]}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Svd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Svd.java index a3b388794cc..5d515f65548 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Svd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Svd.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -46,6 +48,10 @@ * * @param data type for {@code s} output */ +@OpMetadata( + opType = Svd.OP_NAME, + inputsClass = Svd.Inputs.class +) @Operator( group = "linalg" ) @@ -61,8 +67,8 @@ public final class Svd extends RawOp { private Output v; - private Svd(Operation operation) { - super(operation); + public Svd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; s = operation.output(outputIdx++); u = operation.output(outputIdx++); @@ -191,6 +197,9 @@ public Options fullMatrices(Boolean fullMatrices) { } } + @OpInputsMetadata( + outputsClass = Svd.class + ) public static class Inputs extends RawOpInputs> { /** * A tensor of shape {@code [..., M, N]} whose inner-most 2 dimensions diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiag.java index 4dac3f09451..2e1b99481cd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiag.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -49,6 +51,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = TensorDiag.OP_NAME, + inputsClass = TensorDiag.Inputs.class +) @Operator( group = "linalg" ) @@ -60,8 +66,8 @@ public final class TensorDiag extends RawOp implements Operand< private Output output; - private TensorDiag(Operation operation) { - super(operation); + public TensorDiag(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -97,6 +103,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = TensorDiag.class + ) public static class Inputs extends RawOpInputs> { /** * Rank k tensor where k is at most 1. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiagPart.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiagPart.java index 6cc286ea41b..e7f24e7e5c7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiagPart.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiagPart.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -50,6 +52,10 @@ * * @param data type for {@code diagonal} output */ +@OpMetadata( + opType = TensorDiagPart.OP_NAME, + inputsClass = TensorDiagPart.Inputs.class +) @Operator( group = "linalg" ) @@ -61,8 +67,8 @@ public final class TensorDiagPart extends RawOp implements Oper private Output diagonal; - private TensorDiagPart(Operation operation) { - super(operation); + public TensorDiagPart(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; diagonal = operation.output(outputIdx++); } @@ -98,6 +104,9 @@ public Output asOutput() { return diagonal; } + @OpInputsMetadata( + outputsClass = TensorDiagPart.class + ) public static class Inputs extends RawOpInputs> { /** * Rank k tensor where k is even and not zero. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Transpose.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Transpose.java index cb3430f85d4..504f4ba3d81 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Transpose.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Transpose.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -39,6 +41,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Transpose.OP_NAME, + inputsClass = Transpose.Inputs.class +) @Operator( group = "linalg" ) @@ -50,8 +56,8 @@ public final class Transpose extends RawOp implements Operand y; - private Transpose(Operation operation) { - super(operation); + public Transpose(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Transpose.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TriangularSolve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TriangularSolve.java index 65f0269ce1d..9fbc4979644 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TriangularSolve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TriangularSolve.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import 
org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -78,6 +80,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = TriangularSolve.OP_NAME, + inputsClass = TriangularSolve.Inputs.class +) @Operator( group = "linalg" ) @@ -89,8 +95,8 @@ public final class TriangularSolve extends RawOp implements Ope private Output output; - private TriangularSolve(Operation operation) { - super(operation); + public TriangularSolve(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -204,6 +210,9 @@ public Options adjoint(Boolean adjoint) { } } + @OpInputsMetadata( + outputsClass = TriangularSolve.class + ) public static class Inputs extends RawOpInputs> { /** * Shape is {@code [..., M, M]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalMatMul.java index 544a71da27d..e27f5e67b08 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalMatMul.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = TridiagonalMatMul.OP_NAME, + inputsClass = TridiagonalMatMul.Inputs.class +) public final class TridiagonalMatMul extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class TridiagonalMatMul extends RawOp implements O private Output output; - private TridiagonalMatMul(Operation operation) { - super(operation); + public TridiagonalMatMul(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -92,6 +98,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = TridiagonalMatMul.class + ) public static class Inputs extends RawOpInputs> { /** * Tensor of shape {@code [..., 1, M]}, representing superdiagonals of diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalSolve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalSolve.java index a62030705f6..d9fccfaebe6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalSolve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalSolve.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +44,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = TridiagonalSolve.OP_NAME, 
+ inputsClass = TridiagonalSolve.Inputs.class +) public final class TridiagonalSolve extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class TridiagonalSolve extends RawOp implements Op private Output output; - private TridiagonalSolve(Operation operation) { - super(operation); + public TridiagonalSolve(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -161,6 +167,9 @@ public Options perturbSingular(Boolean perturbSingular) { } } + @OpInputsMetadata( + outputsClass = TridiagonalSolve.class + ) public static class Inputs extends RawOpInputs> { /** * Tensor of shape {@code [..., 3, M]} whose innermost 2 dimensions represent the diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixComponents.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixComponents.java index 441df3a3e05..2251ead4b68 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixComponents.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixComponents.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * * @param data type for {@code values} output */ +@OpMetadata( + opType = CSRSparseMatrixComponents.OP_NAME, + inputsClass = CSRSparseMatrixComponents.Inputs.class +) public final class CSRSparseMatrixComponents extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -51,8 +57,8 @@ public final class CSRSparseMatrixComponents extends RawOp { private Output values; - private CSRSparseMatrixComponents(Operation operation) { - super(operation); + public CSRSparseMatrixComponents(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; rowPtrs = operation.output(outputIdx++); colInds = operation.output(outputIdx++); @@ -108,6 +114,9 @@ public Output values() { return values; } + @OpInputsMetadata( + outputsClass = CSRSparseMatrixComponents.class + ) public static class Inputs extends RawOpInputs> { /** * A batched CSRSparseMatrix. 
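The sparse linalg ops below receive exactly the same opType/inputsClass/outputsClass wiring as the dense ops above. Purely as a toy illustration of the kind of lookup such metadata enables (all names here are hypothetical stand-ins, not the TensorFlow Java API), a runtime-retained annotation carrying an op type and an inputs class can back a registry keyed by op type:

import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.util.HashMap;
import java.util.Map;

public class ToyOpMetadataRegistry {
  // Hypothetical stand-ins for illustration only; not org.tensorflow.op.annotation.*.
  @Retention(RetentionPolicy.RUNTIME)
  @interface ToyOpMetadata {
    String opType();
    Class<?> inputsClass();
  }

  @ToyOpMetadata(opType = ToyDet.OP_NAME, inputsClass = ToyDet.Inputs.class)
  static final class ToyDet {
    static final String OP_NAME = "ToyDet";
    static final class Inputs {}
  }

  public static void main(String[] args) {
    Map<String, Class<?>> inputsByOpType = new HashMap<>();
    for (Class<?> opClass : new Class<?>[] {ToyDet.class}) {
      ToyOpMetadata meta = opClass.getAnnotation(ToyOpMetadata.class);
      if (meta != null) {
        inputsByOpType.put(meta.opType(), meta.inputsClass());
      }
    }
    // e.g. {ToyDet=class ToyOpMetadataRegistry$ToyDet$Inputs}
    System.out.println(inputsByOpType);
  }
}
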
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToDense.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToDense.java index 032c356bb97..3d0a5b37604 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToDense.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToDense.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code dense_output} output */ +@OpMetadata( + opType = CSRSparseMatrixToDense.OP_NAME, + inputsClass = CSRSparseMatrixToDense.Inputs.class +) public final class CSRSparseMatrixToDense extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class CSRSparseMatrixToDense extends RawOp impleme private Output denseOutput; - private CSRSparseMatrixToDense(Operation operation) { - super(operation); + public CSRSparseMatrixToDense(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; denseOutput = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return denseOutput; } + @OpInputsMetadata( + outputsClass = CSRSparseMatrixToDense.class + ) public static class Inputs extends RawOpInputs> { /** * A batched CSRSparseMatrix. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToSparseTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToSparseTensor.java index e34a47fad2c..4cb3929785f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToSparseTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToSparseTensor.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * * @param data type for {@code values} output */ +@OpMetadata( + opType = CSRSparseMatrixToSparseTensor.OP_NAME, + inputsClass = CSRSparseMatrixToSparseTensor.Inputs.class +) public final class CSRSparseMatrixToSparseTensor extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -49,8 +55,8 @@ public final class CSRSparseMatrixToSparseTensor extends RawOp private Output denseShape; - private CSRSparseMatrixToSparseTensor(Operation operation) { - super(operation); + public CSRSparseMatrixToSparseTensor(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; indices = operation.output(outputIdx++); values = operation.output(outputIdx++); @@ -104,6 +110,9 @@ public Output denseShape() { return denseShape; } + @OpInputsMetadata( + outputsClass = CSRSparseMatrixToSparseTensor.class + ) public static class Inputs extends RawOpInputs> { /** * A (possibly batched) CSRSparseMatrix. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/DenseToCSRSparseMatrix.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/DenseToCSRSparseMatrix.java index b1edd569963..403903fa74a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/DenseToCSRSparseMatrix.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/DenseToCSRSparseMatrix.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -34,6 +36,10 @@ /** * Converts a dense tensor to a (possibly batched) CSRSparseMatrix. */ +@OpMetadata( + opType = DenseToCSRSparseMatrix.OP_NAME, + inputsClass = DenseToCSRSparseMatrix.Inputs.class +) public final class DenseToCSRSparseMatrix extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class DenseToCSRSparseMatrix extends RawOp implements Operand sparseOutput; @SuppressWarnings("unchecked") - private DenseToCSRSparseMatrix(Operation operation) { - super(operation); + public DenseToCSRSparseMatrix(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; sparseOutput = operation.output(outputIdx++); } @@ -83,6 +89,9 @@ public Output asOutput() { return (Output) sparseOutput; } + @OpInputsMetadata( + outputsClass = DenseToCSRSparseMatrix.class + ) public static class Inputs extends RawOpInputs { /** * A Dense tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixAdd.java index 36a84e7f892..13b704ce778 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixAdd.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -35,6 +37,10 @@ * The gradients of SparseMatrixAdd outputs with respect to alpha and beta are not * currently defined (TensorFlow will return zeros for these entries). */ +@OpMetadata( + opType = SparseMatrixAdd.OP_NAME, + inputsClass = SparseMatrixAdd.Inputs.class +) public final class SparseMatrixAdd extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class SparseMatrixAdd extends RawOp implements Operand { private Output c; @SuppressWarnings("unchecked") - private SparseMatrixAdd(Operation operation) { - super(operation); + public SparseMatrixAdd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; c = operation.output(outputIdx++); } @@ -89,6 +95,9 @@ public Output asOutput() { return (Output) c; } + @OpInputsMetadata( + outputsClass = SparseMatrixAdd.class + ) public static class Inputs extends RawOpInputs { /** * A CSRSparseMatrix. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixMatMul.java index 11b776289df..5ad55a2bd3d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixMatMul.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -56,6 +58,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SparseMatrixMatMul.OP_NAME, + inputsClass = SparseMatrixMatMul.Inputs.class +) public final class SparseMatrixMatMul extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -64,8 +70,8 @@ public final class SparseMatrixMatMul extends RawOp implements private Output output; - private SparseMatrixMatMul(Operation operation) { - super(operation); + public SparseMatrixMatMul(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -273,6 +279,9 @@ public Options conjugateOutput(Boolean conjugateOutput) { } } + @OpInputsMetadata( + outputsClass = SparseMatrixMatMul.class + ) public static class Inputs extends RawOpInputs> { /** * A CSRSparseMatrix. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixMul.java index ce8d51f00a6..cb7b25cea40 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixMul.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ *

    NOTE even if {@code b} is zero, the sparsity structure of the output does not * change. */ +@OpMetadata( + opType = SparseMatrixMul.OP_NAME, + inputsClass = SparseMatrixMul.Inputs.class +) public final class SparseMatrixMul extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -48,8 +54,8 @@ public final class SparseMatrixMul extends RawOp implements Operand { private Output output; @SuppressWarnings("unchecked") - private SparseMatrixMul(Operation operation) { - super(operation); + public SparseMatrixMul(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return (Output) output; } + @OpInputsMetadata( + outputsClass = SparseMatrixMul.class + ) public static class Inputs extends RawOpInputs { /** * A CSRSparseMatrix. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixNNZ.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixNNZ.java index a6c19832dd2..1d92de3aee1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixNNZ.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixNNZ.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; /** * Returns the number of nonzeroes of {@code sparse_matrix}. */ +@OpMetadata( + opType = SparseMatrixNNZ.OP_NAME, + inputsClass = SparseMatrixNNZ.Inputs.class +) public final class SparseMatrixNNZ extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -41,8 +47,8 @@ public final class SparseMatrixNNZ extends RawOp implements Operand { private Output nnz; - private SparseMatrixNNZ(Operation operation) { - super(operation); + public SparseMatrixNNZ(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; nnz = operation.output(outputIdx++); } @@ -77,6 +83,9 @@ public Output asOutput() { return nnz; } + @OpInputsMetadata( + outputsClass = SparseMatrixNNZ.class + ) public static class Inputs extends RawOpInputs { /** * A CSRSparseMatrix. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixOrderingAMD.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixOrderingAMD.java index 9b35eac22c9..7a766931103 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixOrderingAMD.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixOrderingAMD.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -74,6 +76,10 @@ *

    {@code ordering_amd_value} stores the AMD ordering: {@code [1 2 3 0]}.
 *
    input: A {@code CSRSparseMatrix}. */ +@OpMetadata( + opType = SparseMatrixOrderingAMD.OP_NAME, + inputsClass = SparseMatrixOrderingAMD.Inputs.class +) public final class SparseMatrixOrderingAMD extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -82,8 +88,8 @@ public final class SparseMatrixOrderingAMD extends RawOp implements Operand output; - private SparseMatrixOrderingAMD(Operation operation) { - super(operation); + public SparseMatrixOrderingAMD(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -118,6 +124,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SparseMatrixOrderingAMD.class + ) public static class Inputs extends RawOpInputs { /** * A {@code CSRSparseMatrix}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSoftmax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSoftmax.java index 5dd3f79012c..a11eaaf2e98 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSoftmax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSoftmax.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * the output has the same sparsity structure as the input (though missing values * in the output may now be treated as having probability zero). */ +@OpMetadata( + opType = SparseMatrixSoftmax.OP_NAME, + inputsClass = SparseMatrixSoftmax.Inputs.class +) public final class SparseMatrixSoftmax extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -48,8 +54,8 @@ public final class SparseMatrixSoftmax extends RawOp implements Operand { private Output softmax; @SuppressWarnings("unchecked") - private SparseMatrixSoftmax(Operation operation) { - super(operation); + public SparseMatrixSoftmax(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; softmax = operation.output(outputIdx++); } @@ -89,6 +95,9 @@ public Output asOutput() { return (Output) softmax; } + @OpInputsMetadata( + outputsClass = SparseMatrixSoftmax.class + ) public static class Inputs extends RawOpInputs { /** * A CSRSparseMatrix. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSoftmaxGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSoftmaxGrad.java index 05ae75273bc..3a2d3f1fffa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSoftmaxGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSoftmaxGrad.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -35,6 +37,10 @@ /** * Calculates the gradient of the SparseMatrixSoftmax op. */ +@OpMetadata( + opType = SparseMatrixSoftmaxGrad.OP_NAME, + inputsClass = SparseMatrixSoftmaxGrad.Inputs.class +) public final class SparseMatrixSoftmaxGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class SparseMatrixSoftmaxGrad extends RawOp implements Operand gradient; @SuppressWarnings("unchecked") - private SparseMatrixSoftmaxGrad(Operation operation) { - super(operation); + public SparseMatrixSoftmaxGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; gradient = operation.output(outputIdx++); } @@ -87,6 +93,9 @@ public Output asOutput() { return (Output) gradient; } + @OpInputsMetadata( + outputsClass = SparseMatrixSoftmaxGrad.class + ) public static class Inputs extends RawOpInputs { /** * A CSRSparseMatrix. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSparseCholesky.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSparseCholesky.java index bd8d2d0fcc4..c5b31229d45 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSparseCholesky.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSparseCholesky.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -99,6 +101,10 @@ * permutation: A {@code Tensor}. * type: The type of {@code input}. 
*/ +@OpMetadata( + opType = SparseMatrixSparseCholesky.OP_NAME, + inputsClass = SparseMatrixSparseCholesky.Inputs.class +) public final class SparseMatrixSparseCholesky extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -108,8 +114,8 @@ public final class SparseMatrixSparseCholesky extends RawOp implements Operand output; @SuppressWarnings("unchecked") - private SparseMatrixSparseCholesky(Operation operation) { - super(operation); + public SparseMatrixSparseCholesky(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -151,6 +157,9 @@ public Output asOutput() { return (Output) output; } + @OpInputsMetadata( + outputsClass = SparseMatrixSparseCholesky.class + ) public static class Inputs extends RawOpInputs { /** * A {@code CSRSparseMatrix}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSparseMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSparseMatMul.java index 5c4564e1067..4f3e5d718ee 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSparseMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixSparseMatMul.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -100,6 +102,10 @@ * adjoint_a: If True, {@code a} adjointed before multiplication. * adjoint_b: If True, {@code b} adjointed before multiplication. */ +@OpMetadata( + opType = SparseMatrixSparseMatMul.OP_NAME, + inputsClass = SparseMatrixSparseMatMul.Inputs.class +) public final class SparseMatrixSparseMatMul extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -109,8 +115,8 @@ public final class SparseMatrixSparseMatMul extends RawOp implements Operand c; @SuppressWarnings("unchecked") - private SparseMatrixSparseMatMul(Operation operation) { - super(operation); + public SparseMatrixSparseMatMul(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; c = operation.output(outputIdx++); } @@ -269,6 +275,9 @@ public Options adjointB(Boolean adjointB) { } } + @OpInputsMetadata( + outputsClass = SparseMatrixSparseMatMul.class + ) public static class Inputs extends RawOpInputs { /** * A CSRSparseMatrix. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixTranspose.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixTranspose.java index e8a0beceddb..2219a63bdfc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixTranspose.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixTranspose.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * Transposes the inner (matrix) dimensions of a SparseMatrix and optionally * conjugates its values. */ +@OpMetadata( + opType = SparseMatrixTranspose.OP_NAME, + inputsClass = SparseMatrixTranspose.Inputs.class +) public final class SparseMatrixTranspose extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class SparseMatrixTranspose extends RawOp implements Operand private Output output; @SuppressWarnings("unchecked") - private SparseMatrixTranspose(Operation operation) { - super(operation); + public SparseMatrixTranspose(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -125,6 +131,9 @@ public Options conjugate(Boolean conjugate) { } } + @OpInputsMetadata( + outputsClass = SparseMatrixTranspose.class + ) public static class Inputs extends RawOpInputs { /** * A CSRSparseMatrix. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixZeros.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixZeros.java index 6502ad7d100..91080f1f093 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixZeros.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixZeros.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -35,6 +37,10 @@ /** * Creates an all-zeros CSRSparseMatrix with shape {@code dense_shape}. */ +@OpMetadata( + opType = SparseMatrixZeros.OP_NAME, + inputsClass = SparseMatrixZeros.Inputs.class +) public final class SparseMatrixZeros extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class SparseMatrixZeros extends RawOp implements Operand { private Output sparseMatrix; @SuppressWarnings("unchecked") - private SparseMatrixZeros(Operation operation) { - super(operation); + public SparseMatrixZeros(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; sparseMatrix = operation.output(outputIdx++); } @@ -85,6 +91,9 @@ public Output asOutput() { return (Output) sparseMatrix; } + @OpInputsMetadata( + outputsClass = SparseMatrixZeros.class + ) public static class Inputs extends RawOpInputs { /** * The desired matrix shape. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseTensorToCSRSparseMatrix.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseTensorToCSRSparseMatrix.java index 36d6fb8013e..d27421d7375 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseTensorToCSRSparseMatrix.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseTensorToCSRSparseMatrix.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -34,6 +36,10 @@ /** * Converts a SparseTensor to a (possibly batched) CSRSparseMatrix. */ +@OpMetadata( + opType = SparseTensorToCSRSparseMatrix.OP_NAME, + inputsClass = SparseTensorToCSRSparseMatrix.Inputs.class +) public final class SparseTensorToCSRSparseMatrix extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class SparseTensorToCSRSparseMatrix extends RawOp implements Operan private Output sparseMatrix; @SuppressWarnings("unchecked") - private SparseTensorToCSRSparseMatrix(Operation operation) { - super(operation); + public SparseTensorToCSRSparseMatrix(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; sparseMatrix = operation.output(outputIdx++); } @@ -85,6 +91,9 @@ public Output asOutput() { return (Output) sparseMatrix; } + @OpInputsMetadata( + outputsClass = SparseTensorToCSRSparseMatrix.class + ) public static class Inputs extends RawOpInputs { /** * SparseTensor indices. 
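Every generated op in this patch gets the same three-part change, repeated in the hunks above and below: the two annotation imports, an @OpMetadata annotation pairing the op class with its nested Inputs class (with @OpInputsMetadata pointing back the other way), and a constructor that becomes public and passes OP_NAME up to RawOp. The following is only a minimal sketch of the resulting class shape, using a hypothetical MyOp rather than any file in this diff; the generic bound and the empty Inputs body are illustrative assumptions (the real generated Inputs classes extend RawOpInputs, as the hunks show).

import org.tensorflow.Operand;
import org.tensorflow.Operation;
import org.tensorflow.Output;
import org.tensorflow.op.RawOp;
import org.tensorflow.op.annotation.OpInputsMetadata;
import org.tensorflow.op.annotation.OpMetadata;
import org.tensorflow.types.family.TType;

// Illustrative only: MyOp is a made-up op, not part of this patch.
@OpMetadata(
    opType = MyOp.OP_NAME,             // ties the wrapper class to the TF op type
    inputsClass = MyOp.Inputs.class    // ties the wrapper class to its Inputs class
)
public final class MyOp<T extends TType> extends RawOp implements Operand<T> {

  /** The name of this op, as known by TensorFlow core engine */
  public static final String OP_NAME = "MyOp";

  private final Output<T> output;

  // Previously private and calling super(operation); now public and reporting OP_NAME.
  public MyOp(Operation operation) {
    super(operation, OP_NAME);
    int outputIdx = 0;
    output = operation.output(outputIdx++);
  }

  @Override
  public Output<T> asOutput() {
    return output;
  }

  // The generated Inputs classes extend RawOpInputs (see the hunks above);
  // the body is elided here to keep the sketch self-contained.
  @OpInputsMetadata(
      outputsClass = MyOp.class
  )
  public static class Inputs {
  }
}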
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Abs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Abs.java index c78b34ed12f..c19b0c28eed 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Abs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Abs.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -39,6 +41,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Abs.OP_NAME, + inputsClass = Abs.Inputs.class +) @Operator( group = "math" ) @@ -50,8 +56,8 @@ public final class Abs extends RawOp implements Operand { private Output y; - private Abs(Operation operation) { - super(operation); + public Abs(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -87,6 +93,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Abs.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AccumulateN.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AccumulateN.java index 7a531d623e8..a958c7f6938 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AccumulateN.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AccumulateN.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -44,6 +46,10 @@ * * @param data type for {@code sum} output */ +@OpMetadata( + opType = AccumulateN.OP_NAME, + inputsClass = AccumulateN.Inputs.class +) @Operator( group = "math" ) @@ -55,8 +61,8 @@ public final class AccumulateN extends RawOp implements Operand private Output sum; - private AccumulateN(Operation operation) { - super(operation); + public AccumulateN(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; sum = operation.output(outputIdx++); } @@ -95,6 +101,9 @@ public Output asOutput() { return sum; } + @OpInputsMetadata( + outputsClass = AccumulateN.class + ) public static class Inputs extends RawOpInputs> { /** * A list of {@code Tensor} objects, each with same shape and type. 
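Because the generated constructors are now public, code that already holds a raw Operation can re-wrap it in its typed op class directly instead of going through the Ops builder. Below is a hedged example against the Abs class changed above; it assumes absOp really is an "Abs" operation whose output element type is TFloat32.

import org.tensorflow.Operation;
import org.tensorflow.Output;
import org.tensorflow.op.math.Abs;
import org.tensorflow.types.TFloat32;

final class RewrapAbs {
  // Wrap an existing Operation (assumed to be an "Abs" producing TFloat32)
  // via the now-public generated constructor, then read its typed output.
  static Output<TFloat32> outputOf(Operation absOp) {
    Abs<TFloat32> abs = new Abs<>(absOp);
    return abs.asOutput();
  }
}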
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acos.java index bc2d95bfe5c..977099004bb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acos.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Acos.OP_NAME, + inputsClass = Acos.Inputs.class +) @Operator( group = "math" ) @@ -49,8 +55,8 @@ public final class Acos extends RawOp implements Operand { private Output y; - private Acos(Operation operation) { - super(operation); + public Acos(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -86,6 +92,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Acos.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acosh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acosh.java index d273129375d..39da836345c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acosh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acosh.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +44,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Acosh.OP_NAME, + inputsClass = Acosh.Inputs.class +) @Operator( group = "math" ) @@ -53,8 +59,8 @@ public final class Acosh extends RawOp implements Operand { private Output y; - private Acosh(Operation operation) { - super(operation); + public Acosh(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Acosh.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Add.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Add.java index 19bb9502f1d..ec17877eed3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Add.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Add.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ 
-40,6 +42,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = Add.OP_NAME, + inputsClass = Add.Inputs.class +) @Operator( group = "math" ) @@ -51,8 +57,8 @@ public final class Add extends RawOp implements Operand { private Output z; - private Add(Operation operation) { - super(operation); + public Add(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = Add.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AddN.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AddN.java index 04d7c198272..27f572e53a3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AddN.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AddN.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +44,10 @@ * * @param data type for {@code sum} output */ +@OpMetadata( + opType = AddN.OP_NAME, + inputsClass = AddN.Inputs.class +) @Operator( group = "math" ) @@ -53,8 +59,8 @@ public final class AddN extends RawOp implements Operand { private Output sum; - private AddN(Operation operation) { - super(operation); + public AddN(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; sum = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return sum; } + @OpInputsMetadata( + outputsClass = AddN.class + ) public static class Inputs extends RawOpInputs> { /** * The inputs input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Angle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Angle.java index afb2b79ab48..d0e971a9a7d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Angle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Angle.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -52,6 +54,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Angle.OP_NAME, + inputsClass = Angle.Inputs.class +) @Operator( group = "math" ) @@ -63,8 +69,8 @@ public final class Angle extends RawOp implements Operand private Output output; - private Angle(Operation operation) { - super(operation); + public Angle(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -117,6 +123,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Angle.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ApproximateEqual.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ApproximateEqual.java index 5a8173def8e..0700aea3d11 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ApproximateEqual.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ApproximateEqual.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -35,6 +37,10 @@ /** * Returns the truth value of abs(x-y) < tolerance element-wise. */ +@OpMetadata( + opType = ApproximateEqual.OP_NAME, + inputsClass = ApproximateEqual.Inputs.class +) @Operator( group = "math" ) @@ -46,8 +52,8 @@ public final class ApproximateEqual extends RawOp implements Operand { private Output z; - private ApproximateEqual(Operation operation) { - super(operation); + public ApproximateEqual(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -125,6 +131,9 @@ public Options tolerance(Float tolerance) { } } + @OpInputsMetadata( + outputsClass = ApproximateEqual.class + ) public static class Inputs extends RawOpInputs { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMax.java index 91b94a54d2b..0d0212311e2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMax.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -49,6 +51,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ArgMax.OP_NAME, + inputsClass = ArgMax.Inputs.class +) @Operator( group = "math" ) @@ -60,8 +66,8 @@ public final class ArgMax extends RawOp implements Operand private Output output; - private ArgMax(Operation operation) { - super(operation); + public ArgMax(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -122,6 +128,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = ArgMax.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMin.java index a5742d5c542..f5c7c4a058f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMin.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import 
org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -49,6 +51,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ArgMin.OP_NAME, + inputsClass = ArgMin.Inputs.class +) @Operator( group = "math" ) @@ -60,8 +66,8 @@ public final class ArgMin extends RawOp implements Operand private Output output; - private ArgMin(Operation operation) { - super(operation); + public ArgMin(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -122,6 +128,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = ArgMin.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asin.java index 0ab2256368d..764b633187e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asin.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -48,6 +50,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Asin.OP_NAME, + inputsClass = Asin.Inputs.class +) @Operator( group = "math" ) @@ -59,8 +65,8 @@ public final class Asin extends RawOp implements Operand { private Output y; - private Asin(Operation operation) { - super(operation); + public Asin(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -96,6 +102,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Asin.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asinh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asinh.java index bd402f3f550..3483c91a719 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asinh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asinh.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -43,6 +45,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Asinh.OP_NAME, + inputsClass = Asinh.Inputs.class +) @Operator( group = "math" ) @@ -54,8 +60,8 @@ public final class Asinh extends RawOp implements Operand { private Output y; - private Asinh(Operation operation) { - super(operation); + public Asinh(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Asinh.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan.java index 06edc00e74a..1973f102d39 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -48,6 +50,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Atan.OP_NAME, + inputsClass = Atan.Inputs.class +) @Operator( group = "math" ) @@ -59,8 +65,8 @@ public final class Atan extends RawOp implements Operand { private Output y; - private Atan(Operation operation) { - super(operation); + public Atan(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -96,6 +102,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Atan.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan2.java index 675120effab..3458cf38200 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan2.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -52,6 +54,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = Atan2.OP_NAME, + inputsClass = Atan2.Inputs.class +) @Operator( group = "math" ) @@ -63,8 +69,8 @@ public final class Atan2 extends RawOp implements Operand private Output z; - private Atan2(Operation operation) { - super(operation); + public Atan2(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -102,6 +108,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = Atan2.class + ) public static class Inputs extends RawOpInputs> { /** * The y input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atanh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atanh.java index c9aff2cf3a3..bd0c86a60db 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atanh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atanh.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ 
-45,6 +47,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Atanh.OP_NAME, + inputsClass = Atanh.Inputs.class +) @Operator( group = "math" ) @@ -56,8 +62,8 @@ public final class Atanh extends RawOp implements Operand { private Output y; - private Atanh(Operation operation) { - super(operation); + public Atanh(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -93,6 +99,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Atanh.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0.java index c7824375420..62505b7e4e6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = BesselI0.OP_NAME, + inputsClass = BesselI0.Inputs.class +) public final class BesselI0 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class BesselI0 extends RawOp implements Operand< private Output y; - private BesselI0(Operation operation) { - super(operation); + public BesselI0(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = BesselI0.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0e.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0e.java index a557e9f294e..1386e6ceefb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0e.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0e.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = BesselI0e.OP_NAME, + inputsClass = BesselI0e.Inputs.class +) public final class BesselI0e extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class BesselI0e extends RawOp implements Operand private Output y; - private BesselI0e(Operation operation) { - super(operation); + public BesselI0e(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = BesselI0e.class + ) public static class Inputs 
extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1.java index 3b5362106b7..16a2a62d0b7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = BesselI1.OP_NAME, + inputsClass = BesselI1.Inputs.class +) public final class BesselI1 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class BesselI1 extends RawOp implements Operand< private Output y; - private BesselI1(Operation operation) { - super(operation); + public BesselI1(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = BesselI1.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1e.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1e.java index 00461637760..4390f61cde7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1e.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1e.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = BesselI1e.OP_NAME, + inputsClass = BesselI1e.Inputs.class +) public final class BesselI1e extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class BesselI1e extends RawOp implements Operand private Output y; - private BesselI1e(Operation operation) { - super(operation); + public BesselI1e(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = BesselI1e.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Betainc.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Betainc.java index 3e873075268..b4c5bdf1a77 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Betainc.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Betainc.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; 
+import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -42,6 +44,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = Betainc.OP_NAME, + inputsClass = Betainc.Inputs.class +) @Operator( group = "math" ) @@ -53,8 +59,8 @@ public final class Betainc extends RawOp implements Operand z; - private Betainc(Operation operation) { - super(operation); + public Betainc(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -95,6 +101,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = Betainc.class + ) public static class Inputs extends RawOpInputs> { /** * The a input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Bincount.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Bincount.java index ae7d1475d31..e7f6bc53875 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Bincount.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Bincount.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -43,6 +45,10 @@ * * @param data type for {@code bins} output */ +@OpMetadata( + opType = Bincount.OP_NAME, + inputsClass = Bincount.Inputs.class +) @Operator( group = "math" ) @@ -54,8 +60,8 @@ public final class Bincount extends RawOp implements Operand< private Output bins; - private Bincount(Operation operation) { - super(operation); + public Bincount(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; bins = operation.output(outputIdx++); } @@ -99,6 +105,9 @@ public Output asOutput() { return bins; } + @OpInputsMetadata( + outputsClass = Bincount.class + ) public static class Inputs extends RawOpInputs> { /** * int32 {@code Tensor}. 
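The @OpMetadata / @OpInputsMetadata pair added throughout these files forms a two-way link between each op class and its Inputs class. Assuming the annotations are retained at runtime (an assumption; the retention policy is not visible in this diff), the link can be read back with plain reflection, here using Abs as the example.

import org.tensorflow.op.annotation.OpInputsMetadata;
import org.tensorflow.op.annotation.OpMetadata;
import org.tensorflow.op.math.Abs;

final class MetadataLookup {
  // Read the op type and the class links back from the annotations,
  // assuming RUNTIME retention (not shown in this diff); otherwise getAnnotation returns null.
  static void describe() {
    OpMetadata meta = Abs.class.getAnnotation(OpMetadata.class);
    System.out.println("op type: " + meta.opType());                   // "Abs"
    System.out.println("inputs class: " + meta.inputsClass());         // Abs.Inputs

    OpInputsMetadata inputsMeta = Abs.Inputs.class.getAnnotation(OpInputsMetadata.class);
    System.out.println("outputs class: " + inputsMeta.outputsClass()); // Abs
  }
}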
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ceil.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ceil.java index f27874e0c69..58dd4718ee7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ceil.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ceil.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Ceil.OP_NAME, + inputsClass = Ceil.Inputs.class +) @Operator( group = "math" ) @@ -47,8 +53,8 @@ public final class Ceil extends RawOp implements Operand { private Output y; - private Ceil(Operation operation) { - super(operation); + public Ceil(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Ceil.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ComplexAbs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ComplexAbs.java index c03e3d836d1..b01366920ae 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ComplexAbs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ComplexAbs.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -53,6 +55,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = ComplexAbs.OP_NAME, + inputsClass = ComplexAbs.Inputs.class +) @Operator( group = "math" ) @@ -64,8 +70,8 @@ public final class ComplexAbs extends RawOp implements Operan private Output y; - private ComplexAbs(Operation operation) { - super(operation); + public ComplexAbs(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -118,6 +124,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = ComplexAbs.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Conj.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Conj.java index c07c7b6d9dd..9878f1cbc55 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Conj.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Conj.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import 
org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -46,6 +48,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Conj.OP_NAME, + inputsClass = Conj.Inputs.class +) @Operator( group = "math" ) @@ -57,8 +63,8 @@ public final class Conj extends RawOp implements Operand { private Output output; - private Conj(Operation operation) { - super(operation); + public Conj(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -94,6 +100,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Conj.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cos.java index ddfe40b21dd..ebae2e0ab01 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cos.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -44,6 +46,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Cos.OP_NAME, + inputsClass = Cos.Inputs.class +) @Operator( group = "math" ) @@ -55,8 +61,8 @@ public final class Cos extends RawOp implements Operand { private Output y; - private Cos(Operation operation) { - super(operation); + public Cos(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -92,6 +98,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Cos.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cosh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cosh.java index 4cebf838097..ff272324595 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cosh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cosh.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -43,6 +45,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Cosh.OP_NAME, + inputsClass = Cosh.Inputs.class +) @Operator( group = "math" ) @@ -54,8 +60,8 @@ public final class Cosh extends RawOp implements Operand { private Output y; - private Cosh(Operation operation) { - super(operation); + public Cosh(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Cosh.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumprod.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumprod.java index fa818c7aa06..19b5ff33e5c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumprod.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumprod.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -57,6 +59,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = Cumprod.OP_NAME, + inputsClass = Cumprod.Inputs.class +) @Operator( group = "math" ) @@ -68,8 +74,8 @@ public final class Cumprod extends RawOp implements Operand private Output out; - private Cumprod(Operation operation) { - super(operation); + public Cumprod(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -176,6 +182,9 @@ public Options reverse(Boolean reverse) { } } + @OpInputsMetadata( + outputsClass = Cumprod.class + ) public static class Inputs extends RawOpInputs> { /** * A {@code Tensor}. Must be one of the following types: {@code float32}, {@code float64}, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumsum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumsum.java index f0b4b9b2e45..a5ace804137 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumsum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumsum.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -57,6 +59,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = Cumsum.OP_NAME, + inputsClass = Cumsum.Inputs.class +) @Operator( group = "math" ) @@ -68,8 +74,8 @@ public final class Cumsum extends RawOp implements Operand { private Output out; - private Cumsum(Operation operation) { - super(operation); + public Cumsum(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -176,6 +182,9 @@ public Options reverse(Boolean reverse) { } } + @OpInputsMetadata( + outputsClass = Cumsum.class + ) public static class Inputs extends RawOpInputs> { /** * A {@code Tensor}. 
Must be one of the following types: {@code float32}, {@code float64}, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/CumulativeLogsumexp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/CumulativeLogsumexp.java index ca5448a9960..b727b2aa70c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/CumulativeLogsumexp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/CumulativeLogsumexp.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -51,6 +53,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = CumulativeLogsumexp.OP_NAME, + inputsClass = CumulativeLogsumexp.Inputs.class +) public final class CumulativeLogsumexp extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -59,8 +65,8 @@ public final class CumulativeLogsumexp extends RawOp implemen private Output out; - private CumulativeLogsumexp(Operation operation) { - super(operation); + public CumulativeLogsumexp(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -165,6 +171,9 @@ public Options reverse(Boolean reverse) { } } + @OpInputsMetadata( + outputsClass = CumulativeLogsumexp.class + ) public static class Inputs extends RawOpInputs> { /** * A {@code Tensor}. Must be one of the following types: {@code float16}, {@code float32}, {@code float64}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DenseBincount.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DenseBincount.java index 556c582b9d6..24949d906ce 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DenseBincount.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DenseBincount.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -42,6 +44,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = DenseBincount.OP_NAME, + inputsClass = DenseBincount.Inputs.class +) @Operator( group = "math" ) @@ -53,8 +59,8 @@ public final class DenseBincount extends RawOp implements Ope private Output output; - private DenseBincount(Operation operation) { - super(operation); + public DenseBincount(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -138,6 +144,9 @@ public Options binaryOutput(Boolean binaryOutput) { } } + @OpInputsMetadata( + outputsClass = DenseBincount.class + ) public static class Inputs extends RawOpInputs> { /** * 1D or 2D int {@code Tensor}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Digamma.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Digamma.java index 7d50c38999a..3c4b64d60e6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Digamma.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Digamma.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Digamma.OP_NAME, + inputsClass = Digamma.Inputs.class +) @Operator( group = "math" ) @@ -48,8 +54,8 @@ public final class Digamma extends RawOp implements Operand y; - private Digamma(Operation operation) { - super(operation); + public Digamma(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -85,6 +91,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Digamma.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Div.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Div.java index 7a8a0ca06de..e95a4b3da69 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Div.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Div.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = Div.OP_NAME, + inputsClass = Div.Inputs.class +) @Operator( group = "math" ) @@ -49,8 +55,8 @@ public final class Div extends RawOp implements Operand { private Output z; - private Div(Operation operation) { - super(operation); + public Div(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = Div.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DivNoNan.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DivNoNan.java index d21762c3127..ffac20c160b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DivNoNan.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DivNoNan.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import 
org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = DivNoNan.OP_NAME, + inputsClass = DivNoNan.Inputs.class +) @Operator( group = "math" ) @@ -49,8 +55,8 @@ public final class DivNoNan extends RawOp implements Operand private Output z; - private DivNoNan(Operation operation) { - super(operation); + public DivNoNan(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = DivNoNan.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Equal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Equal.java index 625624b428c..2695c76a5e9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Equal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Equal.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -46,6 +48,10 @@ * tf.math.equal(x, y) ==> array([True, True]) * */ +@OpMetadata( + opType = Equal.OP_NAME, + inputsClass = Equal.Inputs.class +) @Operator( group = "math" ) @@ -57,8 +63,8 @@ public final class Equal extends RawOp implements Operand { private Output z; - private Equal(Operation operation) { - super(operation); + public Equal(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -136,6 +142,9 @@ public Options incompatibleShapeError(Boolean incompatibleShapeError) { } } + @OpInputsMetadata( + outputsClass = Equal.class + ) public static class Inputs extends RawOpInputs { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erf.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erf.java index 8960e9851f5..fe8a0ed2afc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erf.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erf.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Erf.OP_NAME, + inputsClass = Erf.Inputs.class +) @Operator( group = "math" ) @@ -47,8 +53,8 @@ public final class Erf extends RawOp implements Operand { private Output y; - private Erf(Operation operation) { - super(operation); + public Erf(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Erf.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erfc.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erfc.java index c24a60a10a4..cbc503be446 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erfc.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erfc.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Erfc.OP_NAME, + inputsClass = Erfc.Inputs.class +) @Operator( group = "math" ) @@ -47,8 +53,8 @@ public final class Erfc extends RawOp implements Operand { private Output y; - private Erfc(Operation operation) { - super(operation); + public Erfc(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Erfc.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Exp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Exp.java index edc260dd2ae..0ddf4078f0e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Exp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Exp.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -57,6 +59,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Exp.OP_NAME, + inputsClass = Exp.Inputs.class +) @Operator( group = "math" ) @@ -68,8 +74,8 @@ public final class Exp extends RawOp implements Operand { private Output y; - private Exp(Operation operation) { - super(operation); + public Exp(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -105,6 +111,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Exp.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Expm1.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Expm1.java index 29e89e11ae6..d15593d2071 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Expm1.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Expm1.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -48,6 +50,10 @@ * * 
@param data type for {@code y} output */ +@OpMetadata( + opType = Expm1.OP_NAME, + inputsClass = Expm1.Inputs.class +) @Operator( group = "math" ) @@ -59,8 +65,8 @@ public final class Expm1 extends RawOp implements Operand { private Output y; - private Expm1(Operation operation) { - super(operation); + public Expm1(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -96,6 +102,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Expm1.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Fact.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Fact.java index 95aa9162307..91e819c29a3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Fact.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Fact.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; /** * Output a fact about factorials. */ +@OpMetadata( + opType = Fact.OP_NAME, + inputsClass = Fact.Inputs.class +) @Operator( group = "math" ) @@ -44,8 +50,8 @@ public final class Fact extends RawOp implements Operand { private Output fact; - private Fact(Operation operation) { - super(operation); + public Fact(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; fact = operation.output(outputIdx++); } @@ -78,6 +84,9 @@ public Output asOutput() { return fact; } + @OpInputsMetadata( + outputsClass = Fact.class + ) public static class Inputs extends RawOpInputs { public Inputs(GraphOperation op) { super(new Fact(op), op, Arrays.asList()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Floor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Floor.java index a83480aa9b9..5d539cfe495 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Floor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Floor.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Floor.OP_NAME, + inputsClass = Floor.Inputs.class +) @Operator( group = "math" ) @@ -47,8 +53,8 @@ public final class Floor extends RawOp implements Operand private Output y; - private Floor(Operation operation) { - super(operation); + public Floor(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Floor.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorDiv.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorDiv.java index bb8adeeb63d..619ad282bc9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorDiv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorDiv.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = FloorDiv.OP_NAME, + inputsClass = FloorDiv.Inputs.class +) @Operator( group = "math" ) @@ -49,8 +55,8 @@ public final class FloorDiv extends RawOp implements Operand private Output z; - private FloorDiv(Operation operation) { - super(operation); + public FloorDiv(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = FloorDiv.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorMod.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorMod.java index 61d7d5b5cfa..63963d5b3f4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorMod.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorMod.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -40,6 +42,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = FloorMod.OP_NAME, + inputsClass = FloorMod.Inputs.class +) @Operator( group = "math" ) @@ -51,8 +57,8 @@ public final class FloorMod extends RawOp implements Operand< private Output z; - private FloorMod(Operation operation) { - super(operation); + public FloorMod(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = FloorMod.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Greater.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Greater.java index 57504b1d508..c566f003533 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Greater.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Greater.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -47,6 +49,10 @@ * 
tf.math.greater(x, y) ==> [False, False, True] * */ +@OpMetadata( + opType = Greater.OP_NAME, + inputsClass = Greater.Inputs.class +) @Operator( group = "math" ) @@ -58,8 +64,8 @@ public final class Greater extends RawOp implements Operand { private Output z; - private Greater(Operation operation) { - super(operation); + public Greater(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -97,6 +103,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = Greater.class + ) public static class Inputs extends RawOpInputs { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/GreaterEqual.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/GreaterEqual.java index 9a38c6b5fa8..64589450029 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/GreaterEqual.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/GreaterEqual.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -47,6 +49,10 @@ * tf.math.greater_equal(x, y) ==> [True, False, True, True] * */ +@OpMetadata( + opType = GreaterEqual.OP_NAME, + inputsClass = GreaterEqual.Inputs.class +) @Operator( group = "math" ) @@ -58,8 +64,8 @@ public final class GreaterEqual extends RawOp implements Operand { private Output z; - private GreaterEqual(Operation operation) { - super(operation); + public GreaterEqual(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -97,6 +103,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = GreaterEqual.class + ) public static class Inputs extends RawOpInputs { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igamma.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igamma.java index 5bb9bc21881..383c0d95434 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igamma.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igamma.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -43,6 +45,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = Igamma.OP_NAME, + inputsClass = Igamma.Inputs.class +) @Operator( group = "math" ) @@ -54,8 +60,8 @@ public final class Igamma extends RawOp implements Operand private Output z; - private Igamma(Operation operation) { - super(operation); + public Igamma(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -93,6 +99,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = Igamma.class + ) public static class Inputs extends RawOpInputs> { /** * The a input diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IgammaGradA.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IgammaGradA.java index f223cb5a1a1..058b27f96d3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IgammaGradA.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IgammaGradA.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = IgammaGradA.OP_NAME, + inputsClass = IgammaGradA.Inputs.class +) public final class IgammaGradA extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class IgammaGradA extends RawOp implements Opera private Output z; - private IgammaGradA(Operation operation) { - super(operation); + public IgammaGradA(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = IgammaGradA.class + ) public static class Inputs extends RawOpInputs> { /** * The a input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igammac.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igammac.java index 356f1129187..92ded48b24e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igammac.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igammac.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -43,6 +45,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = Igammac.OP_NAME, + inputsClass = Igammac.Inputs.class +) @Operator( group = "math" ) @@ -54,8 +60,8 @@ public final class Igammac extends RawOp implements Operand z; - private Igammac(Operation operation) { - super(operation); + public Igammac(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -93,6 +99,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = Igammac.class + ) public static class Inputs extends RawOpInputs> { /** * The a input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Imag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Imag.java index 10eb163fabe..8f1eedb2865 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Imag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Imag.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import 
org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -48,6 +50,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Imag.OP_NAME, + inputsClass = Imag.Inputs.class +) @Operator( group = "math" ) @@ -59,8 +65,8 @@ public final class Imag extends RawOp implements Operand { private Output output; - private Imag(Operation operation) { - super(operation); + public Imag(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -113,6 +119,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Imag.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/InvertPermutation.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/InvertPermutation.java index 570c0408231..b83e3c7687b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/InvertPermutation.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/InvertPermutation.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -47,6 +49,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = InvertPermutation.OP_NAME, + inputsClass = InvertPermutation.Inputs.class +) @Operator( group = "math" ) @@ -58,8 +64,8 @@ public final class InvertPermutation extends RawOp implements private Output y; - private InvertPermutation(Operation operation) { - super(operation); + public InvertPermutation(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -95,6 +101,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = InvertPermutation.class + ) public static class Inputs extends RawOpInputs> { /** * 1-D. 
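Because these constructors are now public, an existing GraphOperation can be wrapped directly in its generated op class to get typed access to its outputs, much as the generated Inputs constructors do (see the Fact hunk earlier in this section). Below is a hypothetical sketch under that assumption, using IsFinite (whose hunks follow) as the example; the helper class, the method name, and the premise that graphOp refers to an "IsFinite" node are illustrative and not part of this patch.

import org.tensorflow.GraphOperation;
import org.tensorflow.Output;
import org.tensorflow.op.math.IsFinite;
import org.tensorflow.types.TBool;

// Hypothetical helper, assuming graphOp is a GraphOperation whose op type is "IsFinite".
final class IsFiniteWrapping {
  static Output<TBool> typedOutput(GraphOperation graphOp) {
    IsFinite wrapped = new IsFinite(graphOp);  // constructor made public by this patch
    return wrapped.asOutput();                 // typed Output<TBool> from the generated class
  }
}

If the operation were not actually of the expected type, the wrapper's accessors would be meaningless, so a caller would need to check the op type first; that check is not shown here.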
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IsFinite.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IsFinite.java index b134a350c34..db8c3cfac33 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IsFinite.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IsFinite.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -43,6 +45,10 @@ * tf.math.is_finite(x) ==> [True, True, True, False, False] * */ +@OpMetadata( + opType = IsFinite.OP_NAME, + inputsClass = IsFinite.Inputs.class +) @Operator( group = "math" ) @@ -54,8 +60,8 @@ public final class IsFinite extends RawOp implements Operand { private Output y; - private IsFinite(Operation operation) { - super(operation); + public IsFinite(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = IsFinite.class + ) public static class Inputs extends RawOpInputs { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IsInf.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IsInf.java index 1b111be0a5d..6d7a74a693c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IsInf.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IsInf.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -43,6 +45,10 @@ * tf.math.is_inf(x) ==> [False, True, False, True] * */ +@OpMetadata( + opType = IsInf.OP_NAME, + inputsClass = IsInf.Inputs.class +) @Operator( group = "math" ) @@ -54,8 +60,8 @@ public final class IsInf extends RawOp implements Operand { private Output y; - private IsInf(Operation operation) { - super(operation); + public IsInf(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = IsInf.class + ) public static class Inputs extends RawOpInputs { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IsNan.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IsNan.java index 5d3bbc59140..2313dc981f1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IsNan.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IsNan.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import 
org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -43,6 +45,10 @@ * tf.math.is_nan(x) ==> [False, True, False, True, False] * */ +@OpMetadata( + opType = IsNan.OP_NAME, + inputsClass = IsNan.Inputs.class +) @Operator( group = "math" ) @@ -54,8 +60,8 @@ public final class IsNan extends RawOp implements Operand { private Output y; - private IsNan(Operation operation) { - super(operation); + public IsNan(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = IsNan.class + ) public static class Inputs extends RawOpInputs { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Less.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Less.java index ab28b8e688d..ed99ef4e7dd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Less.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Less.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -47,6 +49,10 @@ * tf.math.less(x, y) ==> [False, True, True] * */ +@OpMetadata( + opType = Less.OP_NAME, + inputsClass = Less.Inputs.class +) @Operator( group = "math" ) @@ -58,8 +64,8 @@ public final class Less extends RawOp implements Operand { private Output z; - private Less(Operation operation) { - super(operation); + public Less(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -97,6 +103,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = Less.class + ) public static class Inputs extends RawOpInputs { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LessEqual.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LessEqual.java index de450dd1f93..b090bd12112 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LessEqual.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LessEqual.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -47,6 +49,10 @@ * tf.math.less_equal(x, y) ==> [True, True, True] * */ +@OpMetadata( + opType = LessEqual.OP_NAME, + inputsClass = LessEqual.Inputs.class +) @Operator( group = "math" ) @@ -58,8 +64,8 @@ public final class LessEqual extends RawOp implements Operand { private Output z; - private LessEqual(Operation operation) { - super(operation); + public LessEqual(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -97,6 +103,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = LessEqual.class + ) public static class Inputs extends RawOpInputs { /** * The x input diff 
--git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Lgamma.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Lgamma.java index 4881d6ca779..ee474789a0a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Lgamma.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Lgamma.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -43,6 +45,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Lgamma.OP_NAME, + inputsClass = Lgamma.Inputs.class +) @Operator( group = "math" ) @@ -54,8 +60,8 @@ public final class Lgamma extends RawOp implements Operand private Output y; - private Lgamma(Operation operation) { - super(operation); + public Lgamma(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Lgamma.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log.java index 098a1d0ff6a..ee2e976f3ba 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +44,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Log.OP_NAME, + inputsClass = Log.Inputs.class +) @Operator( group = "math" ) @@ -53,8 +59,8 @@ public final class Log extends RawOp implements Operand { private Output y; - private Log(Operation operation) { - super(operation); + public Log(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Log.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log1p.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log1p.java index 2c4a27b0be2..78e42d010d4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log1p.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log1p.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ 
-42,6 +44,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Log1p.OP_NAME, + inputsClass = Log1p.Inputs.class +) @Operator( group = "math" ) @@ -53,8 +59,8 @@ public final class Log1p extends RawOp implements Operand { private Output y; - private Log1p(Operation operation) { - super(operation); + public Log1p(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Log1p.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LogicalAnd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LogicalAnd.java index b2b1329ef87..455bc3db20c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LogicalAnd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LogicalAnd.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TBool; @@ -35,6 +37,10 @@ * NOTE: {@code math.LogicalAnd} supports broadcasting. More about broadcasting * here */ +@OpMetadata( + opType = LogicalAnd.OP_NAME, + inputsClass = LogicalAnd.Inputs.class +) @Operator( group = "math" ) @@ -46,8 +52,8 @@ public final class LogicalAnd extends RawOp implements Operand { private Output z; - private LogicalAnd(Operation operation) { - super(operation); + public LogicalAnd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = LogicalAnd.class + ) public static class Inputs extends RawOpInputs { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LogicalNot.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LogicalNot.java index 65de99bbfcb..38e71e4cb83 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LogicalNot.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LogicalNot.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TBool; /** * Returns the truth value of {@code NOT x} element-wise. */ +@OpMetadata( + opType = LogicalNot.OP_NAME, + inputsClass = LogicalNot.Inputs.class +) @Operator( group = "math" ) @@ -44,8 +50,8 @@ public final class LogicalNot extends RawOp implements Operand { private Output y; - private LogicalNot(Operation operation) { - super(operation); + public LogicalNot(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = LogicalNot.class + ) public static class Inputs extends RawOpInputs { /** * A {@code Tensor} of type {@code bool}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LogicalOr.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LogicalOr.java index 6c7b7c127db..d066fc83024 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LogicalOr.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/LogicalOr.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TBool; @@ -35,6 +37,10 @@ * NOTE: {@code math.LogicalOr} supports broadcasting. More about broadcasting * here */ +@OpMetadata( + opType = LogicalOr.OP_NAME, + inputsClass = LogicalOr.Inputs.class +) @Operator( group = "math" ) @@ -46,8 +52,8 @@ public final class LogicalOr extends RawOp implements Operand { private Output z; - private LogicalOr(Operation operation) { - super(operation); + public LogicalOr(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = LogicalOr.class + ) public static class Inputs extends RawOpInputs { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Maximum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Maximum.java index f467d171afc..0f94fa56c93 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Maximum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Maximum.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = Maximum.OP_NAME, + inputsClass = Maximum.Inputs.class +) @Operator( group = "math" ) @@ -49,8 +55,8 @@ public final class Maximum extends RawOp implements Operand z; - private Maximum(Operation operation) { - super(operation); + public Maximum(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = Maximum.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mean.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mean.java index 122567fd9ae..abc6db045a4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mean.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mean.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; 
import org.tensorflow.types.family.TNumber; @@ -41,6 +43,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Mean.OP_NAME, + inputsClass = Mean.Inputs.class +) @Operator( group = "math" ) @@ -52,8 +58,8 @@ public final class Mean extends RawOp implements Operand { private Output output; - private Mean(Operation operation) { - super(operation); + public Mean(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -132,6 +138,9 @@ public Options keepDims(Boolean keepDims) { } } + @OpInputsMetadata( + outputsClass = Mean.class + ) public static class Inputs extends RawOpInputs> { /** * The tensor to reduce. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Minimum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Minimum.java index facf4cb1560..ebea75921af 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Minimum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Minimum.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = Minimum.OP_NAME, + inputsClass = Minimum.Inputs.class +) @Operator( group = "math" ) @@ -49,8 +55,8 @@ public final class Minimum extends RawOp implements Operand z; - private Minimum(Operation operation) { - super(operation); + public Minimum(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = Minimum.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mod.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mod.java index 8838ae84a78..2221ecce333 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mod.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mod.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -40,6 +42,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = Mod.OP_NAME, + inputsClass = Mod.Inputs.class +) @Operator( group = "math" ) @@ -51,8 +57,8 @@ public final class Mod extends RawOp implements Operand { private Output z; - private Mod(Operation operation) { - super(operation); + public Mod(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = Mod.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mul.java index 28cd75c9810..864e3d3fd6a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mul.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = Mul.OP_NAME, + inputsClass = Mul.Inputs.class +) @Operator( group = "math" ) @@ -49,8 +55,8 @@ public final class Mul extends RawOp implements Operand { private Output z; - private Mul(Operation operation) { - super(operation); + public Mul(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = Mul.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/MulNoNan.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/MulNoNan.java index a5a904d2bd4..845947687f7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/MulNoNan.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/MulNoNan.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = MulNoNan.OP_NAME, + inputsClass = MulNoNan.Inputs.class +) @Operator( group = "math" ) @@ -49,8 +55,8 @@ public final class MulNoNan extends RawOp implements Operand private Output z; - private MulNoNan(Operation operation) { - super(operation); + public MulNoNan(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = MulNoNan.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ndtri.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ndtri.java index 832e0e067ea..651438a8523 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ndtri.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ndtri.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import 
org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Ndtri.OP_NAME, + inputsClass = Ndtri.Inputs.class +) @Operator( group = "math" ) @@ -47,8 +53,8 @@ public final class Ndtri extends RawOp implements Operand private Output y; - private Ndtri(Operation operation) { - super(operation); + public Ndtri(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Ndtri.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Neg.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Neg.java index 961355ca9af..2124815e5c9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Neg.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Neg.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Neg.OP_NAME, + inputsClass = Neg.Inputs.class +) @Operator( group = "math" ) @@ -48,8 +54,8 @@ public final class Neg extends RawOp implements Operand { private Output y; - private Neg(Operation operation) { - super(operation); + public Neg(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -85,6 +91,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Neg.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/NextAfter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/NextAfter.java index 3f96ce897d7..c53812ad539 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/NextAfter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/NextAfter.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -41,6 +43,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = NextAfter.OP_NAME, + inputsClass = NextAfter.Inputs.class +) @Operator( group = "math" ) @@ -52,8 +58,8 @@ public final class NextAfter extends RawOp implements Operand private Output output; - private NextAfter(Operation operation) { - super(operation); + public NextAfter(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = NextAfter.class + ) public static class Inputs extends RawOpInputs> { /** * The x1 input diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/NotEqual.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/NotEqual.java index b579d881c1e..60ad0e8b773 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/NotEqual.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/NotEqual.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -37,6 +39,10 @@ * NOTE: {@code math.NotEqual} supports broadcasting. More about broadcasting * here */ +@OpMetadata( + opType = NotEqual.OP_NAME, + inputsClass = NotEqual.Inputs.class +) @Operator( group = "math" ) @@ -48,8 +54,8 @@ public final class NotEqual extends RawOp implements Operand { private Output z; - private NotEqual(Operation operation) { - super(operation); + public NotEqual(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -127,6 +133,9 @@ public Options incompatibleShapeError(Boolean incompatibleShapeError) { } } + @OpInputsMetadata( + outputsClass = NotEqual.class + ) public static class Inputs extends RawOpInputs { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Polygamma.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Polygamma.java index 2b7aacb5503..a3d26a80452 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Polygamma.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Polygamma.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -40,6 +42,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = Polygamma.OP_NAME, + inputsClass = Polygamma.Inputs.class +) @Operator( group = "math" ) @@ -51,8 +57,8 @@ public final class Polygamma extends RawOp implements Operand private Output z; - private Polygamma(Operation operation) { - super(operation); + public Polygamma(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = Polygamma.class + ) public static class Inputs extends RawOpInputs> { /** * The a input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/PopulationCount.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/PopulationCount.java index c49751ed2ae..bb751df6597 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/PopulationCount.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/PopulationCount.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; 
+import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TUint8; @@ -40,6 +42,10 @@ * {@code int32} or {@code int64} and perform the bitcount on the result, than to feed in * 8- or 16-bit inputs and then aggregate the resulting counts. */ +@OpMetadata( + opType = PopulationCount.OP_NAME, + inputsClass = PopulationCount.Inputs.class +) @Operator( group = "math" ) @@ -51,8 +57,8 @@ public final class PopulationCount extends RawOp implements Operand { private Output y; - private PopulationCount(Operation operation) { - super(operation); + public PopulationCount(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -87,6 +93,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = PopulationCount.class + ) public static class Inputs extends RawOpInputs { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Pow.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Pow.java index d7a3059dc65..dca15c8146d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Pow.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Pow.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -43,6 +45,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = Pow.OP_NAME, + inputsClass = Pow.Inputs.class +) @Operator( group = "math" ) @@ -54,8 +60,8 @@ public final class Pow extends RawOp implements Operand { private Output z; - private Pow(Operation operation) { - super(operation); + public Pow(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -93,6 +99,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = Pow.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedAdd.java index 49e68db93ce..d120d55bf02 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedAdd.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -38,6 +40,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = QuantizedAdd.OP_NAME, + inputsClass = QuantizedAdd.Inputs.class +) @Operator( group = "math" ) @@ -53,8 +59,8 @@ public final class QuantizedAdd extends RawOp { private Output maxZ; - private QuantizedAdd(Operation operation) { - super(operation); + public QuantizedAdd(Operation operation) { + super(operation, 
OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); minZ = operation.output(outputIdx++); @@ -121,6 +127,9 @@ public Output maxZ() { return maxZ; } + @OpInputsMetadata( + outputsClass = QuantizedAdd.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedMul.java index 3861db85f3a..6042eb2e048 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedMul.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -38,6 +40,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = QuantizedMul.OP_NAME, + inputsClass = QuantizedMul.Inputs.class +) @Operator( group = "math" ) @@ -53,8 +59,8 @@ public final class QuantizedMul extends RawOp { private Output maxZ; - private QuantizedMul(Operation operation) { - super(operation); + public QuantizedMul(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); minZ = operation.output(outputIdx++); @@ -121,6 +127,9 @@ public Output maxZ() { return maxZ; } + @OpInputsMetadata( + outputsClass = QuantizedMul.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Real.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Real.java index b4d443f9083..e00013a7f4c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Real.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Real.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -48,6 +50,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Real.OP_NAME, + inputsClass = Real.Inputs.class +) @Operator( group = "math" ) @@ -59,8 +65,8 @@ public final class Real extends RawOp implements Operand { private Output output; - private Real(Operation operation) { - super(operation); + public Real(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -113,6 +119,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Real.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RealDiv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RealDiv.java index 3b301133fa1..e3f48312429 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RealDiv.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RealDiv.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = RealDiv.OP_NAME, + inputsClass = RealDiv.Inputs.class +) @Operator( group = "math" ) @@ -50,8 +56,8 @@ public final class RealDiv extends RawOp implements Operand private Output z; - private RealDiv(Operation operation) { - super(operation); + public RealDiv(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -89,6 +95,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = RealDiv.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Reciprocal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Reciprocal.java index 637d25d959b..7518dd7bd6b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Reciprocal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Reciprocal.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Reciprocal.OP_NAME, + inputsClass = Reciprocal.Inputs.class +) @Operator( group = "math" ) @@ -48,8 +54,8 @@ public final class Reciprocal extends RawOp implements Operand< private Output y; - private Reciprocal(Operation operation) { - super(operation); + public Reciprocal(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -85,6 +91,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Reciprocal.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ReciprocalGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ReciprocalGrad.java index 579fc948e26..24b41cfb5e0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ReciprocalGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ReciprocalGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = ReciprocalGrad.OP_NAME, + inputsClass = ReciprocalGrad.Inputs.class +) public final class ReciprocalGrad extends RawOp implements Operand { /** * The name of 
this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class ReciprocalGrad extends RawOp implements Oper private Output z; - private ReciprocalGrad(Operation operation) { - super(operation); + public ReciprocalGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -85,6 +91,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = ReciprocalGrad.class + ) public static class Inputs extends RawOpInputs> { /** * The y input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizationRangePerChannel.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizationRangePerChannel.java index 121255c20a4..3d5a6435e08 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizationRangePerChannel.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizationRangePerChannel.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -34,6 +36,10 @@ /** * Computes requantization range per channel. */ +@OpMetadata( + opType = RequantizationRangePerChannel.OP_NAME, + inputsClass = RequantizationRangePerChannel.Inputs.class +) public final class RequantizationRangePerChannel extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class RequantizationRangePerChannel extends RawOp { private Output outputMax; - private RequantizationRangePerChannel(Operation operation) { - super(operation); + public RequantizationRangePerChannel(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputMin = operation.output(outputIdx++); outputMax = operation.output(outputIdx++); @@ -93,6 +99,9 @@ public Output outputMax() { return outputMax; } + @OpInputsMetadata( + outputsClass = RequantizationRangePerChannel.class + ) public static class Inputs extends RawOpInputs { /** * The original input tensor. 
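
Editor's note (not part of the patch): the hunks in this file all apply the same mechanical change to the generated math ops. The private single-argument constructor becomes a public constructor that forwards the op name via super(operation, OP_NAME), and each op class is cross-linked with its nested Inputs class through the new @OpMetadata and @OpInputsMetadata annotations. The sketch below is illustrative only; it shows what the now-public constructor makes possible, assuming the surrounding tensorflow-core-api surface (Ops.create, Graph.operation, the math.pow endpoint and the z() accessor) behaves as in existing releases.

import org.tensorflow.Graph;
import org.tensorflow.Operation;
import org.tensorflow.op.Ops;
import org.tensorflow.op.math.Pow;
import org.tensorflow.types.TFloat32;

public class RewrapExample {
  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);
      // Build a Pow node through the usual fluent API.
      Pow<TFloat32> pow = tf.math.pow(tf.constant(2.0f), tf.constant(3.0f));
      // Look the underlying Operation up again by name.
      Operation raw = g.operation(pow.op().name());
      // With this patch the generated constructor is public, so the typed
      // wrapper can presumably be rebuilt directly around that Operation.
      Pow<TFloat32> rewrapped = new Pow<>(raw);
      System.out.println(rewrapped.z());
    }
  }
}
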
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizePerChannel.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizePerChannel.java index f3a513f4164..d8913c86743 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizePerChannel.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizePerChannel.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RequantizePerChannel.OP_NAME, + inputsClass = RequantizePerChannel.Inputs.class +) public final class RequantizePerChannel extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -49,8 +55,8 @@ public final class RequantizePerChannel extends RawOp { private Output outputMax; - private RequantizePerChannel(Operation operation) { - super(operation); + public RequantizePerChannel(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); outputMin = operation.output(outputIdx++); @@ -114,6 +120,9 @@ public Output outputMax() { return outputMax; } + @OpInputsMetadata( + outputsClass = RequantizePerChannel.class + ) public static class Inputs extends RawOpInputs> { /** * The original input tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rint.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rint.java index 7014e1f8921..1886c3b06a5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rint.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rint.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -44,6 +46,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Rint.OP_NAME, + inputsClass = Rint.Inputs.class +) @Operator( group = "math" ) @@ -55,8 +61,8 @@ public final class Rint extends RawOp implements Operand { private Output y; - private Rint(Operation operation) { - super(operation); + public Rint(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -92,6 +98,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Rint.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Round.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Round.java index 1d079601ebd..ebde5c2358c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Round.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Round.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import 
org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Round.OP_NAME, + inputsClass = Round.Inputs.class +) @Operator( group = "math" ) @@ -49,8 +55,8 @@ public final class Round extends RawOp implements Operand { private Output y; - private Round(Operation operation) { - super(operation); + public Round(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -86,6 +92,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Round.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rsqrt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rsqrt.java index 9bf061a5192..081368e6f02 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rsqrt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rsqrt.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Rsqrt.OP_NAME, + inputsClass = Rsqrt.Inputs.class +) @Operator( group = "math" ) @@ -48,8 +54,8 @@ public final class Rsqrt extends RawOp implements Operand { private Output y; - private Rsqrt(Operation operation) { - super(operation); + public Rsqrt(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -85,6 +91,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Rsqrt.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RsqrtGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RsqrtGrad.java index 8874d5a8b00..16e114dc591 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RsqrtGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RsqrtGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = RsqrtGrad.OP_NAME, + inputsClass = RsqrtGrad.Inputs.class +) public final class RsqrtGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class RsqrtGrad extends RawOp implements Operand z; - private RsqrtGrad(Operation operation) { - super(operation); + public RsqrtGrad(Operation operation) { + 
super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = RsqrtGrad.class + ) public static class Inputs extends RawOpInputs> { /** * The y input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMax.java index 57098d1f87e..167e9ecf905 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMax.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -53,6 +55,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SegmentMax.OP_NAME, + inputsClass = SegmentMax.Inputs.class +) @Operator( group = "math" ) @@ -64,8 +70,8 @@ public final class SegmentMax extends RawOp implements Operan private Output output; - private SegmentMax(Operation operation) { - super(operation); + public SegmentMax(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -106,6 +112,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SegmentMax.class + ) public static class Inputs extends RawOpInputs> { /** * The data input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMean.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMean.java index afd9e32a207..715c0098dce 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMean.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMean.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -55,6 +57,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SegmentMean.OP_NAME, + inputsClass = SegmentMean.Inputs.class +) @Operator( group = "math" ) @@ -66,8 +72,8 @@ public final class SegmentMean extends RawOp implements Operand private Output output; - private SegmentMean(Operation operation) { - super(operation); + public SegmentMean(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -108,6 +114,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SegmentMean.class + ) public static class Inputs extends RawOpInputs> { /** * The data input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMin.java index a32e370e3d3..b867b9fd972 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMin.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMin.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -53,6 +55,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SegmentMin.OP_NAME, + inputsClass = SegmentMin.Inputs.class +) @Operator( group = "math" ) @@ -64,8 +70,8 @@ public final class SegmentMin extends RawOp implements Operan private Output output; - private SegmentMin(Operation operation) { - super(operation); + public SegmentMin(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -106,6 +112,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SegmentMin.class + ) public static class Inputs extends RawOpInputs> { /** * The data input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentProd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentProd.java index 192a5367e3c..f2857db52cf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentProd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentProd.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -54,6 +56,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SegmentProd.OP_NAME, + inputsClass = SegmentProd.Inputs.class +) @Operator( group = "math" ) @@ -65,8 +71,8 @@ public final class SegmentProd extends RawOp implements Operand private Output output; - private SegmentProd(Operation operation) { - super(operation); + public SegmentProd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -107,6 +113,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SegmentProd.class + ) public static class Inputs extends RawOpInputs> { /** * The data input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentSum.java index 10f814320f8..bfdd2182153 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentSum.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -54,6 +56,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SegmentSum.OP_NAME, + inputsClass = 
SegmentSum.Inputs.class +) @Operator( group = "math" ) @@ -65,8 +71,8 @@ public final class SegmentSum extends RawOp implements Operand< private Output output; - private SegmentSum(Operation operation) { - super(operation); + public SegmentSum(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -107,6 +113,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SegmentSum.class + ) public static class Inputs extends RawOpInputs> { /** * The data input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sigmoid.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sigmoid.java index 2716b3cf8b8..d9ca11b4d48 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sigmoid.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sigmoid.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Sigmoid.OP_NAME, + inputsClass = Sigmoid.Inputs.class +) @Operator( group = "math" ) @@ -48,8 +54,8 @@ public final class Sigmoid extends RawOp implements Operand private Output y; - private Sigmoid(Operation operation) { - super(operation); + public Sigmoid(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -85,6 +91,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Sigmoid.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SigmoidGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SigmoidGrad.java index 55d174c2eba..d89e6603f98 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SigmoidGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SigmoidGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = SigmoidGrad.OP_NAME, + inputsClass = SigmoidGrad.Inputs.class +) public final class SigmoidGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class SigmoidGrad extends RawOp implements Operand private Output z; - private SigmoidGrad(Operation operation) { - super(operation); + public SigmoidGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = SigmoidGrad.class + ) public static class Inputs extends RawOpInputs> { /** * The y input diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sign.java index 4acd2234734..e9673f31954 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sign.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -47,6 +49,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Sign.OP_NAME, + inputsClass = Sign.Inputs.class +) @Operator( group = "math" ) @@ -58,8 +64,8 @@ public final class Sign extends RawOp implements Operand { private Output y; - private Sign(Operation operation) { - super(operation); + public Sign(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -95,6 +101,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Sign.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sin.java index 2bf226a61ad..a174e6bfcb8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sin.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -43,6 +45,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Sin.OP_NAME, + inputsClass = Sin.Inputs.class +) @Operator( group = "math" ) @@ -54,8 +60,8 @@ public final class Sin extends RawOp implements Operand { private Output y; - private Sin(Operation operation) { - super(operation); + public Sin(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Sin.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sinh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sinh.java index 4486f6576fb..4d817c8cba8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sinh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sinh.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -43,6 +45,10 @@ * * @param 
data type for {@code y} output */ +@OpMetadata( + opType = Sinh.OP_NAME, + inputsClass = Sinh.Inputs.class +) @Operator( group = "math" ) @@ -54,8 +60,8 @@ public final class Sinh extends RawOp implements Operand { private Output y; - private Sinh(Operation operation) { - super(operation); + public Sinh(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Sinh.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SobolSample.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SobolSample.java index 4e31c283009..3301f457584 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SobolSample.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SobolSample.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -40,6 +42,10 @@ * * @param data type for {@code samples} output */ +@OpMetadata( + opType = SobolSample.OP_NAME, + inputsClass = SobolSample.Inputs.class +) public final class SobolSample extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -48,8 +54,8 @@ public final class SobolSample extends RawOp implements Opera private Output samples; - private SobolSample(Operation operation) { - super(operation); + public SobolSample(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; samples = operation.output(outputIdx++); } @@ -113,6 +119,9 @@ public Output asOutput() { return samples; } + @OpInputsMetadata( + outputsClass = SobolSample.class + ) public static class Inputs extends RawOpInputs> { /** * Positive scalar {@code Tensor} representing each sample's dimension. 
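
Editor's note (not part of the patch): each regenerated class now declares @OpMetadata(opType = ..., inputsClass = ...) and its Inputs class declares @OpInputsMetadata(outputsClass = ...), so the op type name, the op class, and its inputs class reference one another. The element names below (opType, inputsClass, outputsClass) come straight from the hunks above; whether the annotations are retained at runtime is not shown in this diff, so the reflective read is a hedged sketch.

import org.tensorflow.op.annotation.OpInputsMetadata;
import org.tensorflow.op.annotation.OpMetadata;
import org.tensorflow.op.math.SobolSample;

public class InspectOpMetadata {
  public static void main(String[] args) {
    OpMetadata meta = SobolSample.class.getAnnotation(OpMetadata.class);
    if (meta == null) {
      System.out.println("OpMetadata is not retained at runtime in this build");
      return;
    }
    System.out.println("opType = " + meta.opType());
    System.out.println("inputsClass = " + meta.inputsClass().getName());
    // Follow the back-reference from the inputs class to the op class.
    OpInputsMetadata inputsMeta = meta.inputsClass().getAnnotation(OpInputsMetadata.class);
    if (inputsMeta != null) {
      System.out.println("outputsClass = " + inputsMeta.outputsClass().getName());
    }
  }
}
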
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Softplus.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Softplus.java index 75a59a3389e..081ba524a08 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Softplus.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Softplus.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code activations} output */ +@OpMetadata( + opType = Softplus.OP_NAME, + inputsClass = Softplus.Inputs.class +) @Operator( group = "math" ) @@ -47,8 +53,8 @@ public final class Softplus extends RawOp implements Operand< private Output activations; - private Softplus(Operation operation) { - super(operation); + public Softplus(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; activations = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return activations; } + @OpInputsMetadata( + outputsClass = Softplus.class + ) public static class Inputs extends RawOpInputs> { /** * The features input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SoftplusGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SoftplusGrad.java index dd9f8637498..76caf9ca811 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SoftplusGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SoftplusGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code backprops} output */ +@OpMetadata( + opType = SoftplusGrad.OP_NAME, + inputsClass = SoftplusGrad.Inputs.class +) public final class SoftplusGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class SoftplusGrad extends RawOp implements Oper private Output backprops; - private SoftplusGrad(Operation operation) { - super(operation); + public SoftplusGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; backprops = operation.output(outputIdx++); } @@ -83,6 +89,9 @@ public Output asOutput() { return backprops; } + @OpInputsMetadata( + outputsClass = SoftplusGrad.class + ) public static class Inputs extends RawOpInputs> { /** * The backpropagated gradients to the corresponding softplus operation. 
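
Editor's note (not part of the patch): the gradient kernels touched here (ReciprocalGrad, RsqrtGrad, SigmoidGrad, SoftplusGrad, SqrtGrad, TanhGrad) receive the same metadata annotations but, as in the hunks above, carry no @Operator annotation, so they are not exposed on Ops. The sketch below assumes the generated factory has the shape SoftplusGrad.create(Scope, gradients, features), matching the input order documented in the Inputs javadoc above; that signature is an assumption, not something shown in this diff.

import org.tensorflow.Graph;
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.op.math.SoftplusGrad;
import org.tensorflow.types.TFloat32;

public class GradOpExample {
  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);
      Operand<TFloat32> features = tf.constant(new float[] {-1f, 0f, 2f});
      Operand<TFloat32> upstream = tf.constant(new float[] {1f, 1f, 1f});
      // Assumed factory signature: create(scope, gradients, features).
      SoftplusGrad<TFloat32> grad = SoftplusGrad.create(tf.scope(), upstream, features);
      System.out.println(grad.backprops());
    }
  }
}
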
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sqrt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sqrt.java index ffbc55ab620..93ce7497ba3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sqrt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sqrt.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Sqrt.OP_NAME, + inputsClass = Sqrt.Inputs.class +) @Operator( group = "math" ) @@ -48,8 +54,8 @@ public final class Sqrt extends RawOp implements Operand { private Output y; - private Sqrt(Operation operation) { - super(operation); + public Sqrt(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -85,6 +91,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Sqrt.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SqrtGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SqrtGrad.java index 8bdef3778d0..44150536e1b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SqrtGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SqrtGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = SqrtGrad.OP_NAME, + inputsClass = SqrtGrad.Inputs.class +) public final class SqrtGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class SqrtGrad extends RawOp implements Operand private Output z; - private SqrtGrad(Operation operation) { - super(operation); + public SqrtGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = SqrtGrad.class + ) public static class Inputs extends RawOpInputs> { /** * The y input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Square.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Square.java index 52f321ef0d5..36b966a671d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Square.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Square.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; 
import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Square.OP_NAME, + inputsClass = Square.Inputs.class +) @Operator( group = "math" ) @@ -48,8 +54,8 @@ public final class Square extends RawOp implements Operand { private Output y; - private Square(Operation operation) { - super(operation); + public Square(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -85,6 +91,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Square.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SquaredDifference.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SquaredDifference.java index a71e97d7c5f..46b06c4daa0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SquaredDifference.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SquaredDifference.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = SquaredDifference.OP_NAME, + inputsClass = SquaredDifference.Inputs.class +) @Operator( group = "math" ) @@ -49,8 +55,8 @@ public final class SquaredDifference extends RawOp implements O private Output z; - private SquaredDifference(Operation operation) { - super(operation); + public SquaredDifference(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -89,6 +95,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = SquaredDifference.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sub.java index c415449aefc..147016b4975 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sub.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = Sub.OP_NAME, + inputsClass = Sub.Inputs.class +) @Operator( group = "math" ) @@ -49,8 +55,8 @@ public final class Sub extends RawOp implements Operand { private Output z; - private Sub(Operation operation) { - super(operation); + public Sub(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = Sub.class + ) public 
static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tan.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tan.java index 80f4bb51b53..9c8f23a2e60 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tan.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tan.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -44,6 +46,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Tan.OP_NAME, + inputsClass = Tan.Inputs.class +) @Operator( group = "math" ) @@ -55,8 +61,8 @@ public final class Tan extends RawOp implements Operand { private Output y; - private Tan(Operation operation) { - super(operation); + public Tan(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -92,6 +98,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Tan.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tanh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tanh.java index b4a3cdbe76e..c9e2f43ca94 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tanh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tanh.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -50,6 +52,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Tanh.OP_NAME, + inputsClass = Tanh.Inputs.class +) @Operator( group = "math" ) @@ -61,8 +67,8 @@ public final class Tanh extends RawOp implements Operand { private Output y; - private Tanh(Operation operation) { - super(operation); + public Tanh(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -98,6 +104,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Tanh.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TanhGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TanhGrad.java index 287eda24c21..7dd6d1ef4d0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TanhGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TanhGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import 
org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = TanhGrad.OP_NAME, + inputsClass = TanhGrad.Inputs.class +) public final class TanhGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class TanhGrad extends RawOp implements Operand private Output z; - private TanhGrad(Operation operation) { - super(operation); + public TanhGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = TanhGrad.class + ) public static class Inputs extends RawOpInputs> { /** * The y input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateDiv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateDiv.java index 04461701a1e..ece8ab49c44 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateDiv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateDiv.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +44,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = TruncateDiv.OP_NAME, + inputsClass = TruncateDiv.Inputs.class +) @Operator( group = "math" ) @@ -53,8 +59,8 @@ public final class TruncateDiv extends RawOp implements Operand private Output z; - private TruncateDiv(Operation operation) { - super(operation); + public TruncateDiv(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -92,6 +98,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = TruncateDiv.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateMod.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateMod.java index 5e0aeadeaae..3fa76d9f178 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateMod.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateMod.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -39,6 +41,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = TruncateMod.OP_NAME, + inputsClass = TruncateMod.Inputs.class +) @Operator( group = "math" ) @@ -50,8 +56,8 @@ public final class TruncateMod extends RawOp implements Opera private Output z; - private TruncateMod(Operation operation) { - super(operation); + public TruncateMod(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -89,6 +95,9 @@ public Output asOutput() { return 
z; } + @OpInputsMetadata( + outputsClass = TruncateMod.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMax.java index fd872df8540..785503c0af5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMax.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -59,6 +61,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = UnsortedSegmentMax.OP_NAME, + inputsClass = UnsortedSegmentMax.Inputs.class +) @Operator( group = "math" ) @@ -70,8 +76,8 @@ public final class UnsortedSegmentMax extends RawOp implement private Output output; - private UnsortedSegmentMax(Operation operation) { - super(operation); + public UnsortedSegmentMax(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -114,6 +120,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = UnsortedSegmentMax.class + ) public static class Inputs extends RawOpInputs> { /** * The data input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMin.java index 5a010ae3b5f..541a0f15b47 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMin.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -56,6 +58,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = UnsortedSegmentMin.OP_NAME, + inputsClass = UnsortedSegmentMin.Inputs.class +) @Operator( group = "math" ) @@ -67,8 +73,8 @@ public final class UnsortedSegmentMin extends RawOp implement private Output output; - private UnsortedSegmentMin(Operation operation) { - super(operation); + public UnsortedSegmentMin(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -111,6 +117,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = UnsortedSegmentMin.class + ) public static class Inputs extends RawOpInputs> { /** * The data input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentProd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentProd.java index e54296d0a47..4d80fde16df 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentProd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentProd.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -56,6 +58,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = UnsortedSegmentProd.OP_NAME, + inputsClass = UnsortedSegmentProd.Inputs.class +) @Operator( group = "math" ) @@ -67,8 +73,8 @@ public final class UnsortedSegmentProd extends RawOp implements private Output output; - private UnsortedSegmentProd(Operation operation) { - super(operation); + public UnsortedSegmentProd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -111,6 +117,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = UnsortedSegmentProd.class + ) public static class Inputs extends RawOpInputs> { /** * The data input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentSum.java index 037711460b0..8c271e284e9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentSum.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -58,6 +60,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = UnsortedSegmentSum.OP_NAME, + inputsClass = UnsortedSegmentSum.Inputs.class +) @Operator( group = "math" ) @@ -69,8 +75,8 @@ public final class UnsortedSegmentSum extends RawOp implements private Output output; - private UnsortedSegmentSum(Operation operation) { - super(operation); + public UnsortedSegmentSum(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -113,6 +119,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = UnsortedSegmentSum.class + ) public static class Inputs extends RawOpInputs> { /** * The data input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xdivy.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xdivy.java index c0cea7d21f7..8640e808f4d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xdivy.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xdivy.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import 
org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = Xdivy.OP_NAME, + inputsClass = Xdivy.Inputs.class +) @Operator( group = "math" ) @@ -47,8 +53,8 @@ public final class Xdivy extends RawOp implements Operand { private Output z; - private Xdivy(Operation operation) { - super(operation); + public Xdivy(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -86,6 +92,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = Xdivy.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlog1py.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlog1py.java index 75936967d7d..d69db380541 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlog1py.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlog1py.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = Xlog1py.OP_NAME, + inputsClass = Xlog1py.Inputs.class +) @Operator( group = "math" ) @@ -47,8 +53,8 @@ public final class Xlog1py extends RawOp implements Operand private Output z; - private Xlog1py(Operation operation) { - super(operation); + public Xlog1py(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -86,6 +92,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = Xlog1py.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlogy.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlogy.java index da05a27ba3a..1eae3d1e39d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlogy.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlogy.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = Xlogy.OP_NAME, + inputsClass = Xlogy.Inputs.class +) @Operator( group = "math" ) @@ -47,8 +53,8 @@ public final class Xlogy extends RawOp implements Operand { private Output z; - private Xlogy(Operation operation) { - super(operation); + public Xlogy(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -86,6 +92,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = Xlogy.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Zeta.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Zeta.java index 8a7fb4ebcbc..8c04a80e767 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Zeta.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Zeta.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = Zeta.OP_NAME, + inputsClass = Zeta.Inputs.class +) @Operator( group = "math" ) @@ -49,8 +55,8 @@ public final class Zeta extends RawOp implements Operand { private Output z; - private Zeta(Operation operation) { - super(operation); + public Zeta(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = Zeta.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/erfinv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/erfinv.java index 81c9afb6a49..a5cbcdecd88 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/erfinv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/erfinv.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = erfinv.OP_NAME, + inputsClass = erfinv.Inputs.class +) @Operator( group = "math" ) @@ -47,8 +53,8 @@ public final class erfinv extends RawOp implements Operand private Output y; - private erfinv(Operation operation) { - super(operation); + public erfinv(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = erfinv.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ0.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ0.java index a9b0a239f7b..8c600b907c2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ0.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ0.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import 
org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = BesselJ0.OP_NAME, + inputsClass = BesselJ0.Inputs.class +) public final class BesselJ0 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class BesselJ0 extends RawOp implements Operand< private Output y; - private BesselJ0(Operation operation) { - super(operation); + public BesselJ0(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = BesselJ0.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ1.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ1.java index 2f4b8d6b71e..52a0694d668 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ1.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ1.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = BesselJ1.OP_NAME, + inputsClass = BesselJ1.Inputs.class +) public final class BesselJ1 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class BesselJ1 extends RawOp implements Operand< private Output y; - private BesselJ1(Operation operation) { - super(operation); + public BesselJ1(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = BesselJ1.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0.java index 13bf915e021..60a575f23f4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = BesselK0.OP_NAME, + inputsClass = BesselK0.Inputs.class +) public final class BesselK0 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class BesselK0 extends RawOp implements Operand< private Output y; - private BesselK0(Operation operation) { - super(operation); + public BesselK0(Operation operation) { + super(operation, 
OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = BesselK0.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0e.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0e.java index 0f611baf8df..0cb07b3796c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0e.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0e.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = BesselK0e.OP_NAME, + inputsClass = BesselK0e.Inputs.class +) public final class BesselK0e extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class BesselK0e extends RawOp implements Operand private Output y; - private BesselK0e(Operation operation) { - super(operation); + public BesselK0e(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = BesselK0e.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1.java index 5696e8ca364..10ca43255fb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = BesselK1.OP_NAME, + inputsClass = BesselK1.Inputs.class +) public final class BesselK1 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class BesselK1 extends RawOp implements Operand< private Output y; - private BesselK1(Operation operation) { - super(operation); + public BesselK1(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = BesselK1.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1e.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1e.java index ce0de01e5fc..f0b189f96a4 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1e.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1e.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = BesselK1e.OP_NAME, + inputsClass = BesselK1e.Inputs.class +) public final class BesselK1e extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class BesselK1e extends RawOp implements Operand private Output y; - private BesselK1e(Operation operation) { - super(operation); + public BesselK1e(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = BesselK1e.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY0.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY0.java index 79bdc102e59..bc9f338820b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY0.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY0.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = BesselY0.OP_NAME, + inputsClass = BesselY0.Inputs.class +) public final class BesselY0 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class BesselY0 extends RawOp implements Operand< private Output y; - private BesselY0(Operation operation) { - super(operation); + public BesselY0(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = BesselY0.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY1.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY1.java index 25c1343ccf2..0e0c7550d90 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY1.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY1.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import 
org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = BesselY1.OP_NAME, + inputsClass = BesselY1.Inputs.class +) public final class BesselY1 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class BesselY1 extends RawOp implements Operand< private Output y; - private BesselY1(Operation operation) { - super(operation); + public BesselY1(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = BesselY1.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Dawsn.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Dawsn.java index f801b241a46..c7a1c1c80bb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Dawsn.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Dawsn.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Dawsn.OP_NAME, + inputsClass = Dawsn.Inputs.class +) public final class Dawsn extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class Dawsn extends RawOp implements Operand private Output y; - private Dawsn(Operation operation) { - super(operation); + public Dawsn(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Dawsn.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Expint.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Expint.java index ed852574a64..01ae1c2b6b3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Expint.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Expint.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Expint.OP_NAME, + inputsClass = Expint.Inputs.class +) public final class Expint extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class Expint extends RawOp implements Operand private Output y; - private Expint(Operation operation) { - super(operation); + public Expint(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = 
operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Expint.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelCos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelCos.java index b7433ee47b5..b43796ba4c6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelCos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelCos.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = FresnelCos.OP_NAME, + inputsClass = FresnelCos.Inputs.class +) public final class FresnelCos extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class FresnelCos extends RawOp implements Operan private Output y; - private FresnelCos(Operation operation) { - super(operation); + public FresnelCos(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = FresnelCos.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelSin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelSin.java index 2c3ac5e9469..ccf8573fb13 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelSin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelSin.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = FresnelSin.OP_NAME, + inputsClass = FresnelSin.Inputs.class +) public final class FresnelSin extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class FresnelSin extends RawOp implements Operan private Output y; - private FresnelSin(Operation operation) { - super(operation); + public FresnelSin(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = FresnelSin.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Spence.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Spence.java index 64128da2664..48ded64c429 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Spence.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Spence.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = Spence.OP_NAME, + inputsClass = Spence.Inputs.class +) public final class Spence extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class Spence extends RawOp implements Operand private Output y; - private Spence(Operation operation) { - super(operation); + public Spence(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = Spence.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool.java index 4496333ee52..464436c169f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = AvgPool.OP_NAME, + inputsClass = AvgPool.Inputs.class +) @Operator( group = "nn" ) @@ -50,8 +56,8 @@ public final class AvgPool extends RawOp implements Operand output; - private AvgPool(Operation operation) { - super(operation); + public AvgPool(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -149,6 +155,9 @@ public Options dataFormat(String dataFormat) { } } + @OpInputsMetadata( + outputsClass = AvgPool.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D with shape {@code [batch, height, width, channels]}. 
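Every hunk in this section makes the same four edits to a generated op class: two new imports, an @OpMetadata annotation recording the op type and its Inputs class, a constructor widened from private to public that now passes OP_NAME to the RawOp super constructor, and an @OpInputsMetadata annotation on the nested Inputs class pointing back at the op. The short program below is only an illustration of what that metadata exposes, not part of the patch; it assumes the two annotations are retained at runtime and uses the element names (opType, inputsClass, outputsClass) exactly as they appear in the hunks, with Xdivy standing in for any generated op.

import org.tensorflow.op.annotation.OpInputsMetadata;
import org.tensorflow.op.annotation.OpMetadata;
import org.tensorflow.op.math.Xdivy;

public final class OpMetadataInspector {
  public static void main(String[] args) {
    // Read the class-level metadata added by this patch (assumes runtime retention).
    OpMetadata op = Xdivy.class.getAnnotation(OpMetadata.class);
    System.out.println("op type:       " + op.opType());
    System.out.println("inputs class:  " + op.inputsClass().getName());

    // The nested Inputs class links back to the op class that owns the outputs.
    OpInputsMetadata inputs = Xdivy.Inputs.class.getAnnotation(OpInputsMetadata.class);
    System.out.println("outputs class: " + inputs.outputsClass().getName());
  }
}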
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3d.java index 6a42d550dfa..4add48fb958 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3d.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = AvgPool3d.OP_NAME, + inputsClass = AvgPool3d.Inputs.class +) @Operator( group = "nn" ) @@ -50,8 +56,8 @@ public final class AvgPool3d extends RawOp implements Operand private Output output; - private AvgPool3d(Operation operation) { - super(operation); + public AvgPool3d(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -151,6 +157,9 @@ public Options dataFormat(String dataFormat) { } } + @OpInputsMetadata( + outputsClass = AvgPool3d.class + ) public static class Inputs extends RawOpInputs> { /** * Shape {@code [batch, depth, rows, cols, channels]} tensor to pool over. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3dGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3dGrad.java index 359c151cf8a..c5f30586478 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3dGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3dGrad.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = AvgPool3dGrad.OP_NAME, + inputsClass = AvgPool3dGrad.Inputs.class +) @Operator( group = "nn" ) @@ -49,8 +55,8 @@ public final class AvgPool3dGrad extends RawOp implements Ope private Output output; - private AvgPool3dGrad(Operation operation) { - super(operation); + public AvgPool3dGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -153,6 +159,9 @@ public Options dataFormat(String dataFormat) { } } + @OpInputsMetadata( + outputsClass = AvgPool3dGrad.class + ) public static class Inputs extends RawOpInputs> { /** * The original input dimensions. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPoolGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPoolGrad.java index 980664e35e1..ed699ac9669 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPoolGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPoolGrad.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = AvgPoolGrad.OP_NAME, + inputsClass = AvgPoolGrad.Inputs.class +) public final class AvgPoolGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class AvgPoolGrad extends RawOp implements Opera private Output output; - private AvgPoolGrad(Operation operation) { - super(operation); + public AvgPoolGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -148,6 +154,9 @@ public Options dataFormat(String dataFormat) { } } + @OpInputsMetadata( + outputsClass = AvgPoolGrad.class + ) public static class Inputs extends RawOpInputs> { /** * 1-D. Shape of the original input to {@code avg_pool}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalization.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalization.java index bf1a0975705..3188b4698c9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalization.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalization.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * * @param data type for {@code result} output */ +@OpMetadata( + opType = BatchNormWithGlobalNormalization.OP_NAME, + inputsClass = BatchNormWithGlobalNormalization.Inputs.class +) @Operator( group = "nn" ) @@ -48,8 +54,8 @@ public final class BatchNormWithGlobalNormalization extends Raw private Output result; - private BatchNormWithGlobalNormalization(Operation operation) { - super(operation); + public BatchNormWithGlobalNormalization(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; result = operation.output(outputIdx++); } @@ -107,6 +113,9 @@ public Output asOutput() { return result; } + @OpInputsMetadata( + outputsClass = BatchNormWithGlobalNormalization.class + ) public static class Inputs extends RawOpInputs> { /** * A 4D input Tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalizationGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalizationGrad.java index 86a0ac56fa1..b14ed684364 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalizationGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalizationGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * * @param data type for {@code dx} output */ +@OpMetadata( + opType = BatchNormWithGlobalNormalizationGrad.OP_NAME, + inputsClass = BatchNormWithGlobalNormalizationGrad.Inputs.class +) @Operator( group = "nn" ) @@ -56,8 +62,8 @@ public final class BatchNormWithGlobalNormalizationGrad extends private Output dg; - private BatchNormWithGlobalNormalizationGrad(Operation operation) { - super(operation); + public BatchNormWithGlobalNormalizationGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; dx = operation.output(outputIdx++); dm = operation.output(outputIdx++); @@ -149,6 +155,9 @@ public Output dg() { return dg; } + @OpInputsMetadata( + outputsClass = BatchNormWithGlobalNormalizationGrad.class + ) public static class Inputs extends RawOpInputs> { /** * A 4D input Tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAdd.java index 0e0917f0422..7f7617a1114 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAdd.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = BiasAdd.OP_NAME, + inputsClass = BiasAdd.Inputs.class +) @Operator( group = "nn" ) @@ -49,8 +55,8 @@ public final class BiasAdd extends RawOp implements Operand private Output output; - private BiasAdd(Operation operation) { - super(operation); + public BiasAdd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -140,6 +146,9 @@ public Options dataFormat(String dataFormat) { } } + @OpInputsMetadata( + outputsClass = BiasAdd.class + ) public static class Inputs extends RawOpInputs> { /** * Any number of dimensions. 
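The constructor change repeated above (private X(Operation) becoming public X(Operation) with super(operation, OP_NAME)) is what lets code outside the generated packages rebuild a typed wrapper around an existing Operation, for example while walking a graph. The helper below is not part of this patch; it is a hypothetical sketch of what the widened constructors make possible, relying only on the (Operation) constructor shape shown in the hunks.

import java.lang.reflect.Constructor;
import org.tensorflow.Operation;
import org.tensorflow.op.RawOp;

/** Hypothetical helper; illustrates what the now-public (Operation) constructors allow. */
public final class OpWrappers {
  private OpWrappers() {
  }

  public static <T extends RawOp> T wrap(Operation operation, Class<T> wrapperClass) {
    try {
      // Every generated op touched by this patch now exposes a public (Operation) constructor.
      Constructor<T> ctor = wrapperClass.getConstructor(Operation.class);
      return ctor.newInstance(operation);
    } catch (ReflectiveOperationException e) {
      throw new IllegalArgumentException(
          "Cannot wrap " + operation.type() + " as " + wrapperClass.getName(), e);
    }
  }
}

For instance, wrap(someBiasAddOperation, BiasAdd.class) would be expected to reconstruct the BiasAdd wrapper directly from the raw operation instead of going through the Ops builder.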
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAddGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAddGrad.java index b9eda9a3c68..100f71935c0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAddGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAddGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = BiasAddGrad.OP_NAME, + inputsClass = BiasAddGrad.Inputs.class +) @Operator( group = "nn" ) @@ -50,8 +56,8 @@ public final class BiasAddGrad extends RawOp implements Operand private Output output; - private BiasAddGrad(Operation operation) { - super(operation); + public BiasAddGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -139,6 +145,9 @@ public Options dataFormat(String dataFormat) { } } + @OpInputsMetadata( + outputsClass = BiasAddGrad.class + ) public static class Inputs extends RawOpInputs> { /** * Any number of dimensions. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTM.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTM.java index 187ad0505ed..bb71ef9c7b3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTM.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTM.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; @@ -56,6 +58,10 @@ * * @param data type for {@code i} output */ +@OpMetadata( + opType = BlockLSTM.OP_NAME, + inputsClass = BlockLSTM.Inputs.class +) public final class BlockLSTM extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -76,8 +82,8 @@ public final class BlockLSTM extends RawOp { private Output h; - private BlockLSTM(Operation operation) { - super(operation); + public BlockLSTM(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; i = operation.output(outputIdx++); cs = operation.output(outputIdx++); @@ -252,6 +258,9 @@ public Options usePeephole(Boolean usePeephole) { } } + @OpInputsMetadata( + outputsClass = BlockLSTM.class + ) public static class Inputs extends RawOpInputs> { /** * Maximum time length actually used by this input. 
Outputs are padded diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTMGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTMGrad.java index f794390c585..5cecd1948c4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTMGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTMGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code x_grad} output */ +@OpMetadata( + opType = BlockLSTMGrad.OP_NAME, + inputsClass = BlockLSTMGrad.Inputs.class +) public final class BlockLSTMGrad extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -59,8 +65,8 @@ public final class BlockLSTMGrad extends RawOp { private Output bGrad; - private BlockLSTMGrad(Operation operation) { - super(operation); + public BlockLSTMGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; xGrad = operation.output(outputIdx++); csPrevGrad = operation.output(outputIdx++); @@ -202,6 +208,9 @@ public Output bGrad() { return bGrad; } + @OpInputsMetadata( + outputsClass = BlockLSTMGrad.class + ) public static class Inputs extends RawOpInputs> { /** * Maximum time length actually used by this input. Outputs are padded diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CTCLossV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CTCLossV2.java index cab3b859657..e19e16773df 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CTCLossV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CTCLossV2.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; @@ -36,6 +38,10 @@ * the gradient. This class performs the softmax operation for you, so inputs * should be e.g. linear projections of outputs by an LSTM. */ +@OpMetadata( + opType = CTCLossV2.OP_NAME, + inputsClass = CTCLossV2.Inputs.class +) public final class CTCLossV2 extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class CTCLossV2 extends RawOp { private Output gradient; - private CTCLossV2(Operation operation) { - super(operation); + public CTCLossV2(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; loss = operation.output(outputIdx++); gradient = operation.output(outputIdx++); @@ -200,6 +206,9 @@ public Options ignoreLongerOutputsThanInputs(Boolean ignoreLongerOutputsThanInpu } } + @OpInputsMetadata( + outputsClass = CTCLossV2.class + ) public static class Inputs extends RawOpInputs { /** * 3-D, shape: {@code (max_time x batch_size x num_classes)}, the logits. 
Default blank diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/ComputeAccidentalHits.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/ComputeAccidentalHits.java index 55e64bbc19f..080a5b71d85 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/ComputeAccidentalHits.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/ComputeAccidentalHits.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -39,6 +41,10 @@ * the effect of 'removing' the sampled labels that match the true labels by * making the classifier sure that they are sampled labels. */ +@OpMetadata( + opType = ComputeAccidentalHits.OP_NAME, + inputsClass = ComputeAccidentalHits.Inputs.class +) @Operator( group = "nn" ) @@ -54,8 +60,8 @@ public final class ComputeAccidentalHits extends RawOp { private Output weights; - private ComputeAccidentalHits(Operation operation) { - super(operation); + public ComputeAccidentalHits(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; indices = operation.output(outputIdx++); ids = operation.output(outputIdx++); @@ -181,6 +187,9 @@ public Options seed2(Long seed2) { } } + @OpInputsMetadata( + outputsClass = ComputeAccidentalHits.class + ) public static class Inputs extends RawOpInputs { /** * The true_classes output of UnpackSparseLabels. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2d.java index a769c324177..8649cc0b54c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2d.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -57,6 +59,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Conv2d.OP_NAME, + inputsClass = Conv2d.Inputs.class +) @Operator( group = "nn" ) @@ -68,8 +74,8 @@ public final class Conv2d extends RawOp implements Operand private Output output; - private Conv2d(Operation operation) { - super(operation); + public Conv2d(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -324,6 +330,9 @@ public Options dilations(Long... dilations) { } } + @OpInputsMetadata( + outputsClass = Conv2d.class + ) public static class Inputs extends RawOpInputs> { /** * A 4-D tensor. 
The dimension order is interpreted according to the value diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropFilter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropFilter.java index b5718f2ca8a..6fb5caec5ab 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropFilter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropFilter.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Conv2dBackpropFilter.OP_NAME, + inputsClass = Conv2dBackpropFilter.Inputs.class +) @Operator( group = "nn" ) @@ -49,8 +55,8 @@ public final class Conv2dBackpropFilter extends RawOp impleme private Output output; - private Conv2dBackpropFilter(Operation operation) { - super(operation); + public Conv2dBackpropFilter(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -310,6 +316,9 @@ public Options dilations(Long... dilations) { } } + @OpInputsMetadata( + outputsClass = Conv2dBackpropFilter.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D with shape {@code [batch, in_height, in_width, in_channels]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropInput.java index f629ba8e03a..400d03fa9f6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropInput.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Conv2dBackpropInput.OP_NAME, + inputsClass = Conv2dBackpropInput.Inputs.class +) @Operator( group = "nn" ) @@ -49,8 +55,8 @@ public final class Conv2dBackpropInput extends RawOp implemen private Output output; - private Conv2dBackpropInput(Operation operation) { - super(operation); + public Conv2dBackpropInput(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -309,6 +315,9 @@ public Options dilations(Long... 
dilations) { } } + @OpInputsMetadata( + outputsClass = Conv2dBackpropInput.class + ) public static class Inputs extends RawOpInputs> { /** * An integer vector representing the shape of {@code input}, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3d.java index aeefc06db8e..e9c3ffb04af 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3d.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -41,6 +43,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Conv3d.OP_NAME, + inputsClass = Conv3d.Inputs.class +) @Operator( group = "nn" ) @@ -52,8 +58,8 @@ public final class Conv3d extends RawOp implements Operand private Output output; - private Conv3d(Operation operation) { - super(operation); + public Conv3d(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -215,6 +221,9 @@ public Options dilations(Long... dilations) { } } + @OpInputsMetadata( + outputsClass = Conv3d.class + ) public static class Inputs extends RawOpInputs> { /** * Shape {@code [batch, in_depth, in_height, in_width, in_channels]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropFilter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropFilter.java index a788a51fb92..b79bc9181ff 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropFilter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropFilter.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Conv3dBackpropFilter.OP_NAME, + inputsClass = Conv3dBackpropFilter.Inputs.class +) @Operator( group = "nn" ) @@ -49,8 +55,8 @@ public final class Conv3dBackpropFilter extends RawOp impleme private Output output; - private Conv3dBackpropFilter(Operation operation) { - super(operation); + public Conv3dBackpropFilter(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -218,6 +224,9 @@ public Options dilations(Long... dilations) { } } + @OpInputsMetadata( + outputsClass = Conv3dBackpropFilter.class + ) public static class Inputs extends RawOpInputs> { /** * Shape {@code [batch, depth, rows, cols, in_channels]}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropInput.java index 0cda7f2020c..5e9324aaf0e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropInput.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Conv3dBackpropInput.OP_NAME, + inputsClass = Conv3dBackpropInput.Inputs.class +) @Operator( group = "nn" ) @@ -48,8 +54,8 @@ public final class Conv3dBackpropInput extends RawOp implemen private Output output; - private Conv3dBackpropInput(Operation operation) { - super(operation); + public Conv3dBackpropInput(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -217,6 +223,9 @@ public Options dilations(Long... dilations) { } } + @OpInputsMetadata( + outputsClass = Conv3dBackpropInput.class + ) public static class Inputs extends RawOpInputs> { /** * An integer vector representing the tensor shape of {@code input}, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcBeamSearchDecoder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcBeamSearchDecoder.java index b9cab99e0a5..ffee6939d9f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcBeamSearchDecoder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcBeamSearchDecoder.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -44,6 +46,10 @@ * * @param data type for {@code log_probability} output */ +@OpMetadata( + opType = CtcBeamSearchDecoder.OP_NAME, + inputsClass = CtcBeamSearchDecoder.Inputs.class +) @Operator( group = "nn" ) @@ -62,8 +68,8 @@ public final class CtcBeamSearchDecoder extends RawOp { private Output logProbability; @SuppressWarnings("unchecked") - private CtcBeamSearchDecoder(Operation operation) { - super(operation); + public CtcBeamSearchDecoder(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int decodedIndicesLength = operation.outputListLength("decoded_indices"); decodedIndices = Arrays.asList((Output[]) operation.outputList(outputIdx, decodedIndicesLength)); @@ -183,6 +189,9 @@ public Options mergeRepeated(Boolean mergeRepeated) { } } + @OpInputsMetadata( + outputsClass = CtcBeamSearchDecoder.class + ) public static class Inputs extends RawOpInputs> { /** * 3-D, shape: {@code (max_time x batch_size x num_classes)}, the logits. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcGreedyDecoder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcGreedyDecoder.java index 3975061991c..6ed1bfb1fac 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcGreedyDecoder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcGreedyDecoder.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -46,6 +48,10 @@ * * @param data type for {@code log_probability} output */ +@OpMetadata( + opType = CtcGreedyDecoder.OP_NAME, + inputsClass = CtcGreedyDecoder.Inputs.class +) @Operator( group = "nn" ) @@ -63,8 +69,8 @@ public final class CtcGreedyDecoder extends RawOp { private Output logProbability; - private CtcGreedyDecoder(Operation operation) { - super(operation); + public CtcGreedyDecoder(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; decodedIndices = operation.output(outputIdx++); decodedValues = operation.output(outputIdx++); @@ -197,6 +203,9 @@ public Options blankIndex(Long blankIndex) { } } + @OpInputsMetadata( + outputsClass = CtcGreedyDecoder.class + ) public static class Inputs extends RawOpInputs> { /** * 3-D, shape: {@code (max_time x batch_size x num_classes)}, the logits. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcLoss.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcLoss.java index f70a3763d9e..ca47ae6d968 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcLoss.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcLoss.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -40,6 +42,10 @@ * * @param data type for {@code loss} output */ +@OpMetadata( + opType = CtcLoss.OP_NAME, + inputsClass = CtcLoss.Inputs.class +) @Operator( group = "nn" ) @@ -53,8 +59,8 @@ public final class CtcLoss extends RawOp { private Output gradient; - private CtcLoss(Operation operation) { - super(operation); + public CtcLoss(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; loss = operation.output(outputIdx++); gradient = operation.output(outputIdx++); @@ -207,6 +213,9 @@ public Options ignoreLongerOutputsThanInputs(Boolean ignoreLongerOutputsThanInpu } } + @OpInputsMetadata( + outputsClass = CtcLoss.class + ) public static class Inputs extends RawOpInputs> { /** * 3-D, shape: {@code (max_time x batch_size x num_classes)}, the logits. 
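The decoder constructors in the hunks above mix list-valued outputs (unpacked with outputListLength/outputList) with plain sequential outputs consumed by outputIdx++. A minimal, hypothetical wrapper showing that same unpacking shape is sketched below; "ExampleDecoder", "decoded", and "logProbability" are illustrative stand-ins for the generated names, and the two-argument RawOp super-constructor is the one introduced by this patch.

// Illustrative sketch only, not part of the patch.
import java.util.Arrays;
import java.util.List;
import org.tensorflow.Operation;
import org.tensorflow.Output;
import org.tensorflow.op.RawOp;
import org.tensorflow.types.TFloat32;
import org.tensorflow.types.TInt64;

public final class ExampleDecoder extends RawOp {
  public static final String OP_NAME = "ExampleDecoder"; // hypothetical op type

  private final List<Output<TInt64>> decoded;
  private final Output<TFloat32> logProbability;

  @SuppressWarnings("unchecked")
  public ExampleDecoder(Operation operation) {
    super(operation, OP_NAME);
    int outputIdx = 0;
    // List-valued output: query its length by name, then take that many outputs.
    int decodedLength = operation.outputListLength("decoded");
    decoded = Arrays.asList((Output<TInt64>[]) operation.outputList(outputIdx, decodedLength));
    outputIdx += decodedLength;
    // Remaining single outputs are consumed in declaration order.
    logProbability = operation.output(outputIdx++);
  }
}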
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNN.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNN.java index 68e42dfd916..22a652fed1c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNN.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNN.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -73,6 +75,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = CudnnRNN.OP_NAME, + inputsClass = CudnnRNN.Inputs.class +) public final class CudnnRNN extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -90,8 +96,8 @@ public final class CudnnRNN extends RawOp { private Output hostReserved; @SuppressWarnings("unchecked") - private CudnnRNN(Operation operation) { - super(operation); + public CudnnRNN(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); outputH = operation.output(outputIdx++); @@ -419,6 +425,9 @@ public Options timeMajor(Boolean timeMajor) { } } + @OpInputsMetadata( + outputsClass = CudnnRNN.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNBackprop.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNBackprop.java index 13372cc1bbc..fb56c1f0f07 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNBackprop.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNBackprop.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -83,6 +85,10 @@ * * @param data type for {@code input_backprop} output */ +@OpMetadata( + opType = CudnnRNNBackprop.OP_NAME, + inputsClass = CudnnRNNBackprop.Inputs.class +) public final class CudnnRNNBackprop extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -97,8 +103,8 @@ public final class CudnnRNNBackprop extends RawOp { private Output paramsBackprop; - private CudnnRNNBackprop(Operation operation) { - super(operation); + public CudnnRNNBackprop(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; inputBackprop = operation.output(outputIdx++); inputHBackprop = operation.output(outputIdx++); @@ -408,6 +414,9 @@ public Options timeMajor(Boolean timeMajor) { } } + @OpInputsMetadata( + outputsClass = CudnnRNNBackprop.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNCanonicalToParams.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNCanonicalToParams.java index eeda8931a7b..f720ab18d24 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNCanonicalToParams.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNCanonicalToParams.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -66,6 +68,10 @@ * * @param data type for {@code params} output */ +@OpMetadata( + opType = CudnnRNNCanonicalToParams.OP_NAME, + inputsClass = CudnnRNNCanonicalToParams.Inputs.class +) @Operator( group = "nn" ) @@ -77,8 +83,8 @@ public final class CudnnRNNCanonicalToParams extends RawOp im private Output params; - private CudnnRNNCanonicalToParams(Operation operation) { - super(operation); + public CudnnRNNCanonicalToParams(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; params = operation.output(outputIdx++); } @@ -319,6 +325,9 @@ public Options numProj(Long numProj) { } } + @OpInputsMetadata( + outputsClass = CudnnRNNCanonicalToParams.class + ) public static class Inputs extends RawOpInputs> { /** * The numLayers input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNParamsToCanonical.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNParamsToCanonical.java index 4cfdf128902..f3c981ae1ca 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNParamsToCanonical.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNParamsToCanonical.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -66,6 +68,10 @@ * * @param data type for {@code weights} output */ +@OpMetadata( + opType = CudnnRNNParamsToCanonical.OP_NAME, + inputsClass = CudnnRNNParamsToCanonical.Inputs.class +) @Operator( group = "nn" ) @@ -80,8 +86,8 @@ public final class CudnnRNNParamsToCanonical extends RawOp { private List> biases; @SuppressWarnings("unchecked") - private CudnnRNNParamsToCanonical(Operation operation) { - super(operation); + public CudnnRNNParamsToCanonical(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int weightsLength = operation.outputListLength("weights"); weights = Arrays.asList((Output[]) operation.outputList(outputIdx, weightsLength)); @@ -333,6 +339,9 @@ public Options numProj(Long numProj) { } } + @OpInputsMetadata( + outputsClass = CudnnRNNParamsToCanonical.class + ) public static class Inputs extends RawOpInputs> { /** * The numLayers input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRnnParamsSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRnnParamsSize.java index 2dd40e8cfad..76e7384ef36 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRnnParamsSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRnnParamsSize.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; 
import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -58,6 +60,10 @@ * * @param data type for {@code params_size} output */ +@OpMetadata( + opType = CudnnRnnParamsSize.OP_NAME, + inputsClass = CudnnRnnParamsSize.Inputs.class +) @Operator( group = "nn" ) @@ -69,8 +75,8 @@ public final class CudnnRnnParamsSize extends RawOp implement private Output paramsSize; - private CudnnRnnParamsSize(Operation operation) { - super(operation); + public CudnnRnnParamsSize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; paramsSize = operation.output(outputIdx++); } @@ -312,6 +318,9 @@ public Options numProj(Long numProj) { } } + @OpInputsMetadata( + outputsClass = CudnnRnnParamsSize.class + ) public static class Inputs extends RawOpInputs> { /** * The numLayers input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatDimMap.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatDimMap.java index 95cf7c41dd0..cb39229fe25 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatDimMap.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatDimMap.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = DataFormatDimMap.OP_NAME, + inputsClass = DataFormatDimMap.Inputs.class +) @Operator( group = "nn" ) @@ -48,8 +54,8 @@ public final class DataFormatDimMap extends RawOp implements private Output y; - private DataFormatDimMap(Operation operation) { - super(operation); + public DataFormatDimMap(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -152,6 +158,9 @@ public Options dstFormat(String dstFormat) { } } + @OpInputsMetadata( + outputsClass = DataFormatDimMap.class + ) public static class Inputs extends RawOpInputs> { /** * A Tensor with each element as a dimension index in source data format. 
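The DataFormatDimMap hunk above touches the generated Options setter (dstFormat). Assuming the usual convention in these generated classes that each Options setter also has a static factory on the op class, a rough usage sketch would look like the following; the static DataFormatDimMap.dstFormat(...) factory is an assumption, not something shown in this patch.

// Hedged usage sketch of the generated Options pattern.
import org.tensorflow.Graph;
import org.tensorflow.op.Ops;
import org.tensorflow.op.nn.DataFormatDimMap;
import org.tensorflow.types.TInt32;

public final class OptionsExample {
  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);
      // Map dimension indices from the default src_format (NHWC) to NCHW.
      DataFormatDimMap<TInt32> y =
          tf.nn.dataFormatDimMap(
              tf.constant(new int[] {1, 2}),
              DataFormatDimMap.dstFormat("NCHW")); // assumed static Options factory
      // The output accessor y() mirrors the generated field shown in the diff.
      System.out.println(y.y().shape());
    }
  }
}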
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatVecPermute.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatVecPermute.java index df1a3825d4b..208a59c355d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatVecPermute.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatVecPermute.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -55,6 +57,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = DataFormatVecPermute.OP_NAME, + inputsClass = DataFormatVecPermute.Inputs.class +) @Operator( group = "nn" ) @@ -66,8 +72,8 @@ public final class DataFormatVecPermute extends RawOp impleme private Output y; - private DataFormatVecPermute(Operation operation) { - super(operation); + public DataFormatVecPermute(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -169,6 +175,9 @@ public Options dstFormat(String dstFormat) { } } + @OpInputsMetadata( + outputsClass = DataFormatVecPermute.class + ) public static class Inputs extends RawOpInputs> { /** * Vector of size 4 or Tensor of shape (4, 2) in source data format. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthToSpace.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthToSpace.java index 2d2d46b500b..73440f2a0f9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthToSpace.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthToSpace.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -110,6 +112,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = DepthToSpace.OP_NAME, + inputsClass = DepthToSpace.Inputs.class +) @Operator( group = "nn" ) @@ -121,8 +127,8 @@ public final class DepthToSpace extends RawOp implements Operan private Output output; - private DepthToSpace(Operation operation) { - super(operation); + public DepthToSpace(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -200,6 +206,9 @@ public Options dataFormat(String dataFormat) { } } + @OpInputsMetadata( + outputsClass = DepthToSpace.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNative.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNative.java index 929cd0737ae..2abafeda8d9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNative.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNative.java @@ -28,6 +28,8 
@@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -53,6 +55,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = DepthwiseConv2dNative.OP_NAME, + inputsClass = DepthwiseConv2dNative.Inputs.class +) @Operator( group = "nn" ) @@ -64,8 +70,8 @@ public final class DepthwiseConv2dNative extends RawOp implem private Output output; - private DepthwiseConv2dNative(Operation operation) { - super(operation); + public DepthwiseConv2dNative(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -278,6 +284,9 @@ public Options dilations(Long... dilations) { } } + @OpInputsMetadata( + outputsClass = DepthwiseConv2dNative.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropFilter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropFilter.java index 9e80ec91386..2389f0dc99a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropFilter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropFilter.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = DepthwiseConv2dNativeBackpropFilter.OP_NAME, + inputsClass = DepthwiseConv2dNativeBackpropFilter.Inputs.class +) @Operator( group = "nn" ) @@ -49,8 +55,8 @@ public final class DepthwiseConv2dNativeBackpropFilter extend private Output output; - private DepthwiseConv2dNativeBackpropFilter(Operation operation) { - super(operation); + public DepthwiseConv2dNativeBackpropFilter(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -274,6 +280,9 @@ public Options dilations(Long... dilations) { } } + @OpInputsMetadata( + outputsClass = DepthwiseConv2dNativeBackpropFilter.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D with shape based on {@code data_format}. 
For example, if diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropInput.java index 57baa455eaf..cf8f88d0be2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropInput.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = DepthwiseConv2dNativeBackpropInput.OP_NAME, + inputsClass = DepthwiseConv2dNativeBackpropInput.Inputs.class +) @Operator( group = "nn" ) @@ -49,8 +55,8 @@ public final class DepthwiseConv2dNativeBackpropInput extends private Output output; - private DepthwiseConv2dNativeBackpropInput(Operation operation) { - super(operation); + public DepthwiseConv2dNativeBackpropInput(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -274,6 +280,9 @@ public Options dilations(Long... dilations) { } } + @OpInputsMetadata( + outputsClass = DepthwiseConv2dNativeBackpropInput.class + ) public static class Inputs extends RawOpInputs> { /** * An integer vector representing the shape of {@code input}, based diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2d.java index 2eced92585f..e17b172eb20 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2d.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -58,6 +60,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Dilation2d.OP_NAME, + inputsClass = Dilation2d.Inputs.class +) @Operator( group = "nn" ) @@ -69,8 +75,8 @@ public final class Dilation2d extends RawOp implements Operan private Output output; - private Dilation2d(Operation operation) { - super(operation); + public Dilation2d(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -125,6 +131,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Dilation2d.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D with shape {@code [batch, in_height, in_width, depth]}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropFilter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropFilter.java index baa0e803e8a..8c8c3baebc0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropFilter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropFilter.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code filter_backprop} output */ +@OpMetadata( + opType = Dilation2dBackpropFilter.OP_NAME, + inputsClass = Dilation2dBackpropFilter.Inputs.class +) @Operator( group = "nn" ) @@ -48,8 +54,8 @@ public final class Dilation2dBackpropFilter extends RawOp imp private Output filterBackprop; - private Dilation2dBackpropFilter(Operation operation) { - super(operation); + public Dilation2dBackpropFilter(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; filterBackprop = operation.output(outputIdx++); } @@ -107,6 +113,9 @@ public Output asOutput() { return filterBackprop; } + @OpInputsMetadata( + outputsClass = Dilation2dBackpropFilter.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D with shape {@code [batch, in_height, in_width, depth]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropInput.java index 2a11f34c905..ef317847166 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropInput.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code in_backprop} output */ +@OpMetadata( + opType = Dilation2dBackpropInput.OP_NAME, + inputsClass = Dilation2dBackpropInput.Inputs.class +) @Operator( group = "nn" ) @@ -48,8 +54,8 @@ public final class Dilation2dBackpropInput extends RawOp impl private Output inBackprop; - private Dilation2dBackpropInput(Operation operation) { - super(operation); + public Dilation2dBackpropInput(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; inBackprop = operation.output(outputIdx++); } @@ -107,6 +113,9 @@ public Output asOutput() { return inBackprop; } + @OpInputsMetadata( + outputsClass = Dilation2dBackpropInput.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D with shape {@code [batch, in_height, in_width, depth]}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Elu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Elu.java index 0060e99d5f3..4995249e8d0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Elu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Elu.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -56,6 +58,10 @@ * * @param data type for {@code activations} output */ +@OpMetadata( + opType = Elu.OP_NAME, + inputsClass = Elu.Inputs.class +) @Operator( group = "nn" ) @@ -67,8 +73,8 @@ public final class Elu extends RawOp implements Operand { private Output activations; - private Elu(Operation operation) { - super(operation); + public Elu(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; activations = operation.output(outputIdx++); } @@ -104,6 +110,9 @@ public Output asOutput() { return activations; } + @OpInputsMetadata( + outputsClass = Elu.class + ) public static class Inputs extends RawOpInputs> { /** * The features input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/EluGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/EluGrad.java index 7d3af9d8535..b62f09ade82 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/EluGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/EluGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code backprops} output */ +@OpMetadata( + opType = EluGrad.OP_NAME, + inputsClass = EluGrad.Inputs.class +) public final class EluGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class EluGrad extends RawOp implements Operand backprops; - private EluGrad(Operation operation) { - super(operation); + public EluGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; backprops = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return backprops; } + @OpInputsMetadata( + outputsClass = EluGrad.class + ) public static class Inputs extends RawOpInputs> { /** * The backpropagated gradients to the corresponding Elu operation. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FixedUnigramCandidateSampler.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FixedUnigramCandidateSampler.java index d488683a39b..f3f1204f574 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FixedUnigramCandidateSampler.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FixedUnigramCandidateSampler.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; @@ -46,6 +48,10 @@ * the sampled candidates must be chosen independently of the context and of the * true labels. */ +@OpMetadata( + opType = FixedUnigramCandidateSampler.OP_NAME, + inputsClass = FixedUnigramCandidateSampler.Inputs.class +) @Operator( group = "nn" ) @@ -61,8 +67,8 @@ public final class FixedUnigramCandidateSampler extends RawOp { private Output sampledExpectedCount; - private FixedUnigramCandidateSampler(Operation operation) { - super(operation); + public FixedUnigramCandidateSampler(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; sampledCandidates = operation.output(outputIdx++); trueExpectedCount = operation.output(outputIdx++); @@ -416,6 +422,9 @@ public Options seed2(Long seed2) { } } + @OpInputsMetadata( + outputsClass = FixedUnigramCandidateSampler.class + ) public static class Inputs extends RawOpInputs { /** * A batch_size * num_true matrix, in which each row contains the diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPool.java index ce678467253..362e0e58796 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPool.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -42,6 +44,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = FractionalAvgPool.OP_NAME, + inputsClass = FractionalAvgPool.Inputs.class +) @Operator( group = "nn" ) @@ -57,8 +63,8 @@ public final class FractionalAvgPool extends RawOp { private Output colPoolingSequence; - private FractionalAvgPool(Operation operation) { - super(operation); + public FractionalAvgPool(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); rowPoolingSequence = operation.output(outputIdx++); @@ -288,6 +294,9 @@ public Options seed2(Long seed2) { } } + @OpInputsMetadata( + outputsClass = FractionalAvgPool.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D with shape {@code [batch, height, width, channels]}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPoolGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPoolGrad.java index 0cfd7f8e7d2..26f45e63842 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPoolGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPoolGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; @@ -41,6 +43,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = FractionalAvgPoolGrad.OP_NAME, + inputsClass = FractionalAvgPoolGrad.Inputs.class +) public final class FractionalAvgPoolGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -49,8 +55,8 @@ public final class FractionalAvgPoolGrad extends RawOp implem private Output output; - private FractionalAvgPoolGrad(Operation operation) { - super(operation); + public FractionalAvgPoolGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -146,6 +152,9 @@ public Options overlapping(Boolean overlapping) { } } + @OpInputsMetadata( + outputsClass = FractionalAvgPoolGrad.class + ) public static class Inputs extends RawOpInputs> { /** * Original input tensor shape for {@code fractional_avg_pool} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPool.java index f1733ed28cc..9764fcbc919 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPool.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -64,6 +66,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = FractionalMaxPool.OP_NAME, + inputsClass = FractionalMaxPool.Inputs.class +) @Operator( group = "nn" ) @@ -79,8 +85,8 @@ public final class FractionalMaxPool extends RawOp { private Output colPoolingSequence; - private FractionalMaxPool(Operation operation) { - super(operation); + public FractionalMaxPool(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); rowPoolingSequence = operation.output(outputIdx++); @@ -310,6 +316,9 @@ public Options seed2(Long seed2) { } } + @OpInputsMetadata( + outputsClass = FractionalMaxPool.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D with shape {@code [batch, height, width, channels]}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPoolGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPoolGrad.java index 5233a6b19f1..2b0623418d5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPoolGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPoolGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = FractionalMaxPoolGrad.OP_NAME, + inputsClass = FractionalMaxPoolGrad.Inputs.class +) public final class FractionalMaxPoolGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class FractionalMaxPoolGrad extends RawOp implem private Output output; - private FractionalMaxPoolGrad(Operation operation) { - super(operation); + public FractionalMaxPoolGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -143,6 +149,9 @@ public Options overlapping(Boolean overlapping) { } } + @OpInputsMetadata( + outputsClass = FractionalMaxPoolGrad.class + ) public static class Inputs extends RawOpInputs> { /** * Original input for {@code fractional_max_pool} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNorm.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNorm.java index a216c600ff7..08d83f5a099 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNorm.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNorm.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -40,6 +42,10 @@ * * @param data type for {@code batch_mean} output */ +@OpMetadata( + opType = FusedBatchNorm.OP_NAME, + inputsClass = FusedBatchNorm.Inputs.class +) @Operator( group = "nn" ) @@ -61,8 +67,8 @@ public final class FusedBatchNorm extends private Output reserveSpace3; - private FusedBatchNorm(Operation operation) { - super(operation); + public FusedBatchNorm(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); batchMean = operation.output(outputIdx++); @@ -280,6 +286,9 @@ public Options isTraining(Boolean isTraining) { } } + @OpInputsMetadata( + outputsClass = FusedBatchNorm.class + ) public static class Inputs extends RawOpInputs> { /** * A 4D Tensor for input data. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNormGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNormGrad.java index a423903c502..f1b7dc8eb91 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNormGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNormGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -41,6 +43,10 @@ * * @param data type for {@code scale_backprop} output */ +@OpMetadata( + opType = FusedBatchNormGrad.OP_NAME, + inputsClass = FusedBatchNormGrad.Inputs.class +) @Operator( group = "nn" ) @@ -60,8 +66,8 @@ public final class FusedBatchNormGrad exte private Output reserveSpace5; - private FusedBatchNormGrad(Operation operation) { - super(operation); + public FusedBatchNormGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; xBackprop = operation.output(outputIdx++); scaleBackprop = operation.output(outputIdx++); @@ -250,6 +256,9 @@ public Options isTraining(Boolean isTraining) { } } + @OpInputsMetadata( + outputsClass = FusedBatchNormGrad.class + ) public static class Inputs extends RawOpInputs> { /** * A 4D Tensor for the gradient with respect to y. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedPadConv2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedPadConv2d.java index 3d938544bd8..0d7a0589f98 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedPadConv2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedPadConv2d.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -49,6 +51,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = FusedPadConv2d.OP_NAME, + inputsClass = FusedPadConv2d.Inputs.class +) @Operator( group = "nn" ) @@ -60,8 +66,8 @@ public final class FusedPadConv2d extends RawOp implements Op private Output output; - private FusedPadConv2d(Operation operation) { - super(operation); + public FusedPadConv2d(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -116,6 +122,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = FusedPadConv2d.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D with shape {@code [batch, in_height, in_width, in_channels]}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedResizeAndPadConv2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedResizeAndPadConv2d.java index 1d072dbf388..f0253a8ddd5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedResizeAndPadConv2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedResizeAndPadConv2d.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -48,6 +50,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = FusedResizeAndPadConv2d.OP_NAME, + inputsClass = FusedResizeAndPadConv2d.Inputs.class +) @Operator( group = "nn" ) @@ -59,8 +65,8 @@ public final class FusedResizeAndPadConv2d extends RawOp impl private Output output; - private FusedResizeAndPadConv2d(Operation operation) { - super(operation); + public FusedResizeAndPadConv2d(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -159,6 +165,9 @@ public Options resizeAlignCorners(Boolean resizeAlignCorners) { } } + @OpInputsMetadata( + outputsClass = FusedResizeAndPadConv2d.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D with shape {@code [batch, in_height, in_width, in_channels]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCell.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCell.java index a78808f3977..b122981e655 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCell.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCell.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -73,6 +75,10 @@ * * @param data type for {@code r} output */ +@OpMetadata( + opType = GRUBlockCell.OP_NAME, + inputsClass = GRUBlockCell.Inputs.class +) public final class GRUBlockCell extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -87,8 +93,8 @@ public final class GRUBlockCell extends RawOp { private Output h; - private GRUBlockCell(Operation operation) { - super(operation); + public GRUBlockCell(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; r = operation.output(outputIdx++); u = operation.output(outputIdx++); @@ -160,6 +166,9 @@ public Output h() { return h; } + @OpInputsMetadata( + outputsClass = GRUBlockCell.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCellGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCellGrad.java index b2a324662d1..e46ece87c86 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCellGrad.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCellGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -108,6 +110,10 @@ * * @param data type for {@code d_x} output */ +@OpMetadata( + opType = GRUBlockCellGrad.OP_NAME, + inputsClass = GRUBlockCellGrad.Inputs.class +) public final class GRUBlockCellGrad extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -122,8 +128,8 @@ public final class GRUBlockCellGrad extends RawOp { private Output dRBarUBar; - private GRUBlockCellGrad(Operation operation) { - super(operation); + public GRUBlockCellGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; dX = operation.output(outputIdx++); dHPrev = operation.output(outputIdx++); @@ -204,6 +210,9 @@ public Output dRBarUBar() { return dRBarUBar; } + @OpInputsMetadata( + outputsClass = GRUBlockCellGrad.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/InTopK.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/InTopK.java index 43b2d73ac08..11641113983 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/InTopK.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/InTopK.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -47,6 +49,10 @@ * \(out_i\) be the output for example {@code i}, *

    $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ */ +@OpMetadata( + opType = InTopK.OP_NAME, + inputsClass = InTopK.Inputs.class +) @Operator( group = "nn" ) @@ -58,8 +64,8 @@ public final class InTopK extends RawOp implements Operand { private Output precision; - private InTopK(Operation operation) { - super(operation); + public InTopK(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; precision = operation.output(outputIdx++); } @@ -100,6 +106,9 @@ public Output asOutput() { return precision; } + @OpInputsMetadata( + outputsClass = InTopK.class + ) public static class Inputs extends RawOpInputs { /** * A {@code batch_size} x {@code classes} tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/InvGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/InvGrad.java index bb095a412bb..5f541190f38 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/InvGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/InvGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = InvGrad.OP_NAME, + inputsClass = InvGrad.Inputs.class +) public final class InvGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class InvGrad extends RawOp implements Operand private Output z; - private InvGrad(Operation operation) { - super(operation); + public InvGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = InvGrad.class + ) public static class Inputs extends RawOpInputs> { /** * The y input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/IsotonicRegression.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/IsotonicRegression.java index 2da787ebe0b..bc03d392173 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/IsotonicRegression.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/IsotonicRegression.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = IsotonicRegression.OP_NAME, + inputsClass = IsotonicRegression.Inputs.class +) public final class IsotonicRegression extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -48,8 +54,8 @@ public final class IsotonicRegression extends RawOp { private Output segments; - private IsotonicRegression(Operation operation) { - super(operation); + public IsotonicRegression(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = 
operation.output(outputIdx++); segments = operation.output(outputIdx++); @@ -107,6 +113,9 @@ public Output segments() { return segments; } + @OpInputsMetadata( + outputsClass = IsotonicRegression.class + ) public static class Inputs extends RawOpInputs> { /** * A (batch_size, dim)-tensor holding a batch of inputs. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/L2Loss.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/L2Loss.java index e4b2fcf5d47..7e59f9de758 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/L2Loss.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/L2Loss.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -40,6 +42,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = L2Loss.OP_NAME, + inputsClass = L2Loss.Inputs.class +) @Operator( group = "nn" ) @@ -51,8 +57,8 @@ public final class L2Loss extends RawOp implements Operand private Output output; - private L2Loss(Operation operation) { - super(operation); + public L2Loss(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = L2Loss.class + ) public static class Inputs extends RawOpInputs> { /** * Typically 2-D, but may have any dimensions. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCell.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCell.java index 4f3067b63e7..2c302edcfb2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCell.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCell.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -57,6 +59,10 @@ * * @param data type for {@code i} output */ +@OpMetadata( + opType = LSTMBlockCell.OP_NAME, + inputsClass = LSTMBlockCell.Inputs.class +) public final class LSTMBlockCell extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -77,8 +83,8 @@ public final class LSTMBlockCell extends RawOp { private Output h; - private LSTMBlockCell(Operation operation) { - super(operation); + public LSTMBlockCell(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; i = operation.output(outputIdx++); cs = operation.output(outputIdx++); @@ -276,6 +282,9 @@ public Options usePeephole(Boolean usePeephole) { } } + @OpInputsMetadata( + outputsClass = LSTMBlockCell.class + ) public static class Inputs extends RawOpInputs> { /** * The input to the LSTM cell, shape (batch_size, num_inputs). 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCellGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCellGrad.java index b20e2482520..b8cda9d4399 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCellGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCellGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code cs_prev_grad} output */ +@OpMetadata( + opType = LSTMBlockCellGrad.OP_NAME, + inputsClass = LSTMBlockCellGrad.Inputs.class +) public final class LSTMBlockCellGrad extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -52,8 +58,8 @@ public final class LSTMBlockCellGrad extends RawOp { private Output wcoGrad; - private LSTMBlockCellGrad(Operation operation) { - super(operation); + public LSTMBlockCellGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; csPrevGrad = operation.output(outputIdx++); dicfo = operation.output(outputIdx++); @@ -159,6 +165,9 @@ public Output wcoGrad() { return wcoGrad; } + @OpInputsMetadata( + outputsClass = LSTMBlockCellGrad.class + ) public static class Inputs extends RawOpInputs> { /** * The input to the LSTM cell, shape (batch_size, num_inputs). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LeakyRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LeakyRelu.java index b02c71ea25a..67b86b50c6b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LeakyRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LeakyRelu.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code activations} output */ +@OpMetadata( + opType = LeakyRelu.OP_NAME, + inputsClass = LeakyRelu.Inputs.class +) @Operator( group = "nn" ) @@ -47,8 +53,8 @@ public final class LeakyRelu extends RawOp implements Operand private Output activations; - private LeakyRelu(Operation operation) { - super(operation); + public LeakyRelu(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; activations = operation.output(outputIdx++); } @@ -124,6 +130,9 @@ public Options alpha(Float alpha) { } } + @OpInputsMetadata( + outputsClass = LeakyRelu.class + ) public static class Inputs extends RawOpInputs> { /** * The features input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LearnedUnigramCandidateSampler.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LearnedUnigramCandidateSampler.java index 0e0c5633f34..009e3d7f0ac 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LearnedUnigramCandidateSampler.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LearnedUnigramCandidateSampler.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; @@ -41,6 +43,10 @@ * the sampled candidates must be chosen independently of the context and of the * true labels. */ +@OpMetadata( + opType = LearnedUnigramCandidateSampler.OP_NAME, + inputsClass = LearnedUnigramCandidateSampler.Inputs.class +) @Operator( group = "nn" ) @@ -56,8 +62,8 @@ public final class LearnedUnigramCandidateSampler extends RawOp { private Output sampledExpectedCount; - private LearnedUnigramCandidateSampler(Operation operation) { - super(operation); + public LearnedUnigramCandidateSampler(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; sampledCandidates = operation.output(outputIdx++); trueExpectedCount = operation.output(outputIdx++); @@ -194,6 +200,9 @@ public Options seed2(Long seed2) { } } + @OpInputsMetadata( + outputsClass = LearnedUnigramCandidateSampler.class + ) public static class Inputs extends RawOpInputs { /** * A batch_size * num_true matrix, in which each row contains the diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalization.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalization.java index 826c443cf64..04bced1cffe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalization.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalization.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -47,6 +49,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = LocalResponseNormalization.OP_NAME, + inputsClass = LocalResponseNormalization.Inputs.class +) @Operator( group = "nn" ) @@ -58,8 +64,8 @@ public final class LocalResponseNormalization extends RawOp i private Output output; - private LocalResponseNormalization(Operation operation) { - super(operation); + public LocalResponseNormalization(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -213,6 +219,9 @@ public Options beta(Float beta) { } } + @OpInputsMetadata( + outputsClass = LocalResponseNormalization.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalizationGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalizationGrad.java index d3ef3925776..1aad5086f85 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalizationGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalizationGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = LocalResponseNormalizationGrad.OP_NAME, + inputsClass = LocalResponseNormalizationGrad.Inputs.class +) public final class LocalResponseNormalizationGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class LocalResponseNormalizationGrad extends Raw private Output output; - private LocalResponseNormalizationGrad(Operation operation) { - super(operation); + public LocalResponseNormalizationGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -202,6 +208,9 @@ public Options beta(Float beta) { } } + @OpInputsMetadata( + outputsClass = LocalResponseNormalizationGrad.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D with shape {@code [batch, height, width, channels]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LogSoftmax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LogSoftmax.java index 82f178f0e1a..0773cdb9edb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LogSoftmax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LogSoftmax.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -40,6 +42,10 @@ * * @param data type for {@code logsoftmax} output */ +@OpMetadata( + opType = LogSoftmax.OP_NAME, + inputsClass = LogSoftmax.Inputs.class +) @Operator( group = "nn" ) @@ -51,8 +57,8 @@ public final class LogSoftmax extends RawOp implements Operan private Output logsoftmax; - private LogSoftmax(Operation operation) { - super(operation); + public LogSoftmax(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; logsoftmax = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return logsoftmax; } + @OpInputsMetadata( + outputsClass = LogSoftmax.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D with shape {@code [batch_size, num_classes]}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool.java index 52286432580..21b206fdbf3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -37,6 +39,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = MaxPool.OP_NAME, + inputsClass = MaxPool.Inputs.class +) @Operator( group = "nn" ) @@ -48,8 +54,8 @@ public final class MaxPool extends RawOp implements Operand output; - private MaxPool(Operation operation) { - super(operation); + public MaxPool(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -140,6 +146,9 @@ public Options dataFormat(String dataFormat) { } } + @OpInputsMetadata( + outputsClass = MaxPool.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D input to pool over. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3d.java index 5d9bfdffa7d..b5b81c23295 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3d.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = MaxPool3d.OP_NAME, + inputsClass = MaxPool3d.Inputs.class +) @Operator( group = "nn" ) @@ -48,8 +54,8 @@ public final class MaxPool3d extends RawOp implements Operand private Output output; - private MaxPool3d(Operation operation) { - super(operation); + public MaxPool3d(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -149,6 +155,9 @@ public Options dataFormat(String dataFormat) { } } + @OpInputsMetadata( + outputsClass = MaxPool3d.class + ) public static class Inputs extends RawOpInputs> { /** * Shape {@code [batch, depth, rows, cols, channels]} tensor to pool over. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGrad.java index 2bfe76491b4..63d7ff3aad9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGrad.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = MaxPool3dGrad.OP_NAME, + inputsClass = MaxPool3dGrad.Inputs.class +) @Operator( group = "nn" ) @@ -48,8 +54,8 @@ public final class MaxPool3dGrad extends RawOp implements Ope private Output output; - private MaxPool3dGrad(Operation operation) { - super(operation); + public MaxPool3dGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -155,6 +161,9 @@ public Options dataFormat(String dataFormat) { } } + @OpInputsMetadata( + outputsClass = MaxPool3dGrad.class + ) public static class Inputs extends RawOpInputs> { /** * The original input tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGradGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGradGrad.java index 0db33576167..81e4cb166d6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGradGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGradGrad.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = MaxPool3dGradGrad.OP_NAME, + inputsClass = MaxPool3dGradGrad.Inputs.class +) @Operator( group = "nn" ) @@ -48,8 +54,8 @@ public final class MaxPool3dGradGrad extends RawOp implements private Output output; - private MaxPool3dGradGrad(Operation operation) { - super(operation); + public MaxPool3dGradGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -154,6 +160,9 @@ public Options dataFormat(String dataFormat) { } } + @OpInputsMetadata( + outputsClass = MaxPool3dGradGrad.class + ) public static class Inputs extends RawOpInputs> { /** * The original input tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGrad.java index a2e15973e80..7dfe407119e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -37,6 +39,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = MaxPoolGrad.OP_NAME, + inputsClass = MaxPoolGrad.Inputs.class +) @Operator( group = "nn" ) @@ -48,8 +54,8 @@ public final class MaxPoolGrad extends RawOp implements Opera private Output output; - private MaxPoolGrad(Operation operation) { - super(operation); + public MaxPoolGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -145,6 +151,9 @@ public Options dataFormat(String dataFormat) { } } + @OpInputsMetadata( + outputsClass = MaxPoolGrad.class + ) public static class Inputs extends RawOpInputs> { /** * The original input tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGrad.java index baeb0c83a39..6f4addd3a9b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -37,6 +39,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = MaxPoolGradGrad.OP_NAME, + inputsClass = MaxPoolGradGrad.Inputs.class +) @Operator( group = "nn" ) @@ -48,8 +54,8 @@ public final class MaxPoolGradGrad extends RawOp implements O private Output output; - private MaxPoolGradGrad(Operation operation) { - super(operation); + public MaxPoolGradGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -145,6 +151,9 @@ public Options dataFormat(String dataFormat) { } } + @OpInputsMetadata( + outputsClass = MaxPoolGradGrad.class + ) public static class Inputs extends RawOpInputs> { /** * The original input tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGradWithArgmax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGradWithArgmax.java index e9acc1d5cc9..4f9fe9c1bae 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGradWithArgmax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGradWithArgmax.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = MaxPoolGradGradWithArgmax.OP_NAME, + inputsClass = MaxPoolGradGradWithArgmax.Inputs.class +) @Operator( group = "nn" ) @@ -48,8 +54,8 @@ public final class MaxPoolGradGradWithArgmax extends RawOp im private Output output; - private MaxPoolGradGradWithArgmax(Operation operation) { - super(operation); + public MaxPoolGradGradWithArgmax(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -146,6 +152,9 @@ public Options includeBatchInIndex(Boolean includeBatchInIndex) { } } + @OpInputsMetadata( + outputsClass = MaxPoolGradGradWithArgmax.class + ) public static class Inputs extends RawOpInputs> { /** * The original input. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradWithArgmax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradWithArgmax.java index b2fd0acb7e3..d1a7131e2ea 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradWithArgmax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradWithArgmax.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = MaxPoolGradWithArgmax.OP_NAME, + inputsClass = MaxPoolGradWithArgmax.Inputs.class +) public final class MaxPoolGradWithArgmax extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class MaxPoolGradWithArgmax extends RawOp implem private Output output; - private MaxPoolGradWithArgmax(Operation operation) { - super(operation); + public MaxPoolGradWithArgmax(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -142,6 +148,9 @@ public Options includeBatchInIndex(Boolean includeBatchInIndex) { } } + @OpInputsMetadata( + outputsClass = MaxPoolGradWithArgmax.class + ) public static class Inputs extends RawOpInputs> { /** * The original input. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolWithArgmax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolWithArgmax.java index 42e6dfbefb5..73b54f6f150 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolWithArgmax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolWithArgmax.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -49,6 +51,10 @@ * * @param data type for {@code argmax} output */ +@OpMetadata( + opType = MaxPoolWithArgmax.OP_NAME, + inputsClass = MaxPoolWithArgmax.Inputs.class +) @Operator( group = "nn" ) @@ -62,8 +68,8 @@ public final class MaxPoolWithArgmax exten private Output argmax; - private MaxPoolWithArgmax(Operation operation) { - super(operation); + public MaxPoolWithArgmax(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); argmax = operation.output(outputIdx++); @@ -184,6 +190,9 @@ public Options includeBatchInIndex(Boolean includeBatchInIndex) { } } + @OpInputsMetadata( + outputsClass = MaxPoolWithArgmax.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D with shape {@code [batch, height, width, channels]}. Input to pool over. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/NthElement.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/NthElement.java index 14df47bbcc2..7f4635a86ab 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/NthElement.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/NthElement.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -44,6 +46,10 @@ * * @param data type for {@code values} output */ +@OpMetadata( + opType = NthElement.OP_NAME, + inputsClass = NthElement.Inputs.class +) @Operator( group = "nn" ) @@ -55,8 +61,8 @@ public final class NthElement extends RawOp implements Operan private Output values; - private NthElement(Operation operation) { - super(operation); + public NthElement(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; values = operation.output(outputIdx++); } @@ -137,6 +143,9 @@ public Options reverse(Boolean reverse) { } } + @OpInputsMetadata( + outputsClass = NthElement.class + ) public static class Inputs extends RawOpInputs> { /** * 1-D or higher with last dimension at least {@code n+1}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedAvgPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedAvgPool.java index bceac12fb5d..76313824c90 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedAvgPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedAvgPool.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizedAvgPool.OP_NAME, + inputsClass = QuantizedAvgPool.Inputs.class +) @Operator( group = "nn" ) @@ -53,8 +59,8 @@ public final class QuantizedAvgPool extends RawOp { private Output maxOutput; - private QuantizedAvgPool(Operation operation) { - super(operation); + public QuantizedAvgPool(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); minOutput = operation.output(outputIdx++); @@ -127,6 +133,9 @@ public Output maxOutput() { return maxOutput; } + @OpInputsMetadata( + outputsClass = QuantizedAvgPool.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D with shape {@code [batch, height, width, channels]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBatchNormWithGlobalNormalization.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBatchNormWithGlobalNormalization.java index 7ff416e12ef..7def1715bdc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBatchNormWithGlobalNormalization.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBatchNormWithGlobalNormalization.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -40,6 +42,10 @@ * * @param data type for {@code result} output */ +@OpMetadata( + opType = QuantizedBatchNormWithGlobalNormalization.OP_NAME, + inputsClass = QuantizedBatchNormWithGlobalNormalization.Inputs.class +) @Operator( group = "nn" ) @@ -55,8 +61,8 @@ public final class QuantizedBatchNormWithGlobalNormalization private Output resultMax; - private QuantizedBatchNormWithGlobalNormalization(Operation operation) { - super(operation); + public QuantizedBatchNormWithGlobalNormalization(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; result = operation.output(outputIdx++); resultMin = operation.output(outputIdx++); @@ -155,6 +161,9 @@ public Output resultMax() { return resultMax; } + @OpInputsMetadata( + outputsClass = QuantizedBatchNormWithGlobalNormalization.class + ) public static class Inputs extends RawOpInputs> { /** * A 4D input Tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBiasAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBiasAdd.java index a646479464f..4d44b9fadfc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBiasAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBiasAdd.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizedBiasAdd.OP_NAME, + inputsClass = QuantizedBiasAdd.Inputs.class +) @Operator( group = "nn" ) @@ -54,8 +60,8 @@ public final class QuantizedBiasAdd extends RawOp { private Output maxOut; - private QuantizedBiasAdd(Operation operation) { - super(operation); + public QuantizedBiasAdd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); minOut = operation.output(outputIdx++); @@ -121,6 +127,9 @@ public Output maxOut() { return maxOut; } + @OpInputsMetadata( + outputsClass = QuantizedBiasAdd.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRelu.java index ec7cfb17939..bccfc189f2a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRelu.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizedConv2DAndRelu.OP_NAME, + inputsClass = QuantizedConv2DAndRelu.Inputs.class +) public final class QuantizedConv2DAndRelu extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class QuantizedConv2DAndRelu extends RawOp { private Output maxOutput; - private QuantizedConv2DAndRelu(Operation operation) { - super(operation); + public QuantizedConv2DAndRelu(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); minOutput = operation.output(outputIdx++); @@ -241,6 +247,9 @@ public Options paddingList(Long... 
paddingList) { } } + @OpInputsMetadata( + outputsClass = QuantizedConv2DAndRelu.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndReluAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndReluAndRequantize.java index c50cda07ac7..64113bb8c21 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndReluAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndReluAndRequantize.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizedConv2DAndReluAndRequantize.OP_NAME, + inputsClass = QuantizedConv2DAndReluAndRequantize.Inputs.class +) public final class QuantizedConv2DAndReluAndRequantize extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class QuantizedConv2DAndReluAndRequantize extend private Output maxOutput; - private QuantizedConv2DAndReluAndRequantize(Operation operation) { - super(operation); + public QuantizedConv2DAndReluAndRequantize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); minOutput = operation.output(outputIdx++); @@ -246,6 +252,9 @@ public Options paddingList(Long... 
paddingList) { } } + @OpInputsMetadata( + outputsClass = QuantizedConv2DAndReluAndRequantize.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRequantize.java index 8fcd6d5e6e6..c24962c7ca5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRequantize.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizedConv2DAndRequantize.OP_NAME, + inputsClass = QuantizedConv2DAndRequantize.Inputs.class +) public final class QuantizedConv2DAndRequantize extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class QuantizedConv2DAndRequantize extends RawOp private Output maxOutput; - private QuantizedConv2DAndRequantize(Operation operation) { - super(operation); + public QuantizedConv2DAndRequantize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); minOutput = operation.output(outputIdx++); @@ -246,6 +252,9 @@ public Options paddingList(Long... paddingList) { } } + @OpInputsMetadata( + outputsClass = QuantizedConv2DAndRequantize.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DPerChannel.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DPerChannel.java index cd45b84d21f..594e907dd36 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DPerChannel.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DPerChannel.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizedConv2DPerChannel.OP_NAME, + inputsClass = QuantizedConv2DPerChannel.Inputs.class +) public final class QuantizedConv2DPerChannel extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class QuantizedConv2DPerChannel extends RawOp { private Output maxOutput; - private QuantizedConv2DPerChannel(Operation operation) { - super(operation); + public QuantizedConv2DPerChannel(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); minOutput = operation.output(outputIdx++); @@ -190,6 +196,9 @@ public Options dilations(Long... 
dilations) { } } + @OpInputsMetadata( + outputsClass = QuantizedConv2DPerChannel.class + ) public static class Inputs extends RawOpInputs> { /** * The original input tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBias.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBias.java index aae3f6b2ce1..9599efe4f8e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBias.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBias.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizedConv2DWithBias.OP_NAME, + inputsClass = QuantizedConv2DWithBias.Inputs.class +) public final class QuantizedConv2DWithBias extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class QuantizedConv2DWithBias extends RawOp { private Output maxOutput; - private QuantizedConv2DWithBias(Operation operation) { - super(operation); + public QuantizedConv2DWithBias(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); minOutput = operation.output(outputIdx++); @@ -243,6 +249,9 @@ public Options paddingList(Long... paddingList) { } } + @OpInputsMetadata( + outputsClass = QuantizedConv2DWithBias.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRelu.java index 060fb88ec1f..63ff2e04d43 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRelu.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizedConv2DWithBiasAndRelu.OP_NAME, + inputsClass = QuantizedConv2DWithBiasAndRelu.Inputs.class +) public final class QuantizedConv2DWithBiasAndRelu extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class QuantizedConv2DWithBiasAndRelu extends Raw private Output maxOutput; - private QuantizedConv2DWithBiasAndRelu(Operation operation) { - super(operation); + public QuantizedConv2DWithBiasAndRelu(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); minOutput = operation.output(outputIdx++); @@ -243,6 +249,9 @@ public Options paddingList(Long... 
paddingList) { } } + @OpInputsMetadata( + outputsClass = QuantizedConv2DWithBiasAndRelu.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndReluAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndReluAndRequantize.java index 9709b6afc6d..e47f7adc8ec 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndReluAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndReluAndRequantize.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizedConv2DWithBiasAndReluAndRequantize.OP_NAME, + inputsClass = QuantizedConv2DWithBiasAndReluAndRequantize.Inputs.class +) public final class QuantizedConv2DWithBiasAndReluAndRequantize extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class QuantizedConv2DWithBiasAndReluAndRequantize maxOutput; - private QuantizedConv2DWithBiasAndReluAndRequantize(Operation operation) { - super(operation); + public QuantizedConv2DWithBiasAndReluAndRequantize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); minOutput = operation.output(outputIdx++); @@ -248,6 +254,9 @@ public Options paddingList(Long... 
paddingList) { } } + @OpInputsMetadata( + outputsClass = QuantizedConv2DWithBiasAndReluAndRequantize.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRequantize.java index 478048a5504..7e9062e953f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRequantize.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizedConv2DWithBiasAndRequantize.OP_NAME, + inputsClass = QuantizedConv2DWithBiasAndRequantize.Inputs.class +) public final class QuantizedConv2DWithBiasAndRequantize extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class QuantizedConv2DWithBiasAndRequantize exten private Output maxOutput; - private QuantizedConv2DWithBiasAndRequantize(Operation operation) { - super(operation); + public QuantizedConv2DWithBiasAndRequantize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); minOutput = operation.output(outputIdx++); @@ -248,6 +254,9 @@ public Options paddingList(Long... 
paddingList) { } } + @OpInputsMetadata( + outputsClass = QuantizedConv2DWithBiasAndRequantize.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.java index e9e88ec8b2e..90627ac39fa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.OP_NAME, + inputsClass = QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.Inputs.class +) public final class QuantizedConv2DWithBiasSignedSumAndReluAndRequantize extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class QuantizedConv2DWithBiasSignedSumAndReluAndRequantize maxOutput; - private QuantizedConv2DWithBiasSignedSumAndReluAndRequantize(Operation operation) { - super(operation); + public QuantizedConv2DWithBiasSignedSumAndReluAndRequantize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); minOutput = operation.output(outputIdx++); @@ -255,6 +261,9 @@ public Options paddingList(Long... 
paddingList) { } } + @OpInputsMetadata( + outputsClass = QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndRelu.java index b3178a7b7c9..53dd4316d87 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndRelu.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizedConv2DWithBiasSumAndRelu.OP_NAME, + inputsClass = QuantizedConv2DWithBiasSumAndRelu.Inputs.class +) public final class QuantizedConv2DWithBiasSumAndRelu extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class QuantizedConv2DWithBiasSumAndRelu extends private Output maxOutput; - private QuantizedConv2DWithBiasSumAndRelu(Operation operation) { - super(operation); + public QuantizedConv2DWithBiasSumAndRelu(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); minOutput = operation.output(outputIdx++); @@ -245,6 +251,9 @@ public Options paddingList(Long... 
paddingList) { } } + @OpInputsMetadata( + outputsClass = QuantizedConv2DWithBiasSumAndRelu.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndReluAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndReluAndRequantize.java index 0a173c9732d..c734b9c62a1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndReluAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndReluAndRequantize.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizedConv2DWithBiasSumAndReluAndRequantize.OP_NAME, + inputsClass = QuantizedConv2DWithBiasSumAndReluAndRequantize.Inputs.class +) public final class QuantizedConv2DWithBiasSumAndReluAndRequantize extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class QuantizedConv2DWithBiasSumAndReluAndRequantize maxOutput; - private QuantizedConv2DWithBiasSumAndReluAndRequantize(Operation operation) { - super(operation); + public QuantizedConv2DWithBiasSumAndReluAndRequantize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); minOutput = operation.output(outputIdx++); @@ -255,6 +261,9 @@ public Options paddingList(Long... paddingList) { } } + @OpInputsMetadata( + outputsClass = QuantizedConv2DWithBiasSumAndReluAndRequantize.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2d.java index 6c6d3b373fd..ba70fb60b9f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2d.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -43,6 +45,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizedConv2d.OP_NAME, + inputsClass = QuantizedConv2d.Inputs.class +) @Operator( group = "nn" ) @@ -58,8 +64,8 @@ public final class QuantizedConv2d extends RawOp { private Output maxOutput; - private QuantizedConv2d(Operation operation) { - super(operation); + public QuantizedConv2d(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); minOutput = operation.output(outputIdx++); @@ -215,6 +221,9 @@ public Options dilations(Long... 
dilations) { } } + @OpInputsMetadata( + outputsClass = QuantizedConv2d.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2D.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2D.java index 2471c3ab08f..6cd5a628445 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2D.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2D.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizedDepthwiseConv2D.OP_NAME, + inputsClass = QuantizedDepthwiseConv2D.Inputs.class +) public final class QuantizedDepthwiseConv2D extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class QuantizedDepthwiseConv2D extends RawOp { private Output maxOutput; - private QuantizedDepthwiseConv2D(Operation operation) { - super(operation); + public QuantizedDepthwiseConv2D(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); minOutput = operation.output(outputIdx++); @@ -190,6 +196,9 @@ public Options dilations(Long... dilations) { } } + @OpInputsMetadata( + outputsClass = QuantizedDepthwiseConv2D.class + ) public static class Inputs extends RawOpInputs> { /** * The original input tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBias.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBias.java index 4240f955a8d..fcd9d55596f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBias.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBias.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizedDepthwiseConv2DWithBias.OP_NAME, + inputsClass = QuantizedDepthwiseConv2DWithBias.Inputs.class +) public final class QuantizedDepthwiseConv2DWithBias extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class QuantizedDepthwiseConv2DWithBias extends R private Output maxOutput; - private QuantizedDepthwiseConv2DWithBias(Operation operation) { - super(operation); + public QuantizedDepthwiseConv2DWithBias(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); minOutput = operation.output(outputIdx++); @@ -192,6 +198,9 @@ public Options dilations(Long... 
dilations) { } } + @OpInputsMetadata( + outputsClass = QuantizedDepthwiseConv2DWithBias.class + ) public static class Inputs extends RawOpInputs> { /** * The original input tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndRelu.java index b15046296b5..fedb1118f0f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndRelu.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizedDepthwiseConv2DWithBiasAndRelu.OP_NAME, + inputsClass = QuantizedDepthwiseConv2DWithBiasAndRelu.Inputs.class +) public final class QuantizedDepthwiseConv2DWithBiasAndRelu extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class QuantizedDepthwiseConv2DWithBiasAndRelu ex private Output maxOutput; - private QuantizedDepthwiseConv2DWithBiasAndRelu(Operation operation) { - super(operation); + public QuantizedDepthwiseConv2DWithBiasAndRelu(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); minOutput = operation.output(outputIdx++); @@ -243,6 +249,9 @@ public Options paddingList(Long... paddingList) { } } + @OpInputsMetadata( + outputsClass = QuantizedDepthwiseConv2DWithBiasAndRelu.class + ) public static class Inputs extends RawOpInputs> { /** * The original input tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.java index 8b39158ccee..db07ec87ba5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.OP_NAME, + inputsClass = QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.Inputs.class +) public final class QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize maxOutput; - private QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize(Operation operation) { - super(operation); + public QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); minOutput = operation.output(outputIdx++); @@ -248,6 +254,9 @@ public Options paddingList(Long... paddingList) { } } + @OpInputsMetadata( + outputsClass = QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.class + ) public static class Inputs extends RawOpInputs> { /** * The original input tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedInstanceNorm.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedInstanceNorm.java index 3fcc9015f32..6d23d7f2317 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedInstanceNorm.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedInstanceNorm.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -37,6 +39,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = QuantizedInstanceNorm.OP_NAME, + inputsClass = QuantizedInstanceNorm.Inputs.class +) @Operator( group = "nn" ) @@ -52,8 +58,8 @@ public final class QuantizedInstanceNorm extends RawOp { private Output yMax; - private QuantizedInstanceNorm(Operation operation) { - super(operation); + public QuantizedInstanceNorm(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); yMin = operation.output(outputIdx++); @@ -256,6 +262,9 @@ public Options minSeparation(Float minSeparation) { } } + @OpInputsMetadata( + outputsClass = QuantizedInstanceNorm.class + ) public static class Inputs extends RawOpInputs> { /** * A 4D input Tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedMaxPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedMaxPool.java index 57d54424778..6d73566d040 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedMaxPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedMaxPool.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizedMaxPool.OP_NAME, + inputsClass = QuantizedMaxPool.Inputs.class +) @Operator( group = "nn" ) @@ -53,8 +59,8 @@ public final class QuantizedMaxPool extends RawOp { private Output maxOutput; - private QuantizedMaxPool(Operation operation) { - super(operation); + public QuantizedMaxPool(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); minOutput = operation.output(outputIdx++); @@ -127,6 +133,9 @@ public Output maxOutput() { return maxOutput; } + @OpInputsMetadata( + outputsClass = QuantizedMaxPool.class + ) public static class Inputs extends RawOpInputs> { /** * The 4D (batch x rows x cols x depth) Tensor to MaxReduce over. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu.java index 0f84eb38f99..19f43691d40 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -38,6 +40,10 @@ * * @param data type for {@code activations} output */ +@OpMetadata( + opType = QuantizedRelu.OP_NAME, + inputsClass = QuantizedRelu.Inputs.class +) @Operator( group = "nn" ) @@ -53,8 +59,8 @@ public final class QuantizedRelu extends RawOp { private Output maxActivations; - private QuantizedRelu(Operation operation) { - super(operation); + public QuantizedRelu(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; activations = operation.output(outputIdx++); minActivations = operation.output(outputIdx++); @@ -113,6 +119,9 @@ public Output maxActivations() { return maxActivations; } + @OpInputsMetadata( + outputsClass = QuantizedRelu.class + ) public static class Inputs extends RawOpInputs> { /** * The features input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu6.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu6.java index ff29633a598..1288ae41c1a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu6.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu6.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -38,6 +40,10 @@ * * @param data type for {@code activations} output */ +@OpMetadata( + opType = QuantizedRelu6.OP_NAME, + inputsClass = QuantizedRelu6.Inputs.class +) @Operator( group = "nn" ) @@ -53,8 +59,8 @@ public final class QuantizedRelu6 extends RawOp { private Output maxActivations; - private QuantizedRelu6(Operation operation) { - super(operation); + public QuantizedRelu6(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; activations = operation.output(outputIdx++); minActivations = operation.output(outputIdx++); @@ -113,6 +119,9 @@ public Output maxActivations() { return maxActivations; } + @OpInputsMetadata( + outputsClass = QuantizedRelu6.class + ) public static class Inputs extends RawOpInputs> { /** * The features input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedReluX.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedReluX.java index 0e479563b8b..c6fe479482f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedReluX.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedReluX.java @@ -28,6 +28,8 
@@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -38,6 +40,10 @@ * * @param data type for {@code activations} output */ +@OpMetadata( + opType = QuantizedReluX.OP_NAME, + inputsClass = QuantizedReluX.Inputs.class +) @Operator( group = "nn" ) @@ -53,8 +59,8 @@ public final class QuantizedReluX extends RawOp { private Output maxActivations; - private QuantizedReluX(Operation operation) { - super(operation); + public QuantizedReluX(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; activations = operation.output(outputIdx++); minActivations = operation.output(outputIdx++); @@ -115,6 +121,9 @@ public Output maxActivations() { return maxActivations; } + @OpInputsMetadata( + outputsClass = QuantizedReluX.class + ) public static class Inputs extends RawOpInputs> { /** * The features input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu.java index cb2a3ac068e..a8b4e9217c3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -46,6 +48,10 @@ * * @param data type for {@code activations} output */ +@OpMetadata( + opType = Relu.OP_NAME, + inputsClass = Relu.Inputs.class +) @Operator( group = "nn" ) @@ -57,8 +63,8 @@ public final class Relu extends RawOp implements Operand { private Output activations; - private Relu(Operation operation) { - super(operation); + public Relu(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; activations = operation.output(outputIdx++); } @@ -94,6 +100,9 @@ public Output asOutput() { return activations; } + @OpInputsMetadata( + outputsClass = Relu.class + ) public static class Inputs extends RawOpInputs> { /** * The features input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6.java index 6a076656bd5..9df15d9675c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code activations} output */ +@OpMetadata( + opType = Relu6.OP_NAME, + inputsClass = Relu6.Inputs.class +) @Operator( group = "nn" ) @@ -47,8 +53,8 @@ public final class Relu6 extends RawOp 
implements Operand private Output activations; - private Relu6(Operation operation) { - super(operation); + public Relu6(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; activations = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return activations; } + @OpInputsMetadata( + outputsClass = Relu6.class + ) public static class Inputs extends RawOpInputs> { /** * The features input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6Grad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6Grad.java index a661ce63ea0..e6b607bb202 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6Grad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6Grad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code backprops} output */ +@OpMetadata( + opType = Relu6Grad.OP_NAME, + inputsClass = Relu6Grad.Inputs.class +) public final class Relu6Grad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class Relu6Grad extends RawOp implements Operand private Output backprops; - private Relu6Grad(Operation operation) { - super(operation); + public Relu6Grad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; backprops = operation.output(outputIdx++); } @@ -85,6 +91,9 @@ public Output asOutput() { return backprops; } + @OpInputsMetadata( + outputsClass = Relu6Grad.class + ) public static class Inputs extends RawOpInputs> { /** * The backpropagated gradients to the corresponding Relu6 operation. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/ReluGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/ReluGrad.java index 88ac8d1f067..3c7550021fd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/ReluGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/ReluGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code backprops} output */ +@OpMetadata( + opType = ReluGrad.OP_NAME, + inputsClass = ReluGrad.Inputs.class +) public final class ReluGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class ReluGrad extends RawOp implements Operand< private Output backprops; - private ReluGrad(Operation operation) { - super(operation); + public ReluGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; backprops = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return backprops; } + @OpInputsMetadata( + outputsClass = ReluGrad.class + ) public static class Inputs extends RawOpInputs> { /** * The backpropagated gradients to the corresponding Relu operation. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Selu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Selu.java index f4bde617501..3262f1086e8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Selu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Selu.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -41,6 +43,10 @@ * * @param data type for {@code activations} output */ +@OpMetadata( + opType = Selu.OP_NAME, + inputsClass = Selu.Inputs.class +) @Operator( group = "nn" ) @@ -52,8 +58,8 @@ public final class Selu extends RawOp implements Operand { private Output activations; - private Selu(Operation operation) { - super(operation); + public Selu(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; activations = operation.output(outputIdx++); } @@ -89,6 +95,9 @@ public Output asOutput() { return activations; } + @OpInputsMetadata( + outputsClass = Selu.class + ) public static class Inputs extends RawOpInputs> { /** * The features input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SeluGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SeluGrad.java index 4beb213c767..b7766d8292a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SeluGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SeluGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import 
org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code backprops} output */ +@OpMetadata( + opType = SeluGrad.OP_NAME, + inputsClass = SeluGrad.Inputs.class +) public final class SeluGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class SeluGrad extends RawOp implements Operand< private Output backprops; - private SeluGrad(Operation operation) { - super(operation); + public SeluGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; backprops = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return backprops; } + @OpInputsMetadata( + outputsClass = SeluGrad.class + ) public static class Inputs extends RawOpInputs> { /** * The backpropagated gradients to the corresponding Selu operation. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softmax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softmax.java index 5bd51a5ac65..de8d65a202f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softmax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softmax.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -40,6 +42,10 @@ * * @param data type for {@code softmax} output */ +@OpMetadata( + opType = Softmax.OP_NAME, + inputsClass = Softmax.Inputs.class +) @Operator( group = "nn" ) @@ -51,8 +57,8 @@ public final class Softmax extends RawOp implements Operand softmax; - private Softmax(Operation operation) { - super(operation); + public Softmax(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; softmax = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return softmax; } + @OpInputsMetadata( + outputsClass = Softmax.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D with shape {@code [batch_size, num_classes]}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java index 3d3cb903166..4c57032e423 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code loss} output */ +@OpMetadata( + opType = SoftmaxCrossEntropyWithLogits.OP_NAME, + inputsClass = SoftmaxCrossEntropyWithLogits.Inputs.class +) @Operator( group = "nn" ) @@ -50,8 +56,8 @@ public final class SoftmaxCrossEntropyWithLogits extends RawO private Output backprop; - private SoftmaxCrossEntropyWithLogits(Operation operation) { - super(operation); + public SoftmaxCrossEntropyWithLogits(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; loss = operation.output(outputIdx++); backprop = operation.output(outputIdx++); @@ -97,6 +103,9 @@ public Output backprop() { return backprop; } + @OpInputsMetadata( + outputsClass = SoftmaxCrossEntropyWithLogits.class + ) public static class Inputs extends RawOpInputs> { /** * batch_size x num_classes matrix diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softsign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softsign.java index 9d17f898281..913c13b1975 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softsign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softsign.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code activations} output */ +@OpMetadata( + opType = Softsign.OP_NAME, + inputsClass = Softsign.Inputs.class +) @Operator( group = "nn" ) @@ -47,8 +53,8 @@ public final class Softsign extends RawOp implements Operand< private Output activations; - private Softsign(Operation operation) { - super(operation); + public Softsign(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; activations = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return activations; } + @OpInputsMetadata( + outputsClass = Softsign.class + ) public static class Inputs extends RawOpInputs> { /** * The features input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftsignGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftsignGrad.java index ba0d7d3c380..5d57b66c50c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftsignGrad.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftsignGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code backprops} output */ +@OpMetadata( + opType = SoftsignGrad.OP_NAME, + inputsClass = SoftsignGrad.Inputs.class +) public final class SoftsignGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class SoftsignGrad extends RawOp implements Oper private Output backprops; - private SoftsignGrad(Operation operation) { - super(operation); + public SoftsignGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; backprops = operation.output(outputIdx++); } @@ -83,6 +89,9 @@ public Output asOutput() { return backprops; } + @OpInputsMetadata( + outputsClass = SoftsignGrad.class + ) public static class Inputs extends RawOpInputs> { /** * The backpropagated gradients to the corresponding softsign operation. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToBatch.java index 31b8b5d34ab..9969a3a1921 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToBatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToBatch.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -101,6 +103,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SpaceToBatch.OP_NAME, + inputsClass = SpaceToBatch.Inputs.class +) @Operator( group = "nn" ) @@ -112,8 +118,8 @@ public final class SpaceToBatch extends RawOp implements Operan private Output output; - private SpaceToBatch(Operation operation) { - super(operation); + public SpaceToBatch(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -163,6 +169,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SpaceToBatch.class + ) public static class Inputs extends RawOpInputs> { /** * 4-D with shape {@code [batch, height, width, depth]}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToDepth.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToDepth.java index 6fe32cec477..de02df239ec 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToDepth.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToDepth.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -104,6 +106,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SpaceToDepth.OP_NAME, + inputsClass = SpaceToDepth.Inputs.class +) @Operator( group = "nn" ) @@ -115,8 +121,8 @@ public final class SpaceToDepth extends RawOp implements Operan private Output output; - private SpaceToDepth(Operation operation) { - super(operation); + public SpaceToDepth(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -194,6 +200,9 @@ public Options dataFormat(String dataFormat) { } } + @OpInputsMetadata( + outputsClass = SpaceToDepth.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 5935d7eeb8f..2e08e77aa55 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -41,6 +43,10 @@ * * @param data type for {@code loss} output */ +@OpMetadata( + opType = SparseSoftmaxCrossEntropyWithLogits.OP_NAME, + inputsClass = SparseSoftmaxCrossEntropyWithLogits.Inputs.class +) @Operator( group = "nn" ) @@ -54,8 +60,8 @@ public final class SparseSoftmaxCrossEntropyWithLogits extend private Output backprop; - private SparseSoftmaxCrossEntropyWithLogits(Operation operation) { - super(operation); + public SparseSoftmaxCrossEntropyWithLogits(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; loss = operation.output(outputIdx++); backprop = operation.output(outputIdx++); @@ -100,6 +106,9 @@ public Output backprop() { return backprop; } + @OpInputsMetadata( + outputsClass = SparseSoftmaxCrossEntropyWithLogits.class + ) public static class Inputs extends RawOpInputs> { /** * batch_size x num_classes matrix diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/TopK.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/TopK.java index d549f556f17..36ccbf81754 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/TopK.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/TopK.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -46,6 +48,10 @@ * * @param data type for {@code values} output */ +@OpMetadata( + opType = TopK.OP_NAME, + inputsClass = TopK.Inputs.class +) @Operator( group = "nn" ) @@ -59,8 +65,8 @@ public final class TopK extends RawOp { private Output indices; - private TopK(Operation operation) { - super(operation); + public TopK(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; values = operation.output(outputIdx++); indices = operation.output(outputIdx++); @@ -146,6 +152,9 @@ public Options sorted(Boolean sorted) { } } + @OpInputsMetadata( + outputsClass = TopK.class + ) public static class Inputs extends RawOpInputs> { /** * 1-D or higher with last dimension at least {@code k}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Dequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Dequantize.java index 053689d7023..22f0ac7402c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Dequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Dequantize.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -81,6 +83,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Dequantize.OP_NAME, + inputsClass = Dequantize.Inputs.class +) @Operator( group = "quantization" ) @@ -92,8 +98,8 @@ public final class Dequantize extends RawOp implements Operan private Output output; - private Dequantize(Operation operation) { - super(operation); + public Dequantize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -247,6 +253,9 @@ public Options axis(Long axis) { } } + @OpInputsMetadata( + outputsClass = Dequantize.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxArgs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxArgs.java index 932f2420685..d48ce80ac57 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxArgs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxArgs.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; @@ -53,6 +55,10 @@ * *
    Quantization is called fake since the output is still in floating point. */ +@OpMetadata( + opType = FakeQuantWithMinMaxArgs.OP_NAME, + inputsClass = FakeQuantWithMinMaxArgs.Inputs.class +) @Operator( group = "quantization" ) @@ -64,8 +70,8 @@ public final class FakeQuantWithMinMaxArgs extends RawOp implements Operand outputs; - private FakeQuantWithMinMaxArgs(Operation operation) { - super(operation); + public FakeQuantWithMinMaxArgs(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputs = operation.output(outputIdx++); } @@ -218,6 +224,9 @@ public Options narrowRange(Boolean narrowRange) { } } + @OpInputsMetadata( + outputsClass = FakeQuantWithMinMaxArgs.class + ) public static class Inputs extends RawOpInputs { /** * The inputs input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxArgsGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxArgsGradient.java index a87262d29e5..4d2dcf75930 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxArgsGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxArgsGradient.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** * Compute gradients for a FakeQuantWithMinMaxArgs operation. */ +@OpMetadata( + opType = FakeQuantWithMinMaxArgsGradient.OP_NAME, + inputsClass = FakeQuantWithMinMaxArgsGradient.Inputs.class +) @Operator( group = "quantization" ) @@ -44,8 +50,8 @@ public final class FakeQuantWithMinMaxArgsGradient extends RawOp implements Oper private Output backprops; - private FakeQuantWithMinMaxArgsGradient(Operation operation) { - super(operation); + public FakeQuantWithMinMaxArgsGradient(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; backprops = operation.output(outputIdx++); } @@ -201,6 +207,9 @@ public Options narrowRange(Boolean narrowRange) { } } + @OpInputsMetadata( + outputsClass = FakeQuantWithMinMaxArgsGradient.class + ) public static class Inputs extends RawOpInputs { /** * Backpropagated gradients above the FakeQuantWithMinMaxArgs operation. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVars.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVars.java index d94a0b1442a..3e27cbdf9a5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVars.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVars.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; @@ -56,6 +58,10 @@ *
    This operation has a gradient and thus allows for training {@code min} and {@code max} * values. */ +@OpMetadata( + opType = FakeQuantWithMinMaxVars.OP_NAME, + inputsClass = FakeQuantWithMinMaxVars.Inputs.class +) @Operator( group = "quantization" ) @@ -67,8 +73,8 @@ public final class FakeQuantWithMinMaxVars extends RawOp implements Operand outputs; - private FakeQuantWithMinMaxVars(Operation operation) { - super(operation); + public FakeQuantWithMinMaxVars(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputs = operation.output(outputIdx++); } @@ -173,6 +179,9 @@ public Options narrowRange(Boolean narrowRange) { } } + @OpInputsMetadata( + outputsClass = FakeQuantWithMinMaxVars.class + ) public static class Inputs extends RawOpInputs { /** * The inputs input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVarsGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVarsGradient.java index 38090987274..aa6f265322c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVarsGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVarsGradient.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** * Compute gradients for a FakeQuantWithMinMaxVars operation. */ +@OpMetadata( + opType = FakeQuantWithMinMaxVarsGradient.OP_NAME, + inputsClass = FakeQuantWithMinMaxVarsGradient.Inputs.class +) @Operator( group = "quantization" ) @@ -48,8 +54,8 @@ public final class FakeQuantWithMinMaxVarsGradient extends RawOp { private Output backpropWrtMax; - private FakeQuantWithMinMaxVarsGradient(Operation operation) { - super(operation); + public FakeQuantWithMinMaxVarsGradient(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; backpropsWrtInput = operation.output(outputIdx++); backpropWrtMin = operation.output(outputIdx++); @@ -175,6 +181,9 @@ public Options narrowRange(Boolean narrowRange) { } } + @OpInputsMetadata( + outputsClass = FakeQuantWithMinMaxVarsGradient.class + ) public static class Inputs extends RawOpInputs { /** * Backpropagated gradients above the FakeQuantWithMinMaxVars operation. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVarsPerChannel.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVarsPerChannel.java index 8fba3a49f46..cafc1deed7e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVarsPerChannel.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVarsPerChannel.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; @@ -57,6 +59,10 @@ *
    This operation has a gradient and thus allows for training {@code min} and {@code max} * values. */ +@OpMetadata( + opType = FakeQuantWithMinMaxVarsPerChannel.OP_NAME, + inputsClass = FakeQuantWithMinMaxVarsPerChannel.Inputs.class +) @Operator( group = "quantization" ) @@ -68,8 +74,8 @@ public final class FakeQuantWithMinMaxVarsPerChannel extends RawOp implements Op private Output outputs; - private FakeQuantWithMinMaxVarsPerChannel(Operation operation) { - super(operation); + public FakeQuantWithMinMaxVarsPerChannel(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputs = operation.output(outputIdx++); } @@ -174,6 +180,9 @@ public Options narrowRange(Boolean narrowRange) { } } + @OpInputsMetadata( + outputsClass = FakeQuantWithMinMaxVarsPerChannel.class + ) public static class Inputs extends RawOpInputs { /** * The inputs input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVarsPerChannelGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVarsPerChannelGradient.java index 5ee0cbf2450..7ac14adde54 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVarsPerChannelGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/FakeQuantWithMinMaxVarsPerChannelGradient.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** * Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation. 
*/ +@OpMetadata( + opType = FakeQuantWithMinMaxVarsPerChannelGradient.OP_NAME, + inputsClass = FakeQuantWithMinMaxVarsPerChannelGradient.Inputs.class +) @Operator( group = "quantization" ) @@ -48,8 +54,8 @@ public final class FakeQuantWithMinMaxVarsPerChannelGradient extends RawOp { private Output backpropWrtMax; - private FakeQuantWithMinMaxVarsPerChannelGradient(Operation operation) { - super(operation); + public FakeQuantWithMinMaxVarsPerChannelGradient(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; backpropsWrtInput = operation.output(outputIdx++); backpropWrtMin = operation.output(outputIdx++); @@ -179,6 +185,9 @@ public Options narrowRange(Boolean narrowRange) { } } + @OpInputsMetadata( + outputsClass = FakeQuantWithMinMaxVarsPerChannelGradient.class + ) public static class Inputs extends RawOpInputs { /** * Backpropagated gradients above the FakeQuantWithMinMaxVars operation, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Quantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Quantize.java index d3223aa4e9d..cb1262782bc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Quantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Quantize.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -129,6 +131,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Quantize.OP_NAME, + inputsClass = Quantize.Inputs.class +) @Operator( group = "quantization" ) @@ -144,8 +150,8 @@ public final class Quantize extends RawOp { private Output outputMax; - private Quantize(Operation operation) { - super(operation); + public Quantize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); outputMin = operation.output(outputIdx++); @@ -358,6 +364,9 @@ public Options ensureMinimumRange(Float ensureMinimumRange) { } } + @OpInputsMetadata( + outputsClass = Quantize.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantize.java index 4209187b9ef..aa037696632 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantize.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizeAndDequantize.OP_NAME, + inputsClass = QuantizeAndDequantize.Inputs.class +) @Operator( group = "quantization" ) @@ -50,8 +56,8 @@ 
public final class QuantizeAndDequantize extends RawOp implem private Output output; - private QuantizeAndDequantize(Operation operation) { - super(operation); + public QuantizeAndDequantize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -211,6 +217,9 @@ public Options axis(Long axis) { } } + @OpInputsMetadata( + outputsClass = QuantizeAndDequantize.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV3.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV3.java index 4e2eff27b37..00948dce45b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV3.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV3.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizeAndDequantizeV3.OP_NAME, + inputsClass = QuantizeAndDequantizeV3.Inputs.class +) @Operator( group = "quantization" ) @@ -50,8 +56,8 @@ public final class QuantizeAndDequantizeV3 extends RawOp impl private Output output; - private QuantizeAndDequantizeV3(Operation operation) { - super(operation); + public QuantizeAndDequantizeV3(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -211,6 +217,9 @@ public Options axis(Long axis) { } } + @OpInputsMetadata( + outputsClass = QuantizeAndDequantizeV3.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4.java index 2b80e1cac70..64a32b38c9f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizeAndDequantizeV4.OP_NAME, + inputsClass = QuantizeAndDequantizeV4.Inputs.class +) @Operator( group = "quantization" ) @@ -49,8 +55,8 @@ public final class QuantizeAndDequantizeV4 extends RawOp impl private Output output; - private QuantizeAndDequantizeV4(Operation operation) { - super(operation); + public QuantizeAndDequantizeV4(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -286,6 
+292,9 @@ public Options axis(Long axis) { } } + @OpInputsMetadata( + outputsClass = QuantizeAndDequantizeV4.class + ) public static class Inputs extends RawOpInputs> { /** * Tensor to quantize and then dequantize. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4Grad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4Grad.java index 7a70dd57298..8b3d8614840 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4Grad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4Grad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code input_backprop} output */ +@OpMetadata( + opType = QuantizeAndDequantizeV4Grad.OP_NAME, + inputsClass = QuantizeAndDequantizeV4Grad.Inputs.class +) @Operator( group = "quantization" ) @@ -53,8 +59,8 @@ public final class QuantizeAndDequantizeV4Grad extends RawOp private Output inputMaxBackprop; - private QuantizeAndDequantizeV4Grad(Operation operation) { - super(operation); + public QuantizeAndDequantizeV4Grad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; inputBackprop = operation.output(outputIdx++); inputMinBackprop = operation.output(outputIdx++); @@ -152,6 +158,9 @@ public Options axis(Long axis) { } } + @OpInputsMetadata( + outputsClass = QuantizeAndDequantizeV4Grad.class + ) public static class Inputs extends RawOpInputs> { /** * The gradients input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeDownAndShrinkRange.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeDownAndShrinkRange.java index 20302f5e4ad..f59057ba298 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeDownAndShrinkRange.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeDownAndShrinkRange.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -57,6 +59,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizeDownAndShrinkRange.OP_NAME, + inputsClass = QuantizeDownAndShrinkRange.Inputs.class +) @Operator( group = "quantization" ) @@ -72,8 +78,8 @@ public final class QuantizeDownAndShrinkRange extends RawOp { private Output outputMax; - private QuantizeDownAndShrinkRange(Operation operation) { - super(operation); + public QuantizeDownAndShrinkRange(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); outputMin = operation.output(outputIdx++); @@ -132,6 +138,9 @@ public Output outputMax() { return outputMax; } + @OpInputsMetadata( + outputsClass = 
QuantizeDownAndShrinkRange.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedConcat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedConcat.java index f95f901d0d2..fb59b8078ec 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedConcat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedConcat.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = QuantizedConcat.OP_NAME, + inputsClass = QuantizedConcat.Inputs.class +) @Operator( group = "quantization" ) @@ -54,8 +60,8 @@ public final class QuantizedConcat extends RawOp { private Output outputMax; - private QuantizedConcat(Operation operation) { - super(operation); + public QuantizedConcat(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); outputMin = operation.output(outputIdx++); @@ -118,6 +124,9 @@ public Output outputMax() { return outputMax; } + @OpInputsMetadata( + outputsClass = QuantizedConcat.class + ) public static class Inputs extends RawOpInputs> { /** * 0-D. The dimension along which to concatenate. Must be in the diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndDequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndDequantize.java index b600d7992f9..8504967d26b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndDequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndDequantize.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = QuantizedMatMulWithBiasAndDequantize.OP_NAME, + inputsClass = QuantizedMatMulWithBiasAndDequantize.Inputs.class +) public final class QuantizedMatMulWithBiasAndDequantize extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class QuantizedMatMulWithBiasAndDequantize exten private Output out; - private QuantizedMatMulWithBiasAndDequantize(Operation operation) { - super(operation); + public QuantizedMatMulWithBiasAndDequantize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -195,6 +201,9 @@ public Options inputQuantMode(String inputQuantMode) { } } + @OpInputsMetadata( + outputsClass = QuantizedMatMulWithBiasAndDequantize.class + ) public static class 
Inputs extends RawOpInputs> { /** * The a input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndRequantize.java index 2e8463dad54..3bd6f6b498c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndRequantize.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = QuantizedMatMulWithBiasAndRequantize.OP_NAME, + inputsClass = QuantizedMatMulWithBiasAndRequantize.Inputs.class +) public final class QuantizedMatMulWithBiasAndRequantize extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -49,8 +55,8 @@ public final class QuantizedMatMulWithBiasAndRequantize exten private Output maxOut; - private QuantizedMatMulWithBiasAndRequantize(Operation operation) { - super(operation); + public QuantizedMatMulWithBiasAndRequantize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); minOut = operation.output(outputIdx++); @@ -214,6 +220,9 @@ public Options inputQuantMode(String inputQuantMode) { } } + @OpInputsMetadata( + outputsClass = QuantizedMatMulWithBiasAndRequantize.class + ) public static class Inputs extends RawOpInputs> { /** * The a input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/RequantizationRange.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/RequantizationRange.java index ad1515a3e56..8be9d51e564 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/RequantizationRange.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/RequantizationRange.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -39,6 +41,10 @@ * used to produce the {@code requested_output_min} and {@code requested_output_max} for * {@code Requantize}. 
*/ +@OpMetadata( + opType = RequantizationRange.OP_NAME, + inputsClass = RequantizationRange.Inputs.class +) @Operator( group = "quantization" ) @@ -52,8 +58,8 @@ public final class RequantizationRange extends RawOp { private Output outputMax; - private RequantizationRange(Operation operation) { - super(operation); + public RequantizationRange(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputMin = operation.output(outputIdx++); outputMax = operation.output(outputIdx++); @@ -98,6 +104,9 @@ public Output outputMax() { return outputMax; } + @OpInputsMetadata( + outputsClass = RequantizationRange.class + ) public static class Inputs extends RawOpInputs { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Requantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Requantize.java index 0790db45399..ed07bdee180 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Requantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Requantize.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -44,6 +46,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Requantize.OP_NAME, + inputsClass = Requantize.Inputs.class +) @Operator( group = "quantization" ) @@ -59,8 +65,8 @@ public final class Requantize extends RawOp { private Output outputMax; - private Requantize(Operation operation) { - super(operation); + public Requantize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); outputMin = operation.output(outputIdx++); @@ -124,6 +130,9 @@ public Output outputMax() { return outputMax; } + @OpInputsMetadata( + outputsClass = Requantize.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedBincount.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedBincount.java index 0de4dea307e..26ae65c0bc0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedBincount.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedBincount.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -43,6 +45,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RaggedBincount.OP_NAME, + inputsClass = RaggedBincount.Inputs.class +) @Operator( group = "ragged" ) @@ -54,8 +60,8 @@ public final class RaggedBincount extends RawOp implements Op private Output output; - private RaggedBincount(Operation operation) { - super(operation); + public RaggedBincount(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = 
operation.output(outputIdx++); } @@ -142,6 +148,9 @@ public Options binaryOutput(Boolean binaryOutput) { } } + @OpInputsMetadata( + outputsClass = RaggedBincount.class + ) public static class Inputs extends RawOpInputs> { /** * 1D int64 {@code Tensor}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCountSparseOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCountSparseOutput.java index 6eff8ab6395..4a7f779ae18 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCountSparseOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCountSparseOutput.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code output_values} output */ +@OpMetadata( + opType = RaggedCountSparseOutput.OP_NAME, + inputsClass = RaggedCountSparseOutput.Inputs.class +) public final class RaggedCountSparseOutput extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -49,8 +55,8 @@ public final class RaggedCountSparseOutput extends RawOp { private Output outputDenseShape; - private RaggedCountSparseOutput(Operation operation) { - super(operation); + public RaggedCountSparseOutput(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputIndices = operation.output(outputIdx++); outputValues = operation.output(outputIdx++); @@ -181,6 +187,9 @@ public Options maxlength(Long maxlength) { } } + @OpInputsMetadata( + outputsClass = RaggedCountSparseOutput.class + ) public static class Inputs extends RawOpInputs> { /** * Tensor containing the row splits of the ragged tensor to count. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCross.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCross.java index 9336beb882a..554c33b24c8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCross.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCross.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; @@ -41,6 +43,10 @@ * * @param data type for {@code output_row_splits} output */ +@OpMetadata( + opType = RaggedCross.OP_NAME, + inputsClass = RaggedCross.Inputs.class +) public final class RaggedCross extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -51,8 +57,8 @@ public final class RaggedCross extends RawOp private Output outputRowSplits; - private RaggedCross(Operation operation) { - super(operation); + public RaggedCross(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputValues = operation.output(outputIdx++); outputRowSplits = operation.output(outputIdx++); @@ -124,6 +130,9 @@ public Output outputRowSplits() { return outputRowSplits; } + @OpInputsMetadata( + outputsClass = RaggedCross.class + ) public static class Inputs extends RawOpInputs> { /** * The values tensor for each RaggedTensor input. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedGather.java index 05747069d47..0f747675f76 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedGather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedGather.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -58,6 +60,10 @@ * * @param data type for {@code output_dense_values} output */ +@OpMetadata( + opType = RaggedGather.OP_NAME, + inputsClass = RaggedGather.Inputs.class +) public final class RaggedGather extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -69,8 +75,8 @@ public final class RaggedGather extends RawO private Output outputDenseValues; @SuppressWarnings("unchecked") - private RaggedGather(Operation operation) { - super(operation); + public RaggedGather(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputNestedSplitsLength = operation.outputListLength("output_nested_splits"); outputNestedSplits = Arrays.asList((Output[]) operation.outputList(outputIdx, outputNestedSplitsLength)); @@ -129,6 +135,9 @@ public Output outputDenseValues() { return outputDenseValues; } + @OpInputsMetadata( + outputsClass = RaggedGather.class + ) public static class Inputs extends RawOpInputs> { /** * The {@code nested_row_splits} tensors that define the row-partitioning for the diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedRange.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedRange.java index 98dcc92980a..3111907e994 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedRange.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedRange.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; @@ -52,6 +54,10 @@ * * @param data type for {@code rt_dense_values} output */ +@OpMetadata( + opType = RaggedRange.OP_NAME, + inputsClass = RaggedRange.Inputs.class +) public final class RaggedRange extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -62,8 +68,8 @@ public final class RaggedRange extends Raw private Output rtDenseValues; - private RaggedRange(Operation operation) { - super(operation); + public RaggedRange(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; rtNestedSplits = operation.output(outputIdx++); rtDenseValues = operation.output(outputIdx++); @@ -130,6 +136,9 @@ public Output rtDenseValues() { return rtDenseValues; } + @OpInputsMetadata( + outputsClass = RaggedRange.class + ) public static class Inputs extends RawOpInputs> { /** * The starts of each range. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorFromVariant.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorFromVariant.java index e5d5e2c0d79..600b19b219f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorFromVariant.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorFromVariant.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; @@ -52,6 +54,10 @@ * * @param data type for {@code output_dense_values} output */ +@OpMetadata( + opType = RaggedTensorFromVariant.OP_NAME, + inputsClass = RaggedTensorFromVariant.Inputs.class +) public final class RaggedTensorFromVariant extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -63,8 +69,8 @@ public final class RaggedTensorFromVariant e private Output outputDenseValues; @SuppressWarnings("unchecked") - private RaggedTensorFromVariant(Operation operation) { - super(operation); + public RaggedTensorFromVariant(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputNestedSplitsLength = operation.outputListLength("output_nested_splits"); outputNestedSplits = Arrays.asList((Output[]) operation.outputList(outputIdx, outputNestedSplitsLength)); @@ -143,6 +149,9 @@ public Output outputDenseValues() { return outputDenseValues; } + @OpInputsMetadata( + outputsClass = RaggedTensorFromVariant.class + ) public static class Inputs extends RawOpInputs> { /** * A {@code variant} Tensor containing encoded {@code 
RaggedTensor}s. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToSparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToSparse.java index 6c15ca7797c..aef6bb6c241 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToSparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToSparse.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; @@ -41,6 +43,10 @@ * * @param data type for {@code sparse_values} output */ +@OpMetadata( + opType = RaggedTensorToSparse.OP_NAME, + inputsClass = RaggedTensorToSparse.Inputs.class +) public final class RaggedTensorToSparse extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -53,8 +59,8 @@ public final class RaggedTensorToSparse extends RawOp { private Output sparseDenseShape; - private RaggedTensorToSparse(Operation operation) { - super(operation); + public RaggedTensorToSparse(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; sparseIndices = operation.output(outputIdx++); sparseValues = operation.output(outputIdx++); @@ -108,6 +114,9 @@ public Output sparseDenseShape() { return sparseDenseShape; } + @OpInputsMetadata( + outputsClass = RaggedTensorToSparse.class + ) public static class Inputs extends RawOpInputs> { /** * The {@code row_splits} for the {@code RaggedTensor}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToTensor.java index 3ad46ad6aee..4dc3e6c890f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToTensor.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -54,6 +56,10 @@ * * @param data type for {@code result} output */ +@OpMetadata( + opType = RaggedTensorToTensor.OP_NAME, + inputsClass = RaggedTensorToTensor.Inputs.class +) public final class RaggedTensorToTensor extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -62,8 +68,8 @@ public final class RaggedTensorToTensor extends RawOp implement private Output result; - private RaggedTensorToTensor(Operation operation) { - super(operation); + public RaggedTensorToTensor(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; result = operation.output(outputIdx++); } @@ -139,6 +145,9 @@ public Output asOutput() { return result; } + @OpInputsMetadata( + outputsClass = RaggedTensorToTensor.class + ) public static class Inputs extends RawOpInputs> { /** * The desired shape of the output tensor. 
If left unspecified (empty), diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariant.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariant.java index 7d02bcaf565..42a12ffded1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariant.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariant.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -45,6 +47,10 @@ * is wrapped in a scalar {@code variant} Tensor. See {@code RaggedTensorFromVariant} for the * corresponding decoding logic. */ +@OpMetadata( + opType = RaggedTensorToVariant.OP_NAME, + inputsClass = RaggedTensorToVariant.Inputs.class +) public final class RaggedTensorToVariant extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -54,8 +60,8 @@ public final class RaggedTensorToVariant extends RawOp implements Operand private Output encodedRagged; @SuppressWarnings("unchecked") - private RaggedTensorToVariant(Operation operation) { - super(operation); + public RaggedTensorToVariant(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; encodedRagged = operation.output(outputIdx++); } @@ -98,6 +104,9 @@ public Output asOutput() { return (Output) encodedRagged; } + @OpInputsMetadata( + outputsClass = RaggedTensorToVariant.class + ) public static class Inputs extends RawOpInputs { /** * A list of one or more Tensors representing the splits of the input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariantGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariantGradient.java index 92dd3f247d5..f381cb631a2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariantGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariantGradient.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -42,6 +44,10 @@ * * @param data type for {@code dense_values_grad} output */ +@OpMetadata( + opType = RaggedTensorToVariantGradient.OP_NAME, + inputsClass = RaggedTensorToVariantGradient.Inputs.class +) public final class RaggedTensorToVariantGradient extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class RaggedTensorToVariantGradient extends RawOp private Output denseValuesGrad; - private RaggedTensorToVariantGradient(Operation operation) { - super(operation); + public RaggedTensorToVariantGradient(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; denseValuesGrad = operation.output(outputIdx++); } @@ -96,6 +102,9 @@ public Output asOutput() { return 
denseValuesGrad; } + @OpInputsMetadata( + outputsClass = RaggedTensorToVariantGradient.class + ) public static class Inputs extends RawOpInputs> { /** * A {@code variant} Tensor containing encoded {@code RaggedTensor} gradients. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AllCandidateSampler.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AllCandidateSampler.java index c22544c3206..9f762d54632 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AllCandidateSampler.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AllCandidateSampler.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; @@ -41,6 +43,10 @@ * the sampled candidates must be chosen independently of the context and of the * true labels. */ +@OpMetadata( + opType = AllCandidateSampler.OP_NAME, + inputsClass = AllCandidateSampler.Inputs.class +) @Operator( group = "random" ) @@ -56,8 +62,8 @@ public final class AllCandidateSampler extends RawOp { private Output sampledExpectedCount; - private AllCandidateSampler(Operation operation) { - super(operation); + public AllCandidateSampler(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; sampledCandidates = operation.output(outputIdx++); trueExpectedCount = operation.output(outputIdx++); @@ -192,6 +198,9 @@ public Options seed2(Long seed2) { } } + @OpInputsMetadata( + outputsClass = AllCandidateSampler.class + ) public static class Inputs extends RawOpInputs { /** * A batch_size * num_true matrix, in which each row contains the diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AnonymousRandomSeedGenerator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AnonymousRandomSeedGenerator.java index 44c47dd9bdf..3120b1f7946 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AnonymousRandomSeedGenerator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AnonymousRandomSeedGenerator.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; /** * The AnonymousRandomSeedGenerator operation */ +@OpMetadata( + opType = AnonymousRandomSeedGenerator.OP_NAME, + inputsClass = AnonymousRandomSeedGenerator.Inputs.class +) public final class AnonymousRandomSeedGenerator extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class AnonymousRandomSeedGenerator extends RawOp { private Output deleter; @SuppressWarnings("unchecked") - private AnonymousRandomSeedGenerator(Operation operation) { - super(operation); + public AnonymousRandomSeedGenerator(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); deleter = operation.output(outputIdx++); @@ -88,6 +94,9 @@ public Output deleter() { return deleter; } + @OpInputsMetadata( + outputsClass 
= AnonymousRandomSeedGenerator.class + ) public static class Inputs extends RawOpInputs { /** * The seed input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AnonymousSeedGenerator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AnonymousSeedGenerator.java index 09f83de9c76..e3e20b71bb4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AnonymousSeedGenerator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/AnonymousSeedGenerator.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TBool; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -34,6 +36,10 @@ /** * The AnonymousSeedGenerator operation */ +@OpMetadata( + opType = AnonymousSeedGenerator.OP_NAME, + inputsClass = AnonymousSeedGenerator.Inputs.class +) public final class AnonymousSeedGenerator extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class AnonymousSeedGenerator extends RawOp { private Output deleter; @SuppressWarnings("unchecked") - private AnonymousSeedGenerator(Operation operation) { - super(operation); + public AnonymousSeedGenerator(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); deleter = operation.output(outputIdx++); @@ -91,6 +97,9 @@ public Output deleter() { return deleter; } + @OpInputsMetadata( + outputsClass = AnonymousSeedGenerator.class + ) public static class Inputs extends RawOpInputs { /** * The seed input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DeleteRandomSeedGenerator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DeleteRandomSeedGenerator.java index 6dad0b8957e..146310df6e1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DeleteRandomSeedGenerator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DeleteRandomSeedGenerator.java @@ -26,19 +26,25 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.family.TType; /** * The DeleteRandomSeedGenerator operation */ +@OpMetadata( + opType = DeleteRandomSeedGenerator.OP_NAME, + inputsClass = DeleteRandomSeedGenerator.Inputs.class +) public final class DeleteRandomSeedGenerator extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "DeleteRandomSeedGenerator"; - private DeleteRandomSeedGenerator(Operation operation) { - super(operation); + public DeleteRandomSeedGenerator(Operation operation) { + super(operation, OP_NAME); } /** @@ -60,6 +66,9 @@ public static DeleteRandomSeedGenerator create(Scope scope, Operand { /** * The handle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DeleteSeedGenerator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DeleteSeedGenerator.java index 33bcd5d5714..37cb0c3ca8e 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DeleteSeedGenerator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DeleteSeedGenerator.java @@ -26,19 +26,25 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.family.TType; /** * The DeleteSeedGenerator operation */ +@OpMetadata( + opType = DeleteSeedGenerator.OP_NAME, + inputsClass = DeleteSeedGenerator.Inputs.class +) public final class DeleteSeedGenerator extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "DeleteSeedGenerator"; - private DeleteSeedGenerator(Operation operation) { - super(operation); + public DeleteSeedGenerator(Operation operation) { + super(operation, OP_NAME); } /** @@ -60,6 +66,9 @@ public static DeleteSeedGenerator create(Scope scope, Operand h return new DeleteSeedGenerator(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = DeleteSeedGenerator.class + ) public static class Inputs extends RawOpInputs { /** * The handle input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DummySeedGenerator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DummySeedGenerator.java index 0eafa99d317..8af88774a35 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DummySeedGenerator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/DummySeedGenerator.java @@ -27,11 +27,17 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.family.TType; /** * The DummySeedGenerator operation */ +@OpMetadata( + opType = DummySeedGenerator.OP_NAME, + inputsClass = DummySeedGenerator.Inputs.class +) public final class DummySeedGenerator extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -41,8 +47,8 @@ public final class DummySeedGenerator extends RawOp implements Operand { private Output handle; @SuppressWarnings("unchecked") - private DummySeedGenerator(Operation operation) { - super(operation); + public DummySeedGenerator(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -76,6 +82,9 @@ public Output asOutput() { return (Output) handle; } + @OpInputsMetadata( + outputsClass = DummySeedGenerator.class + ) public static class Inputs extends RawOpInputs { public Inputs(GraphOperation op) { super(new DummySeedGenerator(op), op, Arrays.asList()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/LogUniformCandidateSampler.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/LogUniformCandidateSampler.java index b49dd8fcd72..2c51755d5d1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/LogUniformCandidateSampler.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/LogUniformCandidateSampler.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import 
org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; @@ -41,6 +43,10 @@ * the sampled candidates must be chosen independently of the context and of the * true labels. */ +@OpMetadata( + opType = LogUniformCandidateSampler.OP_NAME, + inputsClass = LogUniformCandidateSampler.Inputs.class +) @Operator( group = "random" ) @@ -56,8 +62,8 @@ public final class LogUniformCandidateSampler extends RawOp { private Output sampledExpectedCount; - private LogUniformCandidateSampler(Operation operation) { - super(operation); + public LogUniformCandidateSampler(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; sampledCandidates = operation.output(outputIdx++); trueExpectedCount = operation.output(outputIdx++); @@ -194,6 +200,9 @@ public Options seed2(Long seed2) { } } + @OpInputsMetadata( + outputsClass = LogUniformCandidateSampler.class + ) public static class Inputs extends RawOpInputs { /** * A batch_size * num_true matrix, in which each row contains the diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/Multinomial.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/Multinomial.java index 14af4090bd8..9edf3fbebf2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/Multinomial.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/Multinomial.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Multinomial.OP_NAME, + inputsClass = Multinomial.Inputs.class +) @Operator( group = "random" ) @@ -50,8 +56,8 @@ public final class Multinomial extends RawOp implements Opera private Output output; - private Multinomial(Operation operation) { - super(operation); + public Multinomial(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -180,6 +186,9 @@ public Options seed2(Long seed2) { } } + @OpInputsMetadata( + outputsClass = Multinomial.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D Tensor with shape {@code [batch_size, num_classes]}. 
Each slice {@code [i, :]} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/NonDeterministicInts.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/NonDeterministicInts.java index bbf03174017..95a3f41e8bf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/NonDeterministicInts.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/NonDeterministicInts.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = NonDeterministicInts.OP_NAME, + inputsClass = NonDeterministicInts.Inputs.class +) public final class NonDeterministicInts extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class NonDeterministicInts extends RawOp implement private Output output; - private NonDeterministicInts(Operation operation) { - super(operation); + public NonDeterministicInts(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -100,6 +106,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = NonDeterministicInts.class + ) public static class Inputs extends RawOpInputs> { /** * The shape of the output tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/ParameterizedTruncatedNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/ParameterizedTruncatedNormal.java index bce121c2d44..4d04fb97e18 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/ParameterizedTruncatedNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/ParameterizedTruncatedNormal.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ParameterizedTruncatedNormal.OP_NAME, + inputsClass = ParameterizedTruncatedNormal.Inputs.class +) @Operator( group = "random" ) @@ -49,8 +55,8 @@ public final class ParameterizedTruncatedNormal extends RawOp private Output output; - private ParameterizedTruncatedNormal(Operation operation) { - super(operation); + public ParameterizedTruncatedNormal(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -167,6 +173,9 @@ public Options seed2(Long seed2) { } } + @OpInputsMetadata( + outputsClass = ParameterizedTruncatedNormal.class + ) public static class Inputs extends RawOpInputs> { /** * The shape of the output tensor. Batches are indexed by the 0th dimension. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGamma.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGamma.java index eecd013f0af..bbef7f42515 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGamma.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGamma.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RandomGamma.OP_NAME, + inputsClass = RandomGamma.Inputs.class +) @Operator( group = "random" ) @@ -50,8 +56,8 @@ public final class RandomGamma extends RawOp implements Opera private Output output; - private RandomGamma(Operation operation) { - super(operation); + public RandomGamma(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -163,6 +169,9 @@ public Options seed2(Long seed2) { } } + @OpInputsMetadata( + outputsClass = RandomGamma.class + ) public static class Inputs extends RawOpInputs> { /** * 1-D integer tensor. Shape of independent samples to draw from each diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGammaGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGammaGrad.java index 2355dde788e..220cfa27ee3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGammaGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGammaGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RandomGammaGrad.OP_NAME, + inputsClass = RandomGammaGrad.Inputs.class +) public final class RandomGammaGrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RandomGammaGrad extends RawOp implements O private Output output; - private RandomGammaGrad(Operation operation) { - super(operation); + public RandomGammaGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -83,6 +89,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = RandomGammaGrad.class + ) public static class Inputs extends RawOpInputs> { /** * The alpha input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomPoisson.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomPoisson.java index 2e98623df39..1f58f155643 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomPoisson.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomPoisson.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -46,6 +48,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RandomPoisson.OP_NAME, + inputsClass = RandomPoisson.Inputs.class +) @Operator( group = "random" ) @@ -57,8 +63,8 @@ public final class RandomPoisson extends RawOp implements Ope private Output output; - private RandomPoisson(Operation operation) { - super(operation); + public RandomPoisson(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -192,6 +198,9 @@ public Options seed2(Long seed2) { } } + @OpInputsMetadata( + outputsClass = RandomPoisson.class + ) public static class Inputs extends RawOpInputs> { /** * 1-D integer tensor. Shape of independent samples to draw from each diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomShuffle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomShuffle.java index 35dad551e97..93f7482cbdc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomShuffle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomShuffle.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -44,6 +46,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RandomShuffle.OP_NAME, + inputsClass = RandomShuffle.Inputs.class +) @Operator( group = "random" ) @@ -55,8 +61,8 @@ public final class RandomShuffle extends RawOp implements Opera private Output output; - private RandomShuffle(Operation operation) { - super(operation); + public RandomShuffle(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -163,6 +169,9 @@ public Options seed2(Long seed2) { } } + @OpInputsMetadata( + outputsClass = RandomShuffle.class + ) public static class Inputs extends RawOpInputs> { /** * The tensor to be shuffled. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomStandardNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomStandardNormal.java index fee428b29b7..91fc67dbcf3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomStandardNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomStandardNormal.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RandomStandardNormal.OP_NAME, + inputsClass = RandomStandardNormal.Inputs.class +) @Operator( group = "random" ) @@ -49,8 +55,8 @@ public final class RandomStandardNormal extends RawOp impleme private Output output; - private RandomStandardNormal(Operation operation) { - super(operation); + public RandomStandardNormal(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -158,6 +164,9 @@ public Options seed2(Long seed2) { } } + @OpInputsMetadata( + outputsClass = RandomStandardNormal.class + ) public static class Inputs extends RawOpInputs> { /** * The shape of the output tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniform.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniform.java index 25ded48d9bf..4cac0972cd5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniform.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniform.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RandomUniform.OP_NAME, + inputsClass = RandomUniform.Inputs.class +) @Operator( group = "random" ) @@ -50,8 +56,8 @@ public final class RandomUniform extends RawOp implements Ope private Output output; - private RandomUniform(Operation operation) { - super(operation); + public RandomUniform(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -159,6 +165,9 @@ public Options seed2(Long seed2) { } } + @OpInputsMetadata( + outputsClass = RandomUniform.class + ) public static class Inputs extends RawOpInputs> { /** * The shape of the output tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniformInt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniformInt.java index 417766f1e2a..3121cc28720 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniformInt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniformInt.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -42,6 +44,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RandomUniformInt.OP_NAME, + inputsClass = RandomUniformInt.Inputs.class +) @Operator( group = "random" ) @@ -53,8 +59,8 @@ public final class RandomUniformInt extends RawOp implements private Output output; - private RandomUniformInt(Operation operation) { - super(operation); + public RandomUniformInt(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -164,6 +170,9 @@ public Options seed2(Long seed2) { } } + @OpInputsMetadata( + outputsClass = RandomUniformInt.class + ) public static class Inputs extends RawOpInputs> { /** * The shape of the output tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RecordInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RecordInput.java index 9b4cb861f58..978e2b9b1cb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RecordInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RecordInput.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; /** * Emits randomized records. */ +@OpMetadata( + opType = RecordInput.OP_NAME, + inputsClass = RecordInput.Inputs.class +) @Operator( group = "random" ) @@ -44,8 +50,8 @@ public final class RecordInput extends RawOp implements Operand { private Output records; - private RecordInput(Operation operation) { - super(operation); + public RecordInput(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; records = operation.output(outputIdx++); } @@ -253,6 +259,9 @@ public Options compressionType(String compressionType) { } } + @OpInputsMetadata( + outputsClass = RecordInput.class + ) public static class Inputs extends RawOpInputs { /** * Glob pattern for the data files. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RngReadAndSkip.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RngReadAndSkip.java index d6ceea1475b..8ac6ce37392 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RngReadAndSkip.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RngReadAndSkip.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * (or any other distribution). The actual increment added to the * counter is an unspecified implementation choice. */ +@OpMetadata( + opType = RngReadAndSkip.OP_NAME, + inputsClass = RngReadAndSkip.Inputs.class +) public final class RngReadAndSkip extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class RngReadAndSkip extends RawOp implements Operand { private Output value; - private RngReadAndSkip(Operation operation) { - super(operation); + public RngReadAndSkip(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; value = operation.output(outputIdx++); } @@ -87,6 +93,9 @@ public Output asOutput() { return value; } + @OpInputsMetadata( + outputsClass = RngReadAndSkip.class + ) public static class Inputs extends RawOpInputs { /** * The handle of the resource variable that stores the state of the RNG. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RngSkip.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RngSkip.java index e7eaaa81283..8e53d849cb7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RngSkip.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RngSkip.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -36,14 +38,18 @@ * (or any other distribution). The actual increment added to the * counter is an unspecified implementation detail. */ +@OpMetadata( + opType = RngSkip.OP_NAME, + inputsClass = RngSkip.Inputs.class +) public final class RngSkip extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "RngSkip"; - private RngSkip(Operation operation) { - super(operation); + public RngSkip(Operation operation) { + super(operation, OP_NAME); } /** @@ -67,6 +73,9 @@ public static RngSkip create(Scope scope, Operand resource, return new RngSkip(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = RngSkip.class + ) public static class Inputs extends RawOpInputs { /** * The handle of the resource variable that stores the state of the RNG. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulRandomBinomial.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulRandomBinomial.java index 8f1467afe73..08ae95ce49d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulRandomBinomial.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulRandomBinomial.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = StatefulRandomBinomial.OP_NAME, + inputsClass = StatefulRandomBinomial.Inputs.class +) @Operator( group = "random" ) @@ -50,8 +56,8 @@ public final class StatefulRandomBinomial extends RawOp imple private Output output; - private StatefulRandomBinomial(Operation operation) { - super(operation); + public StatefulRandomBinomial(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -121,6 +127,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = StatefulRandomBinomial.class + ) public static class Inputs extends RawOpInputs> { /** * The resource input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulStandardNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulStandardNormal.java index f85677851fe..57c4ae60efc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulStandardNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulStandardNormal.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -40,6 +42,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = StatefulStandardNormal.OP_NAME, + inputsClass = StatefulStandardNormal.Inputs.class +) @Operator( group = "random" ) @@ -51,8 +57,8 @@ public final class StatefulStandardNormal extends RawOp impleme private Output output; - private StatefulStandardNormal(Operation operation) { - super(operation); + public StatefulStandardNormal(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -114,6 +120,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = StatefulStandardNormal.class + ) public static class Inputs extends RawOpInputs> { /** * The handle of the resource variable that stores the state of the RNG. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulTruncatedNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulTruncatedNormal.java index ded58540672..962a6cfc095 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulTruncatedNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulTruncatedNormal.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; @@ -41,6 +43,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = StatefulTruncatedNormal.OP_NAME, + inputsClass = StatefulTruncatedNormal.Inputs.class +) public final class StatefulTruncatedNormal extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -49,8 +55,8 @@ public final class StatefulTruncatedNormal extends RawOp implem private Output output; - private StatefulTruncatedNormal(Operation operation) { - super(operation); + public StatefulTruncatedNormal(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -112,6 +118,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = StatefulTruncatedNormal.class + ) public static class Inputs extends RawOpInputs> { /** * The handle of the resource variable that stores the state of the RNG. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniform.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniform.java index 4605231e1cb..fb3280dcc53 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniform.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniform.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; @@ -40,6 +42,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = StatefulUniform.OP_NAME, + inputsClass = StatefulUniform.Inputs.class +) public final class StatefulUniform extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -48,8 +54,8 @@ public final class StatefulUniform extends RawOp implements Ope private Output output; - private StatefulUniform(Operation operation) { - super(operation); + public StatefulUniform(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -110,6 +116,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = StatefulUniform.class + ) public static class Inputs extends RawOpInputs> { /** * The handle of the resource variable that stores the state of the RNG. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformFullInt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformFullInt.java index 527c1649f40..8dd4a433e45 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformFullInt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformFullInt.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = StatefulUniformFullInt.OP_NAME, + inputsClass = StatefulUniformFullInt.Inputs.class +) public final class StatefulUniformFullInt extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class StatefulUniformFullInt extends RawOp impleme private Output output; - private StatefulUniformFullInt(Operation operation) { - super(operation); + public StatefulUniformFullInt(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = StatefulUniformFullInt.class + ) public static class Inputs extends RawOpInputs> { /** * The handle of the resource variable that stores the state of the RNG. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformInt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformInt.java index e9d475b2d72..2b723d4c6f4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformInt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformInt.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -42,6 +44,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = StatefulUniformInt.OP_NAME, + inputsClass = StatefulUniformInt.Inputs.class +) public final class StatefulUniformInt extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class StatefulUniformInt extends RawOp implements private Output output; - private StatefulUniformInt(Operation operation) { - super(operation); + public StatefulUniformInt(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -97,6 +103,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = StatefulUniformInt.class + ) public static class Inputs extends RawOpInputs> { /** * The handle of the resource variable that stores the state of the RNG. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessMultinomial.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessMultinomial.java index 6d09e4dd755..adec914d632 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessMultinomial.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessMultinomial.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = StatelessMultinomial.OP_NAME, + inputsClass = StatelessMultinomial.Inputs.class +) @Operator( group = "random" ) @@ -50,8 +56,8 @@ public final class StatelessMultinomial extends RawOp impleme private Output output; - private StatelessMultinomial(Operation operation) { - super(operation); + public StatelessMultinomial(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -115,6 +121,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = StatelessMultinomial.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D Tensor with shape {@code [batch_size, num_classes]}. Each slice {@code [i, :]} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessParameterizedTruncatedNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessParameterizedTruncatedNormal.java index 70f982ac127..712a9fd0c57 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessParameterizedTruncatedNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessParameterizedTruncatedNormal.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = StatelessParameterizedTruncatedNormal.OP_NAME, + inputsClass = StatelessParameterizedTruncatedNormal.Inputs.class +) public final class StatelessParameterizedTruncatedNormal extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class StatelessParameterizedTruncatedNormal exte private Output output; - private StatelessParameterizedTruncatedNormal(Operation operation) { - super(operation); + public StatelessParameterizedTruncatedNormal(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -94,6 +100,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = StatelessParameterizedTruncatedNormal.class + ) public static class Inputs extends RawOpInputs> { /** * The shape of the output tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomBinomial.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomBinomial.java index 555f7113290..be2f0f66a1f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomBinomial.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomBinomial.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = StatelessRandomBinomial.OP_NAME, + inputsClass = StatelessRandomBinomial.Inputs.class +) public final class StatelessRandomBinomial extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -47,8 +53,8 @@ public final class StatelessRandomBinomial extends RawOp impl private Output output; - private StatelessRandomBinomial(Operation operation) { - super(operation); + public StatelessRandomBinomial(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -119,6 +125,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = StatelessRandomBinomial.class + ) public static class Inputs extends RawOpInputs> { /** * The shape of the output tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGamma.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGamma.java index b5383fa2d8a..2b53b2b1ab0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGamma.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGamma.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = StatelessRandomGamma.OP_NAME, + inputsClass = StatelessRandomGamma.Inputs.class +) public final class StatelessRandomGamma extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class StatelessRandomGamma extends RawOp impleme private Output output; - private StatelessRandomGamma(Operation operation) { - super(operation); + public StatelessRandomGamma(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = StatelessRandomGamma.class + ) public static class Inputs extends RawOpInputs> { /** * The shape of the output tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetKeyCounterAlg.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetKeyCounterAlg.java index 762d5f95512..db2e3a5581b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetKeyCounterAlg.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetKeyCounterAlg.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * Picks the best algorithm based on device, and scrambles seed into key and counter. * This op picks the best counter-based RNG algorithm based on device, and scrambles a shape-[2] seed into a key and a counter, both needed by the counter-based algorithm. The scrambling is opaque but approximately satisfies the property that different seed results in different key/counter pair (which will in turn result in different random numbers). */ +@OpMetadata( + opType = StatelessRandomGetKeyCounterAlg.OP_NAME, + inputsClass = StatelessRandomGetKeyCounterAlg.Inputs.class +) public final class StatelessRandomGetKeyCounterAlg extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -49,8 +55,8 @@ public final class StatelessRandomGetKeyCounterAlg extends RawOp { private Output alg; @SuppressWarnings("unchecked") - private StatelessRandomGetKeyCounterAlg(Operation operation) { - super(operation); + public StatelessRandomGetKeyCounterAlg(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; key = operation.output(outputIdx++); counter = operation.output(outputIdx++); @@ -101,6 +107,9 @@ public Output alg() { return alg; } + @OpInputsMetadata( + outputsClass = StatelessRandomGetKeyCounterAlg.class + ) public static class Inputs extends RawOpInputs { /** * 2 seeds (shape [2]). 
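The paired annotations make the op type, the op wrapper class, and its Inputs class discoverable from one another, which is presumably what the custom gradient and op introspection machinery in this patch relies on. A minimal lookup sketch follows; it assumes the annotations are retained at runtime (not shown in these hunks), and the helper class name is made up for illustration.

    import org.tensorflow.op.annotation.OpMetadata;
    import org.tensorflow.op.random.StatelessRandomGetKeyCounterAlg;

    // Hypothetical helper, not part of this patch.
    final class OpMetadataLookup {
      /** Returns the Inputs class recorded on an op wrapper, or null if the annotation is absent. */
      static Class<?> inputsClassOf(Class<?> opWrapperClass) {
        OpMetadata meta = opWrapperClass.getAnnotation(OpMetadata.class);
        return meta == null ? null : meta.inputsClass();
      }

      public static void main(String[] args) {
        // For the class in the hunk above, and assuming runtime retention, this
        // resolves to StatelessRandomGetKeyCounterAlg.Inputs.
        System.out.println(inputsClassOf(StatelessRandomGetKeyCounterAlg.class));
      }
    }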
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormal.java index dc42caec6c2..11cc0898f5d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormal.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -40,6 +42,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = StatelessRandomNormal.OP_NAME, + inputsClass = StatelessRandomNormal.Inputs.class +) @Operator( group = "random" ) @@ -51,8 +57,8 @@ public final class StatelessRandomNormal extends RawOp implem private Output output; - private StatelessRandomNormal(Operation operation) { - super(operation); + public StatelessRandomNormal(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -109,6 +115,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = StatelessRandomNormal.class + ) public static class Inputs extends RawOpInputs> { /** * The shape of the output tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormalV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormalV2.java index 09e30c2e27c..ec9804de75e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormalV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormalV2.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -41,6 +43,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = StatelessRandomNormalV2.OP_NAME, + inputsClass = StatelessRandomNormalV2.Inputs.class +) public final class StatelessRandomNormalV2 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -49,8 +55,8 @@ public final class StatelessRandomNormalV2 extends RawOp impl private Output output; - private StatelessRandomNormalV2(Operation operation) { - super(operation); + public StatelessRandomNormalV2(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -115,6 +121,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = StatelessRandomNormalV2.class + ) public static class Inputs extends RawOpInputs> { /** * The shape of the output tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomPoisson.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomPoisson.java index 4986962addd..6386c6a86de 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomPoisson.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomPoisson.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = StatelessRandomPoisson.OP_NAME, + inputsClass = StatelessRandomPoisson.Inputs.class +) public final class StatelessRandomPoisson extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class StatelessRandomPoisson extends RawOp imple private Output output; - private StatelessRandomPoisson(Operation operation) { - super(operation); + public StatelessRandomPoisson(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -92,6 +98,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = StatelessRandomPoisson.class + ) public static class Inputs extends RawOpInputs> { /** * The shape of the output tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniform.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniform.java index 6ea9c4ff4b9..afab5de72ac 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniform.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniform.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -41,6 +43,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = StatelessRandomUniform.OP_NAME, + inputsClass = StatelessRandomUniform.Inputs.class +) @Operator( group = "random" ) @@ -52,8 +58,8 @@ public final class StatelessRandomUniform extends RawOp imple private Output output; - private StatelessRandomUniform(Operation operation) { - super(operation); + public StatelessRandomUniform(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -110,6 +116,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = StatelessRandomUniform.class + ) public static class Inputs extends RawOpInputs> { /** * The shape of the output tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullInt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullInt.java index 4857e11b0b0..bbe9d36c533 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullInt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullInt.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = StatelessRandomUniformFullInt.OP_NAME, + inputsClass = StatelessRandomUniformFullInt.Inputs.class +) public final class StatelessRandomUniformFullInt extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class StatelessRandomUniformFullInt extends RawO private Output output; - private StatelessRandomUniformFullInt(Operation operation) { - super(operation); + public StatelessRandomUniformFullInt(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = StatelessRandomUniformFullInt.class + ) public static class Inputs extends RawOpInputs> { /** * The shape of the output tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullIntV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullIntV2.java index d31fd80c989..8c61332aacb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullIntV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullIntV2.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -40,6 +42,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = StatelessRandomUniformFullIntV2.OP_NAME, + inputsClass = StatelessRandomUniformFullIntV2.Inputs.class +) public final class StatelessRandomUniformFullIntV2 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -48,8 +54,8 @@ public final class StatelessRandomUniformFullIntV2 extends Ra private Output output; - private StatelessRandomUniformFullIntV2(Operation operation) { - super(operation); + public StatelessRandomUniformFullIntV2(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -95,6 +101,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = StatelessRandomUniformFullIntV2.class + ) public static class Inputs extends RawOpInputs> { /** * The shape of the output tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformInt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformInt.java index 413724622a7..02676c26abd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformInt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformInt.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = StatelessRandomUniformInt.OP_NAME, + inputsClass = StatelessRandomUniformInt.Inputs.class +) public final class StatelessRandomUniformInt extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class StatelessRandomUniformInt extends RawOp im private Output output; - private StatelessRandomUniformInt(Operation operation) { - super(operation); + public StatelessRandomUniformInt(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = StatelessRandomUniformInt.class + ) public static class Inputs extends RawOpInputs> { /** * The shape of the output tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformIntV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformIntV2.java index 133643bc187..7160aa2f156 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformIntV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformIntV2.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = StatelessRandomUniformIntV2.OP_NAME, + inputsClass = StatelessRandomUniformIntV2.Inputs.class +) public final class StatelessRandomUniformIntV2 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -47,8 +53,8 @@ public final class StatelessRandomUniformIntV2 extends RawOp private Output output; - private StatelessRandomUniformIntV2(Operation operation) { - super(operation); + public StatelessRandomUniformIntV2(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -96,6 +102,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = StatelessRandomUniformIntV2.class + ) public static class Inputs extends RawOpInputs> { /** * The shape of the output tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformV2.java index e6414b181d2..8a7e5aa6b57 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformV2.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -42,6 +44,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = StatelessRandomUniformV2.OP_NAME, + inputsClass = StatelessRandomUniformV2.Inputs.class +) public final class StatelessRandomUniformV2 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class StatelessRandomUniformV2 extends RawOp imp private Output output; - private StatelessRandomUniformV2(Operation operation) { - super(operation); + public StatelessRandomUniformV2(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -116,6 +122,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = StatelessRandomUniformV2.class + ) public static class Inputs extends RawOpInputs> { /** * The shape of the output tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormal.java index bc78fb53e6f..92be5db97b4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormal.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -42,6 +44,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = StatelessTruncatedNormal.OP_NAME, + inputsClass = StatelessTruncatedNormal.Inputs.class +) @Operator( group = "random" ) @@ -53,8 +59,8 @@ public final class StatelessTruncatedNormal extends RawOp imp private Output output; - private StatelessTruncatedNormal(Operation operation) { - super(operation); + public StatelessTruncatedNormal(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -111,6 +117,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = StatelessTruncatedNormal.class + ) public static class Inputs extends RawOpInputs> { /** * The shape of the output tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormalV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormalV2.java index da649d92ac0..318c1ccd592 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormalV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormalV2.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -43,6 +45,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = StatelessTruncatedNormalV2.OP_NAME, + inputsClass = StatelessTruncatedNormalV2.Inputs.class +) public final class StatelessTruncatedNormalV2 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -51,8 +57,8 @@ public final class StatelessTruncatedNormalV2 extends RawOp i private Output output; - private StatelessTruncatedNormalV2(Operation operation) { - super(operation); + public StatelessTruncatedNormalV2(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -117,6 +123,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = StatelessTruncatedNormalV2.class + ) public static class Inputs extends RawOpInputs> { /** * The shape of the output tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/TruncatedNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/TruncatedNormal.java index 2f0d60114e5..f03d07368dd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/TruncatedNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/TruncatedNormal.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -40,6 +42,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = TruncatedNormal.OP_NAME, + inputsClass = TruncatedNormal.Inputs.class +) @Operator( group = "random" ) @@ -51,8 +57,8 @@ public final class TruncatedNormal extends RawOp implements O private Output output; - private TruncatedNormal(Operation operation) { - super(operation); + public TruncatedNormal(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -161,6 +167,9 @@ public Options seed2(Long seed2) { } } + @OpInputsMetadata( + outputsClass = TruncatedNormal.class + ) public static class Inputs extends RawOpInputs> { /** * The shape of the output tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/UniformCandidateSampler.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/UniformCandidateSampler.java index 5d95242967d..71d248ebf36 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/UniformCandidateSampler.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/UniformCandidateSampler.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; @@ -41,6 +43,10 @@ * the sampled candidates must be chosen independently of the context and of the * true labels. */ +@OpMetadata( + opType = UniformCandidateSampler.OP_NAME, + inputsClass = UniformCandidateSampler.Inputs.class +) @Operator( group = "random" ) @@ -56,8 +62,8 @@ public final class UniformCandidateSampler extends RawOp { private Output sampledExpectedCount; - private UniformCandidateSampler(Operation operation) { - super(operation); + public UniformCandidateSampler(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; sampledCandidates = operation.output(outputIdx++); trueExpectedCount = operation.output(outputIdx++); @@ -194,6 +200,9 @@ public Options seed2(Long seed2) { } } + @OpInputsMetadata( + outputsClass = UniformCandidateSampler.class + ) public static class Inputs extends RawOpInputs { /** * A batch_size * num_true matrix, in which each row contains the diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastRecvV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastRecvV2.java index 167ed5dd8c8..e189877adb8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastRecvV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastRecvV2.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code data} output */ +@OpMetadata( + opType = CollectiveBcastRecvV2.OP_NAME, + inputsClass = CollectiveBcastRecvV2.Inputs.class +) public final class CollectiveBcastRecvV2 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class CollectiveBcastRecvV2 extends RawOp implemen private Output data; - private CollectiveBcastRecvV2(Operation operation) { - super(operation); + public CollectiveBcastRecvV2(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; data = operation.output(outputIdx++); } @@ -158,6 +164,9 @@ public Options timeoutSeconds(Float timeoutSeconds) { } } + @OpInputsMetadata( + outputsClass = CollectiveBcastRecvV2.class + ) public static class Inputs extends RawOpInputs> { /** * The groupSize input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastSendV2.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastSendV2.java index afce1f5da7f..68a56685261 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastSendV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastSendV2.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code data} output */ +@OpMetadata( + opType = CollectiveBcastSendV2.OP_NAME, + inputsClass = CollectiveBcastSendV2.Inputs.class +) public final class CollectiveBcastSendV2 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class CollectiveBcastSendV2 extends RawOp implemen private Output data; - private CollectiveBcastSendV2(Operation operation) { - super(operation); + public CollectiveBcastSendV2(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; data = operation.output(outputIdx++); } @@ -154,6 +160,9 @@ public Options timeoutSeconds(Float timeoutSeconds) { } } + @OpInputsMetadata( + outputsClass = CollectiveBcastSendV2.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/GetOptions.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/GetOptions.java index 472e350db9e..60f0af859fd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/GetOptions.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/GetOptions.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; /** * Returns the {@code tf.data.Options} attached to {@code input_dataset}. */ +@OpMetadata( + opType = GetOptions.OP_NAME, + inputsClass = GetOptions.Inputs.class +) public final class GetOptions extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -41,8 +47,8 @@ public final class GetOptions extends RawOp implements Operand { private Output serializedOptions; - private GetOptions(Operation operation) { - super(operation); + public GetOptions(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; serializedOptions = operation.output(outputIdx++); } @@ -77,6 +83,9 @@ public Output asOutput() { return serializedOptions; } + @OpInputsMetadata( + outputsClass = GetOptions.class + ) public static class Inputs extends RawOpInputs { /** * A variant tensor representing the input dataset. 
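Two constructor changes recur in every class above: the constructor goes from private to public, and it now calls super(operation, OP_NAME) instead of super(operation). The public visibility is what lets the new Inputs(GraphOperation) constructors, and any gradient code working from a raw GraphOperation, rebuild the typed wrapper directly; passing OP_NAME gives the base class the expected op type. The fragment below is only a guess at the kind of check that second argument enables; the real RawOp implementation is not included in these hunks.

    import org.tensorflow.Operation;

    // Hypothetical stand-in for the RawOp base class, for illustration only.
    abstract class TypedOpWrapper {
      protected final Operation operation;

      protected TypedOpWrapper(Operation operation, String expectedOpType) {
        if (!operation.type().equals(expectedOpType)) {
          throw new IllegalArgumentException(
              "expected an operation of type " + expectedOpType + " but got " + operation.type());
        }
        this.operation = operation;
      }
    }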
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParameters.java index 10afcd94130..e3a751e5fb0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParameters.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -36,14 +38,18 @@ * parameters that are loaded from a checkpoint before a training loop is * executed. */ +@OpMetadata( + opType = LoadTPUEmbeddingFrequencyEstimatorParameters.OP_NAME, + inputsClass = LoadTPUEmbeddingFrequencyEstimatorParameters.Inputs.class +) public final class LoadTPUEmbeddingFrequencyEstimatorParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "LoadTPUEmbeddingFrequencyEstimatorParameters"; - private LoadTPUEmbeddingFrequencyEstimatorParameters(Operation operation) { - super(operation); + public LoadTPUEmbeddingFrequencyEstimatorParameters(Operation operation) { + super(operation, OP_NAME); } /** @@ -161,6 +167,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = LoadTPUEmbeddingFrequencyEstimatorParameters.class + ) public static class Inputs extends RawOpInputs { /** * Value of parameters used in the frequency estimator optimization algorithm. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java index 0ccf337801b..20dff3115d1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -36,14 +38,18 @@ * parameters that are loaded from a checkpoint before a training loop is * executed. 
*/ +@OpMetadata( + opType = LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.OP_NAME, + inputsClass = LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.Inputs.class +) public final class LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug"; - private LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug(Operation operation) { - super(operation); + public LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug(Operation operation) { + super(operation, OP_NAME); } /** @@ -164,6 +170,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.class + ) public static class Inputs extends RawOpInputs { /** * Value of parameters used in the frequency estimator optimization algorithm. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParameters.java index 37ad5cf9e4b..e50431dc817 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParameters.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -35,6 +37,10 @@ * the correct embedding table configuration. For example, this op is * used to retrieve updated parameters before saving a checkpoint. 
*/ +@OpMetadata( + opType = RetrieveTPUEmbeddingFrequencyEstimatorParameters.OP_NAME, + inputsClass = RetrieveTPUEmbeddingFrequencyEstimatorParameters.Inputs.class +) public final class RetrieveTPUEmbeddingFrequencyEstimatorParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class RetrieveTPUEmbeddingFrequencyEstimatorParameters extends RawO private Output lastHitStep; - private RetrieveTPUEmbeddingFrequencyEstimatorParameters(Operation operation) { - super(operation); + public RetrieveTPUEmbeddingFrequencyEstimatorParameters(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; parameters = operation.output(outputIdx++); lastHitStep = operation.output(outputIdx++); @@ -181,6 +187,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = RetrieveTPUEmbeddingFrequencyEstimatorParameters.class + ) public static class Inputs extends RawOpInputs { /** * The tableId attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java index 4a9031c45e1..15fb4ba7a5c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -35,6 +37,10 @@ * the correct embedding table configuration. For example, this op is * used to retrieve updated parameters before saving a checkpoint. 
*/ +@OpMetadata( + opType = RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.OP_NAME, + inputsClass = RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.Inputs.class +) public final class RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -47,8 +53,8 @@ public final class RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebu private Output gradientAccumulators; - private RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug(Operation operation) { - super(operation); + public RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; parameters = operation.output(outputIdx++); lastHitStep = operation.output(outputIdx++); @@ -194,6 +200,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.class + ) public static class Inputs extends RawOpInputs { /** * The tableId attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetAlg.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetAlg.java index e5daa87691e..eef87bc6798 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetAlg.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetAlg.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TInt32; /** * Picks the best counter-based RNG algorithm based on device. * This op picks the best counter-based RNG algorithm based on device. 
*/ +@OpMetadata( + opType = StatelessRandomGetAlg.OP_NAME, + inputsClass = StatelessRandomGetAlg.Inputs.class +) public final class StatelessRandomGetAlg extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -41,8 +47,8 @@ public final class StatelessRandomGetAlg extends RawOp implements Operand alg; - private StatelessRandomGetAlg(Operation operation) { - super(operation); + public StatelessRandomGetAlg(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; alg = operation.output(outputIdx++); } @@ -75,6 +81,9 @@ public Output asOutput() { return alg; } + @OpInputsMetadata( + outputsClass = StatelessRandomGetAlg.class + ) public static class Inputs extends RawOpInputs { public Inputs(GraphOperation op) { super(new StatelessRandomGetAlg(op), op, Arrays.asList()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetKeyCounter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetKeyCounter.java index 16c50e5c0ea..184f9765210 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetKeyCounter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetKeyCounter.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -35,6 +37,10 @@ * Scrambles seed into key and counter, using the best algorithm based on device. * This op scrambles a shape-[2] seed into a key and a counter, both needed by counter-based RNG algorithms. The scrambing uses the best algorithm based on device. The scrambling is opaque but approximately satisfies the property that different seed results in different key/counter pair (which will in turn result in different random numbers). */ +@OpMetadata( + opType = StatelessRandomGetKeyCounter.OP_NAME, + inputsClass = StatelessRandomGetKeyCounter.Inputs.class +) public final class StatelessRandomGetKeyCounter extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class StatelessRandomGetKeyCounter extends RawOp { private Output counter; @SuppressWarnings("unchecked") - private StatelessRandomGetKeyCounter(Operation operation) { - super(operation); + public StatelessRandomGetKeyCounter(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; key = operation.output(outputIdx++); counter = operation.output(outputIdx++); @@ -87,6 +93,9 @@ public Output counter() { return counter; } + @OpInputsMetadata( + outputsClass = StatelessRandomGetKeyCounter.class + ) public static class Inputs extends RawOpInputs { /** * 2 seeds (shape [2]). 
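The Inputs constructors shown here take a raw GraphOperation and immediately wrap it in the op class, as in super(new StatelessRandomGetAlg(op), op, Arrays.asList()), so a typed view of an existing graph node can be rebuilt after the fact. A small usage sketch, assuming the caller already holds a GraphOperation whose type is StatelessRandomGetAlg:

    import org.tensorflow.GraphOperation;
    import org.tensorflow.op.rawops.StatelessRandomGetAlg;

    final class InputsLookupExample {
      // Purely illustrative: rebuilds the typed inputs view for an existing node.
      static StatelessRandomGetAlg.Inputs describe(GraphOperation graphOp) {
        return new StatelessRandomGetAlg.Inputs(graphOp);
      }
    }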
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAbs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAbs.java index a1bfbcd2bc3..48bb399cb03 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAbs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAbs.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = RiscAbs.OP_NAME, + inputsClass = RiscAbs.Inputs.class +) public final class RiscAbs extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscAbs extends RawOp implements Operand y; - private RiscAbs(Operation operation) { - super(operation); + public RiscAbs(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = RiscAbs.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAdd.java index cbaeb97e7db..4bf45e6ce3e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscAdd.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = RiscAdd.OP_NAME, + inputsClass = RiscAdd.Inputs.class +) public final class RiscAdd extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class RiscAdd extends RawOp implements Operand z; - private RiscAdd(Operation operation) { - super(operation); + public RiscAdd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -85,6 +91,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = RiscAdd.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryArithmetic.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryArithmetic.java index ffe646419f5..f5f7cb5bbe6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryArithmetic.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryArithmetic.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; 
+import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = RiscBinaryArithmetic.OP_NAME, + inputsClass = RiscBinaryArithmetic.Inputs.class +) public final class RiscBinaryArithmetic extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscBinaryArithmetic extends RawOp impleme private Output z; - private RiscBinaryArithmetic(Operation operation) { - super(operation); + public RiscBinaryArithmetic(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -85,6 +91,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = RiscBinaryArithmetic.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryComparison.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryComparison.java index d0221df4424..fe475060238 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryComparison.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBinaryComparison.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; import org.tensorflow.types.family.TNumber; @@ -34,6 +36,10 @@ /** * The RiscBinaryComparison operation */ +@OpMetadata( + opType = RiscBinaryComparison.OP_NAME, + inputsClass = RiscBinaryComparison.Inputs.class +) public final class RiscBinaryComparison extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -42,8 +48,8 @@ public final class RiscBinaryComparison extends RawOp implements Operand private Output z; - private RiscBinaryComparison(Operation operation) { - super(operation); + public RiscBinaryComparison(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = RiscBinaryComparison.class + ) public static class Inputs extends RawOpInputs { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBitcast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBitcast.java index 9e13bc53c9a..6d2c64e587b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBitcast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBitcast.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = RiscBitcast.OP_NAME, + inputsClass = RiscBitcast.Inputs.class +) public final class RiscBitcast extends RawOp 
implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class RiscBitcast extends RawOp implements Operand private Output y; - private RiscBitcast(Operation operation) { - super(operation); + public RiscBitcast(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = RiscBitcast.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBroadcast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBroadcast.java index f23d678ee3c..25885b631b6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBroadcast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscBroadcast.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RiscBroadcast.OP_NAME, + inputsClass = RiscBroadcast.Inputs.class +) public final class RiscBroadcast extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class RiscBroadcast extends RawOp implements Opera private Output output; - private RiscBroadcast(Operation operation) { - super(operation); + public RiscBroadcast(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = RiscBroadcast.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCast.java index 19713cb9325..4b14d20072a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCast.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = RiscCast.OP_NAME, + inputsClass = RiscCast.Inputs.class +) public final class RiscCast extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class RiscCast extends RawOp implements Operand private Output y; - private RiscCast(Operation operation) { - super(operation); + public RiscCast(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = 
RiscCast.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCeil.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCeil.java index 993e6fe0c4f..cf6173756f6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCeil.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCeil.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = RiscCeil.OP_NAME, + inputsClass = RiscCeil.Inputs.class +) public final class RiscCeil extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscCeil extends RawOp implements Operand< private Output y; - private RiscCeil(Operation operation) { - super(operation); + public RiscCeil(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = RiscCeil.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCholesky.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCholesky.java index 214e0f0729c..603c50dfbc4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCholesky.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCholesky.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RiscCholesky.OP_NAME, + inputsClass = RiscCholesky.Inputs.class +) public final class RiscCholesky extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscCholesky extends RawOp implements Oper private Output output; - private RiscCholesky(Operation operation) { - super(operation); + public RiscCholesky(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = RiscCholesky.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConcat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConcat.java index b09c64975f3..8097ccaf66e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConcat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConcat.java @@ -28,6 +28,8 @@ import 
org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RiscConcat.OP_NAME, + inputsClass = RiscConcat.Inputs.class +) public final class RiscConcat extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class RiscConcat extends RawOp implements Operand< private Output output; - private RiscConcat(Operation operation) { - super(operation); + public RiscConcat(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -85,6 +91,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = RiscConcat.class + ) public static class Inputs extends RawOpInputs> { /** * The values input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCondition.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCondition.java index 4799eeaea37..b899ee37385 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCondition.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCondition.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RiscCondition.OP_NAME, + inputsClass = RiscCondition.Inputs.class +) public final class RiscCondition extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class RiscCondition extends RawOp implements Ope private Output output; - private RiscCondition(Operation operation) { - super(operation); + public RiscCondition(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -96,6 +102,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = RiscCondition.class + ) public static class Inputs extends RawOpInputs> { /** * The pred input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConv.java index eef8246faa8..5315c043ea6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscConv.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = 
RiscConv.OP_NAME, + inputsClass = RiscConv.Inputs.class +) public final class RiscConv extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class RiscConv extends RawOp implements Operand< private Output output; - private RiscConv(Operation operation) { - super(operation); + public RiscConv(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -180,6 +186,9 @@ public Options dilations(Long... dilations) { } } + @OpInputsMetadata( + outputsClass = RiscConv.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCos.java index db02acbfcdf..95be103407b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscCos.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = RiscCos.OP_NAME, + inputsClass = RiscCos.Inputs.class +) public final class RiscCos extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscCos extends RawOp implements Operand y; - private RiscCos(Operation operation) { - super(operation); + public RiscCos(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = RiscCos.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDiv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDiv.java index 6dcdf4e796b..4feb615f370 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDiv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDiv.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = RiscDiv.OP_NAME, + inputsClass = RiscDiv.Inputs.class +) public final class RiscDiv extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscDiv extends RawOp implements Operand z; - private RiscDiv(Operation operation) { - super(operation); + public RiscDiv(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = RiscDiv.class + ) public static class Inputs extends 
RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDot.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDot.java index a3db102582d..a64374075ce 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDot.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscDot.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code product} output */ +@OpMetadata( + opType = RiscDot.OP_NAME, + inputsClass = RiscDot.Inputs.class +) public final class RiscDot extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscDot extends RawOp implements Operand product; - private RiscDot(Operation operation) { - super(operation); + public RiscDot(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; product = operation.output(outputIdx++); } @@ -148,6 +154,9 @@ public Options transposeB(Boolean transposeB) { } } + @OpInputsMetadata( + outputsClass = RiscDot.class + ) public static class Inputs extends RawOpInputs> { /** * The a input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscExp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscExp.java index 6868da2c4bb..5f8a0095d84 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscExp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscExp.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = RiscExp.OP_NAME, + inputsClass = RiscExp.Inputs.class +) public final class RiscExp extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscExp extends RawOp implements Operand y; - private RiscExp(Operation operation) { - super(operation); + public RiscExp(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = RiscExp.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFft.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFft.java index 31b27b88913..f418d9cd514 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFft.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFft.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import 
org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -35,6 +37,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RiscFft.OP_NAME, + inputsClass = RiscFft.Inputs.class +) public final class RiscFft extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscFft extends RawOp implements Operand private Output output; - private RiscFft(Operation operation) { - super(operation); + public RiscFft(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = RiscFft.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFloor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFloor.java index aabcfb22dc7..4091e029022 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFloor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscFloor.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = RiscFloor.OP_NAME, + inputsClass = RiscFloor.Inputs.class +) public final class RiscFloor extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscFloor extends RawOp implements Operand private Output y; - private RiscFloor(Operation operation) { - super(operation); + public RiscFloor(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = RiscFloor.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscGather.java index f4128861b39..bbf099905fe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscGather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscGather.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RiscGather.OP_NAME, + inputsClass = RiscGather.Inputs.class +) public final class RiscGather extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class RiscGather 
extends RawOp implements Operand< private Output output; - private RiscGather(Operation operation) { - super(operation); + public RiscGather(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -125,6 +131,9 @@ public Options batchDims(Long batchDims) { } } + @OpInputsMetadata( + outputsClass = RiscGather.class + ) public static class Inputs extends RawOpInputs> { /** * The params input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscImag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscImag.java index 120bb80b8ea..bef9b1f68c1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscImag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscImag.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RiscImag.OP_NAME, + inputsClass = RiscImag.Inputs.class +) public final class RiscImag extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class RiscImag extends RawOp implements Operand< private Output output; - private RiscImag(Operation operation) { - super(operation); + public RiscImag(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -100,6 +106,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = RiscImag.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscIsFinite.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscIsFinite.java index b87b3932c05..de9e34af151 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscIsFinite.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscIsFinite.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; import org.tensorflow.types.family.TNumber; @@ -34,6 +36,10 @@ /** * The RiscIsFinite operation */ +@OpMetadata( + opType = RiscIsFinite.OP_NAME, + inputsClass = RiscIsFinite.Inputs.class +) public final class RiscIsFinite extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -42,8 +48,8 @@ public final class RiscIsFinite extends RawOp implements Operand { private Output y; - private RiscIsFinite(Operation operation) { - super(operation); + public RiscIsFinite(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -78,6 +84,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = RiscIsFinite.class + ) public static class Inputs extends RawOpInputs { /** * The x input diff 
--git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLog.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLog.java index b4428844f90..083a4ef1f51 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLog.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLog.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = RiscLog.OP_NAME, + inputsClass = RiscLog.Inputs.class +) public final class RiscLog extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscLog extends RawOp implements Operand y; - private RiscLog(Operation operation) { - super(operation); + public RiscLog(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = RiscLog.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalAnd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalAnd.java index a4ef38dd73d..d0cde8399e0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalAnd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalAnd.java @@ -27,11 +27,17 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TBool; /** * The RiscLogicalAnd operation */ +@OpMetadata( + opType = RiscLogicalAnd.OP_NAME, + inputsClass = RiscLogicalAnd.Inputs.class +) public final class RiscLogicalAnd extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -40,8 +46,8 @@ public final class RiscLogicalAnd extends RawOp implements Operand { private Output z; - private RiscLogicalAnd(Operation operation) { - super(operation); + public RiscLogicalAnd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -78,6 +84,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = RiscLogicalAnd.class + ) public static class Inputs extends RawOpInputs { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalNot.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalNot.java index c67de8825d5..f9a7084dd1b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalNot.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalNot.java @@ -27,11 +27,17 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import 
org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TBool; /** * The RiscLogicalNot operation */ +@OpMetadata( + opType = RiscLogicalNot.OP_NAME, + inputsClass = RiscLogicalNot.Inputs.class +) public final class RiscLogicalNot extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -40,8 +46,8 @@ public final class RiscLogicalNot extends RawOp implements Operand { private Output z; - private RiscLogicalNot(Operation operation) { - super(operation); + public RiscLogicalNot(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -76,6 +82,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = RiscLogicalNot.class + ) public static class Inputs extends RawOpInputs { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalOr.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalOr.java index 59273fb3f82..85d2fa52e18 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalOr.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscLogicalOr.java @@ -27,11 +27,17 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TBool; /** * The RiscLogicalOr operation */ +@OpMetadata( + opType = RiscLogicalOr.OP_NAME, + inputsClass = RiscLogicalOr.Inputs.class +) public final class RiscLogicalOr extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -40,8 +46,8 @@ public final class RiscLogicalOr extends RawOp implements Operand { private Output z; - private RiscLogicalOr(Operation operation) { - super(operation); + public RiscLogicalOr(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -78,6 +84,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = RiscLogicalOr.class + ) public static class Inputs extends RawOpInputs { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMax.java index c5a73463a66..b8192bb2767 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMax.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code max} output */ +@OpMetadata( + opType = RiscMax.OP_NAME, + inputsClass = RiscMax.Inputs.class +) public final class RiscMax extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class RiscMax extends RawOp implements Operand max; - private RiscMax(Operation operation) { - super(operation); + public RiscMax(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; max = 
operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return max; } + @OpInputsMetadata( + outputsClass = RiscMax.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMin.java index 5d565b09d10..868ea20be57 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMin.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = RiscMin.OP_NAME, + inputsClass = RiscMin.Inputs.class +) public final class RiscMin extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscMin extends RawOp implements Operand z; - private RiscMin(Operation operation) { - super(operation); + public RiscMin(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = RiscMin.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMul.java index 64285c41c37..3ed0db57db9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscMul.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = RiscMul.OP_NAME, + inputsClass = RiscMul.Inputs.class +) public final class RiscMul extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscMul extends RawOp implements Operand z; - private RiscMul(Operation operation) { - super(operation); + public RiscMul(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = RiscMul.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscNeg.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscNeg.java index 9a759a04a6f..dca8cb7339b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscNeg.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscNeg.java @@ -27,6 +27,8 @@ import 
org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = RiscNeg.OP_NAME, + inputsClass = RiscNeg.Inputs.class +) public final class RiscNeg extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscNeg extends RawOp implements Operand y; - private RiscNeg(Operation operation) { - super(operation); + public RiscNeg(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = RiscNeg.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPad.java index 96d50ba86d5..9d3be8d9f94 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RiscPad.OP_NAME, + inputsClass = RiscPad.Inputs.class +) public final class RiscPad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscPad extends RawOp implements Operand output; - private RiscPad(Operation operation) { - super(operation); + public RiscPad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -85,6 +91,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = RiscPad.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPool.java index 0ee16aa7da8..9e905d099a5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPool.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RiscPool.OP_NAME, + inputsClass = RiscPool.Inputs.class +) public final class RiscPool extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class 
RiscPool extends RawOp implements Operand< private Output output; - private RiscPool(Operation operation) { - super(operation); + public RiscPool(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -135,6 +141,9 @@ public Options dataFormat(String dataFormat) { } } + @OpInputsMetadata( + outputsClass = RiscPool.class + ) public static class Inputs extends RawOpInputs> { /** * The value input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPow.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPow.java index 531ea2570e0..b92f2416109 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPow.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscPow.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = RiscPow.OP_NAME, + inputsClass = RiscPow.Inputs.class +) public final class RiscPow extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscPow extends RawOp implements Operand z; - private RiscPow(Operation operation) { - super(operation); + public RiscPow(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = RiscPow.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRandomUniform.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRandomUniform.java index b5b1c995a4e..314086dc12c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRandomUniform.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRandomUniform.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -34,6 +36,10 @@ /** * The RiscRandomUniform operation */ +@OpMetadata( + opType = RiscRandomUniform.OP_NAME, + inputsClass = RiscRandomUniform.Inputs.class +) public final class RiscRandomUniform extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -42,8 +48,8 @@ public final class RiscRandomUniform extends RawOp implements Operand private Output output; - private RiscRandomUniform(Operation operation) { - super(operation); + public RiscRandomUniform(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -118,6 +124,9 @@ public Options seed(Long seed) { } } + @OpInputsMetadata( + outputsClass = RiscRandomUniform.class + ) public static class Inputs extends RawOpInputs { /** * The shape input diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReal.java index 4c0e1b00708..a39747c841d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReal.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RiscReal.OP_NAME, + inputsClass = RiscReal.Inputs.class +) public final class RiscReal extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class RiscReal extends RawOp implements Operand< private Output output; - private RiscReal(Operation operation) { - super(operation); + public RiscReal(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -100,6 +106,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = RiscReal.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReduce.java index 68a4fb6e6cb..de63c91a2ae 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReduce.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RiscReduce.OP_NAME, + inputsClass = RiscReduce.Inputs.class +) public final class RiscReduce extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscReduce extends RawOp implements Operan private Output output; - private RiscReduce(Operation operation) { - super(operation); + public RiscReduce(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -85,6 +91,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = RiscReduce.class + ) public static class Inputs extends RawOpInputs> { /** * The tensor input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRem.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRem.java index a05a200a662..7e23a40f6ce 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRem.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscRem.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; 
import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = RiscRem.OP_NAME, + inputsClass = RiscRem.Inputs.class +) public final class RiscRem extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscRem extends RawOp implements Operand z; - private RiscRem(Operation operation) { - super(operation); + public RiscRem(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = RiscRem.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReshape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReshape.java index 1d31b746200..c4644eaba0e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReshape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReshape.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RiscReshape.OP_NAME, + inputsClass = RiscReshape.Inputs.class +) public final class RiscReshape extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscReshape extends RawOp implements Opera private Output output; - private RiscReshape(Operation operation) { - super(operation); + public RiscReshape(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -83,6 +89,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = RiscReshape.class + ) public static class Inputs extends RawOpInputs> { /** * The tensor input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReverse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReverse.java index 6c63576cece..a0d5274b4c4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReverse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscReverse.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RiscReverse.OP_NAME, + inputsClass = RiscReverse.Inputs.class +) public final class RiscReverse extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ 
public final class RiscReverse extends RawOp implements Opera private Output output; - private RiscReverse(Operation operation) { - super(operation); + public RiscReverse(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -83,6 +89,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = RiscReverse.class + ) public static class Inputs extends RawOpInputs> { /** * The tensor input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscScatter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscScatter.java index 9d4b7e75a5f..d3cff331777 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscScatter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscScatter.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RiscScatter.OP_NAME, + inputsClass = RiscScatter.Inputs.class +) public final class RiscScatter extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscScatter extends RawOp implements Opera private Output output; - private RiscScatter(Operation operation) { - super(operation); + public RiscScatter(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -86,6 +92,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = RiscScatter.class + ) public static class Inputs extends RawOpInputs> { /** * The indices input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscShape.java index 48b15342aa8..885afc9637e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscShape.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RiscShape.OP_NAME, + inputsClass = RiscShape.Inputs.class +) public final class RiscShape extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class RiscShape extends RawOp implements Operand private Output output; - private RiscShape(Operation operation) { - super(operation); + public RiscShape(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -99,6 +105,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = RiscShape.class + ) public static class Inputs extends RawOpInputs> { /** * The 
input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSign.java index d9dc1645279..5fad88fcaee 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSign.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = RiscSign.OP_NAME, + inputsClass = RiscSign.Inputs.class +) public final class RiscSign extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscSign extends RawOp implements Operand< private Output y; - private RiscSign(Operation operation) { - super(operation); + public RiscSign(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = RiscSign.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSlice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSlice.java index 17c1ae867f8..4c1e1b32459 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSlice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSlice.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RiscSlice.OP_NAME, + inputsClass = RiscSlice.Inputs.class +) public final class RiscSlice extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscSlice extends RawOp implements Operand private Output output; - private RiscSlice(Operation operation) { - super(operation); + public RiscSlice(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -86,6 +92,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = RiscSlice.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSort.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSort.java index 05f439e84d7..5a47733f8e4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSort.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSort.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; 
+import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RiscSort.OP_NAME, + inputsClass = RiscSort.Inputs.class +) public final class RiscSort extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscSort extends RawOp implements Operand< private Output output; - private RiscSort(Operation operation) { - super(operation); + public RiscSort(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -85,6 +91,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = RiscSort.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSqueeze.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSqueeze.java index c10e67550b5..c88d61ada53 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSqueeze.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSqueeze.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RiscSqueeze.OP_NAME, + inputsClass = RiscSqueeze.Inputs.class +) public final class RiscSqueeze extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class RiscSqueeze extends RawOp implements Operand private Output output; - private RiscSqueeze(Operation operation) { - super(operation); + public RiscSqueeze(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -146,6 +152,9 @@ public Options squeezeDims(Long... 
squeezeDims) { } } + @OpInputsMetadata( + outputsClass = RiscSqueeze.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSub.java index 2da64a6b5fa..0377fff1210 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscSub.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code z} output */ +@OpMetadata( + opType = RiscSub.OP_NAME, + inputsClass = RiscSub.Inputs.class +) public final class RiscSub extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscSub extends RawOp implements Operand z; - private RiscSub(Operation operation) { - super(operation); + public RiscSub(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; z = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return z; } + @OpInputsMetadata( + outputsClass = RiscSub.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTranspose.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTranspose.java index 58d0eb662d5..bfa7c1feadc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTranspose.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTranspose.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = RiscTranspose.OP_NAME, + inputsClass = RiscTranspose.Inputs.class +) public final class RiscTranspose extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class RiscTranspose extends RawOp implements Opera private Output y; - private RiscTranspose(Operation operation) { - super(operation); + public RiscTranspose(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = RiscTranspose.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTriangularSolve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTriangularSolve.java index cfd5976bb4c..a156d728712 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTriangularSolve.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscTriangularSolve.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RiscTriangularSolve.OP_NAME, + inputsClass = RiscTriangularSolve.Inputs.class +) public final class RiscTriangularSolve extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscTriangularSolve extends RawOp implemen private Output output; - private RiscTriangularSolve(Operation operation) { - super(operation); + public RiscTriangularSolve(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -148,6 +154,9 @@ public Options adjoint(Boolean adjoint) { } } + @OpInputsMetadata( + outputsClass = RiscTriangularSolve.class + ) public static class Inputs extends RawOpInputs> { /** * The matrix input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscUnary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscUnary.java index 59b52dc8a17..46922a99b60 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscUnary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscUnary.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -35,6 +37,10 @@ * * @param data type for {@code y} output */ +@OpMetadata( + opType = RiscUnary.OP_NAME, + inputsClass = RiscUnary.Inputs.class +) public final class RiscUnary extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RiscUnary extends RawOp implements Operand private Output y; - private RiscUnary(Operation operation) { - super(operation); + public RiscUnary(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; y = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return y; } + @OpInputsMetadata( + outputsClass = RiscUnary.class + ) public static class Inputs extends RawOpInputs> { /** * The x input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscWhile.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscWhile.java index 010f239e8db..2e65c88f97f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscWhile.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/risc/RiscWhile.java @@ -32,12 +32,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; /** * The RiscWhile operation */ +@OpMetadata( + opType 
= RiscWhile.OP_NAME, + inputsClass = RiscWhile.Inputs.class +) public final class RiscWhile extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine @@ -47,8 +53,8 @@ public final class RiscWhile extends RawOp implements Iterable> { private List> output; @SuppressWarnings("unchecked") - private RiscWhile(Operation operation) { - super(operation); + public RiscWhile(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputLength = operation.outputListLength("output"); output = Arrays.asList(operation.outputList(outputIdx, outputLength)); @@ -181,6 +187,9 @@ public Options parallelIterations(Long parallelIterations) { } } + @OpInputsMetadata( + outputsClass = RiscWhile.class + ) public static class Inputs extends RawOpInputs { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchFft.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchFft.java index 31fdd2bb931..c9948f82b85 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchFft.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchFft.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** * The BatchFFT operation */ +@OpMetadata( + opType = BatchFft.OP_NAME, + inputsClass = BatchFft.Inputs.class +) @Operator( group = "signal" ) @@ -45,8 +51,8 @@ public final class BatchFft extends RawOp implements Operand { private Output output; @SuppressWarnings("unchecked") - private BatchFft(Operation operation) { - super(operation); + public BatchFft(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return (Output) output; } + @OpInputsMetadata( + outputsClass = BatchFft.class + ) public static class Inputs extends RawOpInputs { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchFft2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchFft2d.java index 1b7fb596496..5416598cdbe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchFft2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchFft2d.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** * The BatchFFT2D operation */ +@OpMetadata( + opType = BatchFft2d.OP_NAME, + inputsClass = BatchFft2d.Inputs.class +) @Operator( group = "signal" ) @@ -45,8 +51,8 @@ public final class BatchFft2d extends RawOp implements Operand { private Output output; @SuppressWarnings("unchecked") - private BatchFft2d(Operation operation) { - super(operation); + public BatchFft2d(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return (Output) output; } + 
@OpInputsMetadata( + outputsClass = BatchFft2d.class + ) public static class Inputs extends RawOpInputs { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchFft3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchFft3d.java index 35c940d8abf..36969696370 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchFft3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchFft3d.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** * The BatchFFT3D operation */ +@OpMetadata( + opType = BatchFft3d.OP_NAME, + inputsClass = BatchFft3d.Inputs.class +) @Operator( group = "signal" ) @@ -45,8 +51,8 @@ public final class BatchFft3d extends RawOp implements Operand { private Output output; @SuppressWarnings("unchecked") - private BatchFft3d(Operation operation) { - super(operation); + public BatchFft3d(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return (Output) output; } + @OpInputsMetadata( + outputsClass = BatchFft3d.class + ) public static class Inputs extends RawOpInputs { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchIfft.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchIfft.java index 9fedc13692e..184b536fa50 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchIfft.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchIfft.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** * The BatchIFFT operation */ +@OpMetadata( + opType = BatchIfft.OP_NAME, + inputsClass = BatchIfft.Inputs.class +) @Operator( group = "signal" ) @@ -45,8 +51,8 @@ public final class BatchIfft extends RawOp implements Operand { private Output output; @SuppressWarnings("unchecked") - private BatchIfft(Operation operation) { - super(operation); + public BatchIfft(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return (Output) output; } + @OpInputsMetadata( + outputsClass = BatchIfft.class + ) public static class Inputs extends RawOpInputs { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchIfft2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchIfft2d.java index c7cf6cb1e96..b57ee7979bf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchIfft2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchIfft2d.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import 
org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** * The BatchIFFT2D operation */ +@OpMetadata( + opType = BatchIfft2d.OP_NAME, + inputsClass = BatchIfft2d.Inputs.class +) @Operator( group = "signal" ) @@ -45,8 +51,8 @@ public final class BatchIfft2d extends RawOp implements Operand { private Output output; @SuppressWarnings("unchecked") - private BatchIfft2d(Operation operation) { - super(operation); + public BatchIfft2d(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return (Output) output; } + @OpInputsMetadata( + outputsClass = BatchIfft2d.class + ) public static class Inputs extends RawOpInputs { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchIfft3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchIfft3d.java index c23e16b79a3..ccc3327a78c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchIfft3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/BatchIfft3d.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.family.TType; /** * The BatchIFFT3D operation */ +@OpMetadata( + opType = BatchIfft3d.OP_NAME, + inputsClass = BatchIfft3d.Inputs.class +) @Operator( group = "signal" ) @@ -45,8 +51,8 @@ public final class BatchIfft3d extends RawOp implements Operand { private Output output; @SuppressWarnings("unchecked") - private BatchIfft3d(Operation operation) { - super(operation); + public BatchIfft3d(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return (Output) output; } + @OpInputsMetadata( + outputsClass = BatchIfft3d.class + ) public static class Inputs extends RawOpInputs { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft.java index bf800846caa..571b3424a92 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Fft.OP_NAME, + inputsClass = Fft.Inputs.class +) @Operator( group = "signal" ) @@ -49,8 +55,8 @@ public final class Fft extends RawOp implements Operand { private Output output; - private Fft(Operation operation) { - super(operation); + public Fft(Operation operation) { + super(operation, OP_NAME); int outputIdx 
= 0; output = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Fft.class + ) public static class Inputs extends RawOpInputs> { /** * A complex tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft2d.java index 097ea3cf43d..a7edbbd2580 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft2d.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Fft2d.OP_NAME, + inputsClass = Fft2d.Inputs.class +) @Operator( group = "signal" ) @@ -49,8 +55,8 @@ public final class Fft2d extends RawOp implements Operand { private Output output; - private Fft2d(Operation operation) { - super(operation); + public Fft2d(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Fft2d.class + ) public static class Inputs extends RawOpInputs> { /** * A complex tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft3d.java index 7fcf6354abe..a42664be517 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft3d.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Fft3d.OP_NAME, + inputsClass = Fft3d.Inputs.class +) @Operator( group = "signal" ) @@ -49,8 +55,8 @@ public final class Fft3d extends RawOp implements Operand { private Output output; - private Fft3d(Operation operation) { - super(operation); + public Fft3d(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Fft3d.class + ) public static class Inputs extends RawOpInputs> { /** * A complex tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft.java index 787c57575df..2e9718d758e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Ifft.OP_NAME, + inputsClass = Ifft.Inputs.class +) @Operator( group = "signal" ) @@ -49,8 +55,8 @@ public final class Ifft extends RawOp implements Operand { private Output output; - private Ifft(Operation operation) { - super(operation); + public Ifft(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Ifft.class + ) public static class Inputs extends RawOpInputs> { /** * A complex tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft2d.java index fc3e42c7d18..99db6dbd9e5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft2d.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Ifft2d.OP_NAME, + inputsClass = Ifft2d.Inputs.class +) @Operator( group = "signal" ) @@ -49,8 +55,8 @@ public final class Ifft2d extends RawOp implements Operand { private Output output; - private Ifft2d(Operation operation) { - super(operation); + public Ifft2d(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Ifft2d.class + ) public static class Inputs extends RawOpInputs> { /** * A complex tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft3d.java index 9d9910f20b6..edb8810e3c1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft3d.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Ifft3d.OP_NAME, + inputsClass = Ifft3d.Inputs.class +) @Operator( group = "signal" ) @@ -49,8 +55,8 @@ public final class Ifft3d extends RawOp implements Operand { private Output output; - private Ifft3d(Operation operation) { - super(operation); + public Ifft3d(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Ifft3d.class + ) public static class Inputs extends RawOpInputs> { /** * A complex tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft.java index bf619275e89..718337402bf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -51,6 +53,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Irfft.OP_NAME, + inputsClass = Irfft.Inputs.class +) @Operator( group = "signal" ) @@ -62,8 +68,8 @@ public final class Irfft extends RawOp implements Operand private Output output; - private Irfft(Operation operation) { - super(operation); + public Irfft(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -125,6 +131,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Irfft.class + ) public static class Inputs extends RawOpInputs> { /** * A complex tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft2d.java index 76ef5401d06..1b7e31b2ab5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft2d.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -52,6 +54,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Irfft2d.OP_NAME, + inputsClass = Irfft2d.Inputs.class +) @Operator( group = "signal" ) @@ -63,8 +69,8 @@ public final class Irfft2d extends RawOp implements Operand output; - private Irfft2d(Operation operation) { - super(operation); + public Irfft2d(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -126,6 +132,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Irfft2d.class + ) public static class Inputs extends RawOpInputs> { /** * A complex tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft3d.java index 951d90c24f5..d989c8f610a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft3d.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -52,6 +54,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Irfft3d.OP_NAME, + inputsClass = Irfft3d.Inputs.class +) @Operator( group = "signal" ) @@ -63,8 +69,8 @@ public final class Irfft3d extends RawOp implements Operand output; - private Irfft3d(Operation operation) { - super(operation); + public Irfft3d(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -126,6 +132,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Irfft3d.class + ) public static class Inputs extends RawOpInputs> { /** * A complex tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft.java index aaecb285947..3f47556ad2c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -47,6 +49,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Rfft.OP_NAME, + inputsClass = Rfft.Inputs.class +) @Operator( group = "signal" ) @@ -58,8 +64,8 @@ public final class Rfft extends RawOp implements Operand { private Output output; - private Rfft(Operation operation) { - super(operation); + public Rfft(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -105,6 +111,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Rfft.class + ) public static class Inputs extends RawOpInputs> { /** * A float32 tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft2d.java index 1fc337c853f..959564338a6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft2d.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -48,6 +50,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Rfft2d.OP_NAME, + inputsClass = Rfft2d.Inputs.class +) @Operator( group = "signal" ) @@ -59,8 +65,8 @@ public final class Rfft2d extends RawOp implements Operand { private Output output; - private Rfft2d(Operation operation) { - super(operation); + public Rfft2d(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -107,6 +113,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Rfft2d.class + ) public static class Inputs extends RawOpInputs> { /** * A float32 tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft3d.java index e10cc8b9879..9203d09409e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft3d.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -48,6 +50,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Rfft3d.OP_NAME, + inputsClass = Rfft3d.Inputs.class +) @Operator( group = "signal" ) @@ -59,8 +65,8 @@ public final class Rfft3d extends RawOp implements Operand { private Output output; - private Rfft3d(Operation operation) { - super(operation); + public Rfft3d(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -107,6 +113,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Rfft3d.class + ) public static class Inputs extends RawOpInputs> { /** * A float32 tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/AddManySparseToTensorsMap.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/AddManySparseToTensorsMap.java index 7757774ace2..5aa97de3122 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/AddManySparseToTensorsMap.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/AddManySparseToTensorsMap.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -53,6 +55,10 @@ * {@code sparse.AddManySparseToTensorsMap} as the {@code shared_name} passed to * {@code TakeManySparseFromTensorsMap}. Ensure the Operations are colocated. */ +@OpMetadata( + opType = AddManySparseToTensorsMap.OP_NAME, + inputsClass = AddManySparseToTensorsMap.Inputs.class +) @Operator( group = "sparse" ) @@ -64,8 +70,8 @@ public final class AddManySparseToTensorsMap extends RawOp implements Operand sparseHandles; - private AddManySparseToTensorsMap(Operation operation) { - super(operation); + public AddManySparseToTensorsMap(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; sparseHandles = operation.output(outputIdx++); } @@ -175,6 +181,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = AddManySparseToTensorsMap.class + ) public static class Inputs extends RawOpInputs { /** * 2-D. The {@code indices} of the minibatch {@code SparseTensor}. 
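Every hunk in the generated risc, signal, and sparse wrappers above and below applies the same mechanical change: the op class gains an @OpMetadata annotation naming its op type and its Inputs class, the nested Inputs class gains a matching @OpInputsMetadata back-reference, and the wrapper constructor goes from private to public and forwards OP_NAME to the new two-argument RawOp constructor, presumably so the runtime can map a raw Operation back to its typed wrapper and input metadata for the custom-gradient support this patch adds. The abridged sketch below (using RiscSub) shows the resulting class shape; the generic parameter <T extends TNumber>, the OP_NAME literal, and the Inputs body are not visible in the hunks themselves and are filled in here from the generator's usual conventions, so treat them as assumptions rather than exact generated output.

package org.tensorflow.op.risc;

import org.tensorflow.Operand;
import org.tensorflow.Operation;
import org.tensorflow.Output;
import org.tensorflow.op.RawOp;
import org.tensorflow.op.RawOpInputs;
import org.tensorflow.op.annotation.OpInputsMetadata;
import org.tensorflow.op.annotation.OpMetadata;
import org.tensorflow.types.family.TNumber;

// Abridged sketch of a regenerated wrapper after this patch, not the full generated file.
@OpMetadata(
    opType = RiscSub.OP_NAME,           // ties the wrapper to its op type
    inputsClass = RiscSub.Inputs.class  // and to its inputs-metadata class
)
public final class RiscSub<T extends TNumber> extends RawOp implements Operand<T> {

  /** The name of this op, as known by TensorFlow core engine */
  public static final String OP_NAME = "RiscSub"; // value assumed from the generator convention

  private Output<T> z;

  // Formerly `private RiscSub(Operation operation)` calling `super(operation)`;
  // now public, and it passes OP_NAME to RawOp's two-argument constructor.
  public RiscSub(Operation operation) {
    super(operation, OP_NAME);
    int outputIdx = 0;
    z = operation.output(outputIdx++);
  }

  @Override
  public Output<T> asOutput() {
    return z;
  }

  @OpInputsMetadata(
      outputsClass = RiscSub.class // back-reference from the inputs class to the op class
  )
  public static class Inputs extends RawOpInputs<RiscSub<?>> {
    // The input fields and the Inputs constructor are untouched by this patch
    // and are elided here; see the generated source for the full class.
  }
}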
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/AddSparseToTensorsMap.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/AddSparseToTensorsMap.java index 8a68e68e4ca..0737e08cc4a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/AddSparseToTensorsMap.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/AddSparseToTensorsMap.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -47,6 +49,10 @@ * {@code sparse.AddSparseToTensorsMap} as the {@code shared_name} passed to * {@code TakeManySparseFromTensorsMap}. Ensure the Operations are colocated. */ +@OpMetadata( + opType = AddSparseToTensorsMap.OP_NAME, + inputsClass = AddSparseToTensorsMap.Inputs.class +) @Operator( group = "sparse" ) @@ -58,8 +64,8 @@ public final class AddSparseToTensorsMap extends RawOp implements Operand sparseHandle; - private AddSparseToTensorsMap(Operation operation) { - super(operation); + public AddSparseToTensorsMap(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; sparseHandle = operation.output(outputIdx++); } @@ -167,6 +173,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = AddSparseToTensorsMap.class + ) public static class Inputs extends RawOpInputs { /** * 2-D. The {@code indices} of the {@code SparseTensor}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseCountSparseOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseCountSparseOutput.java index a83f853daac..281998ef065 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseCountSparseOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseCountSparseOutput.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code output_values} output */ +@OpMetadata( + opType = DenseCountSparseOutput.OP_NAME, + inputsClass = DenseCountSparseOutput.Inputs.class +) public final class DenseCountSparseOutput extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -49,8 +55,8 @@ public final class DenseCountSparseOutput extends RawOp { private Output outputDenseShape; - private DenseCountSparseOutput(Operation operation) { - super(operation); + public DenseCountSparseOutput(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputIndices = operation.output(outputIdx++); outputValues = operation.output(outputIdx++); @@ -173,6 +179,9 @@ public Options maxlength(Long maxlength) { } } + @OpInputsMetadata( + outputsClass = DenseCountSparseOutput.class + ) public static class Inputs extends RawOpInputs> { /** * Tensor containing data to count. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToDenseSetOperation.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToDenseSetOperation.java index ea896eac86f..f2b77c66329 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToDenseSetOperation.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToDenseSetOperation.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -43,6 +45,10 @@ * * @param data type for {@code result_values} output */ +@OpMetadata( + opType = DenseToDenseSetOperation.OP_NAME, + inputsClass = DenseToDenseSetOperation.Inputs.class +) @Operator( group = "sparse" ) @@ -58,8 +64,8 @@ public final class DenseToDenseSetOperation extends RawOp { private Output resultShape; - private DenseToDenseSetOperation(Operation operation) { - super(operation); + public DenseToDenseSetOperation(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; resultIndices = operation.output(outputIdx++); resultValues = operation.output(outputIdx++); @@ -158,6 +164,9 @@ public Options validateIndices(Boolean validateIndices) { } } + @OpInputsMetadata( + outputsClass = DenseToDenseSetOperation.class + ) public static class Inputs extends RawOpInputs> { /** * {@code Tensor} with rank {@code n}. 1st {@code n-1} dimensions must be the same as {@code set2}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToSparseSetOperation.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToSparseSetOperation.java index 6cf1c374394..7552f756671 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToSparseSetOperation.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToSparseSetOperation.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -49,6 +51,10 @@ * * @param data type for {@code result_values} output */ +@OpMetadata( + opType = DenseToSparseSetOperation.OP_NAME, + inputsClass = DenseToSparseSetOperation.Inputs.class +) @Operator( group = "sparse" ) @@ -64,8 +70,8 @@ public final class DenseToSparseSetOperation extends RawOp { private Output resultShape; - private DenseToSparseSetOperation(Operation operation) { - super(operation); + public DenseToSparseSetOperation(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; resultIndices = operation.output(outputIdx++); resultValues = operation.output(outputIdx++); @@ -172,6 +178,9 @@ public Options validateIndices(Boolean validateIndices) { } } + @OpInputsMetadata( + outputsClass = DenseToSparseSetOperation.class + ) public static class Inputs extends RawOpInputs> { /** * {@code Tensor} with rank {@code n}. 1st {@code n-1} dimensions must be the same as {@code set2}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DeserializeSparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DeserializeSparse.java index 837e8db112b..86daa749a4e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DeserializeSparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DeserializeSparse.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -77,6 +79,10 @@ * * @param data type for {@code sparse_values} output */ +@OpMetadata( + opType = DeserializeSparse.OP_NAME, + inputsClass = DeserializeSparse.Inputs.class +) @Operator( group = "sparse" ) @@ -92,8 +98,8 @@ public final class DeserializeSparse extends RawOp { private Output sparseShape; - private DeserializeSparse(Operation operation) { - super(operation); + public DeserializeSparse(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; sparseIndices = operation.output(outputIdx++); sparseValues = operation.output(outputIdx++); @@ -148,6 +154,9 @@ public Output sparseShape() { return sparseShape; } + @OpInputsMetadata( + outputsClass = DeserializeSparse.class + ) public static class Inputs extends RawOpInputs> { /** * The serialized {@code SparseTensor} objects. The last dimension diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAccumulatorApplyGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAccumulatorApplyGradient.java index 8496ad6a614..ce71474efcf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAccumulatorApplyGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAccumulatorApplyGradient.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -37,6 +39,10 @@ * Does not add if local_step is smaller than the accumulator's * global_step. */ +@OpMetadata( + opType = SparseAccumulatorApplyGradient.OP_NAME, + inputsClass = SparseAccumulatorApplyGradient.Inputs.class +) @Operator( group = "sparse" ) @@ -46,8 +52,8 @@ public final class SparseAccumulatorApplyGradient extends RawOp { */ public static final String OP_NAME = "SparseAccumulatorApplyGradient"; - private SparseAccumulatorApplyGradient(Operation operation) { - super(operation); + public SparseAccumulatorApplyGradient(Operation operation) { + super(operation, OP_NAME); } /** @@ -83,6 +89,9 @@ public static SparseAccumulatorApplyGradient create(Scope scope, Operand { /** * The handle to a accumulator. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAccumulatorTakeGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAccumulatorTakeGradient.java index f1558dd83d4..0ed16f6b4bc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAccumulatorTakeGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAccumulatorTakeGradient.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -46,6 +48,10 @@ * * @param data type for {@code values} output */ +@OpMetadata( + opType = SparseAccumulatorTakeGradient.OP_NAME, + inputsClass = SparseAccumulatorTakeGradient.Inputs.class +) @Operator( group = "sparse" ) @@ -61,8 +67,8 @@ public final class SparseAccumulatorTakeGradient extends RawOp private Output shape; - private SparseAccumulatorTakeGradient(Operation operation) { - super(operation); + public SparseAccumulatorTakeGradient(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; indices = operation.output(outputIdx++); values = operation.output(outputIdx++); @@ -119,6 +125,9 @@ public Output shape() { return shape; } + @OpInputsMetadata( + outputsClass = SparseAccumulatorTakeGradient.class + ) public static class Inputs extends RawOpInputs> { /** * The handle to a SparseConditionalAccumulator. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAdd.java index 8e372cacf9b..ceb5a3160f6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAdd.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -49,6 +51,10 @@ * * @param data type for {@code sum_values} output */ +@OpMetadata( + opType = SparseAdd.OP_NAME, + inputsClass = SparseAdd.Inputs.class +) @Operator( group = "sparse" ) @@ -64,8 +70,8 @@ public final class SparseAdd extends RawOp { private Output sumShape; - private SparseAdd(Operation operation) { - super(operation); + public SparseAdd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; sumIndices = operation.output(outputIdx++); sumValues = operation.output(outputIdx++); @@ -131,6 +137,9 @@ public Output sumShape() { return sumShape; } + @OpInputsMetadata( + outputsClass = SparseAdd.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D. The {@code indices} of the first {@code SparseTensor}, size {@code [nnz, ndims]} Matrix. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAddGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAddGrad.java index c99dd5a7a08..6bffeaf384b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAddGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAddGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -41,6 +43,10 @@ * * @param data type for {@code a_val_grad} output */ +@OpMetadata( + opType = SparseAddGrad.OP_NAME, + inputsClass = SparseAddGrad.Inputs.class +) @Operator( group = "sparse" ) @@ -54,8 +60,8 @@ public final class SparseAddGrad extends RawOp { private Output bValGrad; - private SparseAddGrad(Operation operation) { - super(operation); + public SparseAddGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; aValGrad = operation.output(outputIdx++); bValGrad = operation.output(outputIdx++); @@ -107,6 +113,9 @@ public Output bValGrad() { return bValGrad; } + @OpInputsMetadata( + outputsClass = SparseAddGrad.class + ) public static class Inputs extends RawOpInputs> { /** * 1-D with shape {@code [nnz(sum)]}. The gradient with respect to diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseBincount.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseBincount.java index 6064e201455..d978960f4e6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseBincount.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseBincount.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -43,6 +45,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SparseBincount.OP_NAME, + inputsClass = SparseBincount.Inputs.class +) @Operator( group = "sparse" ) @@ -54,8 +60,8 @@ public final class SparseBincount extends RawOp implements Op private Output output; - private SparseBincount(Operation operation) { - super(operation); + public SparseBincount(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -144,6 +150,9 @@ public Options binaryOutput(Boolean binaryOutput) { } } + @OpInputsMetadata( + outputsClass = SparseBincount.class + ) public static class Inputs extends RawOpInputs> { /** * 2D int64 {@code Tensor}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseConcat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseConcat.java index d35c0837991..81bd3c2b84c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseConcat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseConcat.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -75,6 +77,10 @@ * * @param data type for {@code output_values} output */ +@OpMetadata( + opType = SparseConcat.OP_NAME, + inputsClass = SparseConcat.Inputs.class +) @Operator( group = "sparse" ) @@ -90,8 +96,8 @@ public final class SparseConcat extends RawOp { private Output outputShape; - private SparseConcat(Operation operation) { - super(operation); + public SparseConcat(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputIndices = operation.output(outputIdx++); outputValues = operation.output(outputIdx++); @@ -151,6 +157,9 @@ public Output outputShape() { return outputShape; } + @OpInputsMetadata( + outputsClass = SparseConcat.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D. Indices of each input {@code SparseTensor}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseConditionalAccumulator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseConditionalAccumulator.java index 7ddc90749d9..8c36581baff 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseConditionalAccumulator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseConditionalAccumulator.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -43,6 +45,10 @@ * resets the aggregate to 0, and increments the global_step recorded by * the accumulator. */ +@OpMetadata( + opType = SparseConditionalAccumulator.OP_NAME, + inputsClass = SparseConditionalAccumulator.Inputs.class +) @Operator( group = "sparse" ) @@ -54,8 +60,8 @@ public final class SparseConditionalAccumulator extends RawOp implements Operand private Output handle; - private SparseConditionalAccumulator(Operation operation) { - super(operation); + public SparseConditionalAccumulator(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -189,6 +195,9 @@ public Options reductionType(String reductionType) { } } + @OpInputsMetadata( + outputsClass = SparseConditionalAccumulator.class + ) public static class Inputs extends RawOpInputs { /** * The type of the value being accumulated. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCountSparseOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCountSparseOutput.java index bee78d51b23..6c2bfdac1dc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCountSparseOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCountSparseOutput.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code output_values} output */ +@OpMetadata( + opType = SparseCountSparseOutput.OP_NAME, + inputsClass = SparseCountSparseOutput.Inputs.class +) public final class SparseCountSparseOutput extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -49,8 +55,8 @@ public final class SparseCountSparseOutput extends RawOp { private Output outputDenseShape; - private SparseCountSparseOutput(Operation operation) { - super(operation); + public SparseCountSparseOutput(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputIndices = operation.output(outputIdx++); outputValues = operation.output(outputIdx++); @@ -177,6 +183,9 @@ public Options maxlength(Long maxlength) { } } + @OpInputsMetadata( + outputsClass = SparseCountSparseOutput.class + ) public static class Inputs extends RawOpInputs> { /** * Tensor containing the indices of the sparse tensor to count. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCross.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCross.java index 0208d01c554..de00b98597b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCross.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCross.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -72,6 +74,10 @@ * Fingerprint64("e"), Fingerprint64("c"))) * */ +@OpMetadata( + opType = SparseCross.OP_NAME, + inputsClass = SparseCross.Inputs.class +) @Operator( group = "sparse" ) @@ -87,8 +93,8 @@ public final class SparseCross extends RawOp { private Output outputShape; - private SparseCross(Operation operation) { - super(operation); + public SparseCross(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputIndices = operation.output(outputIdx++); outputValues = operation.output(outputIdx++); @@ -149,6 +155,9 @@ public Output outputShape() { return outputShape; } + @OpInputsMetadata( + outputsClass = SparseCross.class + ) public static class Inputs extends RawOpInputs { /** * 2-D. Indices of each input {@code SparseTensor}. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCrossHashed.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCrossHashed.java index af73abe7e8d..e2af02a02a2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCrossHashed.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCrossHashed.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -72,6 +74,10 @@ * Fingerprint64("e"), Fingerprint64("c"))) * */ +@OpMetadata( + opType = SparseCrossHashed.OP_NAME, + inputsClass = SparseCrossHashed.Inputs.class +) @Operator( group = "sparse" ) @@ -87,8 +93,8 @@ public final class SparseCrossHashed extends RawOp { private Output outputShape; - private SparseCrossHashed(Operation operation) { - super(operation); + public SparseCrossHashed(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputIndices = operation.output(outputIdx++); outputValues = operation.output(outputIdx++); @@ -155,6 +161,9 @@ public Output outputShape() { return outputShape; } + @OpInputsMetadata( + outputsClass = SparseCrossHashed.class + ) public static class Inputs extends RawOpInputs { /** * 2-D. Indices of each input {@code SparseTensor}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseAdd.java index 9f2e841c0a1..1e50002d9b0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseAdd.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -44,6 +46,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SparseDenseCwiseAdd.OP_NAME, + inputsClass = SparseDenseCwiseAdd.Inputs.class +) @Operator( group = "sparse" ) @@ -55,8 +61,8 @@ public final class SparseDenseCwiseAdd extends RawOp implements private Output output; - private SparseDenseCwiseAdd(Operation operation) { - super(operation); + public SparseDenseCwiseAdd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -100,6 +106,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SparseDenseCwiseAdd.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D. 
{@code N x R} matrix with the indices of non-empty values in a diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseDiv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseDiv.java index 9b519b392ca..42b1734610f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseDiv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseDiv.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SparseDenseCwiseDiv.OP_NAME, + inputsClass = SparseDenseCwiseDiv.Inputs.class +) @Operator( group = "sparse" ) @@ -50,8 +56,8 @@ public final class SparseDenseCwiseDiv extends RawOp implements private Output output; - private SparseDenseCwiseDiv(Operation operation) { - super(operation); + public SparseDenseCwiseDiv(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -95,6 +101,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SparseDenseCwiseDiv.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D. {@code N x R} matrix with the indices of non-empty values in a diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseMul.java index f316405eb7f..2a0e0b3507b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseMul.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -42,6 +44,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SparseDenseCwiseMul.OP_NAME, + inputsClass = SparseDenseCwiseMul.Inputs.class +) @Operator( group = "sparse" ) @@ -53,8 +59,8 @@ public final class SparseDenseCwiseMul extends RawOp implements private Output output; - private SparseDenseCwiseMul(Operation operation) { - super(operation); + public SparseDenseCwiseMul(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -98,6 +104,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SparseDenseCwiseMul.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D. 
{@code N x R} matrix with the indices of non-empty values in a diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRows.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRows.java index 6473358aad6..206c829d54a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRows.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRows.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -72,6 +74,10 @@ * * @param data type for {@code output_values} output */ +@OpMetadata( + opType = SparseFillEmptyRows.OP_NAME, + inputsClass = SparseFillEmptyRows.Inputs.class +) @Operator( group = "sparse" ) @@ -89,8 +95,8 @@ public final class SparseFillEmptyRows extends RawOp { private Output reverseIndexMap; - private SparseFillEmptyRows(Operation operation) { - super(operation); + public SparseFillEmptyRows(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputIndices = operation.output(outputIdx++); outputValues = operation.output(outputIdx++); @@ -162,6 +168,9 @@ public Output reverseIndexMap() { return reverseIndexMap; } + @OpInputsMetadata( + outputsClass = SparseFillEmptyRows.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D. the indices of the sparse tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRowsGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRowsGrad.java index 94f5033d574..8ee85db13ef 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRowsGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRowsGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -44,6 +46,10 @@ * * @param data type for {@code d_values} output */ +@OpMetadata( + opType = SparseFillEmptyRowsGrad.OP_NAME, + inputsClass = SparseFillEmptyRowsGrad.Inputs.class +) @Operator( group = "sparse" ) @@ -57,8 +63,8 @@ public final class SparseFillEmptyRowsGrad extends RawOp { private Output dDefaultValue; - private SparseFillEmptyRowsGrad(Operation operation) { - super(operation); + public SparseFillEmptyRowsGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; dValues = operation.output(outputIdx++); dDefaultValue = operation.output(outputIdx++); @@ -102,6 +108,9 @@ public Output dDefaultValue() { return dDefaultValue; } + @OpInputsMetadata( + outputsClass = SparseFillEmptyRowsGrad.class + ) public static class Inputs extends RawOpInputs> { /** * 1-D. The reverse index map from SparseFillEmptyRows. 
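The hunks above all apply the same mechanical change that runs through this patch: each generated op class gains an @OpMetadata annotation pointing at its OP_NAME and its nested Inputs class, the Inputs class gains a matching @OpInputsMetadata pointing back at the op class, and the (Operation) constructor is widened from private to public and now calls super(operation, OP_NAME). As a hedged, minimal sketch (not part of the patch), the metadata can be read back with plain reflection, assuming the annotations are retained at runtime, using one of the classes changed above:

    import org.tensorflow.op.annotation.OpInputsMetadata;
    import org.tensorflow.op.annotation.OpMetadata;
    import org.tensorflow.op.sparse.SparseDenseCwiseAdd;

    public final class MetadataProbe {
      public static void main(String[] args) {
        // Both lookups assume runtime retention; they would return null otherwise.
        OpMetadata opMeta = SparseDenseCwiseAdd.class.getAnnotation(OpMetadata.class);
        OpInputsMetadata inputsMeta =
            SparseDenseCwiseAdd.Inputs.class.getAnnotation(OpInputsMetadata.class);

        System.out.println(opMeta.opType());           // value of SparseDenseCwiseAdd.OP_NAME
        System.out.println(opMeta.inputsClass());      // ...sparse.SparseDenseCwiseAdd$Inputs
        System.out.println(inputsMeta.outputsClass()); // ...sparse.SparseDenseCwiseAdd
      }
    }

The annotation pair makes the link between an op wrapper and its inputs description discoverable without relying on naming conventions, which is presumably what the metadata-driven code introduced elsewhere in this patch depends on.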
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseMatMul.java index 8c9388a0aaa..0c4f8a13b52 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseMatMul.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -43,6 +45,10 @@ *

    The gradient computation of this operation will only take advantage of sparsity * in the input gradient when that gradient comes from a Relu. */ +@OpMetadata( + opType = SparseMatMul.OP_NAME, + inputsClass = SparseMatMul.Inputs.class +) @Operator( group = "sparse" ) @@ -54,8 +60,8 @@ public final class SparseMatMul extends RawOp implements Operand { private Output product; - private SparseMatMul(Operation operation) { - super(operation); + public SparseMatMul(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; product = operation.output(outputIdx++); } @@ -210,6 +216,9 @@ public Options bIsSparse(Boolean bIsSparse) { } } + @OpInputsMetadata( + outputsClass = SparseMatMul.class + ) public static class Inputs extends RawOpInputs { /** * The a input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMax.java index 989e3f5c48a..ce1ee1affd3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMax.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -48,6 +50,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SparseReduceMax.OP_NAME, + inputsClass = SparseReduceMax.Inputs.class +) @Operator( group = "sparse" ) @@ -59,8 +65,8 @@ public final class SparseReduceMax extends RawOp implements O private Output output; - private SparseReduceMax(Operation operation) { - super(operation); + public SparseReduceMax(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -144,6 +150,9 @@ public Options keepDims(Boolean keepDims) { } } + @OpInputsMetadata( + outputsClass = SparseReduceMax.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D. 
{@code N x R} matrix with the indices of non-empty values in a diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMaxSparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMaxSparse.java index 5ec12e79fa4..6728db570bb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMaxSparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMaxSparse.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -48,6 +50,10 @@ * * @param data type for {@code output_values} output */ +@OpMetadata( + opType = SparseReduceMaxSparse.OP_NAME, + inputsClass = SparseReduceMaxSparse.Inputs.class +) @Operator( group = "sparse" ) @@ -63,8 +69,8 @@ public final class SparseReduceMaxSparse extends RawOp { private Output outputShape; - private SparseReduceMaxSparse(Operation operation) { - super(operation); + public SparseReduceMaxSparse(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputIndices = operation.output(outputIdx++); outputValues = operation.output(outputIdx++); @@ -163,6 +169,9 @@ public Options keepDims(Boolean keepDims) { } } + @OpInputsMetadata( + outputsClass = SparseReduceMaxSparse.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D. {@code N x R} matrix with the indices of non-empty values in a diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSum.java index f81a5eff0cf..fca1832c967 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSum.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -48,6 +50,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SparseReduceSum.OP_NAME, + inputsClass = SparseReduceSum.Inputs.class +) @Operator( group = "sparse" ) @@ -59,8 +65,8 @@ public final class SparseReduceSum extends RawOp implements Ope private Output output; - private SparseReduceSum(Operation operation) { - super(operation); + public SparseReduceSum(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -144,6 +150,9 @@ public Options keepDims(Boolean keepDims) { } } + @OpInputsMetadata( + outputsClass = SparseReduceSum.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D. 
{@code N x R} matrix with the indices of non-empty values in a diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSumSparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSumSparse.java index e97e512582a..01cee87946a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSumSparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSumSparse.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -48,6 +50,10 @@ * * @param data type for {@code output_values} output */ +@OpMetadata( + opType = SparseReduceSumSparse.OP_NAME, + inputsClass = SparseReduceSumSparse.Inputs.class +) @Operator( group = "sparse" ) @@ -63,8 +69,8 @@ public final class SparseReduceSumSparse extends RawOp { private Output outputShape; - private SparseReduceSumSparse(Operation operation) { - super(operation); + public SparseReduceSumSparse(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputIndices = operation.output(outputIdx++); outputValues = operation.output(outputIdx++); @@ -163,6 +169,9 @@ public Options keepDims(Boolean keepDims) { } } + @OpInputsMetadata( + outputsClass = SparseReduceSumSparse.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D. {@code N x R} matrix with the indices of non-empty values in a diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReorder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReorder.java index 4f3ef7a762a..99e095e2123 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReorder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReorder.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -43,6 +45,10 @@ * * @param data type for {@code output_values} output */ +@OpMetadata( + opType = SparseReorder.OP_NAME, + inputsClass = SparseReorder.Inputs.class +) @Operator( group = "sparse" ) @@ -56,8 +62,8 @@ public final class SparseReorder extends RawOp { private Output outputValues; - private SparseReorder(Operation operation) { - super(operation); + public SparseReorder(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputIndices = operation.output(outputIdx++); outputValues = operation.output(outputIdx++); @@ -105,6 +111,9 @@ public Output outputValues() { return outputValues; } + @OpInputsMetadata( + outputsClass = SparseReorder.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D. 
{@code N x R} matrix with the indices of non-empty values in a diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReshape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReshape.java index 5b569b8ea6c..671461d6c96 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReshape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReshape.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt64; @@ -45,6 +47,10 @@ * {@code input_shape} has length {@code R_in}, {@code output_indices} has shape {@code [N, R_out]}, and * {@code output_shape} has length {@code R_out}. */ +@OpMetadata( + opType = SparseReshape.OP_NAME, + inputsClass = SparseReshape.Inputs.class +) @Operator( group = "sparse" ) @@ -58,8 +64,8 @@ public final class SparseReshape extends RawOp { private Output outputShape; - private SparseReshape(Operation operation) { - super(operation); + public SparseReshape(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputIndices = operation.output(outputIdx++); outputShape = operation.output(outputIdx++); @@ -108,6 +114,9 @@ public Output outputShape() { return outputShape; } + @OpInputsMetadata( + outputsClass = SparseReshape.class + ) public static class Inputs extends RawOpInputs { /** * 2-D. {@code N x R_in} matrix with the indices of non-empty values in a diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMean.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMean.java index af9f65b4551..301de791181 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMean.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMean.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SparseSegmentMean.OP_NAME, + inputsClass = SparseSegmentMean.Inputs.class +) @Operator( group = "sparse" ) @@ -50,8 +56,8 @@ public final class SparseSegmentMean extends RawOp implements private Output output; - private SparseSegmentMean(Operation operation) { - super(operation); + public SparseSegmentMean(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -93,6 +99,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SparseSegmentMean.class + ) public static class Inputs extends RawOpInputs> { /** * The data input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanGrad.java index 80c3acc5da1..e0ec592149a 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SparseSegmentMeanGrad.OP_NAME, + inputsClass = SparseSegmentMeanGrad.Inputs.class +) @Operator( group = "sparse" ) @@ -50,8 +56,8 @@ public final class SparseSegmentMeanGrad extends RawOp implem private Output output; - private SparseSegmentMeanGrad(Operation operation) { - super(operation); + public SparseSegmentMeanGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -95,6 +101,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SparseSegmentMeanGrad.class + ) public static class Inputs extends RawOpInputs> { /** * gradient propagated to the SparseSegmentMean op. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanWithNumSegments.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanWithNumSegments.java index 8fe4f039b62..4e254db04bb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanWithNumSegments.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanWithNumSegments.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -41,6 +43,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SparseSegmentMeanWithNumSegments.OP_NAME, + inputsClass = SparseSegmentMeanWithNumSegments.Inputs.class +) @Operator( group = "sparse" ) @@ -52,8 +58,8 @@ public final class SparseSegmentMeanWithNumSegments extends R private Output output; - private SparseSegmentMeanWithNumSegments(Operation operation) { - super(operation); + public SparseSegmentMeanWithNumSegments(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -98,6 +104,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SparseSegmentMeanWithNumSegments.class + ) public static class Inputs extends RawOpInputs> { /** * The data input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtN.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtN.java index f5b0a967137..4632ad9c8ce 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtN.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtN.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import 
org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SparseSegmentSqrtN.OP_NAME, + inputsClass = SparseSegmentSqrtN.Inputs.class +) @Operator( group = "sparse" ) @@ -49,8 +55,8 @@ public final class SparseSegmentSqrtN extends RawOp implement private Output output; - private SparseSegmentSqrtN(Operation operation) { - super(operation); + public SparseSegmentSqrtN(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -92,6 +98,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SparseSegmentSqrtN.class + ) public static class Inputs extends RawOpInputs> { /** * The data input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNGrad.java index 2b5d8e00ae1..244e659d8f2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SparseSegmentSqrtNGrad.OP_NAME, + inputsClass = SparseSegmentSqrtNGrad.Inputs.class +) @Operator( group = "sparse" ) @@ -50,8 +56,8 @@ public final class SparseSegmentSqrtNGrad extends RawOp imple private Output output; - private SparseSegmentSqrtNGrad(Operation operation) { - super(operation); + public SparseSegmentSqrtNGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -95,6 +101,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SparseSegmentSqrtNGrad.class + ) public static class Inputs extends RawOpInputs> { /** * gradient propagated to the SparseSegmentSqrtN op. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNWithNumSegments.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNWithNumSegments.java index 090732ab6e2..eaa499e8611 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNWithNumSegments.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNWithNumSegments.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -42,6 +44,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SparseSegmentSqrtNWithNumSegments.OP_NAME, + inputsClass = SparseSegmentSqrtNWithNumSegments.Inputs.class +) @Operator( group = "sparse" ) @@ -53,8 +59,8 @@ public final class SparseSegmentSqrtNWithNumSegments extends private Output output; - private SparseSegmentSqrtNWithNumSegments(Operation operation) { - super(operation); + public SparseSegmentSqrtNWithNumSegments(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -99,6 +105,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SparseSegmentSqrtNWithNumSegments.class + ) public static class Inputs extends RawOpInputs> { /** * The data input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSum.java index 7c78dfd8392..cd66f0acde3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSum.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -62,6 +64,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SparseSegmentSum.OP_NAME, + inputsClass = SparseSegmentSum.Inputs.class +) @Operator( group = "sparse" ) @@ -73,8 +79,8 @@ public final class SparseSegmentSum extends RawOp implements private Output output; - private SparseSegmentSum(Operation operation) { - super(operation); + public SparseSegmentSum(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -116,6 +122,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SparseSegmentSum.class + ) public static class Inputs extends RawOpInputs> { /** * The data input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumGrad.java index 3689868f892..2ce26681cc3 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SparseSegmentSumGrad.OP_NAME, + inputsClass = SparseSegmentSumGrad.Inputs.class +) @Operator( group = "sparse" ) @@ -50,8 +56,8 @@ public final class SparseSegmentSumGrad extends RawOp impleme private Output output; - private SparseSegmentSumGrad(Operation operation) { - super(operation); + public SparseSegmentSumGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -95,6 +101,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SparseSegmentSumGrad.class + ) public static class Inputs extends RawOpInputs> { /** * gradient propagated to the SparseSegmentSum op. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumWithNumSegments.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumWithNumSegments.java index 45c84027eec..8f9aa9d583c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumWithNumSegments.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumWithNumSegments.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -60,6 +62,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SparseSegmentSumWithNumSegments.OP_NAME, + inputsClass = SparseSegmentSumWithNumSegments.Inputs.class +) @Operator( group = "sparse" ) @@ -71,8 +77,8 @@ public final class SparseSegmentSumWithNumSegments extends Ra private Output output; - private SparseSegmentSumWithNumSegments(Operation operation) { - super(operation); + public SparseSegmentSumWithNumSegments(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -117,6 +123,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SparseSegmentSumWithNumSegments.class + ) public static class Inputs extends RawOpInputs> { /** * The data input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSlice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSlice.java index 881d26ef94c..c03d46a6507 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSlice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSlice.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import 
org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -53,6 +55,10 @@ * * @param data type for {@code output_values} output */ +@OpMetadata( + opType = SparseSlice.OP_NAME, + inputsClass = SparseSlice.Inputs.class +) @Operator( group = "sparse" ) @@ -68,8 +74,8 @@ public final class SparseSlice extends RawOp { private Output outputShape; - private SparseSlice(Operation operation) { - super(operation); + public SparseSlice(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputIndices = operation.output(outputIdx++); outputValues = operation.output(outputIdx++); @@ -133,6 +139,9 @@ public Output outputShape() { return outputShape; } + @OpInputsMetadata( + outputsClass = SparseSlice.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D tensor represents the indices of the sparse tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSliceGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSliceGrad.java index 0899e873513..d3686ba4ce9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSliceGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSliceGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -40,6 +42,10 @@ * * @param data type for {@code val_grad} output */ +@OpMetadata( + opType = SparseSliceGrad.OP_NAME, + inputsClass = SparseSliceGrad.Inputs.class +) @Operator( group = "sparse" ) @@ -51,8 +57,8 @@ public final class SparseSliceGrad extends RawOp implements Ope private Output valGrad; - private SparseSliceGrad(Operation operation) { - super(operation); + public SparseSliceGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; valGrad = operation.output(outputIdx++); } @@ -96,6 +102,9 @@ public Output asOutput() { return valGrad; } + @OpInputsMetadata( + outputsClass = SparseSliceGrad.class + ) public static class Inputs extends RawOpInputs> { /** * 1-D. 
The gradient with respect to diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSoftmax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSoftmax.java index 0cc01eeee58..d0318d8905b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSoftmax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSoftmax.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -49,6 +51,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SparseSoftmax.OP_NAME, + inputsClass = SparseSoftmax.Inputs.class +) @Operator( group = "sparse" ) @@ -60,8 +66,8 @@ public final class SparseSoftmax extends RawOp implements Ope private Output output; - private SparseSoftmax(Operation operation) { - super(operation); + public SparseSoftmax(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -103,6 +109,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SparseSoftmax.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D. {@code NNZ x R} matrix with the indices of non-empty values in a diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMaximum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMaximum.java index 6a36379bd8d..12f6247d4f1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMaximum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMaximum.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -38,6 +40,10 @@ * * @param data type for {@code output_values} output */ +@OpMetadata( + opType = SparseSparseMaximum.OP_NAME, + inputsClass = SparseSparseMaximum.Inputs.class +) @Operator( group = "sparse" ) @@ -51,8 +57,8 @@ public final class SparseSparseMaximum extends RawOp { private Output outputValues; - private SparseSparseMaximum(Operation operation) { - super(operation); + public SparseSparseMaximum(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputIndices = operation.output(outputIdx++); outputValues = operation.output(outputIdx++); @@ -106,6 +112,9 @@ public Output outputValues() { return outputValues; } + @OpInputsMetadata( + outputsClass = SparseSparseMaximum.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D. 
{@code N x R} matrix with the indices of non-empty values in a diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMinimum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMinimum.java index 6a32e085cee..25a0e3e77f2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMinimum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMinimum.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -38,6 +40,10 @@ * * @param data type for {@code output_values} output */ +@OpMetadata( + opType = SparseSparseMinimum.OP_NAME, + inputsClass = SparseSparseMinimum.Inputs.class +) @Operator( group = "sparse" ) @@ -51,8 +57,8 @@ public final class SparseSparseMinimum extends RawOp { private Output outputValues; - private SparseSparseMinimum(Operation operation) { - super(operation); + public SparseSparseMinimum(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputIndices = operation.output(outputIdx++); outputValues = operation.output(outputIdx++); @@ -106,6 +112,9 @@ public Output outputValues() { return outputValues; } + @OpInputsMetadata( + outputsClass = SparseSparseMinimum.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D. {@code N x R} matrix with the indices of non-empty values in a diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSplit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSplit.java index 0b830e4441f..2b5a5944fff 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSplit.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSplit.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -56,6 +58,10 @@ * * @param data type for {@code output_values} output */ +@OpMetadata( + opType = SparseSplit.OP_NAME, + inputsClass = SparseSplit.Inputs.class +) @Operator( group = "sparse" ) @@ -72,8 +78,8 @@ public final class SparseSplit extends RawOp { private List> outputShape; @SuppressWarnings("unchecked") - private SparseSplit(Operation operation) { - super(operation); + public SparseSplit(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputIndicesLength = operation.outputListLength("output_indices"); outputIndices = Arrays.asList((Output[]) operation.outputList(outputIdx, outputIndicesLength)); @@ -144,6 +150,9 @@ public List> outputShape() { return outputShape; } + @OpInputsMetadata( + outputsClass = SparseSplit.class + ) public static class Inputs extends RawOpInputs> { /** * 0-D. The dimension along which to split. 
Must be in the range diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseAdd.java index e828e6476fb..08a1b4ca726 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseAdd.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SparseTensorDenseAdd.OP_NAME, + inputsClass = SparseTensorDenseAdd.Inputs.class +) @Operator( group = "sparse" ) @@ -49,8 +55,8 @@ public final class SparseTensorDenseAdd extends RawOp implement private Output output; - private SparseTensorDenseAdd(Operation operation) { - super(operation); + public SparseTensorDenseAdd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -94,6 +100,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SparseTensorDenseAdd.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D. The {@code indices} of the {@code SparseTensor}, with shape {@code [nnz, ndims]}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseMatMul.java index 559b82d90c5..552aabfa24e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseMatMul.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -46,6 +48,10 @@ * * @param data type for {@code product} output */ +@OpMetadata( + opType = SparseTensorDenseMatMul.OP_NAME, + inputsClass = SparseTensorDenseMatMul.Inputs.class +) @Operator( group = "sparse" ) @@ -57,8 +63,8 @@ public final class SparseTensorDenseMatMul extends RawOp implem private Output product; - private SparseTensorDenseMatMul(Operation operation) { - super(operation); + public SparseTensorDenseMatMul(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; product = operation.output(outputIdx++); } @@ -171,6 +177,9 @@ public Options adjointB(Boolean adjointB) { } } + @OpInputsMetadata( + outputsClass = SparseTensorDenseMatMul.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D. The {@code indices} of the {@code SparseTensor}, size {@code [nnz, 2]} Matrix. 
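Besides the annotations, every constructor in these hunks changes from private to public, and the superclass call now passes OP_NAME. The patch does not show the callers here, but the visibility change means framework code that only holds a raw Operation can rebuild the typed wrapper around it. A hedged sketch of that, using a class changed earlier in this patch and assuming the underlying op really is a SparseReorder over int64 values:

    import org.tensorflow.Operation;
    import org.tensorflow.Output;
    import org.tensorflow.op.sparse.SparseReorder;
    import org.tensorflow.types.TInt64;

    final class RewrapSketch {
      // `op` is assumed to be an existing "SparseReorder" operation with int64 values;
      // choosing the wrong value type here is unchecked and would only surface later.
      static Output<TInt64> reorderedValues(Operation op) {
        // Only possible with this patch applied, since SparseReorder(Operation) is now public;
        // super(operation, OP_NAME) presumably lets the base class verify the operation type.
        SparseReorder<TInt64> reorder = new SparseReorder<>(op);
        return reorder.outputValues();
      }
    }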
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToDense.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToDense.java index dac1f7569b9..bb2c0f1c955 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToDense.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToDense.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -53,6 +55,10 @@ * * @param data type for {@code dense} output */ +@OpMetadata( + opType = SparseToDense.OP_NAME, + inputsClass = SparseToDense.Inputs.class +) @Operator( group = "sparse" ) @@ -64,8 +70,8 @@ public final class SparseToDense extends RawOp implements Opera private Output dense; - private SparseToDense(Operation operation) { - super(operation); + public SparseToDense(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; dense = operation.output(outputIdx++); } @@ -154,6 +160,9 @@ public Options validateIndices(Boolean validateIndices) { } } + @OpInputsMetadata( + outputsClass = SparseToDense.class + ) public static class Inputs extends RawOpInputs> { /** * 0-D, 1-D, or 2-D. {@code sparse_indices[i]} contains the complete diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToSparseSetOperation.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToSparseSetOperation.java index 1d4ea083ad6..179cd8d70ef 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToSparseSetOperation.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToSparseSetOperation.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -55,6 +57,10 @@ * * @param data type for {@code result_values} output */ +@OpMetadata( + opType = SparseToSparseSetOperation.OP_NAME, + inputsClass = SparseToSparseSetOperation.Inputs.class +) @Operator( group = "sparse" ) @@ -70,8 +76,8 @@ public final class SparseToSparseSetOperation extends RawOp { private Output resultShape; - private SparseToSparseSetOperation(Operation operation) { - super(operation); + public SparseToSparseSetOperation(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; resultIndices = operation.output(outputIdx++); resultValues = operation.output(outputIdx++); @@ -186,6 +192,9 @@ public Options validateIndices(Boolean validateIndices) { } } + @OpInputsMetadata( + outputsClass = SparseToSparseSetOperation.class + ) public static class Inputs extends RawOpInputs> { /** * 2D {@code Tensor}, indices of a {@code SparseTensor}. 
Must be in row-major diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/TakeManySparseFromTensorsMap.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/TakeManySparseFromTensorsMap.java index eaec58d9901..2481608003d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/TakeManySparseFromTensorsMap.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/TakeManySparseFromTensorsMap.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -78,6 +80,10 @@ * * @param data type for {@code sparse_values} output */ +@OpMetadata( + opType = TakeManySparseFromTensorsMap.OP_NAME, + inputsClass = TakeManySparseFromTensorsMap.Inputs.class +) @Operator( group = "sparse" ) @@ -93,8 +99,8 @@ public final class TakeManySparseFromTensorsMap extends RawOp { private Output sparseShape; - private TakeManySparseFromTensorsMap(Operation operation) { - super(operation); + public TakeManySparseFromTensorsMap(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; sparseIndices = operation.output(outputIdx++); sparseValues = operation.output(outputIdx++); @@ -219,6 +225,9 @@ public Options sharedName(String sharedName) { } } + @OpInputsMetadata( + outputsClass = TakeManySparseFromTensorsMap.class + ) public static class Inputs extends RawOpInputs> { /** * 1-D, The {@code N} serialized {@code SparseTensor} objects. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Join.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Join.java index 6985e0283c7..6705ae83853 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Join.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Join.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; @@ -45,6 +47,10 @@ * * */ +@OpMetadata( + opType = Join.OP_NAME, + inputsClass = Join.Inputs.class +) @Operator( group = "strings" ) @@ -56,8 +62,8 @@ public final class Join extends RawOp implements Operand { private Output output; - private Join(Operation operation) { - super(operation); + public Join(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -133,6 +139,9 @@ public Options separator(String separator) { } } + @OpInputsMetadata( + outputsClass = Join.class + ) public static class Inputs extends RawOpInputs { /** * A list of string tensors. 
The tensors must all have the same shape, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Lower.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Lower.java index 83f55b3ef0f..f14268de14a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Lower.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Lower.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; @@ -42,6 +44,10 @@ * * */ +@OpMetadata( + opType = Lower.OP_NAME, + inputsClass = Lower.Inputs.class +) @Operator( group = "strings" ) @@ -53,8 +59,8 @@ public final class Lower extends RawOp implements Operand { private Output output; - private Lower(Operation operation) { - super(operation); + public Lower(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -130,6 +136,9 @@ public Options encoding(String encoding) { } } + @OpInputsMetadata( + outputsClass = Lower.class + ) public static class Inputs extends RawOpInputs { /** * The input to be lower-cased. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ReduceJoin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ReduceJoin.java index 2e9e709d68c..b33f61736ec 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ReduceJoin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ReduceJoin.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; import org.tensorflow.types.TString; @@ -55,6 +57,10 @@ * tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd" * */ +@OpMetadata( + opType = ReduceJoin.OP_NAME, + inputsClass = ReduceJoin.Inputs.class +) @Operator( group = "strings" ) @@ -66,8 +72,8 @@ public final class ReduceJoin extends RawOp implements Operand { private Output output; - private ReduceJoin(Operation operation) { - super(operation); + public ReduceJoin(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -173,6 +179,9 @@ public Options separator(String separator) { } } + @OpInputsMetadata( + outputsClass = ReduceJoin.class + ) public static class Inputs extends RawOpInputs { /** * The input to be joined. All reduced indices must have non-zero size. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/RegexFullMatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/RegexFullMatch.java index 31157d96864..6d6333859c9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/RegexFullMatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/RegexFullMatch.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TBool; import org.tensorflow.types.TString; @@ -50,6 +52,10 @@ * * */ +@OpMetadata( + opType = RegexFullMatch.OP_NAME, + inputsClass = RegexFullMatch.Inputs.class +) @Operator( group = "strings" ) @@ -61,8 +67,8 @@ public final class RegexFullMatch extends RawOp implements Operand { private Output output; - private RegexFullMatch(Operation operation) { - super(operation); + public RegexFullMatch(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -100,6 +106,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = RegexFullMatch.class + ) public static class Inputs extends RawOpInputs { /** * A string tensor of the text to be processed. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/RegexReplace.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/RegexReplace.java index ca767d27f1e..1e26c1d06db 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/RegexReplace.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/RegexReplace.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; @@ -35,6 +37,10 @@ * replacement string provided in {@code rewrite}. * It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) */ +@OpMetadata( + opType = RegexReplace.OP_NAME, + inputsClass = RegexReplace.Inputs.class +) @Operator( group = "strings" ) @@ -46,8 +52,8 @@ public final class RegexReplace extends RawOp implements Operand { private Output output; - private RegexReplace(Operation operation) { - super(operation); + public RegexReplace(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -131,6 +137,9 @@ public Options replaceGlobal(Boolean replaceGlobal) { } } + @OpInputsMetadata( + outputsClass = RegexReplace.class + ) public static class Inputs extends RawOpInputs { /** * The text to be processed. 
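The strings ops above follow the identical pattern, and it also holds for classes that are not exposed through @Operator (StaticRegexFullMatch and StaticRegexReplace below carry @OpMetadata with no @Operator), which suggests the metadata targets framework-level lookups rather than the Ops API surface. For illustration only, and again assuming runtime retention, the back-link added by @OpInputsMetadata can be followed from an Inputs class to its op class and raw op name:

    import org.tensorflow.op.annotation.OpInputsMetadata;
    import org.tensorflow.op.annotation.OpMetadata;
    import org.tensorflow.op.strings.Join;

    final class InputsBackLink {
      public static void main(String[] args) {
        // Inputs -> outputsClass -> OpMetadata -> inputsClass closes the loop.
        Class<?> opClass = Join.Inputs.class
            .getAnnotation(OpInputsMetadata.class)
            .outputsClass();
        OpMetadata meta = opClass.getAnnotation(OpMetadata.class);
        System.out.println(opClass.getName() + " -> " + meta.opType()); // Join.OP_NAME
      }
    }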
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StaticRegexFullMatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StaticRegexFullMatch.java index d101be0d46a..a45599ddc4d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StaticRegexFullMatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StaticRegexFullMatch.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TBool; import org.tensorflow.types.TString; @@ -38,6 +40,10 @@ * if the input matches the regex pattern provided. *
<p>
    The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) */ +@OpMetadata( + opType = StaticRegexFullMatch.OP_NAME, + inputsClass = StaticRegexFullMatch.Inputs.class +) public final class StaticRegexFullMatch extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class StaticRegexFullMatch extends RawOp implements Operand private Output output; - private StaticRegexFullMatch(Operation operation) { - super(operation); + public StaticRegexFullMatch(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = StaticRegexFullMatch.class + ) public static class Inputs extends RawOpInputs { /** * A string tensor of the text to be processed. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StaticRegexReplace.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StaticRegexReplace.java index fdbb0c3e4da..a72312ffe18 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StaticRegexReplace.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StaticRegexReplace.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TString; /** * Replaces the match of pattern in input with rewrite. * It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) */ +@OpMetadata( + opType = StaticRegexReplace.OP_NAME, + inputsClass = StaticRegexReplace.Inputs.class +) public final class StaticRegexReplace extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -41,8 +47,8 @@ public final class StaticRegexReplace extends RawOp implements Operand private Output output; - private StaticRegexReplace(Operation operation) { - super(operation); + public StaticRegexReplace(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -123,6 +129,9 @@ public Options replaceGlobal(Boolean replaceGlobal) { } } + @OpInputsMetadata( + outputsClass = StaticRegexReplace.class + ) public static class Inputs extends RawOpInputs { /** * The text to be processed. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringFormat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringFormat.java index a7becde73ec..806cdb57184 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringFormat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringFormat.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -36,6 +38,10 @@ * Formats a string template using a list of tensors. * Formats a string template using a list of tensors, pretty-printing tensor summaries. 
*/ +@OpMetadata( + opType = StringFormat.OP_NAME, + inputsClass = StringFormat.Inputs.class +) @Operator( group = "strings" ) @@ -47,8 +53,8 @@ public final class StringFormat extends RawOp implements Operand { private Output output; - private StringFormat(Operation operation) { - super(operation); + public StringFormat(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -174,6 +180,9 @@ public Options summarize(Long summarize) { } } + @OpInputsMetadata( + outputsClass = StringFormat.class + ) public static class Inputs extends RawOpInputs { /** * The list of tensors to format into the placeholder string. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringLength.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringLength.java index 98aa6dbec32..65f27f3f431 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringLength.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringLength.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; import org.tensorflow.types.TString; @@ -46,6 +48,10 @@ * * */ +@OpMetadata( + opType = StringLength.OP_NAME, + inputsClass = StringLength.Inputs.class +) @Operator( group = "strings" ) @@ -57,8 +63,8 @@ public final class StringLength extends RawOp implements Operand { private Output output; - private StringLength(Operation operation) { - super(operation); + public StringLength(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -141,6 +147,9 @@ public Options unit(String unit) { } } + @OpInputsMetadata( + outputsClass = StringLength.class + ) public static class Inputs extends RawOpInputs { /** * The strings for which to compute the length for each element. 
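Optional attributes are also untouched: they are still passed through each op's generated Options varargs, such as the unit option visible in the StringLength hunk above. A small sketch under the same assumptions as the previous one (the endpoint name tf.strings.stringLength and the static StringLength.unit factory follow the usual generated pattern but are not shown in the hunk):

    import org.tensorflow.Graph;
    import org.tensorflow.op.Ops;
    import org.tensorflow.op.strings.StringLength;

    public final class StringLengthSketch {
      public static void main(String[] args) {
        try (Graph g = new Graph()) {
          Ops tf = Ops.create(g);
          // Default unit is BYTE; "UTF8_CHAR" counts Unicode code points instead.
          StringLength byteLen = tf.strings.stringLength(tf.constant("héllo"));
          StringLength charLen = tf.strings.stringLength(
              tf.constant("héllo"), StringLength.unit("UTF8_CHAR"));
        }
      }
    }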
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringNGrams.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringNGrams.java index 49d00162650..fe203cd8415 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringNGrams.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringNGrams.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -41,6 +43,10 @@ * * @param data type for {@code ngrams_splits} output */ +@OpMetadata( + opType = StringNGrams.OP_NAME, + inputsClass = StringNGrams.Inputs.class +) @Operator( group = "strings" ) @@ -54,8 +60,8 @@ public final class StringNGrams extends RawOp { private Output ngramsSplits; - private StringNGrams(Operation operation) { - super(operation); + public StringNGrams(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; ngrams = operation.output(outputIdx++); ngramsSplits = operation.output(outputIdx++); @@ -122,6 +128,9 @@ public Output ngramsSplits() { return ngramsSplits; } + @OpInputsMetadata( + outputsClass = StringNGrams.class + ) public static class Inputs extends RawOpInputs> { /** * The values tensor of the ragged string tensor to make ngrams out of. Must be a diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringSplit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringSplit.java index 499cc5269c0..41c6a344a41 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringSplit.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringSplit.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -55,6 +57,10 @@ * leading or trailing whitespace. *
<p>
    Note that the above mentioned behavior matches python's str.split. */ +@OpMetadata( + opType = StringSplit.OP_NAME, + inputsClass = StringSplit.Inputs.class +) @Operator( group = "strings" ) @@ -70,8 +76,8 @@ public final class StringSplit extends RawOp { private Output shape; - private StringSplit(Operation operation) { - super(operation); + public StringSplit(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; indices = operation.output(outputIdx++); values = operation.output(outputIdx++); @@ -163,6 +169,9 @@ public Options maxsplit(Long maxsplit) { } } + @OpInputsMetadata( + outputsClass = StringSplit.class + ) public static class Inputs extends RawOpInputs { /** * {@code 1-D} string {@code Tensor}, the strings to split. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Strip.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Strip.java index b5aefbb65ae..fcb125dbca9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Strip.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Strip.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; @@ -42,6 +44,10 @@ * * */ +@OpMetadata( + opType = Strip.OP_NAME, + inputsClass = Strip.Inputs.class +) @Operator( group = "strings" ) @@ -53,8 +59,8 @@ public final class Strip extends RawOp implements Operand { private Output output; - private Strip(Operation operation) { - super(operation); + public Strip(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -89,6 +95,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Strip.class + ) public static class Inputs extends RawOpInputs { /** * A string {@code Tensor} of any shape. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Substr.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Substr.java index acfebc4fe95..a8f3f7cc178 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Substr.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Substr.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -102,6 +104,10 @@ *

  • {@code ValueError}: If {@code pos} and {@code len} are not the same shape.
  • * */ +@OpMetadata( + opType = Substr.OP_NAME, + inputsClass = Substr.Inputs.class +) @Operator( group = "strings" ) @@ -113,8 +119,8 @@ public final class Substr extends RawOp implements Operand { private Output output; - private Substr(Operation operation) { - super(operation); + public Substr(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -202,6 +208,9 @@ public Options unit(String unit) { } } + @OpInputsMetadata( + outputsClass = Substr.class + ) public static class Inputs extends RawOpInputs { /** * Tensor of strings diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToHashBucket.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToHashBucket.java index f801507c1ad..8dec93b94cf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToHashBucket.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToHashBucket.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -39,6 +41,10 @@ * This functionality will be deprecated and it's recommended to use * {@code tf.string_to_hash_bucket_fast()} or {@code tf.string_to_hash_bucket_strong()}. */ +@OpMetadata( + opType = ToHashBucket.OP_NAME, + inputsClass = ToHashBucket.Inputs.class +) @Operator( group = "strings" ) @@ -50,8 +56,8 @@ public final class ToHashBucket extends RawOp implements Operand { private Output output; - private ToHashBucket(Operation operation) { - super(operation); + public ToHashBucket(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = ToHashBucket.class + ) public static class Inputs extends RawOpInputs { /** * The stringTensor input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToHashBucketFast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToHashBucketFast.java index 39cdf8a5405..a35fe637b42 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToHashBucketFast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToHashBucketFast.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -49,6 +51,10 @@ * * */ +@OpMetadata( + opType = ToHashBucketFast.OP_NAME, + inputsClass = ToHashBucketFast.Inputs.class +) @Operator( group = "strings" ) @@ -60,8 +66,8 @@ public final class ToHashBucketFast extends RawOp implements Operand { private Output output; - private ToHashBucketFast(Operation operation) { - super(operation); + public ToHashBucketFast(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -98,6 +104,9 @@ public Output asOutput() { return output; 
} + @OpInputsMetadata( + outputsClass = ToHashBucketFast.class + ) public static class Inputs extends RawOpInputs { /** * The strings to assign a hash bucket. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToHashBucketStrong.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToHashBucketStrong.java index f9ca9b8115f..641e5d41e87 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToHashBucketStrong.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToHashBucketStrong.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -55,6 +57,10 @@ * * */ +@OpMetadata( + opType = ToHashBucketStrong.OP_NAME, + inputsClass = ToHashBucketStrong.Inputs.class +) @Operator( group = "strings" ) @@ -66,8 +72,8 @@ public final class ToHashBucketStrong extends RawOp implements Operand { private Output output; - private ToHashBucketStrong(Operation operation) { - super(operation); + public ToHashBucketStrong(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -112,6 +118,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = ToHashBucketStrong.class + ) public static class Inputs extends RawOpInputs { /** * The strings to assign a hash bucket. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToNumber.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToNumber.java index f8f325282fc..3f58cbadd07 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToNumber.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToNumber.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; @@ -51,6 +53,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ToNumber.OP_NAME, + inputsClass = ToNumber.Inputs.class +) @Operator( group = "strings" ) @@ -62,8 +68,8 @@ public final class ToNumber extends RawOp implements Operand< private Output output; - private ToNumber(Operation operation) { - super(operation); + public ToNumber(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -116,6 +122,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = ToNumber.class + ) public static class Inputs extends RawOpInputs> { /** * The stringTensor input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecode.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecode.java index a130514dc54..9929ce78a97 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecode.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecode.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; @@ -52,6 +54,10 @@ * * @param data type for {@code row_splits} output */ +@OpMetadata( + opType = UnicodeDecode.OP_NAME, + inputsClass = UnicodeDecode.Inputs.class +) public final class UnicodeDecode extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -62,8 +68,8 @@ public final class UnicodeDecode extends RawOp { private Output charValues; - private UnicodeDecode(Operation operation) { - super(operation); + public UnicodeDecode(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; rowSplits = operation.output(outputIdx++); charValues = operation.output(outputIdx++); @@ -241,6 +247,9 @@ public Options replaceControlCharacters(Boolean replaceControlCharacters) { } } + @OpInputsMetadata( + outputsClass = UnicodeDecode.class + ) public static class Inputs extends RawOpInputs> { /** * The text to be decoded. Can have any shape. Note that the output is flattened diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecodeWithOffsets.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecodeWithOffsets.java index c3467e760b0..a186bea2a79 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecodeWithOffsets.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecodeWithOffsets.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; @@ -56,6 +58,10 @@ * * @param data type for {@code row_splits} output */ +@OpMetadata( + opType = UnicodeDecodeWithOffsets.OP_NAME, + inputsClass = UnicodeDecodeWithOffsets.Inputs.class +) public final class UnicodeDecodeWithOffsets extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -68,8 +74,8 @@ public final class UnicodeDecodeWithOffsets extends RawOp { private Output charToByteStarts; - private UnicodeDecodeWithOffsets(Operation operation) { - super(operation); + public UnicodeDecodeWithOffsets(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; rowSplits = operation.output(outputIdx++); charValues = operation.output(outputIdx++); @@ -258,6 +264,9 @@ public Options replaceControlCharacters(Boolean replaceControlCharacters) { } } + @OpInputsMetadata( + outputsClass = UnicodeDecodeWithOffsets.class + ) public static class Inputs extends RawOpInputs> { /** * The text to be decoded. Can have any shape. 
Note that the output is flattened diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeEncode.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeEncode.java index ffa29a2ddb4..a14c69eea1b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeEncode.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeEncode.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.TString; @@ -47,6 +49,10 @@ * output = ['Hello', 'World'] * */ +@OpMetadata( + opType = UnicodeEncode.OP_NAME, + inputsClass = UnicodeEncode.Inputs.class +) public final class UnicodeEncode extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -55,8 +61,8 @@ public final class UnicodeEncode extends RawOp implements Operand { private Output output; - private UnicodeEncode(Operation operation) { - super(operation); + public UnicodeEncode(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -181,6 +187,9 @@ public Options replacementChar(Long replacementChar) { } } + @OpInputsMetadata( + outputsClass = UnicodeEncode.class + ) public static class Inputs extends RawOpInputs { /** * A 1D tensor containing the unicode codepoints that should be encoded. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeScript.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeScript.java index ce046508f36..7d63d69a16a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeScript.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeScript.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; @@ -52,6 +54,10 @@ * * */ +@OpMetadata( + opType = UnicodeScript.OP_NAME, + inputsClass = UnicodeScript.Inputs.class +) @Operator( group = "strings" ) @@ -63,8 +69,8 @@ public final class UnicodeScript extends RawOp implements Operand { private Output output; - private UnicodeScript(Operation operation) { - super(operation); + public UnicodeScript(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -99,6 +105,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = UnicodeScript.class + ) public static class Inputs extends RawOpInputs { /** * A Tensor of int32 Unicode code points. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeTranscode.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeTranscode.java index 90194f8b23a..a433dddf82f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeTranscode.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeTranscode.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; @@ -68,6 +70,10 @@ * * */ +@OpMetadata( + opType = UnicodeTranscode.OP_NAME, + inputsClass = UnicodeTranscode.Inputs.class +) @Operator( group = "strings" ) @@ -79,8 +85,8 @@ public final class UnicodeTranscode extends RawOp implements Operand { private Output output; - private UnicodeTranscode(Operation operation) { - super(operation); + public UnicodeTranscode(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -241,6 +247,9 @@ public Options replaceControlCharacters(Boolean replaceControlCharacters) { } } + @OpInputsMetadata( + outputsClass = UnicodeTranscode.class + ) public static class Inputs extends RawOpInputs { /** * The text to be processed. Can have any shape. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnsortedSegmentJoin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnsortedSegmentJoin.java index 58b1eb8ac3b..9d3e0484d58 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnsortedSegmentJoin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnsortedSegmentJoin.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -59,6 +61,10 @@ * # output_array ==> ['this:is:a:test'] * */ +@OpMetadata( + opType = UnsortedSegmentJoin.OP_NAME, + inputsClass = UnsortedSegmentJoin.Inputs.class +) @Operator( group = "strings" ) @@ -70,8 +76,8 @@ public final class UnsortedSegmentJoin extends RawOp implements Operand private Output output; - private UnsortedSegmentJoin(Operation operation) { - super(operation); + public UnsortedSegmentJoin(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -152,6 +158,9 @@ public Options separator(String separator) { } } + @OpInputsMetadata( + outputsClass = UnsortedSegmentJoin.class + ) public static class Inputs extends RawOpInputs { /** * The input to be joined. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Upper.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Upper.java index 7ba066da30c..84378ddac55 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Upper.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/Upper.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; @@ -42,6 +44,10 @@ * * */ +@OpMetadata( + opType = Upper.OP_NAME, + inputsClass = Upper.Inputs.class +) @Operator( group = "strings" ) @@ -53,8 +59,8 @@ public final class Upper extends RawOp implements Operand { private Output output; - private Upper(Operation operation) { - super(operation); + public Upper(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -130,6 +136,9 @@ public Options encoding(String encoding) { } } + @OpInputsMetadata( + outputsClass = Upper.class + ) public static class Inputs extends RawOpInputs { /** * The input to be upper-cased. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/AudioSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/AudioSummary.java index 9a78e8b1bfc..a27777b8eab 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/AudioSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/AudioSummary.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TString; @@ -44,6 +46,10 @@ * generated sequentially as 'tag/audio/0', 'tag/audio/1', etc. * */ +@OpMetadata( + opType = AudioSummary.OP_NAME, + inputsClass = AudioSummary.Inputs.class +) @Operator( group = "summary" ) @@ -55,8 +61,8 @@ public final class AudioSummary extends RawOp implements Operand { private Output summary; - private AudioSummary(Operation operation) { - super(operation); + public AudioSummary(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; summary = operation.output(outputIdx++); } @@ -135,6 +141,9 @@ public Options maxOutputs(Long maxOutputs) { } } + @OpInputsMetadata( + outputsClass = AudioSummary.class + ) public static class Inputs extends RawOpInputs { /** * Scalar. Used to build the {@code tag} attribute of the summary values. 
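Because the new annotation values are plain strings and class literals, tooling can recover the op-type-to-class mapping straight from the generated classes. A purely illustrative sketch, not an API added by this patch, and assuming the annotations are retained at runtime:

    import org.tensorflow.op.annotation.OpInputsMetadata;
    import org.tensorflow.op.annotation.OpMetadata;
    import org.tensorflow.op.strings.Upper;

    public final class MetadataSketch {
      public static void main(String[] args) {
        // Read the metadata added by this patch; element names match the hunks above.
        OpMetadata meta = Upper.class.getAnnotation(OpMetadata.class);
        System.out.println(meta.opType());        // the op's OP_NAME
        System.out.println(meta.inputsClass());   // Upper.Inputs
        OpInputsMetadata inputs = Upper.Inputs.class.getAnnotation(OpInputsMetadata.class);
        System.out.println(inputs.outputsClass()); // back to Upper
      }
    }

The element names opType, inputsClass, and outputsClass are exactly the ones written by the hunks above.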
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CloseSummaryWriter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CloseSummaryWriter.java index 58b8dcef495..3cd27230947 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CloseSummaryWriter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CloseSummaryWriter.java @@ -26,19 +26,25 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.family.TType; /** * The CloseSummaryWriter operation */ +@OpMetadata( + opType = CloseSummaryWriter.OP_NAME, + inputsClass = CloseSummaryWriter.Inputs.class +) public final class CloseSummaryWriter extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "CloseSummaryWriter"; - private CloseSummaryWriter(Operation operation) { - super(operation); + public CloseSummaryWriter(Operation operation) { + super(operation, OP_NAME); } /** @@ -57,6 +63,9 @@ public static CloseSummaryWriter create(Scope scope, Operand wr return new CloseSummaryWriter(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = CloseSummaryWriter.class + ) public static class Inputs extends RawOpInputs { /** * The writer input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CreateSummaryDbWriter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CreateSummaryDbWriter.java index 8af194417b1..3195ee604cd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CreateSummaryDbWriter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CreateSummaryDbWriter.java @@ -26,20 +26,26 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; /** * The CreateSummaryDbWriter operation */ +@OpMetadata( + opType = CreateSummaryDbWriter.OP_NAME, + inputsClass = CreateSummaryDbWriter.Inputs.class +) public final class CreateSummaryDbWriter extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "CreateSummaryDbWriter"; - private CreateSummaryDbWriter(Operation operation) { - super(operation); + public CreateSummaryDbWriter(Operation operation) { + super(operation, OP_NAME); } /** @@ -68,6 +74,9 @@ public static CreateSummaryDbWriter create(Scope scope, Operand return new CreateSummaryDbWriter(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = CreateSummaryDbWriter.class + ) public static class Inputs extends RawOpInputs { /** * The writer input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CreateSummaryFileWriter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CreateSummaryFileWriter.java index aecb55a68f7..af2bc01a83c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CreateSummaryFileWriter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/CreateSummaryFileWriter.java 
@@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TInt32; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -33,14 +35,18 @@ /** * The CreateSummaryFileWriter operation */ +@OpMetadata( + opType = CreateSummaryFileWriter.OP_NAME, + inputsClass = CreateSummaryFileWriter.Inputs.class +) public final class CreateSummaryFileWriter extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "CreateSummaryFileWriter"; - private CreateSummaryFileWriter(Operation operation) { - super(operation); + public CreateSummaryFileWriter(Operation operation) { + super(operation, OP_NAME); } /** @@ -69,6 +75,9 @@ public static CreateSummaryFileWriter create(Scope scope, Operand { /** * The writer input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/FlushSummaryWriter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/FlushSummaryWriter.java index 44f22343c23..3da6797c325 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/FlushSummaryWriter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/FlushSummaryWriter.java @@ -26,19 +26,25 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.family.TType; /** * The FlushSummaryWriter operation */ +@OpMetadata( + opType = FlushSummaryWriter.OP_NAME, + inputsClass = FlushSummaryWriter.Inputs.class +) public final class FlushSummaryWriter extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "FlushSummaryWriter"; - private FlushSummaryWriter(Operation operation) { - super(operation); + public FlushSummaryWriter(Operation operation) { + super(operation, OP_NAME); } /** @@ -57,6 +63,9 @@ public static FlushSummaryWriter create(Scope scope, Operand wr return new FlushSummaryWriter(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = FlushSummaryWriter.class + ) public static class Inputs extends RawOpInputs { /** * The writer input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/HistogramSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/HistogramSummary.java index 6e05d278830..7762807eb96 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/HistogramSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/HistogramSummary.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -39,6 +41,10 @@ * has one summary value containing a histogram for {@code values}. *
<p>
    This op reports an {@code InvalidArgument} error if any value is not finite. */ +@OpMetadata( + opType = HistogramSummary.OP_NAME, + inputsClass = HistogramSummary.Inputs.class +) @Operator( group = "summary" ) @@ -50,8 +56,8 @@ public final class HistogramSummary extends RawOp implements Operand { private Output summary; - private HistogramSummary(Operation operation) { - super(operation); + public HistogramSummary(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; summary = operation.output(outputIdx++); } @@ -89,6 +95,9 @@ public Output asOutput() { return summary; } + @OpInputsMetadata( + outputsClass = HistogramSummary.class + ) public static class Inputs extends RawOpInputs { /** * Scalar. Tag to use for the {@code Summary.Value}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ImageSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ImageSummary.java index ff83df181ad..6bd4dcf1989 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ImageSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ImageSummary.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -71,6 +73,10 @@ * replaced by this tensor in the output image. The default value is the color * red. */ +@OpMetadata( + opType = ImageSummary.OP_NAME, + inputsClass = ImageSummary.Inputs.class +) @Operator( group = "summary" ) @@ -82,8 +88,8 @@ public final class ImageSummary extends RawOp implements Operand { private Output summary; - private ImageSummary(Operation operation) { - super(operation); + public ImageSummary(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; summary = operation.output(outputIdx++); } @@ -187,6 +193,9 @@ public Options badColor(Tensor badColor) { } } + @OpInputsMetadata( + outputsClass = ImageSummary.class + ) public static class Inputs extends RawOpInputs { /** * Scalar. Used to build the {@code tag} attribute of the summary values. 
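The resource-based summary ops in this group carry no @Operator annotation, so they are driven through their static create factories rather than an Ops endpoint. A hedged sketch of the usual writer lifecycle, assuming the create parameters follow the op inputs in the order given by the op definitions (the hunks above only show the head of each signature); SummaryWriter and the Write*Summary ops that would sit between creation and flush are patched further down in this diff:

    import org.tensorflow.Graph;
    import org.tensorflow.op.Ops;
    import org.tensorflow.op.summary.CloseSummaryWriter;
    import org.tensorflow.op.summary.CreateSummaryFileWriter;
    import org.tensorflow.op.summary.FlushSummaryWriter;
    import org.tensorflow.op.summary.SummaryWriter;

    public final class SummaryWriterSketch {
      public static void main(String[] args) {
        try (Graph g = new Graph()) {
          Ops tf = Ops.create(g);
          // Allocate the writer resource and bind it to an event-file directory.
          SummaryWriter writer = SummaryWriter.create(tf.scope());
          CreateSummaryFileWriter.create(tf.scope(), writer,
              tf.constant("/tmp/tf-logs"),   // logdir
              tf.constant(10),               // max_queue
              tf.constant(1000),             // flush_millis
              tf.constant(".v2"));           // filename_suffix
          // ... WriteScalarSummary / WriteImageSummary ops would be added here ...
          FlushSummaryWriter.create(tf.scope(), writer);
          CloseSummaryWriter.create(tf.scope(), writer);
        }
      }
    }

As with any graph op, these nodes only take effect once the resulting graph is run in a Session.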
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ImportEvent.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ImportEvent.java index 0150201909b..5953f0f0bbd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ImportEvent.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ImportEvent.java @@ -26,20 +26,26 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; /** * The ImportEvent operation */ +@OpMetadata( + opType = ImportEvent.OP_NAME, + inputsClass = ImportEvent.Inputs.class +) public final class ImportEvent extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "ImportEvent"; - private ImportEvent(Operation operation) { - super(operation); + public ImportEvent(Operation operation) { + super(operation, OP_NAME); } /** @@ -61,6 +67,9 @@ public static ImportEvent create(Scope scope, Operand writer, return new ImportEvent(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = ImportEvent.class + ) public static class Inputs extends RawOpInputs { /** * The writer input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/MergeSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/MergeSummary.java index 643d36d2894..5437e3399d5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/MergeSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/MergeSummary.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; @@ -40,6 +42,10 @@ *
<p>
    When the Op is run, it reports an {@code InvalidArgument} error if multiple values * in the summaries to merge use the same tag. */ +@OpMetadata( + opType = MergeSummary.OP_NAME, + inputsClass = MergeSummary.Inputs.class +) @Operator( group = "summary" ) @@ -51,8 +57,8 @@ public final class MergeSummary extends RawOp implements Operand { private Output summary; - private MergeSummary(Operation operation) { - super(operation); + public MergeSummary(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; summary = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return summary; } + @OpInputsMetadata( + outputsClass = MergeSummary.class + ) public static class Inputs extends RawOpInputs { /** * Can be of any shape. Each must contain serialized {@code Summary} protocol diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ScalarSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ScalarSummary.java index c459ba8413a..259972a5e49 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ScalarSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/ScalarSummary.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -37,6 +39,10 @@ * The input {@code tags} and {@code values} must have the same shape. The generated summary * has a summary value for each tag-value pair in {@code tags} and {@code values}. */ +@OpMetadata( + opType = ScalarSummary.OP_NAME, + inputsClass = ScalarSummary.Inputs.class +) @Operator( group = "summary" ) @@ -48,8 +54,8 @@ public final class ScalarSummary extends RawOp implements Operand { private Output summary; - private ScalarSummary(Operation operation) { - super(operation); + public ScalarSummary(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; summary = operation.output(outputIdx++); } @@ -87,6 +93,9 @@ public Output asOutput() { return summary; } + @OpInputsMetadata( + outputsClass = ScalarSummary.class + ) public static class Inputs extends RawOpInputs { /** * Tags for the summary. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/StatsAggregatorSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/StatsAggregatorSummary.java index d1eeb17ac2f..a074ab65484 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/StatsAggregatorSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/StatsAggregatorSummary.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; /** * Produces a summary of any statistics recorded by the given statistics manager. 
*/ +@OpMetadata( + opType = StatsAggregatorSummary.OP_NAME, + inputsClass = StatsAggregatorSummary.Inputs.class +) public final class StatsAggregatorSummary extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -41,8 +47,8 @@ public final class StatsAggregatorSummary extends RawOp implements Operand summary; - private StatsAggregatorSummary(Operation operation) { - super(operation); + public StatsAggregatorSummary(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; summary = operation.output(outputIdx++); } @@ -77,6 +83,9 @@ public Output asOutput() { return summary; } + @OpInputsMetadata( + outputsClass = StatsAggregatorSummary.class + ) public static class Inputs extends RawOpInputs { /** * The iterator input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/SummaryWriter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/SummaryWriter.java index c3a9d913d76..2e4af55ec82 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/SummaryWriter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/SummaryWriter.java @@ -27,11 +27,17 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.family.TType; /** * The SummaryWriter operation */ +@OpMetadata( + opType = SummaryWriter.OP_NAME, + inputsClass = SummaryWriter.Inputs.class +) public final class SummaryWriter extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -41,8 +47,8 @@ public final class SummaryWriter extends RawOp implements Operand { private Output writer; @SuppressWarnings("unchecked") - private SummaryWriter(Operation operation) { - super(operation); + public SummaryWriter(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; writer = operation.output(outputIdx++); } @@ -141,6 +147,9 @@ public Options container(String container) { } } + @OpInputsMetadata( + outputsClass = SummaryWriter.class + ) public static class Inputs extends RawOpInputs { /** * The sharedName attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/TensorSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/TensorSummary.java index ccd1dcb3c67..d828e584e31 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/TensorSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/TensorSummary.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -35,6 +37,10 @@ /** * Outputs a {@code Summary} protocol buffer with a tensor and per-plugin data. 
*/ +@OpMetadata( + opType = TensorSummary.OP_NAME, + inputsClass = TensorSummary.Inputs.class +) @Operator( group = "summary" ) @@ -46,8 +52,8 @@ public final class TensorSummary extends RawOp implements Operand { private Output summary; - private TensorSummary(Operation operation) { - super(operation); + public TensorSummary(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; summary = operation.output(outputIdx++); } @@ -88,6 +94,9 @@ public Output asOutput() { return summary; } + @OpInputsMetadata( + outputsClass = TensorSummary.class + ) public static class Inputs extends RawOpInputs { /** * A string attached to this summary. Used for organization in TensorBoard. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteAudioSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteAudioSummary.java index 03c11c27c74..7b00a14229b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteAudioSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteAudioSummary.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -36,14 +38,18 @@ * Writes encoded audio summary {@code tensor} at {@code step} with {@code tag} using summary {@code writer}. * {@code sample_rate} is the audio sample rate is Hz. */ +@OpMetadata( + opType = WriteAudioSummary.OP_NAME, + inputsClass = WriteAudioSummary.Inputs.class +) public final class WriteAudioSummary extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "WriteAudioSummary"; - private WriteAudioSummary(Operation operation) { - super(operation); + public WriteAudioSummary(Operation operation) { + super(operation, OP_NAME); } /** @@ -111,6 +117,9 @@ public Options maxOutputs(Long maxOutputs) { } } + @OpInputsMetadata( + outputsClass = WriteAudioSummary.class + ) public static class Inputs extends RawOpInputs { /** * The writer input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteGraphSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteGraphSummary.java index 791f2e53025..811f79441b5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteGraphSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteGraphSummary.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -34,14 +36,18 @@ * Writes a graph summary. * Writes TensorFlow graph {@code tensor} at {@code step} using summary {@code writer}. 
*/ +@OpMetadata( + opType = WriteGraphSummary.OP_NAME, + inputsClass = WriteGraphSummary.Inputs.class +) public final class WriteGraphSummary extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "WriteGraphSummary"; - private WriteGraphSummary(Operation operation) { - super(operation); + public WriteGraphSummary(Operation operation) { + super(operation, OP_NAME); } /** @@ -65,6 +71,9 @@ public static WriteGraphSummary create(Scope scope, Operand wri return new WriteGraphSummary(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = WriteGraphSummary.class + ) public static class Inputs extends RawOpInputs { /** * The writer input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteHistogramSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteHistogramSummary.java index 93f3c00c93e..24fd3c8eda9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteHistogramSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteHistogramSummary.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -35,14 +37,18 @@ * Writes a histogram summary. * Writes histogram {@code values} at {@code step} with {@code tag} using summary {@code writer}. */ +@OpMetadata( + opType = WriteHistogramSummary.OP_NAME, + inputsClass = WriteHistogramSummary.Inputs.class +) public final class WriteHistogramSummary extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "WriteHistogramSummary"; - private WriteHistogramSummary(Operation operation) { - super(operation); + public WriteHistogramSummary(Operation operation) { + super(operation, OP_NAME); } /** @@ -68,6 +74,9 @@ public static WriteHistogramSummary create(Scope scope, Operand return new WriteHistogramSummary(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = WriteHistogramSummary.class + ) public static class Inputs extends RawOpInputs { /** * The writer input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteImageSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteImageSummary.java index 9b1b64ae148..a2437bfed6e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteImageSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteImageSummary.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -38,14 +40,18 @@ * Writes image {@code tensor} at {@code step} with {@code tag} using summary {@code writer}. * {@code tensor} is image with shape [height, width, channels]. 
*/ +@OpMetadata( + opType = WriteImageSummary.OP_NAME, + inputsClass = WriteImageSummary.Inputs.class +) public final class WriteImageSummary extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "WriteImageSummary"; - private WriteImageSummary(Operation operation) { - super(operation); + public WriteImageSummary(Operation operation) { + super(operation, OP_NAME); } /** @@ -113,6 +119,9 @@ public Options maxImages(Long maxImages) { } } + @OpInputsMetadata( + outputsClass = WriteImageSummary.class + ) public static class Inputs extends RawOpInputs { /** * The writer input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteRawProtoSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteRawProtoSummary.java index 66b92490f04..3633dd822ce 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteRawProtoSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteRawProtoSummary.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -34,14 +36,18 @@ * Writes a serialized proto summary. * Writes {@code tensor}, a serialized proto at {@code step} using summary {@code writer}. */ +@OpMetadata( + opType = WriteRawProtoSummary.OP_NAME, + inputsClass = WriteRawProtoSummary.Inputs.class +) public final class WriteRawProtoSummary extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "WriteRawProtoSummary"; - private WriteRawProtoSummary(Operation operation) { - super(operation); + public WriteRawProtoSummary(Operation operation) { + super(operation, OP_NAME); } /** @@ -65,6 +71,9 @@ public static WriteRawProtoSummary create(Scope scope, Operand return new WriteRawProtoSummary(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = WriteRawProtoSummary.class + ) public static class Inputs extends RawOpInputs { /** * The writer input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteScalarSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteScalarSummary.java index 3edd2898fd6..7412f19c50c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteScalarSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteScalarSummary.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -36,14 +38,18 @@ * Writes a scalar summary. * Writes scalar {@code value} at {@code step} with {@code tag} using summary {@code writer}. 
*/ +@OpMetadata( + opType = WriteScalarSummary.OP_NAME, + inputsClass = WriteScalarSummary.Inputs.class +) public final class WriteScalarSummary extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "WriteScalarSummary"; - private WriteScalarSummary(Operation operation) { - super(operation); + public WriteScalarSummary(Operation operation) { + super(operation, OP_NAME); } /** @@ -69,6 +75,9 @@ public static WriteScalarSummary create(Scope scope, Operand wr return new WriteScalarSummary(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = WriteScalarSummary.class + ) public static class Inputs extends RawOpInputs { /** * The writer input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteSummary.java index 0c1f7822242..fd2b37c3b8b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/summary/WriteSummary.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -35,14 +37,18 @@ * Writes a tensor summary. * Writes {@code tensor} at {@code step} with {@code tag} using summary {@code writer}. */ +@OpMetadata( + opType = WriteSummary.OP_NAME, + inputsClass = WriteSummary.Inputs.class +) public final class WriteSummary extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "WriteSummary"; - private WriteSummary(Operation operation) { - super(operation); + public WriteSummary(Operation operation) { + super(operation, OP_NAME); } /** @@ -71,6 +77,9 @@ public static WriteSummary create(Scope scope, Operand writer, return new WriteSummary(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = WriteSummary.class + ) public static class Inputs extends RawOpInputs { /** * The writer input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/AllToAll.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/AllToAll.java index f5cf0420941..37a0862c547 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/AllToAll.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/AllToAll.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -49,6 +51,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = AllToAll.OP_NAME, + inputsClass = AllToAll.Inputs.class +) public final class AllToAll extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -57,8 +63,8 @@ public final class AllToAll extends RawOp implements Operand private Output output; - private AllToAll(Operation operation) { - super(operation); + public AllToAll(Operation 
operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -106,6 +112,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = AllToAll.class + ) public static class Inputs extends RawOpInputs> { /** * The local input to the sum. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CollectivePermute.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CollectivePermute.java index 0741956dcc7..c98049a5299 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CollectivePermute.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CollectivePermute.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -40,6 +42,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = CollectivePermute.OP_NAME, + inputsClass = CollectivePermute.Inputs.class +) public final class CollectivePermute extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -48,8 +54,8 @@ public final class CollectivePermute extends RawOp implements O private Output output; - private CollectivePermute(Operation operation) { - super(operation); + public CollectivePermute(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -89,6 +95,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = CollectivePermute.class + ) public static class Inputs extends RawOpInputs> { /** * The local input to be permuted. Currently only supports float and diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CompilationResult.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CompilationResult.java index 450c390d6e2..a18e76ed0a4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CompilationResult.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CompilationResult.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TString; /** @@ -35,6 +37,10 @@ * CompilationResultProto, which holds a status and an error message if an error * occurred during compilation. 
*/ +@OpMetadata( + opType = CompilationResult.OP_NAME, + inputsClass = CompilationResult.Inputs.class +) public final class CompilationResult extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class CompilationResult extends RawOp implements Operand { private Output output; - private CompilationResult(Operation operation) { - super(operation); + public CompilationResult(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -77,6 +83,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = CompilationResult.class + ) public static class Inputs extends RawOpInputs { public Inputs(GraphOperation op) { super(new CompilationResult(op), op, Arrays.asList()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/Compile.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/Compile.java index ecf0ae4cc3f..f45cc4f701a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/Compile.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/Compile.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; @@ -53,6 +55,10 @@ * used to look up the program in the compilation cache. * 'may_modify_variables' indicates whether variables may be modified. */ +@OpMetadata( + opType = Compile.OP_NAME, + inputsClass = Compile.Inputs.class +) @Operator( group = "tpu" ) @@ -69,8 +75,8 @@ public final class Compile extends RawOp { private List> mayModifyVariables; @SuppressWarnings("unchecked") - private Compile(Operation operation) { - super(operation); + public Compile(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; compilationStatus = operation.output(outputIdx++); int programLength = operation.outputListLength("program"); @@ -134,6 +140,9 @@ public List> mayModifyVariables() { return mayModifyVariables; } + @OpInputsMetadata( + outputsClass = Compile.class + ) public static class Inputs extends RawOpInputs { /** * The dynamicShapes input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CompileSucceededAssert.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CompileSucceededAssert.java index d4a2b599e91..53b64ef5482 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CompileSucceededAssert.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CompileSucceededAssert.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; @@ -35,6 +37,10 @@ * pending device interactions fail. *

    'compilation_status' is a serialized CompilationResultProto. */ +@OpMetadata( + opType = CompileSucceededAssert.OP_NAME, + inputsClass = CompileSucceededAssert.Inputs.class +) @Operator( group = "tpu" ) @@ -44,8 +50,8 @@ public final class CompileSucceededAssert extends RawOp { */ public static final String OP_NAME = "TPUCompileSucceededAssert"; - private CompileSucceededAssert(Operation operation) { - super(operation); + public CompileSucceededAssert(Operation operation) { + super(operation, OP_NAME); } /** @@ -64,6 +70,9 @@ public static CompileSucceededAssert create(Scope scope, Operand compil return new CompileSucceededAssert(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = CompileSucceededAssert.class + ) public static class Inputs extends RawOpInputs { /** * The compilationStatus input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureDistributedTPU.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureDistributedTPU.java index 8c951410670..6cc6432a70d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureDistributedTPU.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureDistributedTPU.java @@ -27,11 +27,17 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TString; /** * Sets up the centralized structures for a distributed TPU system. */ +@OpMetadata( + opType = ConfigureDistributedTPU.OP_NAME, + inputsClass = ConfigureDistributedTPU.Inputs.class +) public final class ConfigureDistributedTPU extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -40,8 +46,8 @@ public final class ConfigureDistributedTPU extends RawOp implements Operand topology; - private ConfigureDistributedTPU(Operation operation) { - super(operation); + public ConfigureDistributedTPU(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; topology = operation.output(outputIdx++); } @@ -220,6 +226,9 @@ public Options compilationFailureClosesChips(Boolean compilationFailureClosesChi } } + @OpInputsMetadata( + outputsClass = ConfigureDistributedTPU.class + ) public static class Inputs extends RawOpInputs { /** * Reserved. Do not use. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureTPUEmbedding.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureTPUEmbedding.java index 029bcc9974a..fc8b8be7b96 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureTPUEmbedding.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureTPUEmbedding.java @@ -25,18 +25,24 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; /** * Sets up TPUEmbedding in a distributed TPU system. 
*/ +@OpMetadata( + opType = ConfigureTPUEmbedding.OP_NAME, + inputsClass = ConfigureTPUEmbedding.Inputs.class +) public final class ConfigureTPUEmbedding extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "ConfigureTPUEmbedding"; - private ConfigureTPUEmbedding(Operation operation) { - super(operation); + public ConfigureTPUEmbedding(Operation operation) { + super(operation, OP_NAME); } /** @@ -56,6 +62,9 @@ public static ConfigureTPUEmbedding create(Scope scope, String config) { return new ConfigureTPUEmbedding(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = ConfigureTPUEmbedding.class + ) public static class Inputs extends RawOpInputs { /** * Serialized tensorflow.tpu.TPUEmbeddingConfiguration that diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CrossReplicaSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CrossReplicaSum.java index 52c9e9a4bf0..16cc920242e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CrossReplicaSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CrossReplicaSum.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; @@ -41,6 +43,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = CrossReplicaSum.OP_NAME, + inputsClass = CrossReplicaSum.Inputs.class +) public final class CrossReplicaSum extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -49,8 +55,8 @@ public final class CrossReplicaSum extends RawOp implements O private Output output; - private CrossReplicaSum(Operation operation) { - super(operation); + public CrossReplicaSum(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = CrossReplicaSum.class + ) public static class Inputs extends RawOpInputs> { /** * The local input to the sum. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EmbeddingActivations.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EmbeddingActivations.java index 16e93996681..0a12ae56535 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EmbeddingActivations.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EmbeddingActivations.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -37,6 +39,10 @@ * differentiation of graphs containing embeddings via the TPU Embedding Python * libraries. 
*/ +@OpMetadata( + opType = EmbeddingActivations.OP_NAME, + inputsClass = EmbeddingActivations.Inputs.class +) public final class EmbeddingActivations extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class EmbeddingActivations extends RawOp implements Operand output; - private EmbeddingActivations(Operation operation) { - super(operation); + public EmbeddingActivations(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -90,6 +96,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = EmbeddingActivations.class + ) public static class Inputs extends RawOpInputs { /** * A trainable variable, enabling optimizers to find this op. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingIntegerBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingIntegerBatch.java index dc6902258ab..51b84ed8f18 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingIntegerBatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingIntegerBatch.java @@ -27,20 +27,26 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TInt32; import org.tensorflow.types.TString; /** * An op that enqueues a list of input batch tensors to TPUEmbedding. */ +@OpMetadata( + opType = EnqueueTPUEmbeddingIntegerBatch.OP_NAME, + inputsClass = EnqueueTPUEmbeddingIntegerBatch.Inputs.class +) public final class EnqueueTPUEmbeddingIntegerBatch extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "EnqueueTPUEmbeddingIntegerBatch"; - private EnqueueTPUEmbeddingIntegerBatch(Operation operation) { - super(operation); + public EnqueueTPUEmbeddingIntegerBatch(Operation operation) { + super(operation, OP_NAME); } /** @@ -107,6 +113,9 @@ public Options deviceOrdinal(Long deviceOrdinal) { } } + @OpInputsMetadata( + outputsClass = EnqueueTPUEmbeddingIntegerBatch.class + ) public static class Inputs extends RawOpInputs { /** * A list of 1D tensors, one for each embedding table, containing the diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingRaggedTensorBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingRaggedTensorBatch.java index 8128d89956f..6d63b32a5bb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingRaggedTensorBatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingRaggedTensorBatch.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TNumber; @@ -42,14 +44,18 @@ * with dim_size() equal to the total number of lookups into the table described by * the corresponding feature. 
*/ +@OpMetadata( + opType = EnqueueTPUEmbeddingRaggedTensorBatch.OP_NAME, + inputsClass = EnqueueTPUEmbeddingRaggedTensorBatch.Inputs.class +) public final class EnqueueTPUEmbeddingRaggedTensorBatch extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "EnqueueTPUEmbeddingRaggedTensorBatch"; - private EnqueueTPUEmbeddingRaggedTensorBatch(Operation operation) { - super(operation); + public EnqueueTPUEmbeddingRaggedTensorBatch(Operation operation) { + super(operation, OP_NAME); } /** @@ -312,6 +318,9 @@ public Options numFeatures(Long... numFeatures) { } } + @OpInputsMetadata( + outputsClass = EnqueueTPUEmbeddingRaggedTensorBatch.class + ) public static class Inputs extends RawOpInputs { /** * A list of rank 1 Tensors specifying the break points for splitting diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseBatch.java index b94730af5f3..f33df0dcc5b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseBatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseBatch.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TNumber; @@ -43,14 +45,18 @@ * must have the same shape, i.e. rank 1 with dim_size() equal to the total * number of lookups into the table described by the corresponding table_id. */ +@OpMetadata( + opType = EnqueueTPUEmbeddingSparseBatch.OP_NAME, + inputsClass = EnqueueTPUEmbeddingSparseBatch.Inputs.class +) public final class EnqueueTPUEmbeddingSparseBatch extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "EnqueueTPUEmbeddingSparseBatch"; - private EnqueueTPUEmbeddingSparseBatch(Operation operation) { - super(operation); + public EnqueueTPUEmbeddingSparseBatch(Operation operation) { + super(operation, OP_NAME); } /** @@ -199,6 +205,9 @@ public Options combiners(String... 
combiners) { } } + @OpInputsMetadata( + outputsClass = EnqueueTPUEmbeddingSparseBatch.class + ) public static class Inputs extends RawOpInputs { /** * A list of rank 1 Tensors specifying the training example and diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseTensorBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseTensorBatch.java index d431aec3598..75f59eca096 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseTensorBatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingSparseTensorBatch.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; import org.tensorflow.types.family.TNumber; @@ -42,14 +44,18 @@ * with dim_size() equal to the total number of lookups into the table described by * the corresponding feature. */ +@OpMetadata( + opType = EnqueueTPUEmbeddingSparseTensorBatch.OP_NAME, + inputsClass = EnqueueTPUEmbeddingSparseTensorBatch.Inputs.class +) public final class EnqueueTPUEmbeddingSparseTensorBatch extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "EnqueueTPUEmbeddingSparseTensorBatch"; - private EnqueueTPUEmbeddingSparseTensorBatch(Operation operation) { - super(operation); + public EnqueueTPUEmbeddingSparseTensorBatch(Operation operation) { + super(operation, OP_NAME); } /** @@ -310,6 +316,9 @@ public Options numFeatures(Long... numFeatures) { } } + @OpInputsMetadata( + outputsClass = EnqueueTPUEmbeddingSparseTensorBatch.class + ) public static class Inputs extends RawOpInputs { /** * A list of rank 1 Tensors specifying the training example to diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/Execute.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/Execute.java index 14c939ac6f9..68e5d3a3f43 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/Execute.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/Execute.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -39,6 +41,10 @@ * Op that loads and executes a TPU program on a TPU device. * For the internal use of the distributed TPU compiler. 
*/ +@OpMetadata( + opType = Execute.OP_NAME, + inputsClass = Execute.Inputs.class +) @Operator( group = "tpu" ) @@ -51,8 +57,8 @@ public final class Execute extends RawOp implements Iterable> { private List> results; @SuppressWarnings("unchecked") - private Execute(Operation operation) { - super(operation); + public Execute(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int resultsLength = operation.outputListLength("results"); results = Arrays.asList(operation.outputList(outputIdx, resultsLength)); @@ -95,6 +101,9 @@ public Iterator> iterator() { return (Iterator) results.iterator(); } + @OpInputsMetadata( + outputsClass = Execute.class + ) public static class Inputs extends RawOpInputs { /** * The args input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ExecuteAndUpdateVariables.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ExecuteAndUpdateVariables.java index 260af4b758a..3506c5104b2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ExecuteAndUpdateVariables.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ExecuteAndUpdateVariables.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -45,6 +47,10 @@ * program outputs are consumed by these variables will not appear in the op * output. For the internal use of the distributed TPU compiler. */ +@OpMetadata( + opType = ExecuteAndUpdateVariables.OP_NAME, + inputsClass = ExecuteAndUpdateVariables.Inputs.class +) @Operator( group = "tpu" ) @@ -57,8 +63,8 @@ public final class ExecuteAndUpdateVariables extends RawOp implements Iterable> results; @SuppressWarnings("unchecked") - private ExecuteAndUpdateVariables(Operation operation) { - super(operation); + public ExecuteAndUpdateVariables(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int resultsLength = operation.outputListLength("results"); results = Arrays.asList(operation.outputList(outputIdx, resultsLength)); @@ -114,6 +120,9 @@ public Iterator> iterator() { return (Iterator) results.iterator(); } + @OpInputsMetadata( + outputsClass = ExecuteAndUpdateVariables.class + ) public static class Inputs extends RawOpInputs { /** * The args input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeue.java index 4f5e1a1dd73..a7b04bdfc2b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeue.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = InfeedDequeue.OP_NAME, + inputsClass = InfeedDequeue.Inputs.class +) public final class InfeedDequeue extends RawOp 
implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class InfeedDequeue extends RawOp implements Opera private Output output; - private InfeedDequeue(Operation operation) { - super(operation); + public InfeedDequeue(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -85,6 +91,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = InfeedDequeue.class + ) public static class Inputs extends RawOpInputs> { /** * The type of elements in the tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeueTuple.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeueTuple.java index 5dd69faeae1..92658e1ea61 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeueTuple.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeueTuple.java @@ -31,12 +31,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; /** * Fetches multiple values from infeed as an XLA tuple. */ +@OpMetadata( + opType = InfeedDequeueTuple.OP_NAME, + inputsClass = InfeedDequeueTuple.Inputs.class +) public final class InfeedDequeueTuple extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class InfeedDequeueTuple extends RawOp implements Iterable> outputs; @SuppressWarnings("unchecked") - private InfeedDequeueTuple(Operation operation) { - super(operation); + public InfeedDequeueTuple(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputsLength = operation.outputListLength("outputs"); outputs = Arrays.asList(operation.outputList(outputIdx, outputsLength)); @@ -92,6 +98,9 @@ public Iterator> iterator() { return (Iterator) outputs.iterator(); } + @OpInputsMetadata( + outputsClass = InfeedDequeueTuple.class + ) public static class Inputs extends RawOpInputs { /** * The element types of each element in `outputs`. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueue.java index 07d0f827355..019f71df8dc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueue.java @@ -28,20 +28,26 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; /** * An op which feeds a single Tensor value into the computation. 
*/ +@OpMetadata( + opType = InfeedEnqueue.OP_NAME, + inputsClass = InfeedEnqueue.Inputs.class +) public final class InfeedEnqueue extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "InfeedEnqueue"; - private InfeedEnqueue(Operation operation) { - super(operation); + public InfeedEnqueue(Operation operation) { + super(operation, OP_NAME); } /** @@ -189,6 +195,9 @@ public Options deviceOrdinal(Long deviceOrdinal) { } } + @OpInputsMetadata( + outputsClass = InfeedEnqueue.class + ) public static class Inputs extends RawOpInputs { /** * A tensor that will be provided using the infeed mechanism. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueuePrelinearizedBuffer.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueuePrelinearizedBuffer.java index 09c8647caf7..f46fec406cd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueuePrelinearizedBuffer.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueuePrelinearizedBuffer.java @@ -26,19 +26,25 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.family.TType; /** * An op which enqueues prelinearized buffer into TPU infeed. */ +@OpMetadata( + opType = InfeedEnqueuePrelinearizedBuffer.OP_NAME, + inputsClass = InfeedEnqueuePrelinearizedBuffer.Inputs.class +) public final class InfeedEnqueuePrelinearizedBuffer extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "InfeedEnqueuePrelinearizedBuffer"; - private InfeedEnqueuePrelinearizedBuffer(Operation operation) { - super(operation); + public InfeedEnqueuePrelinearizedBuffer(Operation operation) { + super(operation, OP_NAME); } /** @@ -99,6 +105,9 @@ public Options deviceOrdinal(Long deviceOrdinal) { } } + @OpInputsMetadata( + outputsClass = InfeedEnqueuePrelinearizedBuffer.class + ) public static class Inputs extends RawOpInputs { /** * A variant tensor representing linearized output. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueueTuple.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueueTuple.java index 1704d634091..807f7007e4b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueueTuple.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedEnqueueTuple.java @@ -29,19 +29,25 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; /** * Feeds multiple Tensor values into the computation as an XLA tuple. 
*/ +@OpMetadata( + opType = InfeedEnqueueTuple.OP_NAME, + inputsClass = InfeedEnqueueTuple.Inputs.class +) public final class InfeedEnqueueTuple extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "InfeedEnqueueTuple"; - private InfeedEnqueueTuple(Operation operation) { - super(operation); + public InfeedEnqueueTuple(Operation operation) { + super(operation, OP_NAME); } /** @@ -173,6 +179,9 @@ public Options deviceOrdinal(Long deviceOrdinal) { } } + @OpInputsMetadata( + outputsClass = InfeedEnqueueTuple.class + ) public static class Inputs extends RawOpInputs { /** * A list of tensors that will be provided using the infeed mechanism. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingADAMParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingADAMParameters.java index fa607e3d65f..1c0b6b3cc62 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingADAMParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingADAMParameters.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -36,14 +38,18 @@ * parameters that are loaded from a checkpoint before a training loop is * executed. */ +@OpMetadata( + opType = LoadTPUEmbeddingADAMParameters.OP_NAME, + inputsClass = LoadTPUEmbeddingADAMParameters.Inputs.class +) public final class LoadTPUEmbeddingADAMParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "LoadTPUEmbeddingADAMParameters"; - private LoadTPUEmbeddingADAMParameters(Operation operation) { - super(operation); + public LoadTPUEmbeddingADAMParameters(Operation operation) { + super(operation, OP_NAME); } /** @@ -163,6 +169,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = LoadTPUEmbeddingADAMParameters.class + ) public static class Inputs extends RawOpInputs { /** * Value of parameters used in the ADAM optimization algorithm. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingADAMParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingADAMParametersGradAccumDebug.java index 3689a78b02f..954bd247d65 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingADAMParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingADAMParametersGradAccumDebug.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -36,14 +38,18 @@ * parameters that are loaded from a checkpoint before a training loop is * executed. 
*/ +@OpMetadata( + opType = LoadTPUEmbeddingADAMParametersGradAccumDebug.OP_NAME, + inputsClass = LoadTPUEmbeddingADAMParametersGradAccumDebug.Inputs.class +) public final class LoadTPUEmbeddingADAMParametersGradAccumDebug extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "LoadTPUEmbeddingADAMParametersGradAccumDebug"; - private LoadTPUEmbeddingADAMParametersGradAccumDebug(Operation operation) { - super(operation); + public LoadTPUEmbeddingADAMParametersGradAccumDebug(Operation operation) { + super(operation, OP_NAME); } /** @@ -165,6 +171,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = LoadTPUEmbeddingADAMParametersGradAccumDebug.class + ) public static class Inputs extends RawOpInputs { /** * Value of parameters used in the ADAM optimization algorithm. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdadeltaParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdadeltaParameters.java index bc573e662ae..62ea77c23fd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdadeltaParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdadeltaParameters.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -36,14 +38,18 @@ * parameters that are loaded from a checkpoint before a training loop is * executed. */ +@OpMetadata( + opType = LoadTPUEmbeddingAdadeltaParameters.OP_NAME, + inputsClass = LoadTPUEmbeddingAdadeltaParameters.Inputs.class +) public final class LoadTPUEmbeddingAdadeltaParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "LoadTPUEmbeddingAdadeltaParameters"; - private LoadTPUEmbeddingAdadeltaParameters(Operation operation) { - super(operation); + public LoadTPUEmbeddingAdadeltaParameters(Operation operation) { + super(operation, OP_NAME); } /** @@ -163,6 +169,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = LoadTPUEmbeddingAdadeltaParameters.class + ) public static class Inputs extends RawOpInputs { /** * Value of parameters used in the Adadelta optimization algorithm. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.java index 55c53423e92..0aaa8ccf6d3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -36,14 +38,18 @@ * parameters that are loaded from a checkpoint before a training loop is * executed. 
*/ +@OpMetadata( + opType = LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.OP_NAME, + inputsClass = LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.Inputs.class +) public final class LoadTPUEmbeddingAdadeltaParametersGradAccumDebug extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "LoadTPUEmbeddingAdadeltaParametersGradAccumDebug"; - private LoadTPUEmbeddingAdadeltaParametersGradAccumDebug(Operation operation) { - super(operation); + public LoadTPUEmbeddingAdadeltaParametersGradAccumDebug(Operation operation) { + super(operation, OP_NAME); } /** @@ -165,6 +171,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.class + ) public static class Inputs extends RawOpInputs { /** * Value of parameters used in the Adadelta optimization algorithm. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradParameters.java index 4932fcb0f30..14136f03a9d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradParameters.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -36,14 +38,18 @@ * parameters that are loaded from a checkpoint before a training loop is * executed. */ +@OpMetadata( + opType = LoadTPUEmbeddingAdagradParameters.OP_NAME, + inputsClass = LoadTPUEmbeddingAdagradParameters.Inputs.class +) public final class LoadTPUEmbeddingAdagradParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "LoadTPUEmbeddingAdagradParameters"; - private LoadTPUEmbeddingAdagradParameters(Operation operation) { - super(operation); + public LoadTPUEmbeddingAdagradParameters(Operation operation) { + super(operation, OP_NAME); } /** @@ -160,6 +166,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = LoadTPUEmbeddingAdagradParameters.class + ) public static class Inputs extends RawOpInputs { /** * Value of parameters used in the Adagrad optimization algorithm. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradParametersGradAccumDebug.java index 6c6982d38b9..e373ad25faa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradParametersGradAccumDebug.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -36,14 +38,18 @@ * parameters that are loaded from a checkpoint before a training loop is * executed. 
*/ +@OpMetadata( + opType = LoadTPUEmbeddingAdagradParametersGradAccumDebug.OP_NAME, + inputsClass = LoadTPUEmbeddingAdagradParametersGradAccumDebug.Inputs.class +) public final class LoadTPUEmbeddingAdagradParametersGradAccumDebug extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "LoadTPUEmbeddingAdagradParametersGradAccumDebug"; - private LoadTPUEmbeddingAdagradParametersGradAccumDebug(Operation operation) { - super(operation); + public LoadTPUEmbeddingAdagradParametersGradAccumDebug(Operation operation) { + super(operation, OP_NAME); } /** @@ -163,6 +169,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = LoadTPUEmbeddingAdagradParametersGradAccumDebug.class + ) public static class Inputs extends RawOpInputs { /** * Value of parameters used in the Adagrad optimization algorithm. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingCenteredRMSPropParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingCenteredRMSPropParameters.java index 5aa18411600..ec974b52bf6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingCenteredRMSPropParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingCenteredRMSPropParameters.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -36,14 +38,18 @@ * parameters that are loaded from a checkpoint before a training loop is * executed. */ +@OpMetadata( + opType = LoadTPUEmbeddingCenteredRMSPropParameters.OP_NAME, + inputsClass = LoadTPUEmbeddingCenteredRMSPropParameters.Inputs.class +) public final class LoadTPUEmbeddingCenteredRMSPropParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "LoadTPUEmbeddingCenteredRMSPropParameters"; - private LoadTPUEmbeddingCenteredRMSPropParameters(Operation operation) { - super(operation); + public LoadTPUEmbeddingCenteredRMSPropParameters(Operation operation) { + super(operation, OP_NAME); } /** @@ -165,6 +171,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = LoadTPUEmbeddingCenteredRMSPropParameters.class + ) public static class Inputs extends RawOpInputs { /** * Value of parameters used in the centered RMSProp optimization algorithm. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFTRLParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFTRLParameters.java index 77c88b54456..c4f8fa36ad9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFTRLParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFTRLParameters.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -36,14 +38,18 @@ * parameters that are loaded from a checkpoint before a training loop is * executed. */ +@OpMetadata( + opType = LoadTPUEmbeddingFTRLParameters.OP_NAME, + inputsClass = LoadTPUEmbeddingFTRLParameters.Inputs.class +) public final class LoadTPUEmbeddingFTRLParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "LoadTPUEmbeddingFTRLParameters"; - private LoadTPUEmbeddingFTRLParameters(Operation operation) { - super(operation); + public LoadTPUEmbeddingFTRLParameters(Operation operation) { + super(operation, OP_NAME); } /** @@ -163,6 +169,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = LoadTPUEmbeddingFTRLParameters.class + ) public static class Inputs extends RawOpInputs { /** * Value of parameters used in the FTRL optimization algorithm. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFTRLParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFTRLParametersGradAccumDebug.java index 78438e9fec9..40369244b28 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFTRLParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFTRLParametersGradAccumDebug.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -36,14 +38,18 @@ * parameters that are loaded from a checkpoint before a training loop is * executed. */ +@OpMetadata( + opType = LoadTPUEmbeddingFTRLParametersGradAccumDebug.OP_NAME, + inputsClass = LoadTPUEmbeddingFTRLParametersGradAccumDebug.Inputs.class +) public final class LoadTPUEmbeddingFTRLParametersGradAccumDebug extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "LoadTPUEmbeddingFTRLParametersGradAccumDebug"; - private LoadTPUEmbeddingFTRLParametersGradAccumDebug(Operation operation) { - super(operation); + public LoadTPUEmbeddingFTRLParametersGradAccumDebug(Operation operation) { + super(operation, OP_NAME); } /** @@ -165,6 +171,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = LoadTPUEmbeddingFTRLParametersGradAccumDebug.class + ) public static class Inputs extends RawOpInputs { /** * Value of parameters used in the FTRL optimization algorithm. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMDLAdagradLightParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMDLAdagradLightParameters.java index 111f9c8541b..0356769ccd8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMDLAdagradLightParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMDLAdagradLightParameters.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -36,14 +38,18 @@ * parameters that are loaded from a checkpoint before a training loop is * executed. */ +@OpMetadata( + opType = LoadTPUEmbeddingMDLAdagradLightParameters.OP_NAME, + inputsClass = LoadTPUEmbeddingMDLAdagradLightParameters.Inputs.class +) public final class LoadTPUEmbeddingMDLAdagradLightParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "LoadTPUEmbeddingMDLAdagradLightParameters"; - private LoadTPUEmbeddingMDLAdagradLightParameters(Operation operation) { - super(operation); + public LoadTPUEmbeddingMDLAdagradLightParameters(Operation operation) { + super(operation, OP_NAME); } /** @@ -165,6 +171,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = LoadTPUEmbeddingMDLAdagradLightParameters.class + ) public static class Inputs extends RawOpInputs { /** * Value of parameters used in the MDL Adagrad Light optimization algorithm. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMomentumParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMomentumParameters.java index 1abf0471457..33c1c6e162d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMomentumParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMomentumParameters.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -36,14 +38,18 @@ * parameters that are loaded from a checkpoint before a training loop is * executed. */ +@OpMetadata( + opType = LoadTPUEmbeddingMomentumParameters.OP_NAME, + inputsClass = LoadTPUEmbeddingMomentumParameters.Inputs.class +) public final class LoadTPUEmbeddingMomentumParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "LoadTPUEmbeddingMomentumParameters"; - private LoadTPUEmbeddingMomentumParameters(Operation operation) { - super(operation); + public LoadTPUEmbeddingMomentumParameters(Operation operation) { + super(operation, OP_NAME); } /** @@ -160,6 +166,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = LoadTPUEmbeddingMomentumParameters.class + ) public static class Inputs extends RawOpInputs { /** * Value of parameters used in the Momentum optimization algorithm. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMomentumParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMomentumParametersGradAccumDebug.java index ba22bbb8363..56967a766c6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMomentumParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMomentumParametersGradAccumDebug.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -36,14 +38,18 @@ * parameters that are loaded from a checkpoint before a training loop is * executed. */ +@OpMetadata( + opType = LoadTPUEmbeddingMomentumParametersGradAccumDebug.OP_NAME, + inputsClass = LoadTPUEmbeddingMomentumParametersGradAccumDebug.Inputs.class +) public final class LoadTPUEmbeddingMomentumParametersGradAccumDebug extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "LoadTPUEmbeddingMomentumParametersGradAccumDebug"; - private LoadTPUEmbeddingMomentumParametersGradAccumDebug(Operation operation) { - super(operation); + public LoadTPUEmbeddingMomentumParametersGradAccumDebug(Operation operation) { + super(operation, OP_NAME); } /** @@ -163,6 +169,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = LoadTPUEmbeddingMomentumParametersGradAccumDebug.class + ) public static class Inputs extends RawOpInputs { /** * Value of parameters used in the Momentum optimization algorithm. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalAdagradParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalAdagradParameters.java index 9306ca0d29f..5f1fbb072f4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalAdagradParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalAdagradParameters.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -36,14 +38,18 @@ * parameters that are loaded from a checkpoint before a training loop is * executed. 
*/ +@OpMetadata( + opType = LoadTPUEmbeddingProximalAdagradParameters.OP_NAME, + inputsClass = LoadTPUEmbeddingProximalAdagradParameters.Inputs.class +) public final class LoadTPUEmbeddingProximalAdagradParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "LoadTPUEmbeddingProximalAdagradParameters"; - private LoadTPUEmbeddingProximalAdagradParameters(Operation operation) { - super(operation); + public LoadTPUEmbeddingProximalAdagradParameters(Operation operation) { + super(operation, OP_NAME); } /** @@ -161,6 +167,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = LoadTPUEmbeddingProximalAdagradParameters.class + ) public static class Inputs extends RawOpInputs { /** * Value of parameters used in the proximal Adagrad optimization algorithm. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug.java index 8f1a7957da3..2096552062b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -36,14 +38,18 @@ * parameters that are loaded from a checkpoint before a training loop is * executed. */ +@OpMetadata( + opType = LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug.OP_NAME, + inputsClass = LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug.Inputs.class +) public final class LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug"; - private LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug(Operation operation) { - super(operation); + public LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug(Operation operation) { + super(operation, OP_NAME); } /** @@ -163,6 +169,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug.class + ) public static class Inputs extends RawOpInputs { /** * Value of parameters used in the proximal Adagrad optimization algorithm. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalYogiParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalYogiParameters.java index 424542eadb2..c108ce861fd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalYogiParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalYogiParameters.java @@ -26,19 +26,25 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** * The LoadTPUEmbeddingProximalYogiParameters operation */ +@OpMetadata( + opType = LoadTPUEmbeddingProximalYogiParameters.OP_NAME, + inputsClass = LoadTPUEmbeddingProximalYogiParameters.Inputs.class +) public final class LoadTPUEmbeddingProximalYogiParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "LoadTPUEmbeddingProximalYogiParameters"; - private LoadTPUEmbeddingProximalYogiParameters(Operation operation) { - super(operation); + public LoadTPUEmbeddingProximalYogiParameters(Operation operation) { + super(operation, OP_NAME); } /** @@ -158,6 +164,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = LoadTPUEmbeddingProximalYogiParameters.class + ) public static class Inputs extends RawOpInputs { /** * The parameters input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalYogiParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalYogiParametersGradAccumDebug.java index 4c3dfed12dc..94a1f0926ab 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalYogiParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalYogiParametersGradAccumDebug.java @@ -26,19 +26,25 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** * The LoadTPUEmbeddingProximalYogiParametersGradAccumDebug operation */ +@OpMetadata( + opType = LoadTPUEmbeddingProximalYogiParametersGradAccumDebug.OP_NAME, + inputsClass = LoadTPUEmbeddingProximalYogiParametersGradAccumDebug.Inputs.class +) public final class LoadTPUEmbeddingProximalYogiParametersGradAccumDebug extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "LoadTPUEmbeddingProximalYogiParametersGradAccumDebug"; - private LoadTPUEmbeddingProximalYogiParametersGradAccumDebug(Operation operation) { - super(operation); + public LoadTPUEmbeddingProximalYogiParametersGradAccumDebug(Operation operation) { + super(operation, OP_NAME); } /** @@ -160,6 +166,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = LoadTPUEmbeddingProximalYogiParametersGradAccumDebug.class + ) public static class Inputs extends RawOpInputs { /** * The parameters input diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingRMSPropParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingRMSPropParameters.java index 69e422ee802..30cdfa94252 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingRMSPropParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingRMSPropParameters.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -36,14 +38,18 @@ * parameters that are loaded from a checkpoint before a training loop is * executed. */ +@OpMetadata( + opType = LoadTPUEmbeddingRMSPropParameters.OP_NAME, + inputsClass = LoadTPUEmbeddingRMSPropParameters.Inputs.class +) public final class LoadTPUEmbeddingRMSPropParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "LoadTPUEmbeddingRMSPropParameters"; - private LoadTPUEmbeddingRMSPropParameters(Operation operation) { - super(operation); + public LoadTPUEmbeddingRMSPropParameters(Operation operation) { + super(operation, OP_NAME); } /** @@ -163,6 +169,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = LoadTPUEmbeddingRMSPropParameters.class + ) public static class Inputs extends RawOpInputs { /** * Value of parameters used in the RMSProp optimization algorithm. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingRMSPropParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingRMSPropParametersGradAccumDebug.java index 3f987e4beac..c6d8c3a4800 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingRMSPropParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingRMSPropParametersGradAccumDebug.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -36,14 +38,18 @@ * parameters that are loaded from a checkpoint before a training loop is * executed. */ +@OpMetadata( + opType = LoadTPUEmbeddingRMSPropParametersGradAccumDebug.OP_NAME, + inputsClass = LoadTPUEmbeddingRMSPropParametersGradAccumDebug.Inputs.class +) public final class LoadTPUEmbeddingRMSPropParametersGradAccumDebug extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "LoadTPUEmbeddingRMSPropParametersGradAccumDebug"; - private LoadTPUEmbeddingRMSPropParametersGradAccumDebug(Operation operation) { - super(operation); + public LoadTPUEmbeddingRMSPropParametersGradAccumDebug(Operation operation) { + super(operation, OP_NAME); } /** @@ -165,6 +171,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = LoadTPUEmbeddingRMSPropParametersGradAccumDebug.class + ) public static class Inputs extends RawOpInputs { /** * Value of parameters used in the RMSProp optimization algorithm. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingStochasticGradientDescentParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingStochasticGradientDescentParameters.java index 62e73a01c52..b43487c0c8b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingStochasticGradientDescentParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingStochasticGradientDescentParameters.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -36,14 +38,18 @@ * parameters that are loaded from a checkpoint before a training loop is * executed. */ +@OpMetadata( + opType = LoadTPUEmbeddingStochasticGradientDescentParameters.OP_NAME, + inputsClass = LoadTPUEmbeddingStochasticGradientDescentParameters.Inputs.class +) public final class LoadTPUEmbeddingStochasticGradientDescentParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "LoadTPUEmbeddingStochasticGradientDescentParameters"; - private LoadTPUEmbeddingStochasticGradientDescentParameters(Operation operation) { - super(operation); + public LoadTPUEmbeddingStochasticGradientDescentParameters(Operation operation) { + super(operation, OP_NAME); } /** @@ -158,6 +164,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = LoadTPUEmbeddingStochasticGradientDescentParameters.class + ) public static class Inputs extends RawOpInputs { /** * Value of parameters used in the stochastic gradient descent optimization algorithm. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.java index dd5fd61bc44..01307c9da5e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -36,14 +38,18 @@ * parameters that are loaded from a checkpoint before a training loop is * executed. 
*/ +@OpMetadata( + opType = LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.OP_NAME, + inputsClass = LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.Inputs.class +) public final class LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug"; - private LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug(Operation operation) { - super(operation); + public LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug(Operation operation) { + super(operation, OP_NAME); } /** @@ -161,6 +167,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.class + ) public static class Inputs extends RawOpInputs { /** * Value of parameters used in the stochastic gradient descent optimization algorithm. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OrdinalSelector.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OrdinalSelector.java index c026d5f55a4..eb737021f93 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OrdinalSelector.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OrdinalSelector.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TInt32; /** @@ -35,6 +37,10 @@ * (for regular inference) to execute the TPU program on. The output is * consumed by TPUPartitionedCall. 
*/ +@OpMetadata( + opType = OrdinalSelector.OP_NAME, + inputsClass = OrdinalSelector.Inputs.class +) public final class OrdinalSelector extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class OrdinalSelector extends RawOp implements Operand { private Output deviceOrdinals; - private OrdinalSelector(Operation operation) { - super(operation); + public OrdinalSelector(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; deviceOrdinals = operation.output(outputIdx++); } @@ -77,6 +83,9 @@ public Output asOutput() { return deviceOrdinals; } + @OpInputsMetadata( + outputsClass = OrdinalSelector.class + ) public static class Inputs extends RawOpInputs { public Inputs(GraphOperation op) { super(new OrdinalSelector(op), op, Arrays.asList()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeue.java index b2cd4412cae..26d20976632 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeue.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = OutfeedDequeue.OP_NAME, + inputsClass = OutfeedDequeue.Inputs.class +) public final class OutfeedDequeue extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class OutfeedDequeue extends RawOp implements Oper private Output output; - private OutfeedDequeue(Operation operation) { - super(operation); + public OutfeedDequeue(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -129,6 +135,9 @@ public Options deviceOrdinal(Long deviceOrdinal) { } } + @OpInputsMetadata( + outputsClass = OutfeedDequeue.class + ) public static class Inputs extends RawOpInputs> { /** * The type of elements in the tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueTuple.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueTuple.java index c667cf7a4ed..a14130ea768 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueTuple.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueTuple.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * This operation will block indefinitely until data is available. Output {@code i} * corresponds to XLA tuple element {@code i}. 
*/ +@OpMetadata( + opType = OutfeedDequeueTuple.OP_NAME, + inputsClass = OutfeedDequeueTuple.Inputs.class +) public final class OutfeedDequeueTuple extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine @@ -48,8 +54,8 @@ public final class OutfeedDequeueTuple extends RawOp implements Iterable> outputs; @SuppressWarnings("unchecked") - private OutfeedDequeueTuple(Operation operation) { - super(operation); + public OutfeedDequeueTuple(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputsLength = operation.outputListLength("outputs"); outputs = Arrays.asList(operation.outputList(outputIdx, outputsLength)); @@ -137,6 +143,9 @@ public Options deviceOrdinal(Long deviceOrdinal) { } } + @OpInputsMetadata( + outputsClass = OutfeedDequeueTuple.class + ) public static class Inputs extends RawOpInputs { /** * The element types of each element in `outputs`. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueTupleV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueTupleV2.java index 328e3a7da5f..84331496731 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueTupleV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueTupleV2.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -41,6 +43,10 @@ * This operation will block indefinitely until data is available. Output {@code i} * corresponds to XLA tuple element {@code i}. */ +@OpMetadata( + opType = OutfeedDequeueTupleV2.OP_NAME, + inputsClass = OutfeedDequeueTupleV2.Inputs.class +) public final class OutfeedDequeueTupleV2 extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class OutfeedDequeueTupleV2 extends RawOp implements Iterable> outputs; @SuppressWarnings("unchecked") - private OutfeedDequeueTupleV2(Operation operation) { - super(operation); + public OutfeedDequeueTupleV2(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputsLength = operation.outputListLength("outputs"); outputs = Arrays.asList(operation.outputList(outputIdx, outputsLength)); @@ -100,6 +106,9 @@ public Iterator> iterator() { return (Iterator) outputs.iterator(); } + @OpInputsMetadata( + outputsClass = OutfeedDequeueTupleV2.class + ) public static class Inputs extends RawOpInputs { /** * An int scalar tensor, representing the TPU device to use. 
This should be -1 when diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueV2.java index bdd0a7f4157..19bdb087c4d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueV2.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -40,6 +42,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = OutfeedDequeueV2.OP_NAME, + inputsClass = OutfeedDequeueV2.Inputs.class +) public final class OutfeedDequeueV2 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -48,8 +54,8 @@ public final class OutfeedDequeueV2 extends RawOp implements Op private Output output; - private OutfeedDequeueV2(Operation operation) { - super(operation); + public OutfeedDequeueV2(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -92,6 +98,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = OutfeedDequeueV2.class + ) public static class Inputs extends RawOpInputs> { /** * An int scalar tensor, representing the TPU device to use. This should be -1 when diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedEnqueue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedEnqueue.java index effdad173b0..a934cf681d1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedEnqueue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedEnqueue.java @@ -26,20 +26,26 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; /** * Enqueue a Tensor on the computation outfeed. */ +@OpMetadata( + opType = OutfeedEnqueue.OP_NAME, + inputsClass = OutfeedEnqueue.Inputs.class +) public final class OutfeedEnqueue extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "OutfeedEnqueue"; - private OutfeedEnqueue(Operation operation) { - super(operation); + public OutfeedEnqueue(Operation operation) { + super(operation, OP_NAME); } /** @@ -58,6 +64,9 @@ public static OutfeedEnqueue create(Scope scope, Operand input) return new OutfeedEnqueue(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = OutfeedEnqueue.class + ) public static class Inputs extends RawOpInputs { /** * A tensor that will be inserted into the outfeed queue. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedEnqueueTuple.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedEnqueueTuple.java index 5e915899fd6..158315c18f7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedEnqueueTuple.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedEnqueueTuple.java @@ -27,19 +27,25 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; /** * Enqueue multiple Tensor values on the computation outfeed. */ +@OpMetadata( + opType = OutfeedEnqueueTuple.OP_NAME, + inputsClass = OutfeedEnqueueTuple.Inputs.class +) public final class OutfeedEnqueueTuple extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "OutfeedEnqueueTuple"; - private OutfeedEnqueueTuple(Operation operation) { - super(operation); + public OutfeedEnqueueTuple(Operation operation) { + super(operation, OP_NAME); } /** @@ -59,6 +65,9 @@ public static OutfeedEnqueueTuple create(Scope scope, Iterable> input return new OutfeedEnqueueTuple(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = OutfeedEnqueueTuple.class + ) public static class Inputs extends RawOpInputs { /** * A list of tensors that will be inserted into the outfeed queue as an diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedCall.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedCall.java index a85442af958..2e9e97399af 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedCall.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedCall.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ /** * Calls a function placed on a specified TPU device. */ +@OpMetadata( + opType = PartitionedCall.OP_NAME, + inputsClass = PartitionedCall.Inputs.class +) public final class PartitionedCall extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine @@ -47,8 +53,8 @@ public final class PartitionedCall extends RawOp implements Iterable> output; @SuppressWarnings("unchecked") - private PartitionedCall(Operation operation) { - super(operation); + public PartitionedCall(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputLength = operation.outputListLength("output"); output = Arrays.asList(operation.outputList(outputIdx, outputLength)); @@ -133,6 +139,9 @@ public Options autotunerThresh(Long autotunerThresh) { } } + @OpInputsMetadata( + outputsClass = PartitionedCall.class + ) public static class Inputs extends RawOpInputs { /** * The arguments to the function. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedInput.java index 8e080b4fd05..3800a656fc4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedInput.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = PartitionedInput.OP_NAME, + inputsClass = PartitionedInput.Inputs.class +) @Operator( group = "tpu" ) @@ -48,8 +54,8 @@ public final class PartitionedInput extends RawOp implements Op private Output output; - private PartitionedInput(Operation operation) { - super(operation); + public PartitionedInput(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -127,6 +133,9 @@ public Options partitionDim(Long partitionDim) { } } + @OpInputsMetadata( + outputsClass = PartitionedInput.class + ) public static class Inputs extends RawOpInputs> { /** * A list of partitioned inputs which must have the same shape. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedOutput.java index 626610bad5e..4bf51f2ba49 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedOutput.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = PartitionedOutput.OP_NAME, + inputsClass = PartitionedOutput.Inputs.class +) @Operator( group = "tpu" ) @@ -51,8 +57,8 @@ public final class PartitionedOutput extends RawOp implements I private List> output; @SuppressWarnings("unchecked") - private PartitionedOutput(Operation operation) { - super(operation); + public PartitionedOutput(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputLength = operation.outputListLength("output"); output = Arrays.asList((Output[]) operation.outputList(outputIdx, outputLength)); @@ -133,6 +139,9 @@ public Options partitionDim(Long partitionDim) { } } + @OpInputsMetadata( + outputsClass = PartitionedOutput.class + ) public static class Inputs extends RawOpInputs> { /** * A tensor which represents the full shape of partitioned tensors. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/Prelinearize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/Prelinearize.java index ddb2f379660..df3eddd9523 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/Prelinearize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/Prelinearize.java @@ -29,12 +29,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; /** * An op which linearizes one Tensor value to an opaque variant tensor. */ +@OpMetadata( + opType = Prelinearize.OP_NAME, + inputsClass = Prelinearize.Inputs.class +) public final class Prelinearize extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class Prelinearize extends RawOp implements Operand { private Output output; @SuppressWarnings("unchecked") - private Prelinearize(Operation operation) { - super(operation); + public Prelinearize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -180,6 +186,9 @@ public Options layout(Long... layout) { } } + @OpInputsMetadata( + outputsClass = Prelinearize.class + ) public static class Inputs extends RawOpInputs { /** * A tensor that will be linearized. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PrelinearizeTuple.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PrelinearizeTuple.java index 344cab1a66a..bb9d386ff87 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PrelinearizeTuple.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PrelinearizeTuple.java @@ -30,12 +30,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; /** * An op which linearizes multiple Tensor values to an opaque variant tensor. */ +@OpMetadata( + opType = PrelinearizeTuple.OP_NAME, + inputsClass = PrelinearizeTuple.Inputs.class +) public final class PrelinearizeTuple extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class PrelinearizeTuple extends RawOp implements Operand { private Output output; @SuppressWarnings("unchecked") - private PrelinearizeTuple(Operation operation) { - super(operation); + public PrelinearizeTuple(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -165,6 +171,9 @@ public Options layouts(Long... layouts) { } } + @OpInputsMetadata( + outputsClass = PrelinearizeTuple.class + ) public static class Inputs extends RawOpInputs { /** * A list of tensors that will be provided using the infeed mechanism. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RecvTPUEmbeddingActivations.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RecvTPUEmbeddingActivations.java index 4dc37a810c5..b603c292192 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RecvTPUEmbeddingActivations.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RecvTPUEmbeddingActivations.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -40,6 +42,10 @@ * one Tensor of activations per table specified in the model. There can be at * most one RecvTPUEmbeddingActivations op in the TPU graph. */ +@OpMetadata( + opType = RecvTPUEmbeddingActivations.OP_NAME, + inputsClass = RecvTPUEmbeddingActivations.Inputs.class +) public final class RecvTPUEmbeddingActivations extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine @@ -49,8 +55,8 @@ public final class RecvTPUEmbeddingActivations extends RawOp implements Iterable private List> outputs; @SuppressWarnings("unchecked") - private RecvTPUEmbeddingActivations(Operation operation) { - super(operation); + public RecvTPUEmbeddingActivations(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputsLength = operation.outputListLength("outputs"); outputs = Arrays.asList((Output[]) operation.outputList(outputIdx, outputsLength)); @@ -92,6 +98,9 @@ public Iterator> iterator() { return (Iterator) outputs.iterator(); } + @OpInputsMetadata( + outputsClass = RecvTPUEmbeddingActivations.class + ) public static class Inputs extends RawOpInputs { /** * Serialized TPUEmbeddingConfiguration proto. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicateMetadata.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicateMetadata.java index 8e12a13b887..1696d439318 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicateMetadata.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicateMetadata.java @@ -26,19 +26,25 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; /** * Metadata indicating how the TPU computation should be replicated. * This operation holds the metadata common to operations of a {@code tpu.replicate()} computation subgraph. 
*/ +@OpMetadata( + opType = ReplicateMetadata.OP_NAME, + inputsClass = ReplicateMetadata.Inputs.class +) public final class ReplicateMetadata extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "TPUReplicateMetadata"; - private ReplicateMetadata(Operation operation) { - super(operation); + public ReplicateMetadata(Operation operation) { + super(operation, OP_NAME); } /** @@ -430,6 +436,9 @@ public Options useSpmdForXlaPartitioning(Boolean useSpmdForXlaPartitioning) { } } + @OpInputsMetadata( + outputsClass = ReplicateMetadata.class + ) public static class Inputs extends RawOpInputs { /** * Number of replicas of the computation diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedInput.java index 1bfa9e59e25..7aabbce711a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedInput.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -46,6 +48,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ReplicatedInput.OP_NAME, + inputsClass = ReplicatedInput.Inputs.class +) public final class ReplicatedInput extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -54,8 +60,8 @@ public final class ReplicatedInput extends RawOp implements Ope private Output output; - private ReplicatedInput(Operation operation) { - super(operation); + public ReplicatedInput(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -183,6 +189,9 @@ public Options isPacked(Boolean isPacked) { } } + @OpInputsMetadata( + outputsClass = ReplicatedInput.class + ) public static class Inputs extends RawOpInputs> { /** * The inputs input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedOutput.java index f644763c1e8..6d956b0ef19 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedOutput.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -45,6 +47,10 @@ * * @param data type for {@code outputs} output */ +@OpMetadata( + opType = ReplicatedOutput.OP_NAME, + inputsClass = ReplicatedOutput.Inputs.class +) public final class ReplicatedOutput extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine @@ -54,8 +60,8 @@ public final class ReplicatedOutput extends RawOp implements It private List> outputs; @SuppressWarnings("unchecked") - private ReplicatedOutput(Operation operation) { - super(operation); 
+ public ReplicatedOutput(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputsLength = operation.outputListLength("outputs"); outputs = Arrays.asList((Output[]) operation.outputList(outputIdx, outputsLength)); @@ -97,6 +103,9 @@ public Iterator> iterator() { return (Iterator) outputs.iterator(); } + @OpInputsMetadata( + outputsClass = ReplicatedOutput.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingADAMParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingADAMParameters.java index d05f62e2f3e..122e62b05f0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingADAMParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingADAMParameters.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -35,6 +37,10 @@ * the correct embedding table configuration. For example, this op is * used to retrieve updated parameters before saving a checkpoint. */ +@OpMetadata( + opType = RetrieveTPUEmbeddingADAMParameters.OP_NAME, + inputsClass = RetrieveTPUEmbeddingADAMParameters.Inputs.class +) public final class RetrieveTPUEmbeddingADAMParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -47,8 +53,8 @@ public final class RetrieveTPUEmbeddingADAMParameters extends RawOp { private Output velocities; - private RetrieveTPUEmbeddingADAMParameters(Operation operation) { - super(operation); + public RetrieveTPUEmbeddingADAMParameters(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; parameters = operation.output(outputIdx++); momenta = operation.output(outputIdx++); @@ -192,6 +198,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = RetrieveTPUEmbeddingADAMParameters.class + ) public static class Inputs extends RawOpInputs { /** * The tableId attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingADAMParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingADAMParametersGradAccumDebug.java index c08259b0d89..e598b5ec412 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingADAMParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingADAMParametersGradAccumDebug.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -35,6 +37,10 @@ * the correct embedding table configuration. For example, this op is * used to retrieve updated parameters before saving a checkpoint. 
*/ +@OpMetadata( + opType = RetrieveTPUEmbeddingADAMParametersGradAccumDebug.OP_NAME, + inputsClass = RetrieveTPUEmbeddingADAMParametersGradAccumDebug.Inputs.class +) public final class RetrieveTPUEmbeddingADAMParametersGradAccumDebug extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -49,8 +55,8 @@ public final class RetrieveTPUEmbeddingADAMParametersGradAccumDebug extends RawO private Output gradientAccumulators; - private RetrieveTPUEmbeddingADAMParametersGradAccumDebug(Operation operation) { - super(operation); + public RetrieveTPUEmbeddingADAMParametersGradAccumDebug(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; parameters = operation.output(outputIdx++); momenta = operation.output(outputIdx++); @@ -204,6 +210,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = RetrieveTPUEmbeddingADAMParametersGradAccumDebug.class + ) public static class Inputs extends RawOpInputs { /** * The tableId attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdadeltaParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdadeltaParameters.java index d7f5626f574..5b38fd50f80 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdadeltaParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdadeltaParameters.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -35,6 +37,10 @@ * the correct embedding table configuration. For example, this op is * used to retrieve updated parameters before saving a checkpoint. 
*/ +@OpMetadata( + opType = RetrieveTPUEmbeddingAdadeltaParameters.OP_NAME, + inputsClass = RetrieveTPUEmbeddingAdadeltaParameters.Inputs.class +) public final class RetrieveTPUEmbeddingAdadeltaParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -47,8 +53,8 @@ public final class RetrieveTPUEmbeddingAdadeltaParameters extends RawOp { private Output updates; - private RetrieveTPUEmbeddingAdadeltaParameters(Operation operation) { - super(operation); + public RetrieveTPUEmbeddingAdadeltaParameters(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; parameters = operation.output(outputIdx++); accumulators = operation.output(outputIdx++); @@ -192,6 +198,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = RetrieveTPUEmbeddingAdadeltaParameters.class + ) public static class Inputs extends RawOpInputs { /** * The tableId attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.java index 7740306b350..054fdbfc2cf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -35,6 +37,10 @@ * the correct embedding table configuration. For example, this op is * used to retrieve updated parameters before saving a checkpoint. 
*/ +@OpMetadata( + opType = RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.OP_NAME, + inputsClass = RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.Inputs.class +) public final class RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -49,8 +55,8 @@ public final class RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug extends private Output gradientAccumulators; - private RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug(Operation operation) { - super(operation); + public RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; parameters = operation.output(outputIdx++); accumulators = operation.output(outputIdx++); @@ -204,6 +210,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.class + ) public static class Inputs extends RawOpInputs { /** * The tableId attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradParameters.java index 64762a31449..4f77f80451a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradParameters.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -35,6 +37,10 @@ * the correct embedding table configuration. For example, this op is * used to retrieve updated parameters before saving a checkpoint. 
*/ +@OpMetadata( + opType = RetrieveTPUEmbeddingAdagradParameters.OP_NAME, + inputsClass = RetrieveTPUEmbeddingAdagradParameters.Inputs.class +) public final class RetrieveTPUEmbeddingAdagradParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class RetrieveTPUEmbeddingAdagradParameters extends RawOp { private Output accumulators; - private RetrieveTPUEmbeddingAdagradParameters(Operation operation) { - super(operation); + public RetrieveTPUEmbeddingAdagradParameters(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; parameters = operation.output(outputIdx++); accumulators = operation.output(outputIdx++); @@ -180,6 +186,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = RetrieveTPUEmbeddingAdagradParameters.class + ) public static class Inputs extends RawOpInputs { /** * The tableId attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradParametersGradAccumDebug.java index 78c29954df4..cf2c37361ec 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradParametersGradAccumDebug.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -35,6 +37,10 @@ * the correct embedding table configuration. For example, this op is * used to retrieve updated parameters before saving a checkpoint. 
*/ +@OpMetadata( + opType = RetrieveTPUEmbeddingAdagradParametersGradAccumDebug.OP_NAME, + inputsClass = RetrieveTPUEmbeddingAdagradParametersGradAccumDebug.Inputs.class +) public final class RetrieveTPUEmbeddingAdagradParametersGradAccumDebug extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -47,8 +53,8 @@ public final class RetrieveTPUEmbeddingAdagradParametersGradAccumDebug extends R private Output gradientAccumulators; - private RetrieveTPUEmbeddingAdagradParametersGradAccumDebug(Operation operation) { - super(operation); + public RetrieveTPUEmbeddingAdagradParametersGradAccumDebug(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; parameters = operation.output(outputIdx++); accumulators = operation.output(outputIdx++); @@ -192,6 +198,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = RetrieveTPUEmbeddingAdagradParametersGradAccumDebug.class + ) public static class Inputs extends RawOpInputs { /** * The tableId attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingCenteredRMSPropParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingCenteredRMSPropParameters.java index a4dd7ba910e..9db53a133f4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingCenteredRMSPropParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingCenteredRMSPropParameters.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -35,6 +37,10 @@ * the correct embedding table configuration. For example, this op is * used to retrieve updated parameters before saving a checkpoint. 
*/ +@OpMetadata( + opType = RetrieveTPUEmbeddingCenteredRMSPropParameters.OP_NAME, + inputsClass = RetrieveTPUEmbeddingCenteredRMSPropParameters.Inputs.class +) public final class RetrieveTPUEmbeddingCenteredRMSPropParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -49,8 +55,8 @@ public final class RetrieveTPUEmbeddingCenteredRMSPropParameters extends RawOp { private Output mg; - private RetrieveTPUEmbeddingCenteredRMSPropParameters(Operation operation) { - super(operation); + public RetrieveTPUEmbeddingCenteredRMSPropParameters(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; parameters = operation.output(outputIdx++); ms = operation.output(outputIdx++); @@ -204,6 +210,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = RetrieveTPUEmbeddingCenteredRMSPropParameters.class + ) public static class Inputs extends RawOpInputs { /** * The tableId attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFTRLParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFTRLParameters.java index 6e1ab7a50c0..cd876266705 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFTRLParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFTRLParameters.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -35,6 +37,10 @@ * the correct embedding table configuration. For example, this op is * used to retrieve updated parameters before saving a checkpoint. 
*/ +@OpMetadata( + opType = RetrieveTPUEmbeddingFTRLParameters.OP_NAME, + inputsClass = RetrieveTPUEmbeddingFTRLParameters.Inputs.class +) public final class RetrieveTPUEmbeddingFTRLParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -47,8 +53,8 @@ public final class RetrieveTPUEmbeddingFTRLParameters extends RawOp { private Output linears; - private RetrieveTPUEmbeddingFTRLParameters(Operation operation) { - super(operation); + public RetrieveTPUEmbeddingFTRLParameters(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; parameters = operation.output(outputIdx++); accumulators = operation.output(outputIdx++); @@ -192,6 +198,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = RetrieveTPUEmbeddingFTRLParameters.class + ) public static class Inputs extends RawOpInputs { /** * The tableId attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFTRLParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFTRLParametersGradAccumDebug.java index 5dc9db5c737..7dab6b465a2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFTRLParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFTRLParametersGradAccumDebug.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -35,6 +37,10 @@ * the correct embedding table configuration. For example, this op is * used to retrieve updated parameters before saving a checkpoint. 
*/ +@OpMetadata( + opType = RetrieveTPUEmbeddingFTRLParametersGradAccumDebug.OP_NAME, + inputsClass = RetrieveTPUEmbeddingFTRLParametersGradAccumDebug.Inputs.class +) public final class RetrieveTPUEmbeddingFTRLParametersGradAccumDebug extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -49,8 +55,8 @@ public final class RetrieveTPUEmbeddingFTRLParametersGradAccumDebug extends RawO private Output gradientAccumulators; - private RetrieveTPUEmbeddingFTRLParametersGradAccumDebug(Operation operation) { - super(operation); + public RetrieveTPUEmbeddingFTRLParametersGradAccumDebug(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; parameters = operation.output(outputIdx++); accumulators = operation.output(outputIdx++); @@ -204,6 +210,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = RetrieveTPUEmbeddingFTRLParametersGradAccumDebug.class + ) public static class Inputs extends RawOpInputs { /** * The tableId attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMDLAdagradLightParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMDLAdagradLightParameters.java index 0b341ec67e7..018babc869a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMDLAdagradLightParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMDLAdagradLightParameters.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -35,6 +37,10 @@ * the correct embedding table configuration. For example, this op is * used to retrieve updated parameters before saving a checkpoint. 
*/ +@OpMetadata( + opType = RetrieveTPUEmbeddingMDLAdagradLightParameters.OP_NAME, + inputsClass = RetrieveTPUEmbeddingMDLAdagradLightParameters.Inputs.class +) public final class RetrieveTPUEmbeddingMDLAdagradLightParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -49,8 +55,8 @@ public final class RetrieveTPUEmbeddingMDLAdagradLightParameters extends RawOp { private Output benefits; - private RetrieveTPUEmbeddingMDLAdagradLightParameters(Operation operation) { - super(operation); + public RetrieveTPUEmbeddingMDLAdagradLightParameters(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; parameters = operation.output(outputIdx++); accumulators = operation.output(outputIdx++); @@ -204,6 +210,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = RetrieveTPUEmbeddingMDLAdagradLightParameters.class + ) public static class Inputs extends RawOpInputs { /** * The tableId attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMomentumParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMomentumParameters.java index 360d345257c..20f6ee9d54f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMomentumParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMomentumParameters.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -35,6 +37,10 @@ * the correct embedding table configuration. For example, this op is * used to retrieve updated parameters before saving a checkpoint. 
*/ +@OpMetadata( + opType = RetrieveTPUEmbeddingMomentumParameters.OP_NAME, + inputsClass = RetrieveTPUEmbeddingMomentumParameters.Inputs.class +) public final class RetrieveTPUEmbeddingMomentumParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class RetrieveTPUEmbeddingMomentumParameters extends RawOp { private Output momenta; - private RetrieveTPUEmbeddingMomentumParameters(Operation operation) { - super(operation); + public RetrieveTPUEmbeddingMomentumParameters(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; parameters = operation.output(outputIdx++); momenta = operation.output(outputIdx++); @@ -180,6 +186,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = RetrieveTPUEmbeddingMomentumParameters.class + ) public static class Inputs extends RawOpInputs { /** * The tableId attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMomentumParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMomentumParametersGradAccumDebug.java index 5ba59def58d..aa51de4d404 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMomentumParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMomentumParametersGradAccumDebug.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -35,6 +37,10 @@ * the correct embedding table configuration. For example, this op is * used to retrieve updated parameters before saving a checkpoint. 
*/ +@OpMetadata( + opType = RetrieveTPUEmbeddingMomentumParametersGradAccumDebug.OP_NAME, + inputsClass = RetrieveTPUEmbeddingMomentumParametersGradAccumDebug.Inputs.class +) public final class RetrieveTPUEmbeddingMomentumParametersGradAccumDebug extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -47,8 +53,8 @@ public final class RetrieveTPUEmbeddingMomentumParametersGradAccumDebug extends private Output gradientAccumulators; - private RetrieveTPUEmbeddingMomentumParametersGradAccumDebug(Operation operation) { - super(operation); + public RetrieveTPUEmbeddingMomentumParametersGradAccumDebug(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; parameters = operation.output(outputIdx++); momenta = operation.output(outputIdx++); @@ -192,6 +198,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = RetrieveTPUEmbeddingMomentumParametersGradAccumDebug.class + ) public static class Inputs extends RawOpInputs { /** * The tableId attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalAdagradParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalAdagradParameters.java index 5a351245ecb..3440a794cd4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalAdagradParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalAdagradParameters.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -35,6 +37,10 @@ * the correct embedding table configuration. For example, this op is * used to retrieve updated parameters before saving a checkpoint. 
*/ +@OpMetadata( + opType = RetrieveTPUEmbeddingProximalAdagradParameters.OP_NAME, + inputsClass = RetrieveTPUEmbeddingProximalAdagradParameters.Inputs.class +) public final class RetrieveTPUEmbeddingProximalAdagradParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class RetrieveTPUEmbeddingProximalAdagradParameters extends RawOp { private Output accumulators; - private RetrieveTPUEmbeddingProximalAdagradParameters(Operation operation) { - super(operation); + public RetrieveTPUEmbeddingProximalAdagradParameters(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; parameters = operation.output(outputIdx++); accumulators = operation.output(outputIdx++); @@ -180,6 +186,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = RetrieveTPUEmbeddingProximalAdagradParameters.class + ) public static class Inputs extends RawOpInputs { /** * The tableId attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug.java index 740410a1c49..3565cbce042 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -35,6 +37,10 @@ * the correct embedding table configuration. For example, this op is * used to retrieve updated parameters before saving a checkpoint. 
*/ +@OpMetadata( + opType = RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug.OP_NAME, + inputsClass = RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug.Inputs.class +) public final class RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -47,8 +53,8 @@ public final class RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug e private Output gradientAccumulators; - private RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug(Operation operation) { - super(operation); + public RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; parameters = operation.output(outputIdx++); accumulators = operation.output(outputIdx++); @@ -192,6 +198,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug.class + ) public static class Inputs extends RawOpInputs { /** * The tableId attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalYogiParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalYogiParameters.java index 3caff4629ad..083ef769cb9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalYogiParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalYogiParameters.java @@ -26,11 +26,17 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** * The RetrieveTPUEmbeddingProximalYogiParameters operation */ +@OpMetadata( + opType = RetrieveTPUEmbeddingProximalYogiParameters.OP_NAME, + inputsClass = RetrieveTPUEmbeddingProximalYogiParameters.Inputs.class +) public final class RetrieveTPUEmbeddingProximalYogiParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -43,8 +49,8 @@ public final class RetrieveTPUEmbeddingProximalYogiParameters extends RawOp { private Output m; - private RetrieveTPUEmbeddingProximalYogiParameters(Operation operation) { - super(operation); + public RetrieveTPUEmbeddingProximalYogiParameters(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; parameters = operation.output(outputIdx++); v = operation.output(outputIdx++); @@ -188,6 +194,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = RetrieveTPUEmbeddingProximalYogiParameters.class + ) public static class Inputs extends RawOpInputs { /** * The tableId attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug.java index 50fe386f14f..61c55552cd7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug.java @@ -26,11 +26,17 @@ import org.tensorflow.op.RawOpInputs; import 
org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** * The RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug operation */ +@OpMetadata( + opType = RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug.OP_NAME, + inputsClass = RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug.Inputs.class +) public final class RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug exte private Output gradientAccumulators; - private RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug(Operation operation) { - super(operation); + public RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; parameters = operation.output(outputIdx++); v = operation.output(outputIdx++); @@ -200,6 +206,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug.class + ) public static class Inputs extends RawOpInputs { /** * The tableId attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingRMSPropParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingRMSPropParameters.java index 0db31288db7..8826547e35e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingRMSPropParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingRMSPropParameters.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -35,6 +37,10 @@ * the correct embedding table configuration. For example, this op is * used to retrieve updated parameters before saving a checkpoint. 
*/ +@OpMetadata( + opType = RetrieveTPUEmbeddingRMSPropParameters.OP_NAME, + inputsClass = RetrieveTPUEmbeddingRMSPropParameters.Inputs.class +) public final class RetrieveTPUEmbeddingRMSPropParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -47,8 +53,8 @@ public final class RetrieveTPUEmbeddingRMSPropParameters extends RawOp { private Output mom; - private RetrieveTPUEmbeddingRMSPropParameters(Operation operation) { - super(operation); + public RetrieveTPUEmbeddingRMSPropParameters(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; parameters = operation.output(outputIdx++); ms = operation.output(outputIdx++); @@ -192,6 +198,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = RetrieveTPUEmbeddingRMSPropParameters.class + ) public static class Inputs extends RawOpInputs { /** * The tableId attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.java index 3d32bd5d1f2..ee671e633d7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -35,6 +37,10 @@ * the correct embedding table configuration. For example, this op is * used to retrieve updated parameters before saving a checkpoint. 
*/ +@OpMetadata( + opType = RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.OP_NAME, + inputsClass = RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.Inputs.class +) public final class RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -49,8 +55,8 @@ public final class RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug extends R private Output gradientAccumulators; - private RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug(Operation operation) { - super(operation); + public RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; parameters = operation.output(outputIdx++); ms = operation.output(outputIdx++); @@ -204,6 +210,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.class + ) public static class Inputs extends RawOpInputs { /** * The tableId attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingStochasticGradientDescentParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingStochasticGradientDescentParameters.java index 7d8ab5ae954..c789908ea4c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingStochasticGradientDescentParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingStochasticGradientDescentParameters.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -36,6 +38,10 @@ * the correct embedding table configuration. For example, this op is * used to retrieve updated parameters before saving a checkpoint. 
*/ +@OpMetadata( + opType = RetrieveTPUEmbeddingStochasticGradientDescentParameters.OP_NAME, + inputsClass = RetrieveTPUEmbeddingStochasticGradientDescentParameters.Inputs.class +) public final class RetrieveTPUEmbeddingStochasticGradientDescentParameters extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -44,8 +50,8 @@ public final class RetrieveTPUEmbeddingStochasticGradientDescentParameters exten private Output parameters; - private RetrieveTPUEmbeddingStochasticGradientDescentParameters(Operation operation) { - super(operation); + public RetrieveTPUEmbeddingStochasticGradientDescentParameters(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; parameters = operation.output(outputIdx++); } @@ -174,6 +180,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = RetrieveTPUEmbeddingStochasticGradientDescentParameters.class + ) public static class Inputs extends RawOpInputs { /** * The tableId attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.java index c199ce91f51..6941a062872 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -35,6 +37,10 @@ * the correct embedding table configuration. For example, this op is * used to retrieve updated parameters before saving a checkpoint. 
*/ +@OpMetadata( + opType = RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.OP_NAME, + inputsClass = RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.Inputs.class +) public final class RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -45,9 +51,9 @@ public final class RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAc private Output gradientAccumulators; - private RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug( + public RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug( Operation operation) { - super(operation); + super(operation, OP_NAME); int outputIdx = 0; parameters = operation.output(outputIdx++); gradientAccumulators = operation.output(outputIdx++); @@ -181,6 +187,9 @@ public Options config(String config) { } } + @OpInputsMetadata( + outputsClass = RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.class + ) public static class Inputs extends RawOpInputs { /** * The tableId attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/SendTPUEmbeddingGradients.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/SendTPUEmbeddingGradients.java index 0556e51c322..45cf2fac6cc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/SendTPUEmbeddingGradients.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/SendTPUEmbeddingGradients.java @@ -27,19 +27,25 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** * Performs gradient updates of embedding tables. */ +@OpMetadata( + opType = SendTPUEmbeddingGradients.OP_NAME, + inputsClass = SendTPUEmbeddingGradients.Inputs.class +) public final class SendTPUEmbeddingGradients extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "SendTPUEmbeddingGradients"; - private SendTPUEmbeddingGradients(Operation operation) { - super(operation); + public SendTPUEmbeddingGradients(Operation operation) { + super(operation, OP_NAME); } /** @@ -112,6 +118,9 @@ public Options NN(Long NN) { } } + @OpInputsMetadata( + outputsClass = SendTPUEmbeddingGradients.class + ) public static class Inputs extends RawOpInputs { /** * A TensorList of gradients with which to update embedding tables. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ShutdownDistributedTPU.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ShutdownDistributedTPU.java index bfd80d334da..b2a8949e876 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ShutdownDistributedTPU.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ShutdownDistributedTPU.java @@ -25,19 +25,25 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; /** * Shuts down a running distributed TPU system. * The op returns an error if no system is running. 
*/ +@OpMetadata( + opType = ShutdownDistributedTPU.OP_NAME, + inputsClass = ShutdownDistributedTPU.Inputs.class +) public final class ShutdownDistributedTPU extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "ShutdownDistributedTPU"; - private ShutdownDistributedTPU(Operation operation) { - super(operation); + public ShutdownDistributedTPU(Operation operation) { + super(operation, OP_NAME); } /** @@ -54,6 +60,9 @@ public static ShutdownDistributedTPU create(Scope scope) { return new ShutdownDistributedTPU(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = ShutdownDistributedTPU.class + ) public static class Inputs extends RawOpInputs { public Inputs(GraphOperation op) { super(new ShutdownDistributedTPU(op), op, Arrays.asList()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUCompilationResult.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUCompilationResult.java index 7bc258d87f6..1b8dffc0997 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUCompilationResult.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUCompilationResult.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TString; /** @@ -37,6 +39,10 @@ * * @deprecated use {@link org.tensorflow.op.tpu.CompilationResult} instead */ +@OpMetadata( + opType = TPUCompilationResult.OP_NAME, + inputsClass = TPUCompilationResult.Inputs.class +) @Deprecated public final class TPUCompilationResult extends RawOp implements Operand { /** @@ -46,8 +52,8 @@ public final class TPUCompilationResult extends RawOp implements Operand output; - private TPUCompilationResult(Operation operation) { - super(operation); + public TPUCompilationResult(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -80,6 +86,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = TPUCompilationResult.class + ) public static class Inputs extends RawOpInputs { public Inputs(GraphOperation op) { super(new TPUCompilationResult(op), op, Arrays.asList()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUEmbeddingActivations.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUEmbeddingActivations.java index 360c11d3a5a..29ea1c90743 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUEmbeddingActivations.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUEmbeddingActivations.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; /** @@ -39,6 +41,10 @@ * * @deprecated use {@link org.tensorflow.op.tpu.EmbeddingActivations} instead */ +@OpMetadata( + opType = TPUEmbeddingActivations.OP_NAME, + inputsClass = TPUEmbeddingActivations.Inputs.class +) @Deprecated public final class TPUEmbeddingActivations extends RawOp implements Operand { /** @@ -48,8 +54,8 @@ public final class TPUEmbeddingActivations 
extends RawOp implements Operand output; - private TPUEmbeddingActivations(Operation operation) { - super(operation); + public TPUEmbeddingActivations(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -93,6 +99,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = TPUEmbeddingActivations.class + ) public static class Inputs extends RawOpInputs { /** * A trainable variable, enabling optimizers to find this op. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicateMetadata.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicateMetadata.java index 124bccfda5c..31f02d03220 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicateMetadata.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicateMetadata.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; /** * Metadata indicating how the TPU computation should be replicated. @@ -33,6 +35,10 @@ * * @deprecated use {@link org.tensorflow.op.tpu.ReplicateMetadata} instead */ +@OpMetadata( + opType = TPUReplicateMetadata.OP_NAME, + inputsClass = TPUReplicateMetadata.Inputs.class +) @Deprecated public final class TPUReplicateMetadata extends RawOp { /** @@ -40,8 +46,8 @@ public final class TPUReplicateMetadata extends RawOp { */ public static final String OP_NAME = "TPUReplicateMetadata"; - private TPUReplicateMetadata(Operation operation) { - super(operation); + public TPUReplicateMetadata(Operation operation) { + super(operation, OP_NAME); } /** @@ -433,6 +439,9 @@ public Options useSpmdForXlaPartitioning(Boolean useSpmdForXlaPartitioning) { } } + @OpInputsMetadata( + outputsClass = TPUReplicateMetadata.class + ) public static class Inputs extends RawOpInputs { /** * Number of replicas of the computation diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedInput.java index bfb2ef52af0..b64bcef7902 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedInput.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -48,6 +50,10 @@ * * @deprecated use {@link org.tensorflow.op.tpu.ReplicatedInput} instead */ +@OpMetadata( + opType = TPUReplicatedInput.OP_NAME, + inputsClass = TPUReplicatedInput.Inputs.class +) @Deprecated public final class TPUReplicatedInput extends RawOp implements Operand { /** @@ -57,8 +63,8 @@ public final class TPUReplicatedInput extends RawOp implements private Output output; - private TPUReplicatedInput(Operation operation) { - super(operation); + public TPUReplicatedInput(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -186,6 +192,9 @@ public Options isPacked(Boolean 
isPacked) { } } + @OpInputsMetadata( + outputsClass = TPUReplicatedInput.class + ) public static class Inputs extends RawOpInputs> { /** * The inputs input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedOutput.java index e20f5d1b4c6..ac389716703 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedOutput.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -47,6 +49,10 @@ * * @deprecated use {@link org.tensorflow.op.tpu.ReplicatedOutput} instead */ +@OpMetadata( + opType = TPUReplicatedOutput.OP_NAME, + inputsClass = TPUReplicatedOutput.Inputs.class +) @Deprecated public final class TPUReplicatedOutput extends RawOp implements Iterable> { /** @@ -57,8 +63,8 @@ public final class TPUReplicatedOutput extends RawOp implements private List> outputs; @SuppressWarnings("unchecked") - private TPUReplicatedOutput(Operation operation) { - super(operation); + public TPUReplicatedOutput(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputsLength = operation.outputListLength("outputs"); outputs = Arrays.asList((Output[]) operation.outputList(outputIdx, outputsLength)); @@ -100,6 +106,9 @@ public Iterator> iterator() { return (Iterator) outputs.iterator(); } + @OpInputsMetadata( + outputsClass = TPUReplicatedOutput.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReshardVariables.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReshardVariables.java index 5308bcc3b37..5f0a1ee179e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReshardVariables.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReshardVariables.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TString; import org.tensorflow.types.family.TType; @@ -38,14 +40,18 @@ * specifies the desired state, and format_state_var is the current state of the * variables. 
*/ +@OpMetadata( + opType = TPUReshardVariables.OP_NAME, + inputsClass = TPUReshardVariables.Inputs.class +) public final class TPUReshardVariables extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "TPUReshardVariables"; - private TPUReshardVariables(Operation operation) { - super(operation); + public TPUReshardVariables(Operation operation) { + super(operation, OP_NAME); } /** @@ -69,6 +75,9 @@ public static TPUReshardVariables create(Scope scope, Iterable { /** * The vars input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/WorkerHeartbeat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/WorkerHeartbeat.java index cff0719e947..97e338c0046 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/WorkerHeartbeat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/WorkerHeartbeat.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TString; /** @@ -34,6 +36,10 @@ * Heartbeats may be sent periodically to indicate the coordinator is still active, * to retrieve the current worker status and to expedite shutdown when necessary. */ +@OpMetadata( + opType = WorkerHeartbeat.OP_NAME, + inputsClass = WorkerHeartbeat.Inputs.class +) public final class WorkerHeartbeat extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -42,8 +48,8 @@ public final class WorkerHeartbeat extends RawOp implements Operand { private Output response; - private WorkerHeartbeat(Operation operation) { - super(operation); + public WorkerHeartbeat(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; response = operation.output(outputIdx++); } @@ -78,6 +84,9 @@ public Output asOutput() { return response; } + @OpInputsMetadata( + outputsClass = WorkerHeartbeat.class + ) public static class Inputs extends RawOpInputs { /** * A string tensor containing a serialized WorkerHeartbeatRequest diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorApplyGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorApplyGradient.java index 64f16b73bab..32824da9c0b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorApplyGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorApplyGradient.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -36,6 +38,10 @@ * Applies a gradient to a given accumulator. * Does not add if local_step is lesser than the accumulator's global_step. 
*/ +@OpMetadata( + opType = AccumulatorApplyGradient.OP_NAME, + inputsClass = AccumulatorApplyGradient.Inputs.class +) @Operator( group = "train" ) @@ -45,8 +51,8 @@ public final class AccumulatorApplyGradient extends RawOp { */ public static final String OP_NAME = "AccumulatorApplyGradient"; - private AccumulatorApplyGradient(Operation operation) { - super(operation); + public AccumulatorApplyGradient(Operation operation) { + super(operation, OP_NAME); } /** @@ -70,6 +76,9 @@ public static AccumulatorApplyGradient create(Scope scope, Operand hand return new AccumulatorApplyGradient(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = AccumulatorApplyGradient.class + ) public static class Inputs extends RawOpInputs { /** * The handle to a accumulator. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorNumAccumulated.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorNumAccumulated.java index 73199c30e4b..f97d7db1848 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorNumAccumulated.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorNumAccumulated.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; import org.tensorflow.types.TString; @@ -34,6 +36,10 @@ /** * Returns the number of gradients aggregated in the given accumulators. */ +@OpMetadata( + opType = AccumulatorNumAccumulated.OP_NAME, + inputsClass = AccumulatorNumAccumulated.Inputs.class +) @Operator( group = "train" ) @@ -45,8 +51,8 @@ public final class AccumulatorNumAccumulated extends RawOp implements Operand numAccumulated; - private AccumulatorNumAccumulated(Operation operation) { - super(operation); + public AccumulatorNumAccumulated(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; numAccumulated = operation.output(outputIdx++); } @@ -81,6 +87,9 @@ public Output asOutput() { return numAccumulated; } + @OpInputsMetadata( + outputsClass = AccumulatorNumAccumulated.class + ) public static class Inputs extends RawOpInputs { /** * The handle to an accumulator. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorSetGlobalStep.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorSetGlobalStep.java index 23092eb9759..12536b44d97 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorSetGlobalStep.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorSetGlobalStep.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -35,6 +37,10 @@ * Logs warning if the accumulator's value is already higher than * new_global_step. 
*/ +@OpMetadata( + opType = AccumulatorSetGlobalStep.OP_NAME, + inputsClass = AccumulatorSetGlobalStep.Inputs.class +) @Operator( group = "train" ) @@ -44,8 +50,8 @@ public final class AccumulatorSetGlobalStep extends RawOp { */ public static final String OP_NAME = "AccumulatorSetGlobalStep"; - private AccumulatorSetGlobalStep(Operation operation) { - super(operation); + public AccumulatorSetGlobalStep(Operation operation) { + super(operation, OP_NAME); } /** @@ -67,6 +73,9 @@ public static AccumulatorSetGlobalStep create(Scope scope, Operand hand return new AccumulatorSetGlobalStep(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = AccumulatorSetGlobalStep.class + ) public static class Inputs extends RawOpInputs { /** * The handle to an accumulator. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorTakeGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorTakeGradient.java index 52d6bce2df3..2d7c955b968 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorTakeGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorTakeGradient.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -44,6 +46,10 @@ * * @param data type for {@code average} output */ +@OpMetadata( + opType = AccumulatorTakeGradient.OP_NAME, + inputsClass = AccumulatorTakeGradient.Inputs.class +) @Operator( group = "train" ) @@ -55,8 +61,8 @@ public final class AccumulatorTakeGradient extends RawOp implem private Output average; - private AccumulatorTakeGradient(Operation operation) { - super(operation); + public AccumulatorTakeGradient(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; average = operation.output(outputIdx++); } @@ -98,6 +104,9 @@ public Output asOutput() { return average; } + @OpInputsMetadata( + outputsClass = AccumulatorTakeGradient.class + ) public static class Inputs extends RawOpInputs> { /** * The handle to an accumulator. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdaMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdaMax.java index 84b1ccd7a52..471b49c7995 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdaMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdaMax.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = ApplyAdaMax.OP_NAME, + inputsClass = ApplyAdaMax.Inputs.class +) public final class ApplyAdaMax extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -46,8 +52,8 @@ public final class ApplyAdaMax extends RawOp implements Operand private Output out; - private ApplyAdaMax(Operation operation) { - super(operation); + public ApplyAdaMax(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -144,6 +150,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ApplyAdaMax.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdadelta.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdadelta.java index 2d908547cfd..1ec7edff090 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdadelta.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdadelta.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -40,6 +42,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = ApplyAdadelta.OP_NAME, + inputsClass = ApplyAdadelta.Inputs.class +) @Operator( group = "train" ) @@ -51,8 +57,8 @@ public final class ApplyAdadelta extends RawOp implements Opera private Output out; - private ApplyAdadelta(Operation operation) { - super(operation); + public ApplyAdadelta(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -143,6 +149,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ApplyAdadelta.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a Variable(). 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagrad.java index 1296c191656..25d2050ebe8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = ApplyAdagrad.OP_NAME, + inputsClass = ApplyAdagrad.Inputs.class +) @Operator( group = "train" ) @@ -49,8 +55,8 @@ public final class ApplyAdagrad extends RawOp implements Operan private Output out; - private ApplyAdagrad(Operation operation) { - super(operation); + public ApplyAdagrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -162,6 +168,9 @@ public Options updateSlots(Boolean updateSlots) { } } + @OpInputsMetadata( + outputsClass = ApplyAdagrad.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradDa.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradDa.java index a3f60e0471f..e6668ee1593 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradDa.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradDa.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -37,6 +39,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = ApplyAdagradDa.OP_NAME, + inputsClass = ApplyAdagradDa.Inputs.class +) @Operator( group = "train" ) @@ -48,8 +54,8 @@ public final class ApplyAdagradDa extends RawOp implements Oper private Output out; - private ApplyAdagradDa(Operation operation) { - super(operation); + public ApplyAdagradDa(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -142,6 +148,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ApplyAdagradDa.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a Variable(). 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradV2.java index f26f0ca7e82..61b3e118bee 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradV2.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = ApplyAdagradV2.OP_NAME, + inputsClass = ApplyAdagradV2.Inputs.class +) public final class ApplyAdagradV2 extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -45,8 +51,8 @@ public final class ApplyAdagradV2 extends RawOp implements Oper private Output out; - private ApplyAdagradV2(Operation operation) { - super(operation); + public ApplyAdagradV2(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -160,6 +166,9 @@ public Options updateSlots(Boolean updateSlots) { } } + @OpInputsMetadata( + outputsClass = ApplyAdagradV2.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdam.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdam.java index ae419bd1640..689acdd1364 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdam.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdam.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -40,6 +42,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = ApplyAdam.OP_NAME, + inputsClass = ApplyAdam.Inputs.class +) @Operator( group = "train" ) @@ -51,8 +57,8 @@ public final class ApplyAdam extends RawOp implements Operand out; - private ApplyAdam(Operation operation) { - super(operation); + public ApplyAdam(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -177,6 +183,9 @@ public Options useNesterov(Boolean useNesterov) { } } + @OpInputsMetadata( + outputsClass = ApplyAdam.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a Variable(). 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAddSign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAddSign.java index 5eb80c556c3..8daf7e8ee25 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAddSign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAddSign.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = ApplyAddSign.OP_NAME, + inputsClass = ApplyAddSign.Inputs.class +) @Operator( group = "train" ) @@ -50,8 +56,8 @@ public final class ApplyAddSign extends RawOp implements Operan private Output out; - private ApplyAddSign(Operation operation) { - super(operation); + public ApplyAddSign(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -144,6 +150,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ApplyAddSign.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyCenteredRmsProp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyCenteredRmsProp.java index 5872ccce7b1..e6dd1402cb2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyCenteredRmsProp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyCenteredRmsProp.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -50,6 +52,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = ApplyCenteredRmsProp.OP_NAME, + inputsClass = ApplyCenteredRmsProp.Inputs.class +) @Operator( group = "train" ) @@ -61,8 +67,8 @@ public final class ApplyCenteredRmsProp extends RawOp implement private Output out; - private ApplyCenteredRmsProp(Operation operation) { - super(operation); + public ApplyCenteredRmsProp(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -159,6 +165,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ApplyCenteredRmsProp.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a Variable(). 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyFtrl.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyFtrl.java index af10fab7ed5..c977bdb6ce2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyFtrl.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyFtrl.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -43,6 +45,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = ApplyFtrl.OP_NAME, + inputsClass = ApplyFtrl.Inputs.class +) @Operator( group = "train" ) @@ -54,8 +60,8 @@ public final class ApplyFtrl extends RawOp implements Operand out; - private ApplyFtrl(Operation operation) { - super(operation); + public ApplyFtrl(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -178,6 +184,9 @@ public Options multiplyLinearByLr(Boolean multiplyLinearByLr) { } } + @OpInputsMetadata( + outputsClass = ApplyFtrl.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyGradientDescent.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyGradientDescent.java index 0e459aefa68..dd732971803 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyGradientDescent.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyGradientDescent.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = ApplyGradientDescent.OP_NAME, + inputsClass = ApplyGradientDescent.Inputs.class +) @Operator( group = "train" ) @@ -47,8 +53,8 @@ public final class ApplyGradientDescent extends RawOp implement private Output out; - private ApplyGradientDescent(Operation operation) { - super(operation); + public ApplyGradientDescent(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -130,6 +136,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ApplyGradientDescent.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a Variable(). 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyMomentum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyMomentum.java index cf22b9bcbed..024f1d554ba 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyMomentum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyMomentum.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = ApplyMomentum.OP_NAME, + inputsClass = ApplyMomentum.Inputs.class +) @Operator( group = "train" ) @@ -50,8 +56,8 @@ public final class ApplyMomentum extends RawOp implements Opera private Output out; - private ApplyMomentum(Operation operation) { - super(operation); + public ApplyMomentum(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -169,6 +175,9 @@ public Options useNesterov(Boolean useNesterov) { } } + @OpInputsMetadata( + outputsClass = ApplyMomentum.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyPowerSign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyPowerSign.java index 88fd9980c94..6c0d708ac35 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyPowerSign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyPowerSign.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = ApplyPowerSign.OP_NAME, + inputsClass = ApplyPowerSign.Inputs.class +) @Operator( group = "train" ) @@ -50,8 +56,8 @@ public final class ApplyPowerSign extends RawOp implements Oper private Output out; - private ApplyPowerSign(Operation operation) { - super(operation); + public ApplyPowerSign(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -144,6 +150,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ApplyPowerSign.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a Variable(). 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalAdagrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalAdagrad.java index aa2dc292c1a..f9307774264 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalAdagrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalAdagrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = ApplyProximalAdagrad.OP_NAME, + inputsClass = ApplyProximalAdagrad.Inputs.class +) @Operator( group = "train" ) @@ -50,8 +56,8 @@ public final class ApplyProximalAdagrad extends RawOp implement private Output out; - private ApplyProximalAdagrad(Operation operation) { - super(operation); + public ApplyProximalAdagrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -140,6 +146,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ApplyProximalAdagrad.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalGradientDescent.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalGradientDescent.java index d1c6ab76cea..b30f1c5401d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalGradientDescent.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalGradientDescent.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = ApplyProximalGradientDescent.OP_NAME, + inputsClass = ApplyProximalGradientDescent.Inputs.class +) @Operator( group = "train" ) @@ -49,8 +55,8 @@ public final class ApplyProximalGradientDescent extends RawOp i private Output out; - private ApplyProximalGradientDescent(Operation operation) { - super(operation); + public ApplyProximalGradientDescent(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -137,6 +143,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ApplyProximalGradientDescent.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a Variable(). 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyRmsProp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyRmsProp.java index 3c9ad0c5087..7bd5e6b2111 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyRmsProp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyRmsProp.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -44,6 +46,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = ApplyRmsProp.OP_NAME, + inputsClass = ApplyRmsProp.Inputs.class +) @Operator( group = "train" ) @@ -55,8 +61,8 @@ public final class ApplyRmsProp extends RawOp implements Operan private Output out; - private ApplyRmsProp(Operation operation) { - super(operation); + public ApplyRmsProp(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -151,6 +157,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ApplyRmsProp.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/BatchMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/BatchMatMul.java index 2efb6c587ac..50ec0d48706 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/BatchMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/BatchMatMul.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -57,6 +59,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = BatchMatMul.OP_NAME, + inputsClass = BatchMatMul.Inputs.class +) @Operator( group = "train" ) @@ -68,8 +74,8 @@ public final class BatchMatMul extends RawOp implements Operand private Output output; - private BatchMatMul(Operation operation) { - super(operation); + public BatchMatMul(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -175,6 +181,9 @@ public Options adjY(Boolean adjY) { } } + @OpInputsMetadata( + outputsClass = BatchMatMul.class + ) public static class Inputs extends RawOpInputs> { /** * 2-D or higher with shape {@code [..., r_x, c_x]}. 
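These hunks only show the two annotations being applied. From the element names used above (opType and inputsClass on @OpMetadata, outputsClass on @OpInputsMetadata), the annotation declarations presumably look roughly like the sketch below; the retention policy, targets, and element types are assumptions and are not taken from this change.

// Hypothetical shape of the two annotation types, inferred solely from how they
// are used in these hunks. In the real sources they live in
// org.tensorflow.op.annotation, each public in its own file; retention and
// targets here are assumptions.
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
@interface OpMetadata {
  String opType();        // the TensorFlow op type, e.g. "MergeV2Checkpoints"
  Class<?> inputsClass(); // the op's nested Inputs companion class
}

@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
@interface OpInputsMetadata {
  Class<?> outputsClass(); // the op class these inputs belong to
}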
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ComputeBatchSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ComputeBatchSize.java index ef4a34b1a9a..4912f05ce64 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ComputeBatchSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ComputeBatchSize.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; /** * Computes the static batch size of a dataset sans partial batches. */ +@OpMetadata( + opType = ComputeBatchSize.OP_NAME, + inputsClass = ComputeBatchSize.Inputs.class +) public final class ComputeBatchSize extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -41,8 +47,8 @@ public final class ComputeBatchSize extends RawOp implements Operand { private Output batchSize; - private ComputeBatchSize(Operation operation) { - super(operation); + public ComputeBatchSize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; batchSize = operation.output(outputIdx++); } @@ -77,6 +83,9 @@ public Output asOutput() { return batchSize; } + @OpInputsMetadata( + outputsClass = ComputeBatchSize.class + ) public static class Inputs extends RawOpInputs { /** * The inputDataset input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ConditionalAccumulator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ConditionalAccumulator.java index ae58b09075e..c20b316f1ef 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ConditionalAccumulator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ConditionalAccumulator.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -43,6 +45,10 @@ * resets the aggregate to 0, and increments the global_step recorded by * the accumulator. */ +@OpMetadata( + opType = ConditionalAccumulator.OP_NAME, + inputsClass = ConditionalAccumulator.Inputs.class +) @Operator( group = "train" ) @@ -54,8 +60,8 @@ public final class ConditionalAccumulator extends RawOp implements Operand handle; - private ConditionalAccumulator(Operation operation) { - super(operation); + public ConditionalAccumulator(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -189,6 +195,9 @@ public Options reductionType(String reductionType) { } } + @OpInputsMetadata( + outputsClass = ConditionalAccumulator.class + ) public static class Inputs extends RawOpInputs { /** * The type of the value being accumulated. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/GenerateVocabRemapping.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/GenerateVocabRemapping.java index d5246b11201..0ee41c32d14 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/GenerateVocabRemapping.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/GenerateVocabRemapping.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; @@ -59,6 +61,10 @@ * use the corresponding index_table_from_file() as the FeatureColumn framework * does (as opposed to tf.feature_to_id(), which uses a CuckooTable). */ +@OpMetadata( + opType = GenerateVocabRemapping.OP_NAME, + inputsClass = GenerateVocabRemapping.Inputs.class +) @Operator( group = "train" ) @@ -72,8 +78,8 @@ public final class GenerateVocabRemapping extends RawOp { private Output numPresent; - private GenerateVocabRemapping(Operation operation) { - super(operation); + public GenerateVocabRemapping(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; remapping = operation.output(outputIdx++); numPresent = operation.output(outputIdx++); @@ -163,6 +169,9 @@ public Options oldVocabSize(Long oldVocabSize) { } } + @OpInputsMetadata( + outputsClass = GenerateVocabRemapping.class + ) public static class Inputs extends RawOpInputs { /** * Path to the new vocab file. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/MergeV2Checkpoints.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/MergeV2Checkpoints.java index 2d45a0379dc..38256a613e6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/MergeV2Checkpoints.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/MergeV2Checkpoints.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TString; @@ -38,6 +40,10 @@ * path in the input checkpoint_prefixes. This is useful when those paths are non * user-facing temporary locations. */ +@OpMetadata( + opType = MergeV2Checkpoints.OP_NAME, + inputsClass = MergeV2Checkpoints.Inputs.class +) @Operator( group = "train" ) @@ -47,8 +53,8 @@ public final class MergeV2Checkpoints extends RawOp { */ public static final String OP_NAME = "MergeV2Checkpoints"; - private MergeV2Checkpoints(Operation operation) { - super(operation); + public MergeV2Checkpoints(Operation operation) { + super(operation, OP_NAME); } /** @@ -110,6 +116,9 @@ public Options deleteOldDirs(Boolean deleteOldDirs) { } } + @OpInputsMetadata( + outputsClass = MergeV2Checkpoints.class + ) public static class Inputs extends RawOpInputs { /** * prefixes of V2 checkpoints to merge. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/NegTrain.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/NegTrain.java index 491a6c2da92..a997c7064ec 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/NegTrain.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/NegTrain.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -34,6 +36,10 @@ /** * Training via negative sampling. */ +@OpMetadata( + opType = NegTrain.OP_NAME, + inputsClass = NegTrain.Inputs.class +) @Operator( group = "train" ) @@ -43,8 +49,8 @@ public final class NegTrain extends RawOp { */ public static final String OP_NAME = "NegTrain"; - private NegTrain(Operation operation) { - super(operation); + public NegTrain(Operation operation) { + super(operation, OP_NAME); } /** @@ -81,6 +87,9 @@ public static NegTrain create(Scope scope, Operand wIn, Operand { /** * input word embedding. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/PreventGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/PreventGradient.java index 884fffc0487..3d9900c40ae 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/PreventGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/PreventGradient.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +44,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = PreventGradient.OP_NAME, + inputsClass = PreventGradient.Inputs.class +) @Operator( group = "train" ) @@ -53,8 +59,8 @@ public final class PreventGradient extends RawOp implements Ope private Output output; - private PreventGradient(Operation operation) { - super(operation); + public PreventGradient(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -132,6 +138,9 @@ public Options message(String message) { } } + @OpInputsMetadata( + outputsClass = PreventGradient.class + ) public static class Inputs extends RawOpInputs> { /** * any tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorApplyGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorApplyGradient.java index db829374d76..6a425a7d10f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorApplyGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorApplyGradient.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -34,14 +36,18 @@ * Applies a gradient to a given accumulator. * Does not add if local_step is lesser than the accumulator's global_step. */ +@OpMetadata( + opType = ResourceAccumulatorApplyGradient.OP_NAME, + inputsClass = ResourceAccumulatorApplyGradient.Inputs.class +) public final class ResourceAccumulatorApplyGradient extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "ResourceAccumulatorApplyGradient"; - private ResourceAccumulatorApplyGradient(Operation operation) { - super(operation); + public ResourceAccumulatorApplyGradient(Operation operation) { + super(operation, OP_NAME); } /** @@ -66,6 +72,9 @@ public static ResourceAccumulatorApplyGradient create(Scope scope, return new ResourceAccumulatorApplyGradient(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = ResourceAccumulatorApplyGradient.class + ) public static class Inputs extends RawOpInputs { /** * The handle to a accumulator. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorNumAccumulated.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorNumAccumulated.java index c9cc0a3c4a2..c5e181665f5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorNumAccumulated.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorNumAccumulated.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; /** * Returns the number of gradients aggregated in the given accumulators. 
*/ +@OpMetadata( + opType = ResourceAccumulatorNumAccumulated.OP_NAME, + inputsClass = ResourceAccumulatorNumAccumulated.Inputs.class +) public final class ResourceAccumulatorNumAccumulated extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -41,8 +47,8 @@ public final class ResourceAccumulatorNumAccumulated extends RawOp implements Op private Output numAccumulated; - private ResourceAccumulatorNumAccumulated(Operation operation) { - super(operation); + public ResourceAccumulatorNumAccumulated(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; numAccumulated = operation.output(outputIdx++); } @@ -78,6 +84,9 @@ public Output asOutput() { return numAccumulated; } + @OpInputsMetadata( + outputsClass = ResourceAccumulatorNumAccumulated.class + ) public static class Inputs extends RawOpInputs { /** * The handle to an accumulator. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorSetGlobalStep.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorSetGlobalStep.java index 23141dd5d10..226492c11c3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorSetGlobalStep.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorSetGlobalStep.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; @@ -34,14 +36,18 @@ * Logs warning if the accumulator's value is already higher than * new_global_step. */ +@OpMetadata( + opType = ResourceAccumulatorSetGlobalStep.OP_NAME, + inputsClass = ResourceAccumulatorSetGlobalStep.Inputs.class +) public final class ResourceAccumulatorSetGlobalStep extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "ResourceAccumulatorSetGlobalStep"; - private ResourceAccumulatorSetGlobalStep(Operation operation) { - super(operation); + public ResourceAccumulatorSetGlobalStep(Operation operation) { + super(operation, OP_NAME); } /** @@ -63,6 +69,9 @@ public static ResourceAccumulatorSetGlobalStep create(Scope scope, return new ResourceAccumulatorSetGlobalStep(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = ResourceAccumulatorSetGlobalStep.class + ) public static class Inputs extends RawOpInputs { /** * The handle to an accumulator. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorTakeGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorTakeGradient.java index 1cfc15beb3c..cbe1227ee7d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorTakeGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorTakeGradient.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; @@ -42,6 +44,10 @@ * * @param data type for {@code average} output */ +@OpMetadata( + opType = ResourceAccumulatorTakeGradient.OP_NAME, + inputsClass = ResourceAccumulatorTakeGradient.Inputs.class +) public final class ResourceAccumulatorTakeGradient extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -50,8 +56,8 @@ public final class ResourceAccumulatorTakeGradient extends RawO private Output average; - private ResourceAccumulatorTakeGradient(Operation operation) { - super(operation); + public ResourceAccumulatorTakeGradient(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; average = operation.output(outputIdx++); } @@ -93,6 +99,9 @@ public Output asOutput() { return average; } + @OpInputsMetadata( + outputsClass = ResourceAccumulatorTakeGradient.class + ) public static class Inputs extends RawOpInputs> { /** * The handle to an accumulator. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdaMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdaMax.java index 336031fd4ae..2323ac98843 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdaMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdaMax.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -35,14 +37,18 @@ * v_t <- max(beta2 * v_{t-1}, abs(g)) * variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon) */ +@OpMetadata( + opType = ResourceApplyAdaMax.OP_NAME, + inputsClass = ResourceApplyAdaMax.Inputs.class +) public final class ResourceApplyAdaMax extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "ResourceApplyAdaMax"; - private ResourceApplyAdaMax(Operation operation) { - super(operation); + public ResourceApplyAdaMax(Operation operation) { + super(operation, OP_NAME); } /** @@ -124,6 +130,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ResourceApplyAdaMax.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdadelta.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdadelta.java index 11c2202c18b..2b2a9be5d92 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdadelta.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdadelta.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * update_accum = rho() * update_accum + (1 - rho()) * update.square(); * var -= update; */ +@OpMetadata( + opType = ResourceApplyAdadelta.OP_NAME, + inputsClass = ResourceApplyAdadelta.Inputs.class +) @Operator( group = "train" ) @@ -46,8 +52,8 @@ public final class ResourceApplyAdadelta extends RawOp { */ public static final String OP_NAME = "ResourceApplyAdadelta"; - private ResourceApplyAdadelta(Operation operation) { - super(operation); + public ResourceApplyAdadelta(Operation operation) { + super(operation, OP_NAME); } /** @@ -123,6 +129,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ResourceApplyAdadelta.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdagrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdagrad.java index 29042e8ccd3..512af640a14 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdagrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdagrad.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -34,14 +36,18 @@ * accum += grad * grad * var -= lr * grad * (1 / (sqrt(accum) + epsilon)) */ +@OpMetadata( + opType = ResourceApplyAdagrad.OP_NAME, + inputsClass = ResourceApplyAdagrad.Inputs.class +) public final class ResourceApplyAdagrad extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "ResourceApplyAdagradV2"; - private ResourceApplyAdagrad(Operation operation) { - super(operation); + public ResourceApplyAdagrad(Operation operation) { + super(operation, OP_NAME); } /** @@ -140,6 +146,9 @@ public Options updateSlots(Boolean updateSlots) { } } + @OpInputsMetadata( + outputsClass = ResourceApplyAdagrad.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdagradDa.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdagradDa.java index b153e0c2cf6..f2d80bafeac 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdagradDa.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdagradDa.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -34,6 +36,10 @@ /** * Update '*var' according to the proximal adagrad scheme. */ +@OpMetadata( + opType = ResourceApplyAdagradDa.OP_NAME, + inputsClass = ResourceApplyAdagradDa.Inputs.class +) @Operator( group = "train" ) @@ -43,8 +49,8 @@ public final class ResourceApplyAdagradDa extends RawOp { */ public static final String OP_NAME = "ResourceApplyAdagradDA"; - private ResourceApplyAdagradDa(Operation operation) { - super(operation); + public ResourceApplyAdagradDa(Operation operation) { + super(operation, OP_NAME); } /** @@ -122,6 +128,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ResourceApplyAdagradDa.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdam.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdam.java index d58a5e916f1..a0fa6645311 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdam.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdam.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -37,6 +39,10 @@ * $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$ * $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{v_t} + \epsilon)$$ */ +@OpMetadata( + opType = ResourceApplyAdam.OP_NAME, + inputsClass = ResourceApplyAdam.Inputs.class +) @Operator( group = "train" ) @@ -46,8 +52,8 @@ public final class ResourceApplyAdam extends RawOp { */ public static final String OP_NAME = "ResourceApplyAdam"; - private ResourceApplyAdam(Operation operation) { - super(operation); + public ResourceApplyAdam(Operation operation) { + super(operation, OP_NAME); } /** @@ -157,6 +163,9 @@ public Options useNesterov(Boolean useNesterov) { } } + @OpInputsMetadata( + outputsClass = ResourceApplyAdam.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdamWithAmsgrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdamWithAmsgrad.java index 38df623446b..ad21624b125 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdamWithAmsgrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdamWithAmsgrad.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * $$\hat{v}t := max{\hat{v}{t-1}, v_t}$$ * $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{\hat{v}_t} + \epsilon)$$ */ +@OpMetadata( + opType = ResourceApplyAdamWithAmsgrad.OP_NAME, + inputsClass = ResourceApplyAdamWithAmsgrad.Inputs.class +) @Operator( group = "train" ) @@ -47,8 +53,8 @@ public final class ResourceApplyAdamWithAmsgrad extends RawOp { */ public static final String OP_NAME = "ResourceApplyAdamWithAmsgrad"; - private ResourceApplyAdamWithAmsgrad(Operation operation) { - super(operation); + public ResourceApplyAdamWithAmsgrad(Operation operation) { + super(operation, OP_NAME); } /** @@ -134,6 +140,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ResourceApplyAdamWithAmsgrad.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAddSign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAddSign.java index d034cd3bf0d..250296eb0cf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAddSign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAddSign.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * update <- (alpha + sign_decay * sign(g) *sign(m)) * g * variable <- variable - lr_t * update */ +@OpMetadata( + opType = ResourceApplyAddSign.OP_NAME, + inputsClass = ResourceApplyAddSign.Inputs.class +) @Operator( group = "train" ) @@ -45,8 +51,8 @@ public final class ResourceApplyAddSign extends RawOp { */ public static final String OP_NAME = "ResourceApplyAddSign"; - private ResourceApplyAddSign(Operation operation) { - super(operation); + public ResourceApplyAddSign(Operation operation) { + super(operation, OP_NAME); } /** @@ -123,6 +129,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ResourceApplyAddSign.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). 
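Assuming the annotations added above are retained at runtime, the op type and the Inputs companion of any generated class can be recovered reflectively. The helper below is hypothetical and is not part of this change; it only uses standard Java reflection and the annotation elements shown in the hunks.

// Hypothetical helper, not part of this change: reads the metadata added by
// these hunks from a generated op class. Assumes @OpMetadata has runtime
// retention; otherwise getAnnotation returns null.
import org.tensorflow.op.annotation.OpMetadata;
import org.tensorflow.op.train.ResourceApplyAdamWithAmsgrad;

public final class OpMetadataLookup {
  public static void main(String[] args) {
    OpMetadata meta = ResourceApplyAdamWithAmsgrad.class.getAnnotation(OpMetadata.class);
    if (meta == null) {
      System.out.println("annotation not retained at runtime");
      return;
    }
    System.out.println("op type:      " + meta.opType());      // "ResourceApplyAdamWithAmsgrad"
    System.out.println("inputs class: " + meta.inputsClass()); // the nested Inputs class
  }
}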
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyCenteredRmsProp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyCenteredRmsProp.java index 55ce63a208c..ae523e0de04 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyCenteredRmsProp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyCenteredRmsProp.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -47,6 +49,10 @@ * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) * var <- var - mom */ +@OpMetadata( + opType = ResourceApplyCenteredRmsProp.OP_NAME, + inputsClass = ResourceApplyCenteredRmsProp.Inputs.class +) @Operator( group = "train" ) @@ -56,8 +62,8 @@ public final class ResourceApplyCenteredRmsProp extends RawOp { */ public static final String OP_NAME = "ResourceApplyCenteredRMSProp"; - private ResourceApplyCenteredRmsProp(Operation operation) { - super(operation); + public ResourceApplyCenteredRmsProp(Operation operation) { + super(operation, OP_NAME); } /** @@ -139,6 +145,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ResourceApplyCenteredRmsProp.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyFtrl.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyFtrl.java index 8d3540eda02..66b8fbb0c4a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyFtrl.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyFtrl.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -40,6 +42,10 @@ * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new */ +@OpMetadata( + opType = ResourceApplyFtrl.OP_NAME, + inputsClass = ResourceApplyFtrl.Inputs.class +) @Operator( group = "train" ) @@ -49,8 +55,8 @@ public final class ResourceApplyFtrl extends RawOp { */ public static final String OP_NAME = "ResourceApplyFtrlV2"; - private ResourceApplyFtrl(Operation operation) { - super(operation); + public ResourceApplyFtrl(Operation operation) { + super(operation, OP_NAME); } /** @@ -158,6 +164,9 @@ public Options multiplyLinearByLr(Boolean multiplyLinearByLr) { } } + @OpInputsMetadata( + outputsClass = ResourceApplyFtrl.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyGradientDescent.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyGradientDescent.java index f33601521df..c5d9596e87c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyGradientDescent.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyGradientDescent.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -33,6 +35,10 @@ /** * Update '*var' by subtracting 'alpha' * 'delta' from it. */ +@OpMetadata( + opType = ResourceApplyGradientDescent.OP_NAME, + inputsClass = ResourceApplyGradientDescent.Inputs.class +) @Operator( group = "train" ) @@ -42,8 +48,8 @@ public final class ResourceApplyGradientDescent extends RawOp { */ public static final String OP_NAME = "ResourceApplyGradientDescent"; - private ResourceApplyGradientDescent(Operation operation) { - super(operation); + public ResourceApplyGradientDescent(Operation operation) { + super(operation, OP_NAME); } /** @@ -109,6 +115,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ResourceApplyGradientDescent.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyKerasMomentum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyKerasMomentum.java index 39edfec6216..c5a0f246da4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyKerasMomentum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyKerasMomentum.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ *

    accum = accum * momentum - lr * grad * var += accum */ +@OpMetadata( + opType = ResourceApplyKerasMomentum.OP_NAME, + inputsClass = ResourceApplyKerasMomentum.Inputs.class +) @Operator( group = "train" ) @@ -45,8 +51,8 @@ public final class ResourceApplyKerasMomentum extends RawOp { */ public static final String OP_NAME = "ResourceApplyKerasMomentum"; - private ResourceApplyKerasMomentum(Operation operation) { - super(operation); + public ResourceApplyKerasMomentum(Operation operation) { + super(operation, OP_NAME); } /** @@ -149,6 +155,9 @@ public Options useNesterov(Boolean useNesterov) { } } + @OpInputsMetadata( + outputsClass = ResourceApplyKerasMomentum.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyMomentum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyMomentum.java index 6bb224aa4e4..432fbdad8c1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyMomentum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyMomentum.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ *

    accum = accum * momentum + grad * var -= lr * accum */ +@OpMetadata( + opType = ResourceApplyMomentum.OP_NAME, + inputsClass = ResourceApplyMomentum.Inputs.class +) @Operator( group = "train" ) @@ -45,8 +51,8 @@ public final class ResourceApplyMomentum extends RawOp { */ public static final String OP_NAME = "ResourceApplyMomentum"; - private ResourceApplyMomentum(Operation operation) { - super(operation); + public ResourceApplyMomentum(Operation operation) { + super(operation, OP_NAME); } /** @@ -149,6 +155,9 @@ public Options useNesterov(Boolean useNesterov) { } } + @OpInputsMetadata( + outputsClass = ResourceApplyMomentum.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyPowerSign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyPowerSign.java index 3df2066af31..535cc617988 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyPowerSign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyPowerSign.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g * variable <- variable - lr_t * update */ +@OpMetadata( + opType = ResourceApplyPowerSign.OP_NAME, + inputsClass = ResourceApplyPowerSign.Inputs.class +) @Operator( group = "train" ) @@ -45,8 +51,8 @@ public final class ResourceApplyPowerSign extends RawOp { */ public static final String OP_NAME = "ResourceApplyPowerSign"; - private ResourceApplyPowerSign(Operation operation) { - super(operation); + public ResourceApplyPowerSign(Operation operation) { + super(operation, OP_NAME); } /** @@ -123,6 +129,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ResourceApplyPowerSign.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyProximalAdagrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyProximalAdagrad.java index 5e251235af1..6805a4b1a0c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyProximalAdagrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyProximalAdagrad.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * prox_v = var - lr * grad * (1 / sqrt(accum)) * var = sign(prox_v)/(1+lrl2) * max{|prox_v|-lrl1,0} */ +@OpMetadata( + opType = ResourceApplyProximalAdagrad.OP_NAME, + inputsClass = ResourceApplyProximalAdagrad.Inputs.class +) @Operator( group = "train" ) @@ -45,8 +51,8 @@ public final class ResourceApplyProximalAdagrad extends RawOp { */ public static final String OP_NAME = "ResourceApplyProximalAdagrad"; - private ResourceApplyProximalAdagrad(Operation operation) { - super(operation); + public ResourceApplyProximalAdagrad(Operation operation) { + super(operation, OP_NAME); } /** @@ -119,6 +125,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ResourceApplyProximalAdagrad.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyProximalGradientDescent.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyProximalGradientDescent.java index 1a3da62fbf4..e635dbe42c1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyProximalGradientDescent.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyProximalGradientDescent.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -35,6 +37,10 @@ * prox_v = var - alpha * delta * var = sign(prox_v)/(1+alphal2) * max{|prox_v|-alphal1,0} */ +@OpMetadata( + opType = ResourceApplyProximalGradientDescent.OP_NAME, + inputsClass = ResourceApplyProximalGradientDescent.Inputs.class +) @Operator( group = "train" ) @@ -44,8 +50,8 @@ public final class ResourceApplyProximalGradientDescent extends RawOp { */ public static final String OP_NAME = "ResourceApplyProximalGradientDescent"; - private ResourceApplyProximalGradientDescent(Operation operation) { - super(operation); + public ResourceApplyProximalGradientDescent(Operation operation) { + super(operation, OP_NAME); } /** @@ -116,6 +122,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ResourceApplyProximalGradientDescent.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyRmsProp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyRmsProp.java index 6d2fe91d598..5d2b0aa662f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyRmsProp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyRmsProp.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -41,6 +43,10 @@ * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) * var <- var - mom */ +@OpMetadata( + opType = ResourceApplyRmsProp.OP_NAME, + inputsClass = ResourceApplyRmsProp.Inputs.class +) @Operator( group = "train" ) @@ -50,8 +56,8 @@ public final class ResourceApplyRmsProp extends RawOp { */ public static final String OP_NAME = "ResourceApplyRMSProp"; - private ResourceApplyRmsProp(Operation operation) { - super(operation); + public ResourceApplyRmsProp(Operation operation) { + super(operation, OP_NAME); } /** @@ -131,6 +137,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ResourceApplyRmsProp.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceConditionalAccumulator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceConditionalAccumulator.java index 4cc51c55289..d0a4c09b894 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceConditionalAccumulator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceConditionalAccumulator.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -43,6 +45,10 @@ * This is a resource version of ConditionalAccumulator that will work in TF2.0 * with tf.cond version 2. */ +@OpMetadata( + opType = ResourceConditionalAccumulator.OP_NAME, + inputsClass = ResourceConditionalAccumulator.Inputs.class +) public final class ResourceConditionalAccumulator extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -52,8 +58,8 @@ public final class ResourceConditionalAccumulator extends RawOp implements Opera private Output handle; @SuppressWarnings("unchecked") - private ResourceConditionalAccumulator(Operation operation) { - super(operation); + public ResourceConditionalAccumulator(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } @@ -188,6 +194,9 @@ public Options reductionType(String reductionType) { } } + @OpInputsMetadata( + outputsClass = ResourceConditionalAccumulator.class + ) public static class Inputs extends RawOpInputs { /** * The type of the value being accumulated. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdadelta.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdadelta.java index 01f0a739f36..5c9b4640376 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdadelta.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdadelta.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -34,6 +36,10 @@ /** * var: Should be from a Variable(). */ +@OpMetadata( + opType = ResourceSparseApplyAdadelta.OP_NAME, + inputsClass = ResourceSparseApplyAdadelta.Inputs.class +) @Operator( group = "train" ) @@ -43,8 +49,8 @@ public final class ResourceSparseApplyAdadelta extends RawOp { */ public static final String OP_NAME = "ResourceSparseApplyAdadelta"; - private ResourceSparseApplyAdadelta(Operation operation) { - super(operation); + public ResourceSparseApplyAdadelta(Operation operation) { + super(operation, OP_NAME); } /** @@ -122,6 +128,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ResourceSparseApplyAdadelta.class + ) public static class Inputs extends RawOpInputs { /** * The var input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagrad.java index aaa5be414ea..8de948f6d86 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagrad.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * accum += grad * grad * var -= lr * grad * (1 / sqrt(accum)) */ +@OpMetadata( + opType = ResourceSparseApplyAdagrad.OP_NAME, + inputsClass = ResourceSparseApplyAdagrad.Inputs.class +) @Operator( group = "train" ) @@ -46,8 +52,8 @@ public final class ResourceSparseApplyAdagrad extends RawOp { */ public static final String OP_NAME = "ResourceSparseApplyAdagrad"; - private ResourceSparseApplyAdagrad(Operation operation) { - super(operation); + public ResourceSparseApplyAdagrad(Operation operation) { + super(operation, OP_NAME); } /** @@ -146,6 +152,9 @@ public Options updateSlots(Boolean updateSlots) { } } + @OpInputsMetadata( + outputsClass = ResourceSparseApplyAdagrad.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagradDa.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagradDa.java index 6a410002f3a..bd748830dde 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagradDa.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagradDa.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -35,6 +37,10 @@ /** * Update entries in '*var' and '*accum' according to the proximal adagrad scheme. */ +@OpMetadata( + opType = ResourceSparseApplyAdagradDa.OP_NAME, + inputsClass = ResourceSparseApplyAdagradDa.Inputs.class +) @Operator( group = "train" ) @@ -44,8 +50,8 @@ public final class ResourceSparseApplyAdagradDa extends RawOp { */ public static final String OP_NAME = "ResourceSparseApplyAdagradDA"; - private ResourceSparseApplyAdagradDa(Operation operation) { - super(operation); + public ResourceSparseApplyAdagradDa(Operation operation) { + super(operation, OP_NAME); } /** @@ -126,6 +132,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ResourceSparseApplyAdagradDa.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagradV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagradV2.java index 423505a39b5..594f0e4a1fd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagradV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyAdagradV2.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -36,14 +38,18 @@ * accum += grad * grad * var -= lr * grad * (1 / sqrt(accum)) */ +@OpMetadata( + opType = ResourceSparseApplyAdagradV2.OP_NAME, + inputsClass = ResourceSparseApplyAdagradV2.Inputs.class +) public final class ResourceSparseApplyAdagradV2 extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ public static final String OP_NAME = "ResourceSparseApplyAdagradV2"; - private ResourceSparseApplyAdagradV2(Operation operation) { - super(operation); + public ResourceSparseApplyAdagradV2(Operation operation) { + super(operation, OP_NAME); } /** @@ -144,6 +150,9 @@ public Options updateSlots(Boolean updateSlots) { } } + @OpInputsMetadata( + outputsClass = ResourceSparseApplyAdagradV2.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyCenteredRmsProp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyCenteredRmsProp.java index 2789f2fb191..36f3d3f1aa8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyCenteredRmsProp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyCenteredRmsProp.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -47,6 +49,10 @@ * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) * var <- var - mom */ +@OpMetadata( + opType = ResourceSparseApplyCenteredRmsProp.OP_NAME, + inputsClass = ResourceSparseApplyCenteredRmsProp.Inputs.class +) @Operator( group = "train" ) @@ -56,8 +62,8 @@ public final class ResourceSparseApplyCenteredRmsProp extends RawOp { */ public static final String OP_NAME = "ResourceSparseApplyCenteredRMSProp"; - private ResourceSparseApplyCenteredRmsProp(Operation operation) { - super(operation); + public ResourceSparseApplyCenteredRmsProp(Operation operation) { + super(operation, OP_NAME); } /** @@ -141,6 +147,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ResourceSparseApplyCenteredRmsProp.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyFtrl.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyFtrl.java index cfa21d59dc0..2e76781ef93 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyFtrl.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyFtrl.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -42,6 +44,10 @@ * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new */ +@OpMetadata( + opType = ResourceSparseApplyFtrl.OP_NAME, + inputsClass = ResourceSparseApplyFtrl.Inputs.class +) @Operator( group = "train" ) @@ -51,8 +57,8 @@ public final class ResourceSparseApplyFtrl extends RawOp { */ public static final String OP_NAME = "ResourceSparseApplyFtrlV2"; - private ResourceSparseApplyFtrl(Operation operation) { - super(operation); + public ResourceSparseApplyFtrl(Operation operation) { + super(operation, OP_NAME); } /** @@ -162,6 +168,9 @@ public Options multiplyLinearByLr(Boolean multiplyLinearByLr) { } } + @OpInputsMetadata( + outputsClass = ResourceSparseApplyFtrl.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). 
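One detail worth noting in the hunks above: opType is always set to OP_NAME, the name registered with the TensorFlow core engine, which can differ from the Java class name (ResourceSparseApplyFtrl maps to "ResourceSparseApplyFtrlV2", and the RmsProp classes map to "...RMSProp"). A trivial check using only constants that appear in this patch:

import org.tensorflow.op.train.ResourceSparseApplyCenteredRmsProp;
import org.tensorflow.op.train.ResourceSparseApplyFtrl;

public class OpNameSketch {
  public static void main(String[] args) {
    // The registered op type differs from the Java class name for these ops.
    System.out.println(ResourceSparseApplyFtrl.OP_NAME);            // ResourceSparseApplyFtrlV2
    System.out.println(ResourceSparseApplyCenteredRmsProp.OP_NAME); // ResourceSparseApplyCenteredRMSProp
  }
}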
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyKerasMomentum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyKerasMomentum.java index 94c2ad9bd17..50a6fbbf44d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyKerasMomentum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyKerasMomentum.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ *

    accum = accum * momentum - lr * grad * var += accum */ +@OpMetadata( + opType = ResourceSparseApplyKerasMomentum.OP_NAME, + inputsClass = ResourceSparseApplyKerasMomentum.Inputs.class +) @Operator( group = "train" ) @@ -47,8 +53,8 @@ public final class ResourceSparseApplyKerasMomentum extends RawOp { */ public static final String OP_NAME = "ResourceSparseApplyKerasMomentum"; - private ResourceSparseApplyKerasMomentum(Operation operation) { - super(operation); + public ResourceSparseApplyKerasMomentum(Operation operation) { + super(operation, OP_NAME); } /** @@ -153,6 +159,9 @@ public Options useNesterov(Boolean useNesterov) { } } + @OpInputsMetadata( + outputsClass = ResourceSparseApplyKerasMomentum.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyMomentum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyMomentum.java index 1b52c885e67..ab6a217555d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyMomentum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyMomentum.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ *

    accum = accum * momentum + grad * var -= lr * accum */ +@OpMetadata( + opType = ResourceSparseApplyMomentum.OP_NAME, + inputsClass = ResourceSparseApplyMomentum.Inputs.class +) @Operator( group = "train" ) @@ -47,8 +53,8 @@ public final class ResourceSparseApplyMomentum extends RawOp { */ public static final String OP_NAME = "ResourceSparseApplyMomentum"; - private ResourceSparseApplyMomentum(Operation operation) { - super(operation); + public ResourceSparseApplyMomentum(Operation operation) { + super(operation, OP_NAME); } /** @@ -153,6 +159,9 @@ public Options useNesterov(Boolean useNesterov) { } } + @OpInputsMetadata( + outputsClass = ResourceSparseApplyMomentum.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyProximalAdagrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyProximalAdagrad.java index 57b484d3b21..c89159095cf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyProximalAdagrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyProximalAdagrad.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -39,6 +41,10 @@ * prox_v -= lr * grad * (1 / sqrt(accum)) * var = sign(prox_v)/(1+lrl2) * max{|prox_v|-lrl1,0} */ +@OpMetadata( + opType = ResourceSparseApplyProximalAdagrad.OP_NAME, + inputsClass = ResourceSparseApplyProximalAdagrad.Inputs.class +) @Operator( group = "train" ) @@ -48,8 +54,8 @@ public final class ResourceSparseApplyProximalAdagrad extends RawOp { */ public static final String OP_NAME = "ResourceSparseApplyProximalAdagrad"; - private ResourceSparseApplyProximalAdagrad(Operation operation) { - super(operation); + public ResourceSparseApplyProximalAdagrad(Operation operation) { + super(operation, OP_NAME); } /** @@ -124,6 +130,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ResourceSparseApplyProximalAdagrad.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyProximalGradientDescent.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyProximalGradientDescent.java index 2d266131daf..2fcd22d2bf4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyProximalGradientDescent.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyProximalGradientDescent.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * prox_v = var - alpha * grad * var = sign(prox_v)/(1+alphal2) * max{|prox_v|-alphal1,0} */ +@OpMetadata( + opType = ResourceSparseApplyProximalGradientDescent.OP_NAME, + inputsClass = ResourceSparseApplyProximalGradientDescent.Inputs.class +) @Operator( group = "train" ) @@ -46,8 +52,8 @@ public final class ResourceSparseApplyProximalGradientDescent extends RawOp { */ public static final String OP_NAME = "ResourceSparseApplyProximalGradientDescent"; - private ResourceSparseApplyProximalGradientDescent(Operation operation) { - super(operation); + public ResourceSparseApplyProximalGradientDescent(Operation operation) { + super(operation, OP_NAME); } /** @@ -120,6 +126,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ResourceSparseApplyProximalGradientDescent.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyRmsProp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyRmsProp.java index ee73c441b8f..149a8003b46 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyRmsProp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceSparseApplyRmsProp.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -42,6 +44,10 @@ * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) * var <- var - mom */ +@OpMetadata( + opType = ResourceSparseApplyRmsProp.OP_NAME, + inputsClass = ResourceSparseApplyRmsProp.Inputs.class +) @Operator( group = "train" ) @@ -51,8 +57,8 @@ public final class ResourceSparseApplyRmsProp extends RawOp { */ public static final String OP_NAME = "ResourceSparseApplyRMSProp"; - private ResourceSparseApplyRmsProp(Operation operation) { - super(operation); + public ResourceSparseApplyRmsProp(Operation operation) { + super(operation, OP_NAME); } /** @@ -134,6 +140,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = ResourceSparseApplyRmsProp.class + ) public static class Inputs extends RawOpInputs { /** * Should be from a Variable(). 
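The constructor change repeated in every hunk, from private X(Operation) calling super(operation) to public X(Operation) calling super(operation, OP_NAME), means a typed wrapper can now be constructed around an operation that already exists in a graph, with the expected op type passed to RawOp. A minimal sketch, assuming a graph that already contains a node of the right type under a hypothetical name (how that node was added is outside this patch), and assuming the two-argument RawOp constructor validates the operation against OP_NAME:

import org.tensorflow.Graph;
import org.tensorflow.Operation;
import org.tensorflow.op.train.ResourceSparseApplyRmsProp;

public class WrapExistingOperationSketch {
  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      // Hypothetical node name; assumes an op of type "ResourceSparseApplyRMSProp"
      // was added to the graph earlier under this name.
      Operation raw = g.operation("train/update_var");
      if (raw != null) {
        System.out.println(raw.type()); // expected: ResourceSparseApplyRmsProp.OP_NAME
        // Public after this patch; previously the constructor was private and only the
        // static create(...) factory could build the wrapper.
        ResourceSparseApplyRmsProp typed = new ResourceSparseApplyRmsProp(raw);
        System.out.println(typed);
      }
    }
  }
}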
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/Restore.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/Restore.java index 07682b56434..e2c180213e2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/Restore.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/Restore.java @@ -30,6 +30,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -51,6 +53,10 @@ * strings and correspondingly well-formed. *

    Callers must ensure all the named tensors are indeed stored in the checkpoint. */ +@OpMetadata( + opType = Restore.OP_NAME, + inputsClass = Restore.Inputs.class +) @Operator( group = "train" ) @@ -63,8 +69,8 @@ public final class Restore extends RawOp implements Iterable> { private List> tensors; @SuppressWarnings("unchecked") - private Restore(Operation operation) { - super(operation); + public Restore(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int tensorsLength = operation.outputListLength("tensors"); tensors = Arrays.asList(operation.outputList(outputIdx, tensorsLength)); @@ -112,6 +118,9 @@ public Iterator> iterator() { return (Iterator) tensors.iterator(); } + @OpInputsMetadata( + outputsClass = Restore.class + ) public static class Inputs extends RawOpInputs { /** * Must have a single element. The prefix of a V2 checkpoint. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/RestoreSlice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/RestoreSlice.java index ee5496926cc..0237357a838 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/RestoreSlice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/RestoreSlice.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -43,6 +45,10 @@ * * @param data type for {@code tensor} output */ +@OpMetadata( + opType = RestoreSlice.OP_NAME, + inputsClass = RestoreSlice.Inputs.class +) @Operator( group = "train" ) @@ -54,8 +60,8 @@ public final class RestoreSlice extends RawOp implements Operan private Output tensor; - private RestoreSlice(Operation operation) { - super(operation); + public RestoreSlice(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; tensor = operation.output(outputIdx++); } @@ -143,6 +149,9 @@ public Options preferredShard(Long preferredShard) { } } + @OpInputsMetadata( + outputsClass = RestoreSlice.class + ) public static class Inputs extends RawOpInputs> { /** * Must have a single element. The pattern of the files from diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/Save.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/Save.java index 57b0d9dc6df..d0386275ef4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/Save.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/Save.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -37,6 +39,10 @@ * specific slices of full tensors, "shape_and_slices" should be non-empty strings * and correspondingly well-formed. 
*/ +@OpMetadata( + opType = Save.OP_NAME, + inputsClass = Save.Inputs.class +) @Operator( group = "train" ) @@ -46,8 +52,8 @@ public final class Save extends RawOp { */ public static final String OP_NAME = "SaveV2"; - private Save(Operation operation) { - super(operation); + public Save(Operation operation) { + super(operation, OP_NAME); } /** @@ -75,6 +81,9 @@ public static Save create(Scope scope, Operand prefix, Operand return new Save(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = Save.class + ) public static class Inputs extends RawOpInputs { /** * Must have a single element. The prefix of the V2 checkpoint to which we diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SaveSlices.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SaveSlices.java index e1b2ab7325a..ff034d1d6fe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SaveSlices.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SaveSlices.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TString; @@ -54,6 +56,10 @@ * *

    See also {@code Save}. */ +@OpMetadata( + opType = SaveSlices.OP_NAME, + inputsClass = SaveSlices.Inputs.class +) @Operator( group = "train" ) @@ -63,8 +69,8 @@ public final class SaveSlices extends RawOp { */ public static final String OP_NAME = "SaveSlices"; - private SaveSlices(Operation operation) { - super(operation); + public SaveSlices(Operation operation) { + super(operation, OP_NAME); } /** @@ -92,6 +98,9 @@ public static SaveSlices create(Scope scope, Operand filename, return new SaveSlices(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = SaveSlices.class + ) public static class Inputs extends RawOpInputs { /** * Must have a single element. The name of the file to which we write the diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaFprint.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaFprint.java index 0d7361ffed2..d37ce836e43 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaFprint.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaFprint.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -34,6 +36,10 @@ /** * Computes fingerprints of the input strings. */ +@OpMetadata( + opType = SdcaFprint.OP_NAME, + inputsClass = SdcaFprint.Inputs.class +) @Operator( group = "train" ) @@ -45,8 +51,8 @@ public final class SdcaFprint extends RawOp implements Operand { private Output output; - private SdcaFprint(Operation operation) { - super(operation); + public SdcaFprint(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -82,6 +88,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SdcaFprint.class + ) public static class Inputs extends RawOpInputs { /** * vector of strings to compute fingerprints on. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaOptimizer.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaOptimizer.java index 3d271080556..83990a329e2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaOptimizer.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaOptimizer.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; @@ -48,6 +50,10 @@ *

    Stochastic Dual Coordinate Ascent with Adaptive Probabilities .
    * Dominik Csiba, Zheng Qu, Peter Richtarik. 2015 */ +@OpMetadata( + opType = SdcaOptimizer.OP_NAME, + inputsClass = SdcaOptimizer.Inputs.class +) public final class SdcaOptimizer extends RawOp { /** * The name of this op, as known by TensorFlow core engine @@ -61,8 +67,8 @@ public final class SdcaOptimizer extends RawOp { private List> outDeltaDenseWeights; @SuppressWarnings("unchecked") - private SdcaOptimizer(Operation operation) { - super(operation); + public SdcaOptimizer(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outExampleStateData = operation.output(outputIdx++); int outDeltaSparseWeightsLength = operation.outputListLength("out_delta_sparse_weights"); @@ -200,6 +206,9 @@ public Options adaptive(Boolean adaptive) { } } + @OpInputsMetadata( + outputsClass = SdcaOptimizer.class + ) public static class Inputs extends RawOpInputs { /** * a list of vectors which contain example indices. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaShrinkL1.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaShrinkL1.java index ac043e371b1..eda1af88933 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaShrinkL1.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SdcaShrinkL1.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TFloat32; /** * Applies L1 regularization shrink step on the parameters. */ +@OpMetadata( + opType = SdcaShrinkL1.OP_NAME, + inputsClass = SdcaShrinkL1.Inputs.class +) @Operator( group = "train" ) @@ -42,8 +48,8 @@ public final class SdcaShrinkL1 extends RawOp { */ public static final String OP_NAME = "SdcaShrinkL1"; - private SdcaShrinkL1(Operation operation) { - super(operation); + public SdcaShrinkL1(Operation operation) { + super(operation, OP_NAME); } /** @@ -68,6 +74,9 @@ public static SdcaShrinkL1 create(Scope scope, Iterable> weigh return new SdcaShrinkL1(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = SdcaShrinkL1.class + ) public static class Inputs extends RawOpInputs { /** * a list of vectors where each value is the weight associated with a diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdadelta.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdadelta.java index 5bb9b3f3d39..1ae14de5884 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdadelta.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdadelta.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -37,6 +39,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = SparseApplyAdadelta.OP_NAME, + inputsClass = SparseApplyAdadelta.Inputs.class +) @Operator( group = "train" ) @@ -48,8 +54,8 @@ public final class SparseApplyAdadelta extends 
RawOp implements private Output out; - private SparseApplyAdadelta(Operation operation) { - super(operation); + public SparseApplyAdadelta(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -142,6 +148,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = SparseApplyAdadelta.class + ) public static class Inputs extends RawOpInputs> { /** * The var input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagrad.java index 7485978329f..5f243bee63e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = SparseApplyAdagrad.OP_NAME, + inputsClass = SparseApplyAdagrad.Inputs.class +) public final class SparseApplyAdagrad extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine @@ -47,8 +53,8 @@ public final class SparseApplyAdagrad extends RawOp implements private Output out; - private SparseApplyAdagrad(Operation operation) { - super(operation); + public SparseApplyAdagrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -165,6 +171,9 @@ public Options updateSlots(Boolean updateSlots) { } } + @OpInputsMetadata( + outputsClass = SparseApplyAdagrad.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a Variable(). 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagradDa.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagradDa.java index b07ad601f9f..7898f903fd9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagradDa.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagradDa.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; @@ -38,6 +40,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = SparseApplyAdagradDa.OP_NAME, + inputsClass = SparseApplyAdagradDa.Inputs.class +) @Operator( group = "train" ) @@ -49,8 +55,8 @@ public final class SparseApplyAdagradDa extends RawOp implement private Output out; - private SparseApplyAdagradDa(Operation operation) { - super(operation); + public SparseApplyAdagradDa(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -146,6 +152,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = SparseApplyAdagradDa.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyCenteredRmsProp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyCenteredRmsProp.java index 02d36702a5e..bbe031b0f37 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyCenteredRmsProp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyCenteredRmsProp.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -50,6 +52,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = SparseApplyCenteredRmsProp.OP_NAME, + inputsClass = SparseApplyCenteredRmsProp.Inputs.class +) @Operator( group = "train" ) @@ -61,8 +67,8 @@ public final class SparseApplyCenteredRmsProp extends RawOp imp private Output out; - private SparseApplyCenteredRmsProp(Operation operation) { - super(operation); + public SparseApplyCenteredRmsProp(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -162,6 +168,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = SparseApplyCenteredRmsProp.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a Variable(). 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyFtrl.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyFtrl.java index b093d454dd1..5627f95b0fb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyFtrl.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyFtrl.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -45,6 +47,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = SparseApplyFtrl.OP_NAME, + inputsClass = SparseApplyFtrl.Inputs.class +) @Operator( group = "train" ) @@ -56,8 +62,8 @@ public final class SparseApplyFtrl extends RawOp implements Ope private Output out; - private SparseApplyFtrl(Operation operation) { - super(operation); + public SparseApplyFtrl(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -183,6 +189,9 @@ public Options multiplyLinearByLr(Boolean multiplyLinearByLr) { } } + @OpInputsMetadata( + outputsClass = SparseApplyFtrl.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyMomentum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyMomentum.java index 9f36c04de51..1c78b7c93a7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyMomentum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyMomentum.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -41,6 +43,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = SparseApplyMomentum.OP_NAME, + inputsClass = SparseApplyMomentum.Inputs.class +) @Operator( group = "train" ) @@ -52,8 +58,8 @@ public final class SparseApplyMomentum extends RawOp implements private Output out; - private SparseApplyMomentum(Operation operation) { - super(operation); + public SparseApplyMomentum(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -174,6 +180,9 @@ public Options useNesterov(Boolean useNesterov) { } } + @OpInputsMetadata( + outputsClass = SparseApplyMomentum.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a Variable(). 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalAdagrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalAdagrad.java index d0f3ef64c4d..eff6fe8315b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalAdagrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalAdagrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -42,6 +44,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = SparseApplyProximalAdagrad.OP_NAME, + inputsClass = SparseApplyProximalAdagrad.Inputs.class +) @Operator( group = "train" ) @@ -53,8 +59,8 @@ public final class SparseApplyProximalAdagrad extends RawOp imp private Output out; - private SparseApplyProximalAdagrad(Operation operation) { - super(operation); + public SparseApplyProximalAdagrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -145,6 +151,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = SparseApplyProximalAdagrad.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalGradientDescent.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalGradientDescent.java index ee51c62e9bb..15a1696be37 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalGradientDescent.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalGradientDescent.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -40,6 +42,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = SparseApplyProximalGradientDescent.OP_NAME, + inputsClass = SparseApplyProximalGradientDescent.Inputs.class +) @Operator( group = "train" ) @@ -51,8 +57,8 @@ public final class SparseApplyProximalGradientDescent extends R private Output out; - private SparseApplyProximalGradientDescent(Operation operation) { - super(operation); + public SparseApplyProximalGradientDescent(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -141,6 +147,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = SparseApplyProximalGradientDescent.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a Variable(). 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyRmsProp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyRmsProp.java index af67871862b..6cd2563cc64 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyRmsProp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyRmsProp.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -45,6 +47,10 @@ * * @param data type for {@code out} output */ +@OpMetadata( + opType = SparseApplyRmsProp.OP_NAME, + inputsClass = SparseApplyRmsProp.Inputs.class +) @Operator( group = "train" ) @@ -56,8 +62,8 @@ public final class SparseApplyRmsProp extends RawOp implements private Output out; - private SparseApplyRmsProp(Operation operation) { - super(operation); + public SparseApplyRmsProp(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; out = operation.output(outputIdx++); } @@ -154,6 +160,9 @@ public Options useLocking(Boolean useLocking) { } } + @OpInputsMetadata( + outputsClass = SparseApplyRmsProp.class + ) public static class Inputs extends RawOpInputs> { /** * Should be from a Variable(). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SymbolicGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SymbolicGradient.java index 205771ec423..ff194016743 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SymbolicGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SymbolicGradient.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ /** * Computes the gradient function for function f via backpropagation. 
*/ +@OpMetadata( + opType = SymbolicGradient.OP_NAME, + inputsClass = SymbolicGradient.Inputs.class +) @Operator( group = "train" ) @@ -50,8 +56,8 @@ public final class SymbolicGradient extends RawOp implements Iterable> output; @SuppressWarnings("unchecked") - private SymbolicGradient(Operation operation) { - super(operation); + public SymbolicGradient(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputLength = operation.outputListLength("output"); output = Arrays.asList(operation.outputList(outputIdx, outputLength)); @@ -107,6 +113,9 @@ public Iterator> iterator() { return (Iterator) output.iterator(); } + @OpInputsMetadata( + outputsClass = SymbolicGradient.class + ) public static class Inputs extends RawOpInputs { /** * a list of input tensors of size N + M; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/TileGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/TileGrad.java index ccfe689fd89..86cd960293c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/TileGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/TileGrad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -40,6 +42,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = TileGrad.OP_NAME, + inputsClass = TileGrad.Inputs.class +) @Operator( group = "train" ) @@ -51,8 +57,8 @@ public final class TileGrad extends RawOp implements Operand private Output output; - private TileGrad(Operation operation) { - super(operation); + public TileGrad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = TileGrad.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/BroadcastHelper.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/BroadcastHelper.java index 31b51e9ef81..7f1ed2e0693 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/BroadcastHelper.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/BroadcastHelper.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -40,6 +42,10 @@ * * @param data type for {@code lhs_output} output */ +@OpMetadata( + opType = BroadcastHelper.OP_NAME, + inputsClass = BroadcastHelper.Inputs.class +) @Operator( group = "xla" ) @@ -53,8 +59,8 @@ public final class BroadcastHelper extends RawOp { private Output rhsOutput; - private BroadcastHelper(Operation operation) { - super(operation); + public BroadcastHelper(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; lhsOutput = 
operation.output(outputIdx++); rhsOutput = operation.output(outputIdx++); @@ -100,6 +106,9 @@ public Output rhsOutput() { return rhsOutput; } + @OpInputsMetadata( + outputsClass = BroadcastHelper.class + ) public static class Inputs extends RawOpInputs> { /** * the LHS input tensor diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ClusterOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ClusterOutput.java index ad111378f34..4ae6908f1aa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ClusterOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ClusterOutput.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code outputs} output */ +@OpMetadata( + opType = ClusterOutput.OP_NAME, + inputsClass = ClusterOutput.Inputs.class +) @Operator( group = "xla" ) @@ -47,8 +53,8 @@ public final class ClusterOutput extends RawOp implements Opera private Output outputs; - private ClusterOutput(Operation operation) { - super(operation); + public ClusterOutput(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; outputs = operation.output(outputIdx++); } @@ -84,6 +90,9 @@ public Output asOutput() { return outputs; } + @OpInputsMetadata( + outputsClass = ClusterOutput.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Conv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Conv.java index e8df7159368..0c57763cd5c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Conv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Conv.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -40,6 +42,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Conv.OP_NAME, + inputsClass = Conv.Inputs.class +) @Operator( group = "xla" ) @@ -51,8 +57,8 @@ public final class Conv extends RawOp implements Operand { private Output output; - private Conv(Operation operation) { - super(operation); + public Conv(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -111,6 +117,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Conv.class + ) public static class Inputs extends RawOpInputs> { /** * the input tensor diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Dequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Dequantize.java index 03cece9dc1e..7fc5e4adb2f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Dequantize.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Dequantize.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TBfloat16; import org.tensorflow.types.family.TType; @@ -35,6 +37,10 @@ * Takes the packed uint32 input and unpacks the input to uint8 to do * Dequantization on device. */ +@OpMetadata( + opType = Dequantize.OP_NAME, + inputsClass = Dequantize.Inputs.class +) @Operator( group = "xla" ) @@ -46,8 +52,8 @@ public final class Dequantize extends RawOp implements Operand { private Output output; - private Dequantize(Operation operation) { - super(operation); + public Dequantize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -94,6 +100,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Dequantize.class + ) public static class Inputs extends RawOpInputs { /** * Input tensors whose types is uint32, shape is [d0, ..., dn]. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Dot.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Dot.java index 5e5c78e362c..16af0ba81e9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Dot.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Dot.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Dot.OP_NAME, + inputsClass = Dot.Inputs.class +) @Operator( group = "xla" ) @@ -50,8 +56,8 @@ public final class Dot extends RawOp implements Operand { private Output output; - private Dot(Operation operation) { - super(operation); + public Dot(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -97,6 +103,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Dot.class + ) public static class Inputs extends RawOpInputs> { /** * the LHS tensor diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/DynamicSlice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/DynamicSlice.java index 65f082eabc2..6062f75e5ed 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/DynamicSlice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/DynamicSlice.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -44,6 +46,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = DynamicSlice.OP_NAME, + inputsClass = 
DynamicSlice.Inputs.class +) @Operator( group = "xla" ) @@ -55,8 +61,8 @@ public final class DynamicSlice extends RawOp implements Operan private Output output; - private DynamicSlice(Operation operation) { - super(operation); + public DynamicSlice(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -101,6 +107,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = DynamicSlice.class + ) public static class Inputs extends RawOpInputs> { /** * A {@code Tensor} of type T. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/DynamicUpdateSlice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/DynamicUpdateSlice.java index e1a4c0e7574..54ee6f4b457 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/DynamicUpdateSlice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/DynamicUpdateSlice.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -44,6 +46,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = DynamicUpdateSlice.OP_NAME, + inputsClass = DynamicUpdateSlice.Inputs.class +) @Operator( group = "xla" ) @@ -55,8 +61,8 @@ public final class DynamicUpdateSlice extends RawOp implements private Output output; - private DynamicUpdateSlice(Operation operation) { - super(operation); + public DynamicUpdateSlice(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -98,6 +104,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = DynamicUpdateSlice.class + ) public static class Inputs extends RawOpInputs> { /** * A {@code Tensor} of type T. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Einsum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Einsum.java index 3048c1e6f06..72d482e1fe6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Einsum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Einsum.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ * * @param data type for {@code product} output */ +@OpMetadata( + opType = Einsum.OP_NAME, + inputsClass = Einsum.Inputs.class +) @Operator( group = "xla" ) @@ -49,8 +55,8 @@ public final class Einsum extends RawOp implements Operand { private Output product; - private Einsum(Operation operation) { - super(operation); + public Einsum(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; product = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return product; } + @OpInputsMetadata( + outputsClass = Einsum.class + ) public static class Inputs extends RawOpInputs> { /** * The a input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Gather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Gather.java index d8b7cf0421a..63f75f76011 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Gather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Gather.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -38,6 +40,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Gather.OP_NAME, + inputsClass = Gather.Inputs.class +) @Operator( group = "xla" ) @@ -49,8 +55,8 @@ public final class Gather extends RawOp implements Operand { private Output output; - private Gather(Operation operation) { - super(operation); + public Gather(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -97,6 +103,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Gather.class + ) public static class Inputs extends RawOpInputs> { /** * The array we're gathering from. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/If.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/If.java index 31ac4815360..4422e0bd391 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/If.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/If.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ /** * output = cond ? then_branch(inputs) : else_branch(inputs). */ +@OpMetadata( + opType = If.OP_NAME, + inputsClass = If.Inputs.class +) @Operator( group = "xla" ) @@ -50,8 +56,8 @@ public final class If extends RawOp implements Iterable> { private List> output; @SuppressWarnings("unchecked") - private If(Operation operation) { - super(operation); + public If(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputLength = operation.outputListLength("output"); output = Arrays.asList(operation.outputList(outputIdx, outputLength)); @@ -103,6 +109,9 @@ public Iterator> iterator() { return (Iterator) output.iterator(); } + @OpInputsMetadata( + outputsClass = If.class + ) public static class Inputs extends RawOpInputs { /** * A boolean scalar. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/KeyValueSort.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/KeyValueSort.java index ad72e80c6f0..72d4941485c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/KeyValueSort.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/KeyValueSort.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -42,6 +44,10 @@ * * @param data type for {@code sorted_values} output */ +@OpMetadata( + opType = KeyValueSort.OP_NAME, + inputsClass = KeyValueSort.Inputs.class +) @Operator( group = "xla" ) @@ -55,8 +61,8 @@ public final class KeyValueSort extends RawO private Output sortedValues; - private KeyValueSort(Operation operation) { - super(operation); + public KeyValueSort(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; sortedKeys = operation.output(outputIdx++); sortedValues = operation.output(outputIdx++); @@ -101,6 +107,9 @@ public Output sortedValues() { return sortedValues; } + @OpInputsMetadata( + outputsClass = KeyValueSort.class + ) public static class Inputs extends RawOpInputs> { /** * A {@code Tensor} of type K. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Pad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Pad.java index f28df38bbe0..ae2788bebe2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Pad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Pad.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Pad.OP_NAME, + inputsClass = Pad.Inputs.class +) @Operator( group = "xla" ) @@ -50,8 +56,8 @@ public final class Pad extends RawOp implements Operand { private Output output; - private Pad(Operation operation) { - super(operation); + public Pad(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -102,6 +108,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Pad.class + ) public static class Inputs extends RawOpInputs> { /** * A {@code Tensor} of type T. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Recv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Recv.java index a660cd82f4c..72ecccaa144 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Recv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Recv.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -40,6 +42,10 @@ * * @param data type for {@code tensor} output */ +@OpMetadata( + opType = Recv.OP_NAME, + inputsClass = Recv.Inputs.class +) @Operator( group = "xla" ) @@ -51,8 +57,8 @@ public final class Recv extends RawOp implements Operand { private Output tensor; - private Recv(Operation operation) { - super(operation); + public Recv(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; tensor = operation.output(outputIdx++); } @@ -93,6 +99,9 @@ public Output asOutput() { return tensor; } + @OpInputsMetadata( + outputsClass = Recv.class + ) public static class Inputs extends RawOpInputs> { /** * The type of the tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Reduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Reduce.java index 3e25c7db1c0..8da34221d7b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Reduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Reduce.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Reduce.OP_NAME, + inputsClass = Reduce.Inputs.class +) @Operator( group = "xla" ) @@ -50,8 +56,8 @@ public final class Reduce extends RawOp implements Operand { private Output output; - private Reduce(Operation operation) { - super(operation); + public Reduce(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -98,6 +104,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Reduce.class + ) public static class Inputs extends RawOpInputs> { /** * the input tensor diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReduceWindow.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReduceWindow.java index 41cf0d5cfbe..39385fc184c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReduceWindow.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReduceWindow.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = ReduceWindow.OP_NAME, + inputsClass = ReduceWindow.Inputs.class +) @Operator( group = "xla" ) @@ -50,8 +56,8 @@ public final class ReduceWindow extends RawOp implements Operan private Output output; - private ReduceWindow(Operation operation) { - super(operation); + public ReduceWindow(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -105,6 +111,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = ReduceWindow.class + ) public static class Inputs extends RawOpInputs> { /** * the input tensor diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/RemoveDynamicDimensionSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/RemoveDynamicDimensionSize.java index b657a92ee72..56f0bf33738 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/RemoveDynamicDimensionSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/RemoveDynamicDimensionSize.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import 
org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -41,6 +43,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = RemoveDynamicDimensionSize.OP_NAME, + inputsClass = RemoveDynamicDimensionSize.Inputs.class +) @Operator( group = "xla" ) @@ -52,8 +58,8 @@ public final class RemoveDynamicDimensionSize extends RawOp imp private Output output; - private RemoveDynamicDimensionSize(Operation operation) { - super(operation); + public RemoveDynamicDimensionSize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -92,6 +98,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = RemoveDynamicDimensionSize.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReplicaId.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReplicaId.java index 9b12ea45151..9b061954c33 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReplicaId.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReplicaId.java @@ -27,12 +27,18 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; /** * Replica ID. */ +@OpMetadata( + opType = ReplicaId.OP_NAME, + inputsClass = ReplicaId.Inputs.class +) @Operator( group = "xla" ) @@ -44,8 +50,8 @@ public final class ReplicaId extends RawOp implements Operand { private Output id; - private ReplicaId(Operation operation) { - super(operation); + public ReplicaId(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; id = operation.output(outputIdx++); } @@ -78,6 +84,9 @@ public Output asOutput() { return id; } + @OpInputsMetadata( + outputsClass = ReplicaId.class + ) public static class Inputs extends RawOpInputs { public Inputs(GraphOperation op) { super(new ReplicaId(op), op, Arrays.asList()); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Scatter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Scatter.java index f5114c2979a..0f9028d7e35 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Scatter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Scatter.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Scatter.OP_NAME, + inputsClass = Scatter.Inputs.class +) @Operator( group = "xla" ) @@ -50,8 +56,8 @@ public final class Scatter extends RawOp implements Operand private Output output; - private Scatter(Operation operation) { - super(operation); + public Scatter(Operation operation) { + 
super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -101,6 +107,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Scatter.class + ) public static class Inputs extends RawOpInputs> { /** * Array to be scattered into. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SelectAndScatter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SelectAndScatter.java index 715c01a5a53..1da6395144c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SelectAndScatter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SelectAndScatter.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TNumber; @@ -40,6 +42,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SelectAndScatter.OP_NAME, + inputsClass = SelectAndScatter.Inputs.class +) @Operator( group = "xla" ) @@ -51,8 +57,8 @@ public final class SelectAndScatter extends RawOp implements Op private Output output; - private SelectAndScatter(Operation operation) { - super(operation); + public SelectAndScatter(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -105,6 +111,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SelectAndScatter.class + ) public static class Inputs extends RawOpInputs> { /** * the input tensor diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SelfAdjointEig.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SelfAdjointEig.java index 0c190e36fe0..4d089c4f56f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SelfAdjointEig.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SelfAdjointEig.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -40,6 +42,10 @@ * * @param data type for {@code w} output */ +@OpMetadata( + opType = SelfAdjointEig.OP_NAME, + inputsClass = SelfAdjointEig.Inputs.class +) @Operator( group = "xla" ) @@ -53,8 +59,8 @@ public final class SelfAdjointEig extends RawOp { private Output v; - private SelfAdjointEig(Operation operation) { - super(operation); + public SelfAdjointEig(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; w = operation.output(outputIdx++); v = operation.output(outputIdx++); @@ -108,6 +114,9 @@ public Output v() { return v; } + @OpInputsMetadata( + outputsClass = SelfAdjointEig.class + ) public static class Inputs extends RawOpInputs> { /** * the input tensor. 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Send.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Send.java index bb5010128fa..18bdc7d2ba0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Send.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Send.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -35,6 +37,10 @@ * documented at * https://www.tensorflow.org/performance/xla/operation_semantics#send . */ +@OpMetadata( + opType = Send.OP_NAME, + inputsClass = Send.Inputs.class +) @Operator( group = "xla" ) @@ -44,8 +50,8 @@ public final class Send extends RawOp { */ public static final String OP_NAME = "XlaSend"; - private Send(Operation operation) { - super(operation); + public Send(Operation operation) { + super(operation, OP_NAME); } /** @@ -66,6 +72,9 @@ public static Send create(Scope scope, Operand tensor, String t return new Send(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = Send.class + ) public static class Inputs extends RawOpInputs { /** * The tensor to send. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SetDynamicDimensionSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SetDynamicDimensionSize.java index 00c232c5319..79ad9f2524c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SetDynamicDimensionSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SetDynamicDimensionSize.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -41,6 +43,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SetDynamicDimensionSize.OP_NAME, + inputsClass = SetDynamicDimensionSize.Inputs.class +) @Operator( group = "xla" ) @@ -52,8 +58,8 @@ public final class SetDynamicDimensionSize extends RawOp implem private Output output; - private SetDynamicDimensionSize(Operation operation) { - super(operation); + public SetDynamicDimensionSize(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -94,6 +100,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SetDynamicDimensionSize.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Sharding.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Sharding.java index d39f3f93f21..6fb001ae681 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Sharding.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Sharding.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import 
org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Sharding.OP_NAME, + inputsClass = Sharding.Inputs.class +) @Operator( group = "xla" ) @@ -47,8 +53,8 @@ public final class Sharding extends RawOp implements Operand private Output output; - private Sharding(Operation operation) { - super(operation); + public Sharding(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -124,6 +130,9 @@ public Options sharding(String sharding) { } } + @OpInputsMetadata( + outputsClass = Sharding.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Sort.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Sort.java index 70056c2c7c1..470d2063e5c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Sort.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Sort.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = Sort.OP_NAME, + inputsClass = Sort.Inputs.class +) @Operator( group = "xla" ) @@ -50,8 +56,8 @@ public final class Sort extends RawOp implements Operand { private Output output; - private Sort(Operation operation) { - super(operation); + public Sort(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -87,6 +93,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = Sort.class + ) public static class Inputs extends RawOpInputs> { /** * A {@code Tensor} of type T. 
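For orientation, every generated op class touched in this group of hunks receives the same three-part change: an @OpMetadata annotation tying the class to its OP_NAME and its Inputs type, a constructor widened from private to public that now forwards OP_NAME to the RawOp super-constructor, and an @OpInputsMetadata annotation on the nested Inputs class. Condensed into one place, and with the generic parameters that this plain-text rendering of the diff drops restored as an assumption, the XlaSend wrapper from the hunk above ends up shaped roughly like this (other members abridged):

  @OpMetadata(
      opType = Send.OP_NAME,
      inputsClass = Send.Inputs.class
  )
  @Operator(group = "xla")
  public final class Send extends RawOp {
    public static final String OP_NAME = "XlaSend";

    // Now public, and OP_NAME is forwarded, so the typed wrapper (and its Inputs
    // class) can be re-created from an existing GraphOperation.
    public Send(Operation operation) {
      super(operation, OP_NAME);
    }

    @OpInputsMetadata(outputsClass = Send.class)
    public static class Inputs extends RawOpInputs<Send> {
      // input and attribute descriptions, unchanged apart from the annotation
    }
  }

The same edit is repeated, modulo class names, for every generated op class in this patch.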
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SpmdFullToShardShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SpmdFullToShardShape.java index b717ed45b8b..e8cfd9a709d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SpmdFullToShardShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SpmdFullToShardShape.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -40,6 +42,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SpmdFullToShardShape.OP_NAME, + inputsClass = SpmdFullToShardShape.Inputs.class +) @Operator( group = "xla" ) @@ -51,8 +57,8 @@ public final class SpmdFullToShardShape extends RawOp implement private Output output; - private SpmdFullToShardShape(Operation operation) { - super(operation); + public SpmdFullToShardShape(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -91,6 +97,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SpmdFullToShardShape.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SpmdShardToFullShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SpmdShardToFullShape.java index 3a86c8b7b26..5cba20d5891 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SpmdShardToFullShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SpmdShardToFullShape.java @@ -28,6 +28,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -40,6 +42,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = SpmdShardToFullShape.OP_NAME, + inputsClass = SpmdShardToFullShape.Inputs.class +) @Operator( group = "xla" ) @@ -51,8 +57,8 @@ public final class SpmdShardToFullShape extends RawOp implement private Output output; - private SpmdShardToFullShape(Operation operation) { - super(operation); + public SpmdShardToFullShape(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -93,6 +99,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = SpmdShardToFullShape.class + ) public static class Inputs extends RawOpInputs> { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Svd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Svd.java index e6049fbf8fd..60ef26b9e5e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Svd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Svd.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; 
import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ * * @param data type for {@code s} output */ +@OpMetadata( + opType = Svd.OP_NAME, + inputsClass = Svd.Inputs.class +) @Operator( group = "xla" ) @@ -54,8 +60,8 @@ public final class Svd extends RawOp { private Output v; - private Svd(Operation operation) { - super(operation); + public Svd(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; s = operation.output(outputIdx++); u = operation.output(outputIdx++); @@ -117,6 +123,9 @@ public Output v() { return v; } + @OpInputsMetadata( + outputsClass = Svd.class + ) public static class Inputs extends RawOpInputs> { /** * the input tensor. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/While.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/While.java index 80002335b2b..0fe6c0598d6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/While.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/While.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ /** * output = input; While (Cond(output)) { output = Body(output) } */ +@OpMetadata( + opType = While.OP_NAME, + inputsClass = While.Inputs.class +) @Operator( group = "xla" ) @@ -50,8 +56,8 @@ public final class While extends RawOp implements Iterable> { private List> output; @SuppressWarnings("unchecked") - private While(Operation operation) { - super(operation); + public While(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputLength = operation.outputListLength("output"); output = Arrays.asList(operation.outputList(outputIdx, outputLength)); @@ -102,6 +108,9 @@ public Iterator> iterator() { return (Iterator) output.iterator(); } + @OpInputsMetadata( + outputsClass = While.class + ) public static class Inputs extends RawOpInputs { /** * A list of input tensors whose types are T. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaHostCompute.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaHostCompute.java index fcb5d6234e0..603979a7195 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaHostCompute.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaHostCompute.java @@ -32,6 +32,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -39,6 +41,10 @@ /** * A pseudo-op to represent host-side computation in an XLA program. 
*/ +@OpMetadata( + opType = XlaHostCompute.OP_NAME, + inputsClass = XlaHostCompute.Inputs.class +) @Operator( group = "xla" ) @@ -51,8 +57,8 @@ public final class XlaHostCompute extends RawOp implements Iterable> outputs; @SuppressWarnings("unchecked") - private XlaHostCompute(Operation operation) { - super(operation); + public XlaHostCompute(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputsLength = operation.outputListLength("outputs"); outputs = Arrays.asList(operation.outputList(outputIdx, outputsLength)); @@ -229,6 +235,9 @@ public Options tpuCore(Long tpuCore) { } } + @OpInputsMetadata( + outputsClass = XlaHostCompute.class + ) public static class Inputs extends RawOpInputs { /** * A list of tensors that will be sent to the host. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaLaunch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaLaunch.java index d8c864cea8f..5098d6d7190 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaLaunch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaLaunch.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -38,6 +40,10 @@ /** * XLA Launch Op. For use by the XLA JIT only. */ +@OpMetadata( + opType = XlaLaunch.OP_NAME, + inputsClass = XlaLaunch.Inputs.class +) @Operator( group = "xla" ) @@ -50,8 +56,8 @@ public final class XlaLaunch extends RawOp implements Iterable> { private List> results; @SuppressWarnings("unchecked") - private XlaLaunch(Operation operation) { - super(operation); + public XlaLaunch(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int resultsLength = operation.outputListLength("results"); results = Arrays.asList(operation.outputList(outputIdx, resultsLength)); @@ -99,6 +105,9 @@ public Iterator> iterator() { return (Iterator) results.iterator(); } + @OpInputsMetadata( + outputsClass = XlaLaunch.class + ) public static class Inputs extends RawOpInputs { /** * The constants input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaRecvFromHost.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaRecvFromHost.java index 289a7ff2853..1be2ef10874 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaRecvFromHost.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaRecvFromHost.java @@ -29,6 +29,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +44,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = XlaRecvFromHost.OP_NAME, + inputsClass = XlaRecvFromHost.Inputs.class +) @Operator( group = "xla" ) @@ -53,8 +59,8 @@ public final class XlaRecvFromHost extends RawOp implements Ope private Output output; - private XlaRecvFromHost(Operation operation) { - 
super(operation); + public XlaRecvFromHost(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -95,6 +101,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = XlaRecvFromHost.class + ) public static class Inputs extends RawOpInputs> { /** * The Toutput attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSendToHost.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSendToHost.java index 41478534837..535215c5a70 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSendToHost.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSendToHost.java @@ -26,6 +26,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -36,6 +38,10 @@ * Tinput: element type for input. * key: A unique identifier for this region used to match up host transfers. */ +@OpMetadata( + opType = XlaSendToHost.OP_NAME, + inputsClass = XlaSendToHost.Inputs.class +) @Operator( group = "xla" ) @@ -45,8 +51,8 @@ public final class XlaSendToHost extends RawOp { */ public static final String OP_NAME = "XlaSendToHost"; - private XlaSendToHost(Operation operation) { - super(operation); + public XlaSendToHost(Operation operation) { + super(operation, OP_NAME); } /** @@ -67,6 +73,9 @@ public static XlaSendToHost create(Scope scope, Operand input, return new XlaSendToHost(opBuilder.build()); } + @OpInputsMetadata( + outputsClass = XlaSendToHost.class + ) public static class Inputs extends RawOpInputs { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSetBound.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSetBound.java index 7c51caf3b3b..630988d4d4a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSetBound.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaSetBound.java @@ -27,6 +27,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.types.TInt32; @@ -36,6 +38,10 @@ * returns the same value. 
* */ +@OpMetadata( + opType = XlaSetBound.OP_NAME, + inputsClass = XlaSetBound.Inputs.class +) @Operator( group = "xla" ) @@ -47,8 +53,8 @@ public final class XlaSetBound extends RawOp implements Operand { private Output output; - private XlaSetBound(Operation operation) { - super(operation); + public XlaSetBound(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; output = operation.output(outputIdx++); } @@ -85,6 +91,9 @@ public Output asOutput() { return output; } + @OpInputsMetadata( + outputsClass = XlaSetBound.class + ) public static class Inputs extends RawOpInputs { /** * The input input diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaVariadicReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaVariadicReduce.java index 15523c32a6e..4ae22b1c842 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaVariadicReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaVariadicReduce.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.family.TType; @@ -42,6 +44,10 @@ * * @param data type for {@code output} output */ +@OpMetadata( + opType = XlaVariadicReduce.OP_NAME, + inputsClass = XlaVariadicReduce.Inputs.class +) @Operator( group = "xla" ) @@ -54,8 +60,8 @@ public final class XlaVariadicReduce extends RawOp implements I private List> output; @SuppressWarnings("unchecked") - private XlaVariadicReduce(Operation operation) { - super(operation); + public XlaVariadicReduce(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputLength = operation.outputListLength("output"); output = Arrays.asList((Output[]) operation.outputList(outputIdx, outputLength)); @@ -106,6 +112,9 @@ public Iterator> iterator() { return (Iterator) output.iterator(); } + @OpInputsMetadata( + outputsClass = XlaVariadicReduce.class + ) public static class Inputs extends RawOpInputs> { /** * the input tensor(s) diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaVariadicSort.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaVariadicSort.java index 8570842977b..1f7bfc1d7fb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaVariadicSort.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaVariadicSort.java @@ -31,6 +31,8 @@ import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.op.annotation.Operator; import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; @@ -43,6 +45,10 @@ *

    Sorts one or more tensors, with support for custom comparator, dimension, and * is_stable attributes. */ +@OpMetadata( + opType = XlaVariadicSort.OP_NAME, + inputsClass = XlaVariadicSort.Inputs.class +) @Operator( group = "xla" ) @@ -55,8 +61,8 @@ public final class XlaVariadicSort extends RawOp implements Iterable> outputs; @SuppressWarnings("unchecked") - private XlaVariadicSort(Operation operation) { - super(operation); + public XlaVariadicSort(Operation operation) { + super(operation, OP_NAME); int outputIdx = 0; int outputsLength = operation.outputListLength("outputs"); outputs = Arrays.asList(operation.outputList(outputIdx, outputsLength)); @@ -103,6 +109,9 @@ public Iterator> iterator() { return (Iterator) outputs.iterator(); } + @OpInputsMetadata( + outputsClass = XlaVariadicSort.class + ) public static class Inputs extends RawOpInputs { /** * A list of {@code Tensor} of identical shape but possibly different types. diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/BaseGradientAdapter.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/BaseGradientAdapter.java new file mode 100644 index 00000000000..41ee090e15d --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/BaseGradientAdapter.java @@ -0,0 +1,90 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +============================================================================== +*/ +package org.tensorflow; + +import java.util.ArrayList; +import java.util.List; +import org.bytedeco.javacpp.PointerScope; +import org.tensorflow.internal.c_api.GradFunc; +import org.tensorflow.internal.c_api.NativeOutput; +import org.tensorflow.internal.c_api.NativeOutputVector; +import org.tensorflow.internal.c_api.Node; +import org.tensorflow.internal.c_api.TF_Operation; + +/** Helper base class for custom gradient adapters INTERNAL USE ONLY */ +public abstract class BaseGradientAdapter extends GradFunc { + + protected BaseGradientAdapter() { + super(); + } + + /** + * Convert an array of native outputs to a list of {@link Output}s. + * + * @param g the graph the outputs are in + * @param nativeOutputs the native outputs to convert + * @return a list of Outputs + */ + protected static List> fromNativeOutputs(Graph g, NativeOutputVector nativeOutputs) { + List> gradInputs = new ArrayList<>((int) nativeOutputs.size()); + for (int i = 0; i < nativeOutputs.size(); i++) { + NativeOutput output = nativeOutputs.get(i); + gradInputs.add(new Output<>(getGraphOp(g, output.node()), output.index())); + } + return gradInputs; + } + + /** + * Put the Java outputs into the array of native outputs, resizing it to the necessary size. 
+ * + * @param outputs the outputs to put + * @param nativeOutputs the native array to put the outputs into + */ + protected static void putToNativeOutputs( + List> outputs, NativeOutputVector nativeOutputs) { + nativeOutputs.resize(outputs.size()); + for (int i = 0; i < outputs.size(); i++) { + Output output = outputs.get(i).asOutput(); + Node node = ((GraphOperation) output.op()).getUnsafeNativeHandle().node(); + nativeOutputs.put(i, new NativeOutput(node, output.index())); + } + } + + /** + * Make a {@link GraphOperation} from a native {@link Node} + * + * @param g the graph the operation is in + * @param node the native node + * @return a graph operation with the underlying native node + */ + protected static GraphOperation getGraphOp(Graph g, Node node) { + try (PointerScope scope = new PointerScope(); + Graph.Reference ref = g.ref()) { + return new GraphOperation(g, new TF_Operation(node)); + } + } + + /** + * Use builders without locking. This should only be used during custom gradient building. + * + *

    The graph locks are not re-entrant, so attempting to add an op to a graph that has been + * locked by the gradient builder will fail without this. + */ + protected static void useDangerousLockedBuilders(Graph g, boolean dangerous) { + g.setDangerousGradientBuilder(dangerous); + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerSession.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerSession.java index f141e9dc551..ba90aa53f10 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerSession.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerSession.java @@ -29,6 +29,7 @@ import org.tensorflow.internal.c_api.TFE_Context; import org.tensorflow.internal.c_api.TFE_ContextOptions; import org.tensorflow.internal.c_api.TF_Status; +import org.tensorflow.op.OpScope; import org.tensorflow.op.Scope; import org.tensorflow.op.core.Assign; import org.tensorflow.op.core.Placeholder; @@ -397,7 +398,7 @@ void detach(Pointer... resources) { private final WeakPointerScope nativeResources; private TFE_Context nativeHandle; - private final Scope baseScope = new Scope(this); + private final Scope baseScope = new OpScope(this); private EagerSession(Options options) { this.nativeResources = new WeakPointerScope(); diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Graph.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Graph.java index 83aeb1a409b..18d8186d0b6 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Graph.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Graph.java @@ -39,6 +39,7 @@ import java.util.List; import java.util.Queue; import java.util.Set; +import java.util.WeakHashMap; import java.util.stream.Collectors; import org.bytedeco.javacpp.BytePointer; import org.bytedeco.javacpp.Pointer; @@ -46,6 +47,7 @@ import org.bytedeco.javacpp.PointerScope; import org.bytedeco.javacpp.SizeTPointer; import org.tensorflow.exceptions.TensorFlowException; +import org.tensorflow.internal.c_api.NativeGraphPointer; import org.tensorflow.internal.c_api.TF_Buffer; import org.tensorflow.internal.c_api.TF_Function; import org.tensorflow.internal.c_api.TF_Graph; @@ -56,6 +58,7 @@ import org.tensorflow.internal.c_api.TF_WhileParams; import org.tensorflow.ndarray.StdArrays; import org.tensorflow.op.Op; +import org.tensorflow.op.OpScope; import org.tensorflow.op.Ops; import org.tensorflow.op.Scope; import org.tensorflow.op.core.Constant; @@ -81,14 +84,14 @@ public final class Graph implements ExecutionEnvironment, AutoCloseable { /** Create an empty Graph. */ public Graph() { - nativeHandle = allocate(); - this.baseScope = new Scope(this); + this(allocate()); } /** Create a Graph from an existing handle (takes ownership). */ Graph(TF_Graph nativeHandle) { this.nativeHandle = nativeHandle; - this.baseScope = new Scope(this); + this.baseScope = new OpScope(this); + allGraphs.add(this); } Graph(TF_Graph nativeHandle, SaverDef saverDef) { @@ -119,6 +122,7 @@ public void close() { } delete(nativeHandle); nativeHandle = null; + allGraphs.remove(this); } } @@ -215,7 +219,7 @@ public Output outputOrThrow(String output) { *
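Taken together, the BaseGradientAdapter helpers above are meant to bracket the Java side of a native gradient callback: resolve the Java Graph from the native graph pointer (Graph.findGraphForPointer, added further down in this patch), wrap the forward node, convert the incoming native outputs, build the gradient ops while the locked builders are enabled, and write the results back. A minimal sketch under those assumptions follows; the GradFunc native entry point and its exact signature live elsewhere in this patch and are not reproduced here, so the buildGradient method below is hypothetical, and the generic signatures of the helpers (dropped by this plain-text rendering) are assumed:

  import java.util.ArrayList;
  import java.util.List;
  import org.tensorflow.BaseGradientAdapter;
  import org.tensorflow.Graph;
  import org.tensorflow.GraphOperation;
  import org.tensorflow.Operand;
  import org.tensorflow.Output;
  import org.tensorflow.internal.c_api.NativeGraphPointer;
  import org.tensorflow.internal.c_api.NativeOutputVector;
  import org.tensorflow.internal.c_api.Node;
  import org.tensorflow.op.Ops;

  public abstract class PassThroughGradient extends BaseGradientAdapter {

    // Hypothetical helper: assumes the native callback has already handed over the
    // graph pointer, the forward node and the upstream gradients.
    protected void buildGradient(
        NativeGraphPointer graphPointer,
        Node forwardNode,
        NativeOutputVector gradInputs,
        NativeOutputVector gradOutputs) {
      Graph g = Graph.findGraphForPointer(graphPointer);     // native graph -> Java Graph
      GraphOperation forwardOp = getGraphOp(g, forwardNode); // forward op; a real gradient would
                                                             // read its inputs and attributes
      List<Output<?>> upstream = fromNativeOutputs(g, gradInputs);

      // The graph is already locked by the native gradient builder, so new ops must be
      // created through the unlocked ("dangerous") builder path.
      useDangerousLockedBuilders(g, true);
      try {
        Ops tf = Ops.create(g);
        List<Operand<?>> results = new ArrayList<>();
        for (Output<?> grad : upstream) {
          results.add(tf.identity(grad)); // pass-through gradient, one per upstream gradient
        }
        putToNativeOutputs(results, gradOutputs);
      } finally {
        useDangerousLockedBuilders(g, false);
      }
    }
  }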

    The order of iteration is unspecified. Consumers of the iterator will receive no * notification should the underlying graph change during iteration. */ - public Iterator operations() { + public Iterator operations() { return new OperationIterator(this); } @@ -366,12 +370,22 @@ public synchronized Set subgraphFrom(Set> inputs) { return downstream; } + /** + * Returns a builder to add {@link Operation}s to the Graph. + * + * @param type of the Operation (i.e., identifies the computation to be performed) + * @param name to refer to the created Operation in the graph. + * @param scope the scope to use for the operation + * @return an {@link OperationBuilder}, which will add the Operation to the graph when {@link + * OperationBuilder#build()} is invoked. If {@link OperationBuilder#build()} is not invoked, + * then some resources may leak. + */ @Override public GraphOperationBuilder opBuilder(String type, String name, Scope scope) { if (!isOpEnabled(type)) { throw new IllegalArgumentException("Op " + type + " is not valid in graph mode."); } - return new GraphOperationBuilder(this, type, name, scope); + return new GraphOperationBuilder(this, type, name, scope, dangerousGradientBuilder); } @Override @@ -652,7 +666,8 @@ public boolean hasInitializers() { * @param dx if not null, the partial derivatives of some loss function {@code L} w.r.t. {@code y} * @return the partial derivatives {@code dy} with the size of {@code x} */ - public Output[] addGradients(String prefix, Output[] y, Output[] x, Output[] dx) { + public synchronized Output[] addGradients( + String prefix, Output[] y, Output[] x, Output[] dx) { Output[] dy = new Output[x.length]; final TF_Operation[] yHandles = new TF_Operation[y.length]; final int[] yIndices = new int[y.length]; @@ -847,6 +862,8 @@ public Output[] whileLoop( * Return the {@link SaverDef} instance used to save the state of all variables present in this * graph. * + *

    + * *

    The first time this method is called it builds the {@link SaverDef}. If this graph already * contains a "save/restore_all" operation then it is assumed to contain all necessary saving and * restoring operations. If that operation does not exist then the graph is mutated to add all the @@ -882,9 +899,21 @@ synchronized SaverDef saverDef() { private SaverDef saverDef; private final Scope baseScope; + private boolean dangerousGradientBuilder; + private final Set initializers = Collections.synchronizedSet(new LinkedHashSet<>()); private boolean newInitializers = false; + /** + * Use builders without locking. This should only be used during custom gradient building. + * + *

    The graph locks are not re-entrant, so attempting to add an op to a graph that has been + * locked by the gradient builder will fail without this. + */ + synchronized void setDangerousGradientBuilder(boolean dangerous) { + dangerousGradientBuilder = dangerous; + } + // Related native objects (such as the TF_Operation object backing an Operation instance) // have a validity tied to that of the Graph. The handles to those native objects are not // valid after Graph.close() has been invoked. @@ -930,7 +959,7 @@ Reference ref() { return new Reference(); } - private static final class OperationIterator implements Iterator { + private static final class OperationIterator implements Iterator { OperationIterator(Graph g) { this.graph = g; @@ -964,8 +993,8 @@ public boolean hasNext() { } @Override - public Operation next() { - Operation rhett = this.operation; + public GraphOperation next() { + GraphOperation rhett = this.operation; this.advance(); return rhett; } @@ -976,7 +1005,7 @@ public void remove() { } private final Graph graph; - private Operation operation; + private GraphOperation operation; private int position; } @@ -1246,7 +1275,7 @@ private static SaverDef addVariableSaver(Graph graph) { List> varOutputs = new ArrayList<>(); List> varTypes = new ArrayList<>(); - for (Iterator iter = graph.operations(); iter.hasNext(); ) { + for (Iterator iter = graph.operations(); iter.hasNext(); ) { Operation op = iter.next(); if (op.type().equals("VariableV2")) { varNames.add(op.name()); @@ -1287,6 +1316,25 @@ private static SaverDef addVariableSaver(Graph graph) { .build(); } + private static Set allGraphs = + Collections.synchronizedSet(Collections.newSetFromMap(new WeakHashMap<>())); + + /** + * Find the graph with the matching underlying native pointer. + * + * @return the graph if there is one, else null. + */ + public static Graph findGraphForPointer(NativeGraphPointer pointer) { + for (Graph g : allGraphs) { + if (g.nativeHandle != null + && !g.nativeHandle.isNull() + && g.nativeHandle.graph().equals(pointer)) { + return g; + } + } + return null; + } + static { try { // Ensure that TensorFlow native library and classes are ready to be used diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/GraphOperation.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/GraphOperation.java index d811139e9a2..4c01774f759 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/GraphOperation.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/GraphOperation.java @@ -194,6 +194,7 @@ public String device() { /** Get the number of inputs to the op, not including control inputs. */ public int numInputs() { + requireHandle(unsafeNativeHandle); return TF_OperationNumInputs(getUnsafeNativeHandle()); } @@ -212,8 +213,7 @@ public Output input(int idx) { try (PointerScope scope = new PointerScope()) { TF_Input input = new TF_Input().oper(getUnsafeNativeHandle()).index(idx); TF_Output output = TF_OperationInput(input); - String opName = TF_OperationName(output.oper()).getString(); - return graph.operation(opName).output(output.index()); + return new GraphOperation(graph, output.oper()).output(output.index()); } } @@ -244,6 +244,7 @@ public Output[] inputList(int idx, int length) { /** Get the op's inputs, not including control inputs. 
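With operations() now returning an Iterator<GraphOperation> rather than the plain Operation interface, graph-walking code can use the GraphOperation-specific queries that these hunks also guard with requireHandle checks, and no casting is needed. A small usage sketch, not taken from the patch:

  import java.util.Iterator;
  import org.tensorflow.Graph;
  import org.tensorflow.GraphOperation;
  import org.tensorflow.op.Ops;

  public final class ListGraphOps {
    public static void main(String[] args) {
      try (Graph g = new Graph()) {
        Ops tf = Ops.create(g);
        tf.math.add(tf.constant(1), tf.constant(2));

        for (Iterator<GraphOperation> it = g.operations(); it.hasNext(); ) {
          GraphOperation op = it.next();
          System.out.println(op.name() + " (" + op.type() + ") feeds "
              + op.numConsumers() + " consumer(s)");
        }
      }
    }
  }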
*/ public List> inputs() { + requireHandle(unsafeNativeHandle); try (PointerScope scope = new PointerScope()) { int numInputs = numInputs(); TF_Output handles = new TF_Output(numInputs); @@ -269,6 +270,7 @@ public List> inputs() { * @param index the output to look for usages of */ public int numConsumers(int index) { + requireHandle(unsafeNativeHandle); try (PointerScope scope = new PointerScope()) { TF_Output output = new TF_Output().oper(getUnsafeNativeHandle()).index(index); return TF_OperationOutputNumConsumers(output); @@ -282,6 +284,7 @@ public int numConsumers(int index) { * @param index the output to look for usages of */ public Set consumers(int index) { + requireHandle(unsafeNativeHandle); try (PointerScope scope = new PointerScope()) { TF_Output output = new TF_Output().oper(getUnsafeNativeHandle()).index(index); int numConsumers = numConsumers(index); @@ -305,6 +308,7 @@ public Set consumers(int index) { * dependencies. */ public int numConsumers() { + requireHandle(unsafeNativeHandle); int all = 0; for (int i = 0; i < numOutputs(); i++) { all += numConsumers(i); @@ -316,6 +320,7 @@ public int numConsumers() { * Get the ops that use any of this op's outputs as an input, not including control dependencies. */ public Set consumers() { + requireHandle(unsafeNativeHandle); Set all = new LinkedHashSet<>(); for (int i = 0; i < numOutputs(); i++) { all.addAll(consumers(i)); @@ -325,6 +330,7 @@ public Set consumers() { /** Get the number of control inputs for this op. */ public int numControlInputs() { + requireHandle(unsafeNativeHandle); try (PointerScope scope = new PointerScope()) { return TF_OperationNumControlInputs(getUnsafeNativeHandle()); } @@ -332,6 +338,7 @@ public int numControlInputs() { /** Get the control inputs of this op. */ public Set controlInputs() { + requireHandle(unsafeNativeHandle); try (PointerScope scope = new PointerScope()) { int numInputs = numControlInputs(); PointerPointer handles = new PointerPointer<>(numInputs); @@ -350,6 +357,7 @@ public Set controlInputs() { /** Get the number of ops with this op as a control dependency. */ public int numControlConsumers() { + requireHandle(unsafeNativeHandle); try (PointerScope scope = new PointerScope()) { return TF_OperationNumControlOutputs(getUnsafeNativeHandle()); } @@ -357,6 +365,7 @@ public int numControlConsumers() { /** Get the ops with this op as a control dependency. */ public Set controlConsumers() { + requireHandle(unsafeNativeHandle); try (PointerScope scope = new PointerScope()) { int numConsumers = numControlConsumers(); PointerPointer handles = new PointerPointer<>(numConsumers); @@ -373,7 +382,12 @@ public Set controlConsumers() { } } - TF_Operation getUnsafeNativeHandle() { + /** + * Get the native handle of this operation. + * + *

    No liveness or non-null checking is done, the operation may have been deallocated. + */ + public TF_Operation getUnsafeNativeHandle() { return unsafeNativeHandle; } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/GraphOperationBuilder.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/GraphOperationBuilder.java index 63434a9638b..64995612d7d 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/GraphOperationBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/GraphOperationBuilder.java @@ -19,7 +19,10 @@ import static org.tensorflow.internal.c_api.global.tensorflow.TF_AddInput; import static org.tensorflow.internal.c_api.global.tensorflow.TF_AddInputList; import static org.tensorflow.internal.c_api.global.tensorflow.TF_FinishOperation; +import static org.tensorflow.internal.c_api.global.tensorflow.TF_FinishOperationLocked; import static org.tensorflow.internal.c_api.global.tensorflow.TF_NewOperation; +import static org.tensorflow.internal.c_api.global.tensorflow.TF_NewOperationLocked; +import static org.tensorflow.internal.c_api.global.tensorflow.TF_OperationName; import static org.tensorflow.internal.c_api.global.tensorflow.TF_SetAttrBool; import static org.tensorflow.internal.c_api.global.tensorflow.TF_SetAttrBoolList; import static org.tensorflow.internal.c_api.global.tensorflow.TF_SetAttrFloat; @@ -67,14 +70,17 @@ /** An {@link OperationBuilder} for adding {@link GraphOperation}s to a {@link Graph}. */ public final class GraphOperationBuilder implements OperationBuilder { - GraphOperationBuilder(Graph graph, String type, String name, Scope scope) { + GraphOperationBuilder( + Graph graph, String type, String name, Scope scope, boolean dangerousGradientBuilder) { this.graph = graph; this.scope = scope; - Graph.Reference r = graph.ref(); - try { - this.unsafeNativeHandle = allocate(r.nativeHandle(), type, name); - } finally { - r.close(); + this.dangerousGradientBuilder = dangerousGradientBuilder; + try (Graph.Reference r = graph.ref()) { + if (dangerousGradientBuilder) { + this.unsafeNativeHandle = allocateDangerousGradient(r.nativeHandle(), type, name); + } else { + this.unsafeNativeHandle = allocate(r.nativeHandle(), type, name); + } } } @@ -86,14 +92,17 @@ public final class GraphOperationBuilder implements OperationBuilder { @Override public GraphOperation build() { scope.apply(this); - Graph.Reference r = graph.ref(); - try { - GraphOperation op = new GraphOperation(graph, finish(unsafeNativeHandle)); + try (Graph.Reference r = graph.ref()) { + TF_Operation built; + if (dangerousGradientBuilder) { + built = finishDangerousGradient(r.nativeHandle(), unsafeNativeHandle); + } else { + built = finish(unsafeNativeHandle); + } + GraphOperation op = new GraphOperation(graph, built); unsafeNativeHandle = null; scope.onOpCreated(op); return op; - } finally { - r.close(); } } @@ -392,6 +401,14 @@ public OperationBuilder setAttr(String name, AttrValue value) { private final Graph graph; private final Scope scope; + /** + * Use builders without locking. This should only be used during custom gradient building. + * + *

    The graph locks are not re-entrant, so attempting to add an op to a graph that has been + * locked by the gradient builder will fail without this. + */ + private final boolean dangerousGradientBuilder; + private static void requireHandle(Pointer handle) { if (handle == null || handle.isNull()) { throw new IllegalStateException("Operation has already been built"); @@ -419,6 +436,20 @@ private static TF_OperationDescription allocate(TF_Graph graphHandle, String typ return TF_NewOperation(graphHandle, type, name); } + /** + * Use builders without locking. This should only be used during custom gradient building. + * + *

    The graph locks are not re-entrant, so attempting to add an op to a graph that has been + * locked by the gradient builder will fail without this. + */ + private static TF_OperationDescription allocateDangerousGradient( + TF_Graph graphHandle, String type, String name) { + if (graphHandle == null || graphHandle.isNull()) { + throw new IllegalStateException("close() has been called on the Graph"); + } + return TF_NewOperationLocked(graphHandle, type, name); + } + private static TF_Operation finish(TF_OperationDescription handle) { requireHandle(handle); @@ -430,6 +461,24 @@ private static TF_Operation finish(TF_OperationDescription handle) { } } + /** + * Use builders without locking. This should only be used during custom gradient building. + * + *
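These unlocked variants are only driven by the gradient adapters added later in this patch, which switch a graph over to them just around the user-supplied gradient function, roughly:

    // pattern from RawGradientAdapter / TypedGradientAdapter (sketch)
    BaseGradientAdapter.useDangerousLockedBuilders(g, true);   // builders now use TF_NewOperationLocked / TF_FinishOperationLocked
    List<Operand<?>> gradOutputs = gradient.call(tf, operation, gradInputs);
    BaseGradientAdapter.useDangerousLockedBuilders(g, false);  // back to the normal locking builders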

    The graph locks are not re-entrant, so attempting to add an op to a graph that has been + * locked by the gradient builder will fail without this. + */ + private static TF_Operation finishDangerousGradient(TF_Graph g, TF_OperationDescription handle) { + requireHandle(handle); + + try (PointerScope scope = new PointerScope()) { + TF_Status status = TF_Status.newStatus(); + TF_Operation op = TF_FinishOperationLocked(handle, status); + status.throwExceptionIfNotOK(); + g.name_map().erase(TF_OperationName(op)); + return op; + } + } + private static void addInput(TF_OperationDescription handle, TF_Operation opHandle, int index) { try (PointerScope scope = new PointerScope()) { TF_Output out = new TF_Output(); diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorFlow.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorFlow.java index 23f4c62bc7f..53748b82271 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorFlow.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorFlow.java @@ -1,18 +1,18 @@ /* Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ======================================================================= - */ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+======================================================================= +*/ package org.tensorflow; import static org.tensorflow.internal.c_api.global.tensorflow.TF_DeleteBuffer; @@ -23,13 +23,25 @@ import static org.tensorflow.internal.c_api.global.tensorflow.TF_Version; import com.google.protobuf.InvalidProtocolBufferException; +import java.util.Collections; +import java.util.IdentityHashMap; import java.util.Set; import java.util.stream.Collectors; +import org.bytedeco.javacpp.PointerPointer; import org.bytedeco.javacpp.PointerScope; import org.tensorflow.exceptions.TensorFlowException; +import org.tensorflow.internal.c_api.GradFunc; +import org.tensorflow.internal.c_api.GradOpRegistry; +import org.tensorflow.internal.c_api.NativeStatus; import org.tensorflow.internal.c_api.TF_Buffer; import org.tensorflow.internal.c_api.TF_Library; import org.tensorflow.internal.c_api.TF_Status; +import org.tensorflow.op.CustomGradient; +import org.tensorflow.op.RawCustomGradient; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.math.Add; import org.tensorflow.proto.framework.OpList; /** Static utility methods describing the TensorFlow runtime. */ @@ -138,4 +150,91 @@ private TensorFlow() {} throw e; } } + + /** + * Keeps references to custom gradient functions to prevent them from being deallocated. All + * access of this set should be synchronized on this class. + * + *

    Required for correctness. + */ + private static final Set<GradFunc> gradientFuncs = + Collections.newSetFromMap(new IdentityHashMap<>()); + + private static synchronized boolean hasGradient(String opType) { + try (PointerScope scope = new PointerScope()) { + NativeStatus status = + GradOpRegistry.Global().Lookup(opType, new GradFunc(new PointerPointer<>(1))); + return status.ok(); + } + } + + /** + * Register a custom gradient function for ops of {@code opType} type. + * + *

    Creates the gradient based on a {@link GraphOperation}. To operate on the op inputs class + * instead, use {@link CustomGradient}. + * + *
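A minimal registration sketch (usual imports assumed; "MyCustomOp" is a hypothetical op type with two inputs, and the constant zero gradients stand in for a real formula, following the pattern used in CustomGradientTest later in this patch):

    boolean registered =
        TensorFlow.registerCustomGradient(
            "MyCustomOp",
            (tf, op, gradInputs) ->
                // return one gradient per op input; gradInputs are the gradients of the op's outputs
                Arrays.asList(tf.constant(0f), tf.constant(0f)));
    // registered is false if a gradient (built-in or custom) already exists for this op type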

    Note that this only works with graph gradients, and will eventually be deprecated in favor + * of unified gradient support once it is fully supported by tensorflow core. + * + * @param opType the type of op to register the gradient for. Should usually be an {@code OP_NAME} + * field, i.e. {@link Add#OP_NAME}. + * @param gradient the gradient function to use + * @return {@code true} if the gradient was registered, {@code false} if there was already a + * gradient registered for this op + */ + public static synchronized boolean registerCustomGradient( + String opType, RawCustomGradient gradient) { + if (hasGradient(opType)) { + return false; + } + GradFunc g = RawCustomGradient.adapter(gradient); + GradOpRegistry.Global().Register(opType, g); + gradientFuncs.add(g); + return true; + } + + /** + * Register a custom gradient function for ops of {@code inputClass}'s op type. The actual op type + * is detected from the class's {@link OpInputsMetadata} annotation. As such, it only works on + * generated op classes or custom op classes with the correct annotations. To operate on the + * {@link org.tensorflow.GraphOperation} directly use {@link RawCustomGradient}. + * + * @param inputClass the inputs class of op to register the gradient for. + * @param gradient the gradient function to use + * @return {@code true} if the gradient was registered, {@code false} if there was already a + * gradient registered for this op + * @throws IllegalArgumentException if {@code inputClass} is not annotated with {@link + * OpInputsMetadata} or the op class is not annotated with {@link OpMetadata}. + */ + public static synchronized > boolean registerCustomGradient( + Class inputClass, CustomGradient gradient) { + OpInputsMetadata metadata = inputClass.getAnnotation(OpInputsMetadata.class); + + if (metadata == null) { + throw new IllegalArgumentException( + "Inputs Class " + + inputClass + + " does not have a OpInputsMetadata annotation. Was it generated by tensorflow/java? If it was, this is a bug."); + } + OpMetadata outputMetadata = metadata.outputsClass().getAnnotation(OpMetadata.class); + + if (outputMetadata == null) { + throw new IllegalArgumentException( + "Op Class " + + metadata.outputsClass() + + " does not have a OpMetadata annotation. Was it generated by tensorflow/java? If it was, this is a bug."); + } + + String opType = outputMetadata.opType(); + + if (hasGradient(opType)) { + return false; + } + + GradFunc g = CustomGradient.adapter(gradient, inputClass); + GradOpRegistry.Global().Register(opType, g); + gradientFuncs.add(g); + return true; + } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/presets/tensorflow.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/presets/tensorflow.java index 369ccabce43..da9921daea3 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/presets/tensorflow.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/presets/tensorflow.java @@ -1,4 +1,5 @@ -/* Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. +/* +Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -15,10 +16,17 @@ */ package org.tensorflow.internal.c_api.presets; +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; import java.util.List; import org.bytedeco.javacpp.ClassProperties; import org.bytedeco.javacpp.LoadEnabled; import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.annotation.Adapter; +import org.bytedeco.javacpp.annotation.Cast; import org.bytedeco.javacpp.annotation.NoException; import org.bytedeco.javacpp.annotation.Platform; import org.bytedeco.javacpp.annotation.Properties; @@ -46,8 +54,16 @@ // "tensorflow/c/env.h", "tensorflow/c/kernels.h", "tensorflow/c/ops.h", + "tensorflow_adapters.h", "tensorflow/c/eager/c_api.h", - "tensorflow/c/eager/c_api_experimental.h" + "tensorflow/c/eager/c_api_experimental.h", + "tensorflow/cc/framework/scope.h", + "tensorflow/cc/framework/grad_op_registry.h", + "tensorflow/core/platform/status.h", + "tensorflow/core/graph/graph.h", + "tensorflow/c/tf_status_helper.h", + "tensorflow/cc/framework/ops.h", + "tensorflow/c/c_api_internal.h", }, link = "tensorflow_cc@.2", preload = {"iomp5", "mklml", "mklml_intel", "tensorflow_framework@.2"}, @@ -272,12 +288,60 @@ public void init(ClassProperties properties) { @Override public void map(InfoMap infoMap) { infoMap + .put( + new Info("c_api_internal.h") + .linePatterns( + "struct TF_OperationDescription \\{", + "\\};", + "struct TF_Graph \\{", + "\\};", + "struct TF_Operation \\{", + "\\};", + "// Exposed helper functions", + "// End Exposed helper functions")) + .put( + new Info("graph.h") + .linePatterns( + "class Node \\{", "// Stores debug information associated with the Node.")) + .put(new Info("Node").cppTypes("tensorflow::Node").purify()) + .put( + new Info( + "tensorflow::NodeDef", + "tensorflow::OpDef", + "tensorflow::AttrSlice", + "tensorflow::Edge", + "tensorflow::EdgeSet", + "tensorflow::WhileContext", + "tensorflow::NodeProperties", + "protobuf::RepeatedPtrField", + "gtl::iterator_range", + "tensorflow::DataType", + "tensorflow::DataTypeVector", + "tensorflow::Node::set_original_node_names", + "tensorflow::Node::AddAttr", + "tensorflow::Node::ClearAttr", + "tensorflow::Node::input_node") + .skip()) + .put( + new Info("c_api.cc") + .linePatterns( + "// Helper functions -+", + "// Shape functions -+", + "static TF_OperationDescription\\* TF_NewOperationLocked\\(TF_Graph\\* graph,", + "\\}", + "static TF_Operation\\* TF_FinishOperationLocked\\(TF_OperationDescription\\* desc,", + "\\}")) + .put(new Info("OutputTensor", "TensorId", "tensorflow::AttrValue").skip()) .put( new Info("c_api_experimental.h") .linePatterns( "typedef struct TFE_OpAttrs TFE_OpAttrs;", "#define TFE_CUSTOM_DEVICE_VERSION 4")) - .put(new Info("TF_CAPI_EXPORT", "TF_Bool").cppTypes().annotations()) + .put( + new Info("TF_CAPI_EXPORT", "TF_Bool", "TF_GUARDED_BY", "TF_MUST_USE_RESULT") + .cppTypes() + .annotations()) + .put(new Info("TF_DISALLOW_COPY_AND_ASSIGN").skip()) .put( new Info("TF_Buffer::data") .javaText( @@ -305,13 +369,24 @@ public void map(InfoMap infoMap) { .put( new Info("TF_Graph") .pointerTypes("TF_Graph") - .base("org.tensorflow.internal.c_api.AbstractTF_Graph")) + .base("org.tensorflow.internal.c_api.AbstractTF_Graph") + .purify()) + .put(new Info("tensorflow::Graph").javaNames("NativeGraphPointer")) .put( new Info("TF_Graph::graph") - .javaText("public native @MemberGetter @ByRef Graph graph();")) + .javaText("public native 
@MemberGetter @ByRef NativeGraphPointer graph();")) .put( - new Info("TF_Graph::refiner") - .javaText("public native @MemberGetter @ByRef ShapeRefiner refiner();")) + new Info( + "TF_Graph::refiner", + "TF_Graph::mu", + "TF_Graph::sessions", + "TF_Graph::delete_requested") + .skip()) + .put( + new Info("std::unordered_map") + .pointerTypes("NameMap") + .define() + .javaText("public native long erase(@StdString BytePointer key);")) .put( new Info("TF_Function") .pointerTypes("TF_Function") @@ -320,7 +395,8 @@ public void map(InfoMap infoMap) { new Info("TF_ImportGraphDefOptions") .pointerTypes("TF_ImportGraphDefOptions") .base("org.tensorflow.internal.c_api.AbstractTF_ImportGraphDefOptions")) - .put(new Info("TF_Operation", "TF_WhileParams").purify()) + .put(new Info("TF_WhileParams").purify()) + .put(new Info("TF_Operation").purify()) .put( new Info("TF_Operation::node") .javaText("public native @MemberGetter @ByRef Node node();")) @@ -374,6 +450,79 @@ public void map(InfoMap infoMap) { "TFE_MonitoringSampler2", "TFE_CustomDeviceTensorHandle", "TFE_CustomDevice") + .skip()) + .put(new Info("TF_OperationDescription").pointerTypes("TF_OperationDescription").purify()) + .put(new Info("tensorflow::Scope").javaNames("TF_Scope")) + .put(new Info("tensorflow::NodeBuilder").pointerTypes("NodeBuilder")) + .put( + new Info("string", "tensorflow::string") + .annotations("@StdString") + .valueTypes("BytePointer", "String") + .pointerTypes("BytePointer")) + .put(new Info("absl::Span", "tensorflow::gtl::ArraySlice").annotations("@Span")) + .put( + new Info("std::vector").pointerTypes("NativeOutputVector").define()) + .put(new Info("tensorflow::Output").javaNames("NativeOutput")) + .put(new Info("tensorflow::Operation").javaNames("NativeOperation")) + .put(new Info("tensorflow::Status").javaNames("NativeStatus").purify()) + .put( + new Info("tensorflow::int32") + .cast() + .valueTypes("int") + .pointerTypes("IntPointer", "IntBuffer", "int[]")) + .put( + new Info( + "tensorflow::CompositeOpScopes", + "tensorflow::Input", + "tensorflow::InputList", + "tensorflow::OutputHash", + "tensorflow::StackFrame", + "tensorflow::StatusGroup", + "tensorflow::internal::TF_StatusDeleter", + "tensorflow::GraphDef", + "tensorflow::Scope::graph_as_shared_ptr", + "tensorflow::Scope::ToGraphDef", + "tensorflow::Scope::ToGraph", + "tensorflow::Scope::DoShapeInference", + "tensorflow::Scope::DisabledShapeInferenceScope", + "tensorflow::Scope::control_deps", + "tensorflow::Scope::WithKernelLabel", + "tensorflow::Scope::ClearColocation", + "tensorflow::Scope::ColocateWith", + "tensorflow::Scope::ColocateWith", + "tensorflow::Scope::WithXlaCluster", + "tensorflow::Scope::WithAssignedDevice", + "tensorflow::Scope::status", + "tensorflow::Scope::UpdateStatus", + "tensorflow::Status::code", + "tensorflow::CreateOutputWithScope", + "TF_OperationDescription::colocation_constraints", + "tensorflow::Operation::num_inputs", + "tensorflow::Operation::input_type", + "tensorflow::Operation::input", + "tensorflow::Operation::num_outputs", + "tensorflow::Operation::output_type", + "tensorflow::Operation::output", + "tensorflow::Operation::hash", + "tensorflow::Output::hash", + "tensorflow::Output::type", + "tensorflow::Status::GetAllPayloads", + "tensorflow::Status::ReplaceAllPayloads", + "tensorflow::Status::ErasePayload", + "tensorflow::Status::SetPayload", + "tensorflow::Status::GetPayload", + "tensorflow::Node::SetStackTrace", + "tensorflow::Node::GetStackTrace") .skip()); } + + @Documented + @Retention(RetentionPolicy.RUNTIME) + 
@Target({ElementType.METHOD, ElementType.PARAMETER}) + @Cast({"absl::Span", "&"}) + @Adapter("SpanAdapter") + public @interface Span { + + String value() default ""; + } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/CustomGradient.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/CustomGradient.java new file mode 100644 index 00000000000..2d573808222 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/CustomGradient.java @@ -0,0 +1,62 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +============================================================================== +*/ +package org.tensorflow.op; + +import java.util.List; +import org.tensorflow.Operand; +import org.tensorflow.Output; +import org.tensorflow.TensorFlow; +import org.tensorflow.internal.c_api.GradFunc; + +/** + * A custom gradient for ops of type {@link T}. Should be registered using {@link + * TensorFlow#registerCustomGradient(Class, CustomGradient)}. + * + *
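A registration sketch using a generated inputs class (NthElement and the two zero placeholder gradients mirror this patch's CustomGradientTest; a real gradient would compute proper values for the op's two inputs). Custom ops can participate the same way once they carry the @OpMetadata / @OpInputsMetadata annotations introduced later in this patch:

    boolean registered =
        TensorFlow.registerCustomGradient(
            NthElement.Inputs.class,
            (tf, op, gradInputs) ->
                // op is an NthElement.Inputs instance created reflectively from the GraphOperation
                Arrays.asList(tf.constant(0f), tf.constant(0f)));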

    Creates the gradient based on an instance of the op inputs class, which is created using + * reflection. To operate on the {@link org.tensorflow.GraphOperation} directly, use {@link + * RawCustomGradient}. + * + *
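The reflective construction is the one performed by TypedGradientAdapter later in this patch; it boils down to the following sketch, which assumes the inputs class declares a single constructor taking the graph operation:

    @SuppressWarnings("unchecked")
    Constructor<T> ctor = (Constructor<T>) inputClass.getDeclaredConstructors()[0];
    T opInputs = ctor.newInstance(graphOperation); // generated Inputs classes are built from the GraphOperation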

    The type of the op is not checked here, but it is required to match the class given to the + * adapter. + * + * @param <T> the type of op this gradient is for. + */ +@SuppressWarnings("rawtypes") +@FunctionalInterface +public interface CustomGradient<T extends RawOpInputs> { + + /** + * Calculate the gradients for {@code op}. + * + * @param tf the {@link Ops} instance used to create ops + * @param op the op to calculate the gradients of. + * @param gradInputs the gradients of the op's outputs. + * @return the gradients of the op's inputs. + */ + List<Operand<?>> call(Ops tf, T op, List<Output<?>> gradInputs); + + /** + * Create an adapter for the custom gradient so that it can be used by native code. + * + *

    You should not be calling this yourself, use {@link TensorFlow#registerCustomGradient(Class, + * CustomGradient)}. + */ + public static > GradFunc adapter( + CustomGradient gradient, Class opClass) { + return new TypedGradientAdapter(gradient, opClass); + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/GradientScope.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/GradientScope.java new file mode 100644 index 00000000000..5254f86701f --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/GradientScope.java @@ -0,0 +1,159 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +============================================================================== +*/ +package org.tensorflow.op; + +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; +import org.tensorflow.DeviceSpec; +import org.tensorflow.ExecutionEnvironment; +import org.tensorflow.Graph; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.internal.c_api.NativeOperation; +import org.tensorflow.internal.c_api.TF_Scope; + +/** A {@link Scope} implementation backed by a native scope. Only used for gradient declarations. */ +public final class GradientScope implements Scope { + + @Override + public ExecutionEnvironment env() { + return graph; + } + + @Override + public GradientScope withSubScope(String childScopeName) { + return new GradientScope(nativeScope.NewSubScope(childScopeName), graph, null, device); + } + + @Override + public GradientScope withName(String opName) { + return new GradientScope(nativeScope, graph, opName, device); + } + + @Override + public GradientScope withNameAsSubScope(String defaultName) { + if (opName == null) { + return withSubScope(defaultName); + } else { + return withSubScope(opName); + } + } + + @Override + public GradientScope withDevice(DeviceSpec newDevice) { + return new GradientScope( + nativeScope.WithDevice(newDevice.toString()), graph, newDevice.toString()); + } + + @Override + public Scope withInitScope() { + throw new IllegalStateException("Can't add init operations in a gradient scope"); + } + + @Override + public String makeOpName(String defaultName) { + String name = opName != null ? 
opName : defaultName; + return nativeScope.GetUniqueNameForOp(name); + } + + @Override + public String makeUnique(String id) { + return nativeScope.GetUniqueNameForOp(id); + } + + @Override + public void refreshNames() {} + + @Override + public GradientScope withControlDependencies(Iterable controls) { + List controlDeps = + StreamSupport.stream(controls.spliterator(), false).collect(Collectors.toList()); + NativeOperation ops = new NativeOperation(controlDeps.size()); + + for (int i = 0; i < controlDeps.size(); i++) { + Operation op = controlDeps.get(i).op(); + if (!(op instanceof GraphOperation)) { + throw new IllegalArgumentException("Can only add graph ops as control dependencies"); + } + ops.position(i) + .put(new NativeOperation(((GraphOperation) op).getUnsafeNativeHandle().node())); + } + + return new GradientScope( + nativeScope.WithControlDependencies(new NativeOperation(ops)), graph, device); + } + + @Override + public Scope withControlDependencyOps(Iterable controls) { + List controlDeps = + StreamSupport.stream(controls.spliterator(), false).collect(Collectors.toList()); + NativeOperation ops = new NativeOperation(controlDeps.size()); + + for (int i = 0; i < controlDeps.size(); i++) { + Operation op = controlDeps.get(i); + if (!(op instanceof GraphOperation)) { + throw new IllegalArgumentException("Can only add graph ops as control dependencies"); + } + ops.position(i) + .put(new NativeOperation(((GraphOperation) op).getUnsafeNativeHandle().node())); + } + + return new GradientScope( + nativeScope.WithControlDependencies(new NativeOperation(ops)), graph, device); + } + + @Override + public OperationBuilder apply(OperationBuilder builder) { + return builder; + } + + @Override + public void onOpCreated(Operation op) {} + + @Override + public String getDeviceString() { + if (device == null) { + throw new UnsupportedOperationException( + "Can't get device string for gradient scope unless it has been explicitly set"); + } else { + return device; + } + } + + @Override + public boolean isInit() { + return false; + } + + GradientScope(TF_Scope nativeScope, Graph graph, String device) { + this(nativeScope, graph, null, device); + } + + private GradientScope(TF_Scope nativeScope, Graph graph, String opName, String device) { + this.graph = graph; + this.nativeScope = nativeScope; + this.opName = opName; + this.device = device; + } + + private final Graph graph; + private final TF_Scope nativeScope; + private final String opName; + private final String device; +} diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/OpScope.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/OpScope.java new file mode 100644 index 00000000000..4e9aab97082 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/OpScope.java @@ -0,0 +1,153 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +package org.tensorflow.op; + +import java.util.ArrayList; +import java.util.List; +import org.tensorflow.DeviceSpec; +import org.tensorflow.ExecutionEnvironment; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; + +/** + * A Java implementation of {@link Scope}. This is used in all cases except custom gradient + * definitions. + */ +public final class OpScope implements Scope { + + /** + * Create a new top-level scope. + * + * @param env The execution environment used by the scope. + */ + public OpScope(ExecutionEnvironment env) { + this(env, new NameScope(env), new ArrayList<>(), DeviceSpec.newBuilder().build(), false); + } + + @Override + public ExecutionEnvironment env() { + return env; + } + + @Override + public OpScope withSubScope(String childScopeName) { + return new OpScope( + env, nameScope.withSubScope(childScopeName, env), controlDependencies, deviceSpec, isInit); + } + + @Override + public OpScope withName(String opName) { + return new OpScope(env, nameScope.withName(opName), controlDependencies, deviceSpec, isInit); + } + + @Override + public OpScope withNameAsSubScope(String defaultName) { + return new OpScope( + env, + nameScope.withSubScope(nameScope.makeOpName(defaultName), env), + controlDependencies, + deviceSpec, + isInit); + } + + @Override + public OpScope withDevice(DeviceSpec newDevice) { + return new OpScope(env, nameScope, controlDependencies, newDevice, isInit); + } + + @Override + public OpScope withInitScope() { + return new OpScope(env.initEnv(), nameScope, new ArrayList<>(), deviceSpec, true); + } + + @Override + public String makeOpName(String defaultName) { + return nameScope.makeOpName(defaultName); + } + + @Override + public String makeUnique(String id) { + return nameScope.makeUnique(id); + } + + @Override + public void refreshNames() { + nameScope.importIdsFrom(env); + } + + private OpScope( + ExecutionEnvironment env, + NameScope nameScope, + List controlDependencies, + DeviceSpec deviceSpec, + boolean isInit) { + this.env = env; + this.nameScope = nameScope; + this.controlDependencies = controlDependencies; + this.deviceSpec = deviceSpec; + this.isInit = isInit; + } + + @Override + public Scope withControlDependencyOps(Iterable controls) { + ArrayList toAdd = new ArrayList<>(); + for (Operation control : controls) { + env.checkInput(control); + if (isInit && !env.isInitOp(control)) { + throw new IllegalArgumentException("Init scope can not have non-init control dependency."); + } + if (isInit || !env.isInitOp(control)) { + toAdd.add(control); + } + } + + return new OpScope(env, nameScope, toAdd, deviceSpec, isInit); + } + + @Override + public OperationBuilder apply(OperationBuilder builder) { + builder.setDevice(deviceSpec.toString()); + for (Operation control : controlDependencies) { + if (isInit || !env.isInitOp(control)) { + builder.addControlInput(control); + } + } + return builder; + } + + @Override + public void onOpCreated(Operation op) { + if (isInit) { + env.registerInitOp(op); + } + } + + @Override + public boolean isInit() { + return isInit; + } + + @Override + public String getDeviceString() { + return deviceSpec.toString(); + } + + private final ExecutionEnvironment env; + private final List controlDependencies; + private final NameScope nameScope; + private final DeviceSpec deviceSpec; + private final boolean isInit; +} diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/RawCustomGradient.java 
b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/RawCustomGradient.java new file mode 100644 index 00000000000..949891b7ff3 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/RawCustomGradient.java @@ -0,0 +1,59 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +============================================================================== +*/ +package org.tensorflow.op; + +import java.util.List; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Output; +import org.tensorflow.TensorFlow; +import org.tensorflow.internal.c_api.GradFunc; + +/** + * A custom gradient for an op of unspecified type. Should be registered using {@link + * TensorFlow#registerCustomGradient(String, RawCustomGradient)}. + * + *

    Creates the gradient based on a {@link GraphOperation}. To operate on the op inputs class + * instead, use {@link CustomGradient}. + * + *

    The op type of {@code op} will depend on the op type string passed to the registration method. + * Note that the registration method can be called more than once, resulting in this gradient function + * being used for multiple different op types. + */ +@FunctionalInterface +public interface RawCustomGradient { + + /** + * Calculate the gradients for {@code op}. + * + * @param tf the {@link Ops} instance used to create ops + * @param op the op to calculate the gradients of. + * @param gradInputs the gradients of the op's outputs. + * @return the gradients of the op's inputs. + */ + List<Operand<?>> call(Ops tf, GraphOperation op, List<Output<?>> gradInputs); + + /** + * Create an adapter for the custom gradient so that it can be used by native code. + * + *
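Because the interface is type-agnostic, a single instance can back several op types; a sketch (usual imports assumed, the op type strings are hypothetical, and the zero tensors are placeholder gradients):

    RawCustomGradient zeroGrad =
        (tf, op, gradInputs) -> {
          List<Operand<?>> grads = new ArrayList<>();
          for (Output<?> input : op.inputs()) {
            grads.add(tf.zerosLike(input)); // one gradient per input, shaped like that input
          }
          return grads;
        };
    TensorFlow.registerCustomGradient("MyOpA", zeroGrad);
    TensorFlow.registerCustomGradient("MyOpB", zeroGrad);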

    You should not be calling this yourself, use {@link + * TensorFlow#registerCustomGradient(String, RawCustomGradient)}. + */ + public static GradFunc adapter(RawCustomGradient gradient) { + return new RawGradientAdapter(gradient); + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/RawGradientAdapter.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/RawGradientAdapter.java new file mode 100644 index 00000000000..f1968014dae --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/RawGradientAdapter.java @@ -0,0 +1,71 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +======================================================================= + +*/ +package org.tensorflow.op; + +import java.util.List; +import org.bytedeco.javacpp.PointerScope; +import org.tensorflow.BaseGradientAdapter; +import org.tensorflow.Graph; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Output; +import org.tensorflow.internal.c_api.NativeOperation; +import org.tensorflow.internal.c_api.NativeOutputVector; +import org.tensorflow.internal.c_api.NativeStatus; +import org.tensorflow.internal.c_api.TF_Scope; + +/** A native adapter for {@link RawCustomGradient}. */ +final class RawGradientAdapter extends BaseGradientAdapter { + + private final RawCustomGradient gradient; + + RawGradientAdapter(RawCustomGradient gradient) { + super(); + this.gradient = gradient; + } + + @Override + public NativeStatus call( + TF_Scope scope, + NativeOperation op, + NativeOutputVector grad_inputs, + NativeOutputVector grad_outputs) { + try (PointerScope pointerScope = new PointerScope()) { + Graph g = Graph.findGraphForPointer(scope.graph()); + if (g == null) { + throw new IllegalStateException("No graph found for native gradient scope."); + } + + GraphOperation operation = BaseGradientAdapter.getGraphOp(g, op.node()); + + Scope nativeScope = new GradientScope(scope, g, null).withSubScope(operation.name()); + Ops tf = new Ops(nativeScope); + + List> gradInputs = BaseGradientAdapter.fromNativeOutputs(g, grad_inputs); + + // The graph locks are not re-entrant, so attempting to add an op to a graph that has been + // locked by the gradient builder will fail without this. 
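+ // The flag is cleared again immediately after the user gradient returns, so ops created
+ // outside this call keep going through the normal locking builders.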
+ BaseGradientAdapter.useDangerousLockedBuilders(g, true); + List> gradOutputs = gradient.call(tf, operation, gradInputs); + BaseGradientAdapter.useDangerousLockedBuilders(g, false); + + BaseGradientAdapter.putToNativeOutputs(gradOutputs, grad_outputs); + } + return NativeStatus.OK(); + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/RawOp.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/RawOp.java index 53137a84b54..b55a0062c44 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/RawOp.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/RawOp.java @@ -59,8 +59,19 @@ public final String toString() { * Constructor. * * @param operation the underlying operation + * @param requiredType the type that the underlying operation must be */ - protected RawOp(Operation operation) { + protected RawOp(Operation operation, String requiredType) { + if (!requiredType.equals(operation.type())) { + throw new IllegalArgumentException( + "Can't create a " + + this.getClass() + + " from an operation with type \"" + + operation.type() + + "\", operation must have type \"" + + requiredType + + "\"."); + } this.operation = operation; } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/Scope.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/Scope.java index b4705ea95a3..bb5c03844bc 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/Scope.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/Scope.java @@ -15,8 +15,6 @@ */ package org.tensorflow.op; -import java.util.ArrayList; -import java.util.List; import java.util.stream.Collectors; import java.util.stream.StreamSupport; import org.tensorflow.DeviceSpec; @@ -80,24 +78,10 @@ * *

    Scope objects are not thread-safe. */ -public final class Scope { - - /** - * Create a new top-level scope. - * - *

    For internal use only, use {@link ExecutionEnvironment#baseScope()} if you need a - * base level scope. - * - * @param env The execution environment used by the scope. - */ - public Scope(ExecutionEnvironment env) { - this(env, new NameScope(env), new ArrayList<>(), DeviceSpec.newBuilder().build(), false); - } +public interface Scope { /** Returns the execution environment used by this scope. */ - public ExecutionEnvironment env() { - return env; - } + ExecutionEnvironment env(); /** * Returns a new scope where added operations will have the provided name prefix. @@ -112,10 +96,7 @@ public ExecutionEnvironment env() { * @return a new subscope * @throws IllegalArgumentException if the name is invalid */ - public Scope withSubScope(String childScopeName) { - return new Scope( - env, nameScope.withSubScope(childScopeName, env), controlDependencies, deviceSpec, isInit); - } + Scope withSubScope(String childScopeName); /** * Return a new scope that uses the provided name for an op. @@ -129,9 +110,7 @@ public Scope withSubScope(String childScopeName) { * @return a new Scope that uses opName for operations. * @throws IllegalArgumentException if the name is invalid */ - public Scope withName(String opName) { - return new Scope(env, nameScope.withName(opName), controlDependencies, deviceSpec, isInit); - } + Scope withName(String opName); /** * Returns a new scope where added operations will be prefixed by this scope's op name (set by @@ -149,14 +128,7 @@ public Scope withName(String opName) { * @return a new subscope * @throws IllegalArgumentException if the name is invalid */ - public Scope withNameAsSubScope(String defaultName) { - return new Scope( - env, - nameScope.withSubScope(nameScope.makeOpName(defaultName), env), - controlDependencies, - deviceSpec, - isInit); - } + Scope withNameAsSubScope(String defaultName); /** * Return a new scope that uses the provided device specification for an op. @@ -164,19 +136,13 @@ public Scope withNameAsSubScope(String defaultName) { *

    Operations created within this scope will place the created operations on the device(s) * matching the provided spec. * - * @param deviceSpec device specification for an operator in the returned scope + * @param newDevice device specification for an operator in the returned scope * @return a new Scope that uses opName for operations. */ - public Scope withDevice(DeviceSpec deviceSpec) { - return new Scope(env, nameScope, controlDependencies, deviceSpec, isInit); - } - - // TODO stop gradient recording in init scopes (once we have gradient recording) + Scope withDevice(DeviceSpec newDevice); /** Get an extension of this scope that generates initialization ops. */ - public Scope withInitScope() { - return new Scope(env.initEnv(), nameScope, new ArrayList<>(), deviceSpec, true); - } + Scope withInitScope(); /** * Create a unique name for an operator and reserves it, using a provided default if necessary. @@ -198,55 +164,17 @@ public Scope withInitScope() { * @return unique name for the operator. * @throws IllegalArgumentException if the default name is invalid. */ - public String makeOpName(String defaultName) { - return nameScope.makeOpName(defaultName); - } + String makeOpName(String defaultName); /** Makes a unique name from {@code id} and reserves it. */ - public String makeUnique(String id) { - return nameScope.makeUnique(id); - } - - /** - * Returns a builder to create a new {@link Operation}. - * - *

    Note that {@code name} is automatically made unique. - * - * @param type of the Operation (i.e., identifies the computation to be performed) - * @param name to refer to the created Operation in this environment scope. Is uniquified. - * @return an {@link OperationBuilder} to create an Operation when {@link - * OperationBuilder#build()} is invoked. If {@link OperationBuilder#build()} is not invoked, - * then some resources may leak. - */ - public OperationBuilder opBuilder(String type, String name) { - return env.opBuilder(type, makeOpName(name), this); - } - - public static boolean isValidOpName(String name) { - return NameScope.isValidName(name); - } + String makeUnique(String id); /** * Refresh the used name list (used for uniquifying names) from the underlying graph. * *

    Should be used if you made changes to the graph from non-{@code Scope} APIs. */ - public void refreshNames() { - nameScope.importIdsFrom(env); - } - - private Scope( - ExecutionEnvironment env, - NameScope nameScope, - List controlDependencies, - DeviceSpec deviceSpec, - boolean isInit) { - this.env = env; - this.nameScope = nameScope; - this.controlDependencies = controlDependencies; - this.deviceSpec = deviceSpec; - this.isInit = isInit; - } + void refreshNames(); /** * Returns a new scope where added operations will have the provided control dependencies. @@ -260,7 +188,7 @@ private Scope( * @param controls control dependencies for ops created with the returned scope * @return a new scope with the provided control dependencies */ - public Scope withControlDependencies(Iterable controls) { + default Scope withControlDependencies(Iterable controls) { return withControlDependencyOps( StreamSupport.stream(controls.spliterator(), false) .map(Op::op) @@ -279,19 +207,26 @@ public Scope withControlDependencies(Iterable controls) { * @param controls control dependencies for ops created with the returned scope * @return a new scope with the provided control dependencies */ - public Scope withControlDependencyOps(Iterable controls) { - ArrayList toAdd = new ArrayList<>(); - for (Operation control : controls) { - env.checkInput(control); - if (isInit && !env.isInitOp(control)) { - throw new IllegalArgumentException("Init scope can not have non-init control dependency."); - } - if (isInit || !env.isInitOp(control)) { - toAdd.add(control); - } - } + Scope withControlDependencyOps(Iterable controls); + + /** + * Returns a builder to create a new {@link Operation}. + * + *

    Note that {@code name} is automatically made unique. + * + * @param type of the Operation (i.e., identifies the computation to be performed) + * @param name to refer to the created Operation in this environment scope. Is uniquified. + * @return an {@link OperationBuilder} to create an Operation when {@link + * OperationBuilder#build()} is invoked. If {@link OperationBuilder#build()} is not invoked, + * then some resources may leak. + */ + default OperationBuilder opBuilder(String type, String name) { + return env().opBuilder(type, makeOpName(name), this); + } - return new Scope(env, nameScope, toAdd, deviceSpec, isInit); + /** Check whether {@code name} is a valid name for an operation. */ + static boolean isValidOpName(String name) { + return NameScope.isValidName(name); } /** @@ -302,40 +237,18 @@ public Scope withControlDependencyOps(Iterable controls) { * * @param builder OperationBuilder to add control inputs and device specification to */ - public OperationBuilder apply(OperationBuilder builder) { - builder.setDevice(deviceSpec.toString()); - for (Operation control : controlDependencies) { - if (isInit || !env.isInitOp(control)) { - builder.addControlInput(control); - } - } - return builder; - } + OperationBuilder apply(OperationBuilder builder); /** * Handle op creation, like registering it as an init op if the scope is init. * *
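For ordinary graph building the extracted interface is intended to behave as before; only the construction site changes, as the updated ScopeTest below shows. A quick sketch (usual imports assumed):

    try (Graph g = new Graph()) {
      Scope root = new OpScope(g);           // previously: new Scope(g)
      Scope child = root.withSubScope("child");
      child.makeOpName("add");               // "child/add"
      child.makeOpName("add");               // "child/add_1"
    }
    // Gradient functions instead receive a GradientScope, which delegates naming and
    // control dependencies to the native tensorflow::Scope.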

    FOR INTERNAL USE ONLY */ - public void onOpCreated(Operation op) { - if (isInit) { - env.registerInitOp(op); - } - } + void onOpCreated(Operation op); /** Returns device string from the scope. */ - public String getDeviceString() { - return deviceSpec.toString(); - } + String getDeviceString(); /** Get whether this scope is building init ops. */ - public boolean isInit() { - return isInit; - } - - private final ExecutionEnvironment env; - private final List controlDependencies; - private final NameScope nameScope; - private final DeviceSpec deviceSpec; - private final boolean isInit; + boolean isInit(); } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/TypedGradientAdapter.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/TypedGradientAdapter.java new file mode 100644 index 00000000000..085174fcb4c --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/TypedGradientAdapter.java @@ -0,0 +1,82 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +======================================================================= + +*/ +package org.tensorflow.op; + +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.util.List; +import org.bytedeco.javacpp.PointerScope; +import org.tensorflow.BaseGradientAdapter; +import org.tensorflow.Graph; +import org.tensorflow.Operand; +import org.tensorflow.Output; +import org.tensorflow.internal.c_api.NativeOperation; +import org.tensorflow.internal.c_api.NativeOutputVector; +import org.tensorflow.internal.c_api.NativeStatus; +import org.tensorflow.internal.c_api.TF_Scope; + +/** A native adapter for {@link CustomGradient}. */ +final class TypedGradientAdapter> extends BaseGradientAdapter { + + private final CustomGradient gradient; + private final Class opInputClass; + private final Constructor ctor; + + TypedGradientAdapter(CustomGradient gradient, Class opInputClass) { + super(); + this.gradient = gradient; + this.opInputClass = opInputClass; + //noinspection unchecked + this.ctor = (Constructor) this.opInputClass.getDeclaredConstructors()[0]; + } + + @Override + public NativeStatus call( + TF_Scope scope, + NativeOperation op, + NativeOutputVector grad_inputs, + NativeOutputVector grad_outputs) { + try (PointerScope pointerScope = new PointerScope()) { + Graph g = Graph.findGraphForPointer(scope.graph()); + if (g == null) { + throw new IllegalStateException("No graph found for native gradient scope."); + } + + T rawOp = ctor.newInstance(BaseGradientAdapter.getGraphOp(g, op.node())); + + Scope nativeScope = + new GradientScope(scope, g, null).withSubScope(rawOp.getOutputs().op().name()); + + Ops tf = new Ops(nativeScope); + + List> gradInputs = BaseGradientAdapter.fromNativeOutputs(g, grad_inputs); + + // The graph locks are not re-entrant, so attempting to add an op to a graph that has been + // locked by the gradient builder will fail without this. 
+ BaseGradientAdapter.useDangerousLockedBuilders(g, true); + List> gradOutputs = gradient.call(tf, rawOp, gradInputs); + BaseGradientAdapter.useDangerousLockedBuilders(g, false); + + BaseGradientAdapter.putToNativeOutputs(gradOutputs, grad_outputs); + + } catch (InvocationTargetException | InstantiationException | IllegalAccessException e) { + throw new RuntimeException("Could not instantiate Op class " + opInputClass, e); + } + return NativeStatus.OK(); + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/annotation/OpInputsMetadata.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/annotation/OpInputsMetadata.java new file mode 100644 index 00000000000..e15f60e505a --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/annotation/OpInputsMetadata.java @@ -0,0 +1,36 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +======================================================================= + +*/ +package org.tensorflow.op.annotation; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import org.tensorflow.op.RawOp; + +/** + * An annotation to provide metadata about an op inputs accessor class. Should only be used by users + * on custom ops, will be generated for non-custom ops. + */ +@Target(ElementType.TYPE) +@Retention(RetentionPolicy.RUNTIME) +public @interface OpInputsMetadata { + + /** The main op class. */ + Class outputsClass(); +} diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/annotation/OpMetadata.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/annotation/OpMetadata.java new file mode 100644 index 00000000000..7f3bc929f73 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/annotation/OpMetadata.java @@ -0,0 +1,40 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +======================================================================= + +*/ +package org.tensorflow.op.annotation; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import org.tensorflow.op.RawOpInputs; + +/** + * An annotation to provide metadata about an op. 
Should only be used by users on custom ops, will + * be generated for non-custom ops. + */ +@Target(ElementType.TYPE) +@Retention(RetentionPolicy.RUNTIME) +public @interface OpMetadata { + + /** The type of the op in the TF runtime. */ + String opType(); + + /** The typesafe inputs class (which should be annotated with {@link OpInputsMetadata}). */ + @SuppressWarnings("rawtypes") + Class inputsClass(); +} diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Constant.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Constant.java index f9f6e00f0f6..246d222f303 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Constant.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Constant.java @@ -1356,7 +1356,7 @@ public Output asOutput() { } private Constant(Operation operation) { - super(operation); + super(operation, OP_NAME); output = operation.output(0); } diff --git a/tensorflow-core/tensorflow-core-api/src/main/resources/org/tensorflow/internal/c_api/include/tensorflow_adapters.h b/tensorflow-core/tensorflow-core-api/src/main/resources/org/tensorflow/internal/c_api/include/tensorflow_adapters.h new file mode 100644 index 00000000000..c3ad3ae672e --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/main/resources/org/tensorflow/internal/c_api/include/tensorflow_adapters.h @@ -0,0 +1,42 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ======================================================================= + */ + +#include "absl/types/span.h" + +using namespace absl; + +template class SpanAdapter { +public: + SpanAdapter(T const * ptr, typename Span::size_type size, void* owner) : ptr((T*)ptr), size(size), owner(owner), + arr2(ptr ? Span((T*)ptr, size) : Span()), arr(arr2) { } + SpanAdapter(const Span& arr) : ptr(0), size(0), owner(0), arr2(arr), arr(arr2) { } + SpanAdapter( Span& arr) : ptr(0), size(0), owner(0), arr(arr) { } + SpanAdapter(const Span* arr) : ptr(0), size(0), owner(0), arr(*(Span*)arr) { } + void assign(T* ptr, typename Span::size_type size, void* owner) { + this->ptr = ptr; + this->size = size; + this->owner = owner; + arr.set(ptr, size); + } + static void deallocate(void* owner) { free(owner); } + operator T*() { size = arr.size(); return (T*)arr.data(); } + operator Span&() { return arr; } + operator Span*() { return ptr ? &arr : 0; } + T* ptr; + typename Span::size_type size; + void* owner; + Span arr2; + Span& arr; +}; + diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/CustomGradientTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/CustomGradientTest.java new file mode 100644 index 00000000000..62626c35641 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/CustomGradientTest.java @@ -0,0 +1,81 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +============================================================================== +*/ +package org.tensorflow; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.util.Arrays; +import org.junit.jupiter.api.Test; +import org.tensorflow.ndarray.index.Indices; +import org.tensorflow.op.Ops; +import org.tensorflow.op.dtypes.Cast; +import org.tensorflow.op.nn.NthElement; +import org.tensorflow.proto.framework.DataType; +import org.tensorflow.types.TFloat32; + +public class CustomGradientTest { + + @Test + public void testAlreadyExisting() { + assertFalse( + TensorFlow.registerCustomGradient( + Cast.Inputs.class, + (tf, op, gradInputs) -> { + Operand out = gradInputs.get(0); + Operand a = tf.stridedSlice(out, Indices.slice(0, 1)); + Operand b = tf.stridedSlice(out, Indices.slice(1, 2)); + return Arrays.asList(a, b, tf.constant(0f)); + })); + } + + @Test + public void testCustomGradient() { + try (Graph g = new Graph(); + Session s = new Session(g)) { + assertTrue( + TensorFlow.registerCustomGradient( + NthElement.Inputs.class, + (tf, op, gradInputs) -> + Arrays.asList(tf.withName("inAGrad").constant(0f), tf.constant(0f)))); + + Ops tf = Ops.create(g); + Output x = tf.placeholder(TFloat32.class).output(); + Output y = + tf.math.add(tf.nn.nthElement(x, tf.constant(2)), tf.constant(4f)).asOutput(); + + Output[] grads0 = g.addGradients(y, toArray(x)); + assertNotNull(grads0); + assertEquals(1, grads0.length); + assertEquals(DataType.DT_FLOAT, grads0[0].dataType()); + + try (TFloat32 c1 = TFloat32.vectorOf(3.0f, 2.0f, 1.0f, 0.0f); + AutoCloseableList outputs = + new AutoCloseableList<>(s.runner().feed(x, c1).fetch(grads0[0]).run())) { + + assertEquals(1, outputs.size()); + assertEquals(0.0f, ((TFloat32) outputs.get(0)).getFloat(), 0.0f); + } + } + } + + private static Output[] toArray(Output... 
outputs) { + return outputs; + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphOperationTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphOperationTest.java index f52166aaac3..ab5d9232593 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphOperationTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphOperationTest.java @@ -45,6 +45,7 @@ public class GraphOperationTest { public void outputListLengthFailsOnInvalidName() { try (Graph g = new Graph()) { Ops tf = Ops.create(g); + Operation op = tf.math.add(tf.constant(1), tf.constant(2)).op(); assertEquals(1, op.outputListLength("z")); diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphTest.java index 464701306f8..154d3903dcd 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphTest.java @@ -147,7 +147,7 @@ private static void validateImportedGraph(Graph g, String prefix) { public void iterateOverOperations() { try (Graph g = new Graph()) { Ops tf = Ops.create(g); - Iterator iterator = g.operations(); + Iterator iterator = g.operations(); HashSet operations; assertFalse(iterator.hasNext()); @@ -275,7 +275,6 @@ public void addGradientsToGraph() { .fetch(grads1[0]) .fetch(grads1[1]) .run())) { - assertEquals(3, outputs.size()); assertEquals(108.0f, ((TFloat32) outputs.get(0)).getFloat(), 0.0f); assertEquals(6.0f, ((TFloat32) outputs.get(1)).getFloat(), 0.0f); diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SessionTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SessionTest.java index 3575da6c8c2..95da0520f7d 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SessionTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SessionTest.java @@ -264,7 +264,7 @@ public static void testFetchVariable() { private static int numOperations(Graph g) { int numOperations = 0; - for (Iterator it = g.operations(); it.hasNext(); ) { + for (Iterator it = g.operations(); it.hasNext(); ) { Operation o = it.next(); numOperations++; } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/RawOpTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/RawOpTest.java index 5d523a986ad..d58e349b7e7 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/RawOpTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/RawOpTest.java @@ -18,18 +18,30 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import java.util.HashSet; import java.util.Set; import org.junit.jupiter.api.Test; import org.tensorflow.Graph; +import org.tensorflow.Operand; import org.tensorflow.Output; +import org.tensorflow.op.math.Add; import org.tensorflow.types.TInt32; /** Unit tests for {@link RawOp} */ public class RawOpTest { + @Test + public void wrongOpType() { + try (Graph g = new Graph()) { + Ops tf = Ops.create(g); + Operand a = tf.constant(10); + 
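+ // a.op() is a "Const" operation, so the required-type check added to the RawOp constructor
+ // rejects wrapping it in an Add: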
assertThrows(IllegalArgumentException.class, () -> new Add(a.op())); + } + } + @Test public void equalsHashcode() { try (Graph g = new Graph()) { @@ -38,10 +50,10 @@ public void equalsHashcode() { Output array = tf.constant(new int[2]).asOutput(); RawOp test1 = - new RawOp(g.baseScope().opBuilder("Shape", "shape1").addInput(array).build()) {}; + new RawOp(g.baseScope().opBuilder("Shape", "shape1").addInput(array).build(), "Shape") {}; RawOp test2 = - new RawOp(g.baseScope().opBuilder("Shape", "shape2").addInput(array).build()) {}; - RawOp test3 = new RawOp(test1.operation) {}; + new RawOp(g.baseScope().opBuilder("Shape", "shape2").addInput(array).build(), "Shape") {}; + RawOp test3 = new RawOp(test1.operation, test1.operation.type()) {}; // equals() tests assertNotEquals(test1, test2); diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/ScopeTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/ScopeTest.java index 6b37a908f8e..e2213c7ab1c 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/ScopeTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/ScopeTest.java @@ -50,7 +50,7 @@ public void testSeparateOps() { @Test public void basicNames() { try (Graph g = new Graph()) { - Scope root = new Scope(g); + Scope root = new OpScope(g); assertEquals("add", root.makeOpName("add")); assertEquals("add_1", root.makeOpName("add")); assertEquals("add_2", root.makeOpName("add")); @@ -61,7 +61,7 @@ public void basicNames() { @Test public void hierarchicalNames() { try (Graph g = new Graph()) { - Scope root = new Scope(g); + Scope root = new OpScope(g); Scope child = root.withSubScope("child"); assertEquals("child/add", child.makeOpName("add")); assertEquals("child/add_1", child.makeOpName("add")); @@ -87,7 +87,7 @@ public void hierarchicalNames() { @Test public void scopeAndOpNames() { try (Graph g = new Graph()) { - Scope root = new Scope(g); + Scope root = new OpScope(g); Scope child = root.withSubScope("child"); @@ -100,7 +100,7 @@ public void scopeAndOpNames() { @Test public void validateNames() { try (Graph g = new Graph()) { - Scope root = new Scope(g); + Scope root = new OpScope(g); final String[] invalid_names = { "_", "-", "-x", // Names are constrained to start with [A-Za-z0-9.] 
@@ -137,7 +137,7 @@ public void validateNames() { @Test public void basic() { try (Graph g = new Graph()) { - Scope s = new Scope(g); + Scope s = new OpScope(g); Const c1 = Const.create(s, 42); assertEquals("Const", c1.output().op().name()); Const c2 = Const.create(s, 7); @@ -152,7 +152,7 @@ public void basic() { @Test public void hierarchy() { try (Graph g = new Graph()) { - Scope root = new Scope(g); + Scope root = new OpScope(g); Scope child = root.withSubScope("child"); assertEquals("child/Const", Const.create(child, 42).output().op().name()); assertEquals("child/four", Const.create(child.withName("four"), 4).output().op().name()); @@ -163,7 +163,7 @@ public void hierarchy() { public void composite() { try (Graph g = new Graph(); Session sess = new Session(g)) { - Scope s = new Scope(g); + Scope s = new OpScope(g); Output data = Const.create(s.withName("data"), new int[] {600, 470, 170, 430, 300}).output(); @@ -195,6 +195,7 @@ public void composite() { // "handwritten" sample operator classes private static final class Const { + private final Output output; static Const create(Scope s, int v) { @@ -224,6 +225,7 @@ Output output() { } private static final class Mean { + private final Output output; static Mean create(Scope s, Output input, Output reductionIndices) { @@ -241,6 +243,7 @@ Output output() { } private static final class SquaredDifference { + private final Output output; static SquaredDifference create(Scope s, Output x, Output y) { @@ -262,6 +265,7 @@ Output output() { } private static final class Variance { + private final Output output; static Variance create(Scope base, Output x) { diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskTest.java index 7c5210c0f2d..246b44b8077 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskTest.java @@ -1,19 +1,19 @@ /* - Copyright 2021 The TensorFlow Authors. All Rights Reserved. + Copyright 2021 The TensorFlow Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ============================================================================== - */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
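A recurring mechanical change in the tests above: low-level scopes are now created through the concrete OpScope class rather than by instantiating Scope directly (Scope is now the abstraction, OpScope the concrete implementation over a Graph or EagerSession). Hand-written code that builds scopes itself follows the same pattern; a minimal sketch, using only classes visible in this diff:

import org.tensorflow.Graph;
import org.tensorflow.op.OpScope;
import org.tensorflow.op.Scope;
import org.tensorflow.op.core.Constant;
import org.tensorflow.types.TInt32;

public final class OpScopeSketch {

  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      // Previously: Scope scope = new Scope(g);
      Scope scope = new OpScope(g);

      // The Scope API itself is unchanged: sub-scopes and op names work as before.
      Scope child = scope.withSubScope("child");
      Constant<TInt32> c = Constant.scalarOf(child.withName("answer"), 42);
      System.out.println(c.op().name()); // name is prefixed with "child/"
    }
  }
}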
+============================================================================== +*/ package org.tensorflow.op.core; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -23,23 +23,24 @@ import org.tensorflow.Operand; import org.tensorflow.Session; import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.OpScope; import org.tensorflow.op.Scope; import org.tensorflow.types.TBool; -import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; public class BooleanMaskTest { @Test - public void testBooleanMask(){ + public void testBooleanMask() { try (Graph g = new Graph(); Session sess = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); Operand input = Constant.arrayOf(scope, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9); Operand input2 = ExpandDims.create(scope, input, Constant.scalarOf(scope, 0)); - Operand mask = Constant.arrayOf(scope, true, true, false, false, true, true, true, false, false, false); + Operand mask = + Constant.arrayOf(scope, true, true, false, false, true, true, true, false, false, false); Operand output1 = BooleanMask.create(scope, input, mask); Operand output2 = BooleanMask.create(scope, input2, mask, BooleanMask.axis(1)); diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskUpdateTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskUpdateTest.java index c2b514bfdb6..16c14f7a9a3 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskUpdateTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskUpdateTest.java @@ -1,31 +1,31 @@ /* - Copyright 2021 The TensorFlow Authors. All Rights Reserved. + Copyright 2021 The TensorFlow Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ============================================================================== - */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+============================================================================== +*/ package org.tensorflow.op.core; import static org.junit.jupiter.api.Assertions.assertEquals; import java.util.List; - import org.junit.jupiter.api.Test; import org.tensorflow.Graph; import org.tensorflow.Operand; import org.tensorflow.Session; import org.tensorflow.Tensor; import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.OpScope; import org.tensorflow.op.Scope; import org.tensorflow.types.TBool; import org.tensorflow.types.TInt32; @@ -36,17 +36,19 @@ public class BooleanMaskUpdateTest { public void testBooleanMaskUpdateSlice() { try (Graph g = new Graph(); Session sess = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); - Operand input = Constant.tensorOf(scope, new int[][]{{0, 0, 0}, {1, 1, 1}, {2, 2, 2}}); + Operand input = + Constant.tensorOf(scope, new int[][] {{0, 0, 0}, {1, 1, 1}, {2, 2, 2}}); Operand mask = Constant.arrayOf(scope, true, false, false); - Operand value = Constant.tensorOf(scope, new int[][]{{-1, -1, -1}}); + Operand value = Constant.tensorOf(scope, new int[][] {{-1, -1, -1}}); Operand output = BooleanMaskUpdate.create(scope, input, mask, value); - Operand bcastOutput = BooleanMaskUpdate.create(scope, input, mask, Constant.scalarOf(scope, -1)); + Operand bcastOutput = + BooleanMaskUpdate.create(scope, input, mask, Constant.scalarOf(scope, -1)); List results = sess.runner().fetch(output).fetch(bcastOutput).run(); try (TInt32 result = (TInt32) results.get(0); @@ -73,17 +75,19 @@ public void testBooleanMaskUpdateSlice() { public void testBooleanMaskUpdateSliceWithBroadcast() { try (Graph g = new Graph(); Session sess = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); - Operand input = Constant.tensorOf(scope, new int[][]{{0, 0, 0}, {1, 1, 1}, {2, 2, 2}}); + Operand input = + Constant.tensorOf(scope, new int[][] {{0, 0, 0}, {1, 1, 1}, {2, 2, 2}}); Operand mask = Constant.arrayOf(scope, true, false, false); - Operand value = Constant.vectorOf(scope, new int[]{-1, -1, -1}); + Operand value = Constant.vectorOf(scope, new int[] {-1, -1, -1}); Operand output = BooleanMaskUpdate.create(scope, input, mask, value); - Operand bcastOutput = BooleanMaskUpdate.create(scope, input, mask, Constant.scalarOf(scope, -1)); + Operand bcastOutput = + BooleanMaskUpdate.create(scope, input, mask, Constant.scalarOf(scope, -1)); List results = sess.runner().fetch(output).fetch(bcastOutput).run(); try (TInt32 result = (TInt32) results.get(0); @@ -110,18 +114,22 @@ public void testBooleanMaskUpdateSliceWithBroadcast() { public void testBooleanMaskUpdateAxis() { try (Graph g = new Graph(); Session sess = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); - Operand input = Constant.tensorOf(scope, new int[][][]{{{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}}}); + Operand input = + Constant.tensorOf(scope, new int[][][] {{{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}}}); - Operand mask = Constant.arrayOf(scope, true, true, false, false, true, true, true, false, false, false); + Operand mask = + Constant.arrayOf(scope, true, true, false, false, true, true, true, false, false, false); Operand value = Constant.arrayOf(scope, -1, -1, -1, -1, -1); - Operand output = BooleanMaskUpdate.create(scope, input, mask, value, BooleanMaskUpdate.axis(2)); + Operand output = + BooleanMaskUpdate.create(scope, input, mask, value, BooleanMaskUpdate.axis(2)); - Operand bcastOutput = BooleanMaskUpdate - .create(scope, input, mask, Constant.scalarOf(scope, -1), 
BooleanMaskUpdate.axis(2)); + Operand bcastOutput = + BooleanMaskUpdate.create( + scope, input, mask, Constant.scalarOf(scope, -1), BooleanMaskUpdate.axis(2)); List results = sess.runner().fetch(output).fetch(bcastOutput).run(); try (TInt32 result = (TInt32) results.get(0); diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ConstantTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ConstantTest.java index 6df73261867..5c413b3abeb 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ConstantTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ConstantTest.java @@ -38,6 +38,7 @@ import org.tensorflow.ndarray.buffer.FloatDataBuffer; import org.tensorflow.ndarray.buffer.IntDataBuffer; import org.tensorflow.ndarray.buffer.LongDataBuffer; +import org.tensorflow.op.OpScope; import org.tensorflow.op.Ops; import org.tensorflow.op.Scope; import org.tensorflow.types.TBfloat16; @@ -62,7 +63,7 @@ public void createInts() { try (Graph g = new Graph(); Session sess = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); Constant op1 = Constant.tensorOf(scope, shape, buffer); Constant op2 = Constant.tensorOf(scope, array); try (AutoCloseableList t = @@ -81,7 +82,7 @@ public void createFloats() { try (Graph g = new Graph(); Session sess = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); Constant op1 = Constant.tensorOf(scope, shape, buffer); Constant op2 = Constant.tensorOf(scope, array); try (AutoCloseableList t = @@ -100,7 +101,7 @@ public void createDoubles() { try (Graph g = new Graph(); Session sess = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); Constant op1 = Constant.tensorOf(scope, shape, buffer); Constant op2 = Constant.tensorOf(scope, array); try (AutoCloseableList t = @@ -119,7 +120,7 @@ public void createLongs() { try (Graph g = new Graph(); Session sess = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); Constant op1 = Constant.tensorOf(scope, shape, buffer); Constant op2 = Constant.tensorOf(scope, array); try (AutoCloseableList t = @@ -138,7 +139,7 @@ public void createStrings() throws IOException { try (Graph g = new Graph(); Session sess = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); Constant op1 = Constant.tensorOf(scope, shape, buffer); Constant op2 = Constant.tensorOf(scope, array); try (AutoCloseableList t = diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/IndexingTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/IndexingTest.java index 9a66d2445d2..7fd64957700 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/IndexingTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/IndexingTest.java @@ -21,52 +21,55 @@ import org.tensorflow.Graph; import org.tensorflow.Session; import org.tensorflow.ndarray.Shape; -import org.tensorflow.ndarray.index.Indices; import org.tensorflow.ndarray.index.Index; +import org.tensorflow.ndarray.index.Indices; +import org.tensorflow.op.OpScope; import org.tensorflow.op.Scope; import org.tensorflow.types.TFloat32; public class IndexingTest { // [2, 1:2, :, tf.newaxis, ..., :4, 4::2] - private static final Index[] slice = new Index[]{ - Indices.at(2), - Indices.at(1, true), - Indices.all(), - Indices.newAxis(), - Indices.ellipsis(), 
- Indices.sliceTo( 4), - Indices.sliceFrom(4, 2) - }; + private static final Index[] slice = + new Index[] { + Indices.at(2), + Indices.at(1, true), + Indices.all(), + Indices.newAxis(), + Indices.ellipsis(), + Indices.sliceTo(4), + Indices.sliceFrom(4, 2) + }; @Test public void testIndexMerge() { StridedSliceHelper.StridedSliceArgs args = StridedSliceHelper.mergeIndexes(slice); - assertArrayEquals(new int[]{2, 1, 0, 0, 0, 0, 4}, args.begin); - assertArrayEquals(new int[]{3, 2, 0, 0, 0, 4, 0}, args.end); - assertArrayEquals(new int[]{1, 1, 1, 1, 1, 1, 2}, args.strides); + assertArrayEquals(new int[] {2, 1, 0, 0, 0, 0, 4}, args.begin); + assertArrayEquals(new int[] {3, 2, 0, 0, 0, 4, 0}, args.end); + assertArrayEquals(new int[] {1, 1, 1, 1, 1, 1, 2}, args.strides); assertEquals(0b0100100, args.beginMask); assertEquals(0b1000100, args.endMask); assertEquals(0b0010000, args.ellipsisMask); assertEquals(0b0001000, args.newAxisMask); assertEquals(0b0000001, args.shrinkAxisMask); - } @Test - public void testStridedSliceIndex(){ + public void testStridedSliceIndex() { try (Graph g = new Graph(); Session sess = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); long[] shape = {10, 10, 10, 10, 10, 10, 10, 10}; Zeros op = Zeros.create(scope, Constant.vectorOf(scope, shape), TFloat32.class); StridedSlice output = StridedSliceHelper.stridedSlice(scope, op, slice); try (TFloat32 result = (TFloat32) sess.runner().fetch(output.asOutput()).run().get(0)) { // expected shape from Python tensorflow - assertEquals(Shape.of(1, 10, 1, 10, 10, 10, 4, 3), result.shape(), "Slice index didn't match expected (Python)"); + assertEquals( + Shape.of(1, 10, 1, 10, 10, 10, 4, 3), + result.shape(), + "Slice index didn't match expected (Python)"); } } } - } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ShapesTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ShapesTest.java index 39c04c942af..27bfa5fffb6 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ShapesTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ShapesTest.java @@ -22,6 +22,7 @@ import org.tensorflow.Graph; import org.tensorflow.Operand; import org.tensorflow.Session; +import org.tensorflow.op.OpScope; import org.tensorflow.op.Scope; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -34,7 +35,7 @@ public class ShapesTest { public void testFlatten_Operand() { try (Graph g = new Graph(); Session session = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); Operand operand = Constant.arrayOf(scope, new float[] {1, 2, 3, 4, 5, 6, 7, 8}); Shape expResult = Shape.create(scope, operand, TInt64.class); Operand reshaped = @@ -43,12 +44,11 @@ public void testFlatten_Operand() { Shape tfshape = Shape.create(scope, actual, TInt64.class); AtomicInteger index = new AtomicInteger(); - try (TInt64 result1 = (TInt64)session.runner().fetch(tfshape.asOutput()).run().get(0); - TInt64 result2 = (TInt64)session.runner().fetch(expResult.asOutput()).run().get(0)) { + try (TInt64 result1 = (TInt64) session.runner().fetch(tfshape.asOutput()).run().get(0); + TInt64 result2 = (TInt64) session.runner().fetch(expResult.asOutput()).run().get(0)) { result1 .scalars() - .forEach( - s -> assertEquals(result2.getLong(index.getAndIncrement()), s.getLong())); + .forEach(s -> assertEquals(result2.getLong(index.getAndIncrement()), s.getLong())); } } } @@ -57,7 +57,7 @@ 
public void testFlatten_Operand() { @Test public void testFlatten_Shape() { try (EagerSession session = EagerSession.create()) { - Scope scope = new Scope(session); + Scope scope = new OpScope(session); Operand operand = Constant.arrayOf(scope, new float[] {1, 2, 3, 4, 5, 6, 7, 8}); Shape expShape = Shape.create(scope, operand, TInt64.class); Operand actual = @@ -70,9 +70,7 @@ public void testFlatten_Shape() { .asTensor() .scalars() .forEach( - s -> - assertEquals( - expShape.asTensor().getLong(index.getAndIncrement()), s.getLong())); + s -> assertEquals(expShape.asTensor().getLong(index.getAndIncrement()), s.getLong())); } } @@ -81,7 +79,7 @@ public void testFlatten_Shape() { public void testSize_Shape() { try (Graph g = new Graph(); Session session = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); Operand operand = Constant.arrayOf(scope, new float[] {1, 2, 3, 4, 5, 6, 7, 8}); Operand actual = Reshape.create(scope, operand, Constant.vectorOf(scope, new long[] {4, 2, 1})); @@ -89,7 +87,7 @@ public void testSize_Shape() { Operand size = Shapes.size(scope, tfshape, TInt64.class); AtomicInteger index = new AtomicInteger(); - try (TInt64 result1 = (TInt64)session.runner().fetch(size.asOutput()).run().get(0)) { + try (TInt64 result1 = (TInt64) session.runner().fetch(size.asOutput()).run().get(0)) { result1.scalars().forEach(s -> assertEquals(8, s.getLong())); } } @@ -100,24 +98,24 @@ public void testSize_Shape() { public void testSize_Shape_Operand() { try (Graph g = new Graph(); Session session = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); Operand operand = Constant.arrayOf(scope, new float[] {1, 2, 3, 4, 5, 6, 7, 8}); Operand actual = Reshape.create(scope, operand, Constant.vectorOf(scope, new long[] {4, 2, 1})); Shape tfshape = Shape.create(scope, actual); Operand size = Shapes.size(scope, tfshape, Constant.scalarOf(scope, 0)); - try (TInt32 result = (TInt32)session.runner().fetch(size.asOutput()).run().get(0)) { + try (TInt32 result = (TInt32) session.runner().fetch(size.asOutput()).run().get(0)) { result.scalars().forEach(s -> assertEquals(4, s.getInt())); } size = Shapes.size(scope, tfshape, Constant.scalarOf(scope, 1)); - try (TInt32 result = (TInt32)session.runner().fetch(size.asOutput()).run().get(0)) { + try (TInt32 result = (TInt32) session.runner().fetch(size.asOutput()).run().get(0)) { result.scalars().forEach(s -> assertEquals(2, s.getInt())); } size = Shapes.size(scope, tfshape, Constant.scalarOf(scope, 2)); - try (TInt32 result = (TInt32)session.runner().fetch(size.asOutput()).run().get(0)) { + try (TInt32 result = (TInt32) session.runner().fetch(size.asOutput()).run().get(0)) { result.scalars().forEach(s -> assertEquals(1, s.getInt())); } } @@ -128,23 +126,23 @@ public void testSize_Shape_Operand() { public void testSize_Operand_Operand() { try (Graph g = new Graph(); Session session = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); Operand operand = Constant.arrayOf(scope, new float[] {1, 2, 3, 4, 5, 6, 7, 8}); Operand actual = Reshape.create(scope, operand, Constant.vectorOf(scope, new long[] {4, 2, 1})); Operand size = Shapes.size(scope, actual, Constant.scalarOf(scope, 0)); - try (TInt32 result = (TInt32)session.runner().fetch(size.asOutput()).run().get(0)) { + try (TInt32 result = (TInt32) session.runner().fetch(size.asOutput()).run().get(0)) { result.scalars().forEach(s -> assertEquals(4, s.getInt())); } size = Shapes.size(scope, actual, Constant.scalarOf(scope, 1)); - try 
(TInt32 result = (TInt32)session.runner().fetch(size.asOutput()).run().get(0)) { + try (TInt32 result = (TInt32) session.runner().fetch(size.asOutput()).run().get(0)) { result.scalars().forEach(s -> assertEquals(2, s.getInt())); } size = Shapes.size(scope, actual, Constant.scalarOf(scope, 2)); - try (TInt32 result = (TInt32)session.runner().fetch(size.asOutput()).run().get(0)) { + try (TInt32 result = (TInt32) session.runner().fetch(size.asOutput()).run().get(0)) { result.scalars().forEach(s -> assertEquals(1, s.getInt())); } } @@ -155,14 +153,14 @@ public void testSize_Operand_Operand() { public void testNumDimensions() { try (Graph g = new Graph(); Session session = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); Operand operand = Constant.arrayOf(scope, new float[] {1, 2, 3, 4, 5, 6, 7, 8}); Operand actual = Reshape.create(scope, operand, Constant.vectorOf(scope, new long[] {4, 2, 1})); Shape tfshape = Shape.create(scope, actual); Operand nDims = Shapes.numDimensions(scope, tfshape); - try (TInt32 result = (TInt32)session.runner().fetch(nDims.asOutput()).run().get(0)) { + try (TInt32 result = (TInt32) session.runner().fetch(nDims.asOutput()).run().get(0)) { result.scalars().forEach(s -> assertEquals(3, s.getInt())); } } @@ -172,7 +170,7 @@ public void testNumDimensions() { @Test public void testReduceDims_Operand_Operand() { try (EagerSession session = EagerSession.create()) { - Scope scope = new Scope(session); + Scope scope = new OpScope(session); Operand operand = Constant.arrayOf(scope, new float[] {1, 2, 3, 4, 5, 6, 7, 8}); Operand actual = Reshape.create(scope, operand, Constant.vectorOf(scope, new long[] {2, 2, 2})); @@ -197,7 +195,7 @@ public void testReduceDims_Operand_Operand() { @Test public void testReduceDims_Shape_Operand() { try (EagerSession session = EagerSession.create()) { - Scope scope = new Scope(session); + Scope scope = new OpScope(session); Operand operand = Constant.arrayOf(scope, new float[] {1, 2, 3, 4, 5, 6, 7, 8}); Operand actual = Reshape.create(scope, operand, Constant.vectorOf(scope, new long[] {2, 2, 2})); @@ -249,7 +247,7 @@ public void testReduceDims_Shape_Operand() { public void testSqueeze() { try (Graph g = new Graph(); Session session = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); Operand operand = Constant.arrayOf(scope, new float[] {1, 2, 3, 4, 5, 6, 7, 8}); Operand actual = Reshape.create(scope, operand, Constant.vectorOf(scope, new long[] {4, 1, 2, 1})); @@ -258,7 +256,7 @@ public void testSqueeze() { Operand squeezed = Shapes.squeeze(scope, tfshape); AtomicInteger index = new AtomicInteger(); int[] expected = {4, 2}; - try (TInt32 result = (TInt32)session.runner().fetch(squeezed.asOutput()).run().get(0)) { + try (TInt32 result = (TInt32) session.runner().fetch(squeezed.asOutput()).run().get(0)) { result .scalars() .forEach( @@ -274,7 +272,7 @@ public void testSqueeze() { public void testHead() { try (Graph g = new Graph(); Session session = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); Operand operand = Constant.arrayOf(scope, new float[] {1, 2, 3, 4, 5, 6, 7, 8}); Operand actual = Reshape.create(scope, operand, Constant.vectorOf(scope, new long[] {4, 1, 2, 1})); @@ -283,7 +281,7 @@ public void testHead() { Operand head = Shapes.head(scope, tfshape); AtomicInteger index = new AtomicInteger(); int[] expected = {4}; - try (TInt32 result = (TInt32)session.runner().fetch(head.asOutput()).run().get(0)) { + try (TInt32 result = (TInt32) 
session.runner().fetch(head.asOutput()).run().get(0)) { result .scalars() .forEach( @@ -299,7 +297,7 @@ public void testHead() { public void testTake() { try (Graph g = new Graph(); Session session = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); Operand operand = Constant.arrayOf(scope, new float[] {1, 2, 3, 4, 5, 6, 7, 8}); Operand actual = Reshape.create(scope, operand, Constant.vectorOf(scope, new long[] {4, 1, 2, 1})); @@ -308,7 +306,7 @@ public void testTake() { Operand take = Shapes.take(scope, tfshape, Constant.scalarOf(scope, 2)); AtomicInteger index = new AtomicInteger(); int[] expected = {4, 1}; - try (TInt32 result = (TInt32)session.runner().fetch(take.asOutput()).run().get(0)) { + try (TInt32 result = (TInt32) session.runner().fetch(take.asOutput()).run().get(0)) { result .scalars() .forEach( @@ -324,7 +322,7 @@ public void testTake() { public void testTail() { try (Graph g = new Graph(); Session session = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); Operand operand = Constant.arrayOf(scope, new float[] {1, 2, 3, 4, 5, 6, 7, 8}); Operand actual = Reshape.create(scope, operand, Constant.vectorOf(scope, new long[] {4, 1, 2, 1})); @@ -333,7 +331,7 @@ public void testTail() { Operand tail = Shapes.tail(scope, tfshape); AtomicInteger index = new AtomicInteger(); int[] expected = {1}; - try (TInt32 result = (TInt32)session.runner().fetch(tail.asOutput()).run().get(0)) { + try (TInt32 result = (TInt32) session.runner().fetch(tail.asOutput()).run().get(0)) { result .scalars() .forEach( @@ -349,7 +347,7 @@ public void testTail() { public void testTakeLast() { try (Graph g = new Graph(); Session session = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); Operand operand = Constant.arrayOf(scope, new float[] {1, 2, 3, 4, 5, 6, 7, 8}); Operand actual = Reshape.create(scope, operand, Constant.vectorOf(scope, new long[] {4, 1, 2, 1})); @@ -358,7 +356,7 @@ public void testTakeLast() { Operand takeLast = Shapes.takeLast(scope, tfshape, Constant.scalarOf(scope, 3)); AtomicInteger index = new AtomicInteger(); int[] expected = {1, 2, 1}; - try (TInt32 result = (TInt32)session.runner().fetch(takeLast.asOutput()).run().get(0)) { + try (TInt32 result = (TInt32) session.runner().fetch(takeLast.asOutput()).run().get(0)) { result .scalars() .forEach( @@ -374,15 +372,16 @@ public void testTakeLast() { public void testPrependInt() { try (Graph g = new Graph(); Session session = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); Operand operand = Constant.arrayOf(scope, new float[] {1, 2, 3, 4, 5, 6, 7, 8}); - Operand actual = Reshape.create(scope, operand, Constant.vectorOf(scope, new long[] {4, 2})); + Operand actual = + Reshape.create(scope, operand, Constant.vectorOf(scope, new long[] {4, 2})); Shape tfshape = Shape.create(scope, actual); Operand prepend = Shapes.prepend(scope, tfshape, 3); AtomicInteger index = new AtomicInteger(); int[] expected = {3, 4, 2}; - try (TInt32 result = (TInt32)session.runner().fetch(prepend.asOutput()).run().get(0)) { + try (TInt32 result = (TInt32) session.runner().fetch(prepend.asOutput()).run().get(0)) { result .scalars() .forEach( @@ -398,15 +397,16 @@ public void testPrependInt() { public void testPrependLong() { try (Graph g = new Graph(); Session session = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); Operand operand = Constant.arrayOf(scope, new float[] {1, 2, 3, 4, 5, 6, 7, 8}); - Operand actual = 
Reshape.create(scope, operand, Constant.vectorOf(scope, new long[] {4, 2})); + Operand actual = + Reshape.create(scope, operand, Constant.vectorOf(scope, new long[] {4, 2})); Shape tfshape = Shape.create(scope, actual, TInt64.class); Operand prepend = Shapes.prepend(scope, tfshape, 1L); AtomicInteger index = new AtomicInteger(); long[] expected = {1, 4, 2}; - try (TInt64 result = (TInt64)session.runner().fetch(prepend.asOutput()).run().get(0)) { + try (TInt64 result = (TInt64) session.runner().fetch(prepend.asOutput()).run().get(0)) { result .scalars() .forEach( @@ -422,7 +422,7 @@ public void testPrependLong() { public void testPrependShapeTInt32() { try (Graph g = new Graph(); Session session = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); Operand operand1 = Constant.arrayOf(scope, new float[] {1, 2, 3, 4, 5, 6, 7, 8}); Operand actual1 = Reshape.create(scope, operand1, Constant.vectorOf(scope, new long[] {4, 2})); @@ -435,7 +435,7 @@ public void testPrependShapeTInt32() { Operand prepend = Shapes.prepend(scope, tfshape1, tfshape2); AtomicInteger index = new AtomicInteger(); int[] expected = {2, 4, 4, 2}; - try (TInt32 result = (TInt32)session.runner().fetch(prepend.asOutput()).run().get(0)) { + try (TInt32 result = (TInt32) session.runner().fetch(prepend.asOutput()).run().get(0)) { result .scalars() .forEach( @@ -451,7 +451,7 @@ public void testPrependShapeTInt32() { public void testPrependShapeTInt64() { try (Graph g = new Graph(); Session session = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); Operand operand1 = Constant.arrayOf(scope, new float[] {1, 2, 3, 4, 5, 6, 7, 8}); Operand actual1 = Reshape.create(scope, operand1, Constant.vectorOf(scope, new long[] {4, 2})); @@ -464,7 +464,7 @@ public void testPrependShapeTInt64() { Operand prepend = Shapes.prepend(scope, tfshape1, tfshape2); AtomicInteger index = new AtomicInteger(); long[] expected = {2, 4, 4, 2}; - try (TInt64 result = (TInt64)session.runner().fetch(prepend.asOutput()).run().get(0)) { + try (TInt64 result = (TInt64) session.runner().fetch(prepend.asOutput()).run().get(0)) { result .scalars() .forEach( @@ -480,15 +480,16 @@ public void testPrependShapeTInt64() { public void testAppendLong() { try (Graph g = new Graph(); Session session = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); Operand operand = Constant.arrayOf(scope, new float[] {1, 2, 3, 4, 5, 6, 7, 8}); - Operand actual = Reshape.create(scope, operand, Constant.vectorOf(scope, new long[] {4, 2})); + Operand actual = + Reshape.create(scope, operand, Constant.vectorOf(scope, new long[] {4, 2})); Shape tfshape = Shape.create(scope, actual, TInt64.class); Operand append = Shapes.append(scope, tfshape, 2L); AtomicInteger index = new AtomicInteger(); long[] expected = {4L, 2L, 2L}; - try (TInt64 result = (TInt64)session.runner().fetch(append.asOutput()).run().get(0)) { + try (TInt64 result = (TInt64) session.runner().fetch(append.asOutput()).run().get(0)) { result .scalars() .forEach( @@ -504,15 +505,16 @@ public void testAppendLong() { public void testAppendInt() { try (Graph g = new Graph(); Session session = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); Operand operand = Constant.arrayOf(scope, new float[] {1, 2, 3, 4, 5, 6, 7, 8}); - Operand actual = Reshape.create(scope, operand, Constant.vectorOf(scope, new long[] {4, 2})); + Operand actual = + Reshape.create(scope, operand, Constant.vectorOf(scope, new long[] {4, 2})); Shape 
tfshape = Shape.create(scope, actual); Operand append = Shapes.append(scope, tfshape, 2); AtomicInteger index = new AtomicInteger(); int[] expected = {4, 2, 2}; - try (TInt32 result = (TInt32)session.runner().fetch(append.asOutput()).run().get(0)) { + try (TInt32 result = (TInt32) session.runner().fetch(append.asOutput()).run().get(0)) { result .scalars() .forEach( @@ -528,7 +530,7 @@ public void testAppendInt() { public void testAppendShapeTInt32() { try (Graph g = new Graph(); Session session = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); Operand operand1 = Constant.arrayOf(scope, new float[] {1, 2, 3, 4, 5, 6, 7, 8}); Operand actual1 = Reshape.create(scope, operand1, Constant.vectorOf(scope, new long[] {4, 2})); @@ -541,7 +543,7 @@ public void testAppendShapeTInt32() { Operand append = Shapes.append(scope, tfshape1, tfshape2); AtomicInteger index = new AtomicInteger(); int[] expected = {4, 2, 2, 4}; - try (TInt32 result = (TInt32)session.runner().fetch(append.asOutput()).run().get(0)) { + try (TInt32 result = (TInt32) session.runner().fetch(append.asOutput()).run().get(0)) { result .scalars() .forEach( @@ -557,7 +559,7 @@ public void testAppendShapeTInt32() { public void testAppendShapeTInt64() { try (Graph g = new Graph(); Session session = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); Operand operand1 = Constant.arrayOf(scope, new float[] {1, 2, 3, 4, 5, 6, 7, 8}); Operand actual1 = Reshape.create(scope, operand1, Constant.vectorOf(scope, new long[] {4, 2})); @@ -570,7 +572,7 @@ public void testAppendShapeTInt64() { Operand append = Shapes.append(scope, tfshape1, tfshape2); AtomicInteger index = new AtomicInteger(); long[] expected = {4, 2, 2, 4}; - try (TInt64 result = (TInt64)session.runner().fetch(append.asOutput()).run().get(0)) { + try (TInt64 result = (TInt64) session.runner().fetch(append.asOutput()).run().get(0)) { result .scalars() .forEach( diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ZerosTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ZerosTest.java index 4121baf3af1..b4d36702c93 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ZerosTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ZerosTest.java @@ -23,6 +23,7 @@ import org.junit.jupiter.api.Test; import org.tensorflow.Graph; import org.tensorflow.Session; +import org.tensorflow.op.OpScope; import org.tensorflow.op.Scope; import org.tensorflow.types.TBool; import org.tensorflow.types.TFloat32; @@ -38,10 +39,10 @@ public class ZerosTest { public void createIntZeros() { try (Graph g = new Graph(); Session sess = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); long[] shape = {2, 2}; Zeros op = Zeros.create(scope, Constant.vectorOf(scope, shape), TInt32.class); - try (TInt32 result = (TInt32)sess.runner().fetch(op).run().get(0)) { + try (TInt32 result = (TInt32) sess.runner().fetch(op).run().get(0)) { result.scalars().forEach(s -> assertEquals(0, s.getInt())); } } @@ -51,10 +52,10 @@ public void createIntZeros() { public void createFloatZeros() { try (Graph g = new Graph(); Session sess = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); long[] shape = {2, 2}; Zeros op = Zeros.create(scope, Constant.vectorOf(scope, shape), TFloat32.class); - try (TFloat32 result = (TFloat32)sess.runner().fetch(op.asOutput()).run().get(0)) { + try (TFloat32 result 
= (TFloat32) sess.runner().fetch(op.asOutput()).run().get(0)) { result.scalars().forEach(s -> assertEquals(0.0f, s.getFloat(), 0)); } } @@ -64,10 +65,10 @@ public void createFloatZeros() { public void createDoubleZeros() { try (Graph g = new Graph(); Session sess = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); long[] shape = {2, 2}; Zeros op = Zeros.create(scope, Constant.vectorOf(scope, shape), TFloat64.class); - try (TFloat64 result = (TFloat64)sess.runner().fetch(op.asOutput()).run().get(0)) { + try (TFloat64 result = (TFloat64) sess.runner().fetch(op.asOutput()).run().get(0)) { result.scalars().forEach(s -> assertEquals(0.0f, s.getDouble(), 0)); } } @@ -77,10 +78,10 @@ public void createDoubleZeros() { public void createLongZeros() { try (Graph g = new Graph(); Session sess = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); long[] shape = {2, 2}; Zeros op = Zeros.create(scope, Constant.vectorOf(scope, shape), TInt64.class); - try (TInt64 result = (TInt64)sess.runner().fetch(op.asOutput()).run().get(0)) { + try (TInt64 result = (TInt64) sess.runner().fetch(op.asOutput()).run().get(0)) { result.scalars().forEach(s -> assertEquals(0L, s.getLong())); } } @@ -90,23 +91,23 @@ public void createLongZeros() { public void createBooleanZeros() { try (Graph g = new Graph(); Session sess = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); long[] shape = {2, 2}; Zeros op = Zeros.create(scope, Constant.vectorOf(scope, shape), TBool.class); - try (TBool result = (TBool)sess.runner().fetch(op.asOutput()).run().get(0)) { + try (TBool result = (TBool) sess.runner().fetch(op.asOutput()).run().get(0)) { result.scalars().forEach(s -> assertFalse(s.getBoolean())); } - } + } } @Test public void createUint8Zeros() { try (Graph g = new Graph(); Session sess = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); long[] shape = {2, 2}; Zeros op = Zeros.create(scope, Constant.vectorOf(scope, shape), TUint8.class); - try (TUint8 result = (TUint8)sess.runner().fetch(op.asOutput()).run().get(0)) { + try (TUint8 result = (TUint8) sess.runner().fetch(op.asOutput()).run().get(0)) { result.scalars().forEach(s -> assertEquals(0, s.getByte())); } } @@ -116,10 +117,10 @@ public void createUint8Zeros() { public void createStringZeros() { try (Graph g = new Graph(); Session sess = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); long[] shape = {2, 2}; Zeros op = Zeros.create(scope, Constant.vectorOf(scope, shape), TString.class); - try (TString result = (TString)sess.runner().fetch(op.asOutput()).run().get(0)) { + try (TString result = (TString) sess.runner().fetch(op.asOutput()).run().get(0)) { result.scalars().forEach(s -> assertTrue(s.getObject().isEmpty())); } } @@ -129,10 +130,12 @@ public void createStringZeros() { public void operationsComposingZerosAreCorrectlyNamed() { try (Graph g = new Graph(); Session sess = new Session(g)) { - Scope scope = new Scope(g); + Scope scope = new OpScope(g); long[] shape = {2, 2}; - Zeros zeros = Zeros.create(scope.withSubScope("test"), Constant.vectorOf(scope, shape), TFloat32.class); - List results = sess.runner().addTarget("test/Zeros/Zero").addTarget("test/Zeros/Fill").run(); + Zeros zeros = + Zeros.create(scope.withSubScope("test"), Constant.vectorOf(scope, shape), TFloat32.class); + List results = + sess.runner().addTarget("test/Zeros/Zero").addTarget("test/Zeros/Fill").run(); } } } diff --git 
a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/Names.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/Names.java index 4176f517022..7252d258814 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/Names.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/Names.java @@ -29,6 +29,9 @@ public class Names { public static final ClassName Operator = ClassName.get(OpPackage + ".annotation", "Operator"); public static final ClassName Endpoint = ClassName.get(OpPackage + ".annotation", "Endpoint"); + public static final ClassName OpMetadata = ClassName.get(OpPackage + ".annotation", "OpMetadata"); + public static final ClassName OpInputsMetadata = + ClassName.get(OpPackage + ".annotation", "OpInputsMetadata"); public static final ClassName TType = ClassName.get(TypesPackage + ".family", "TType"); public static final ClassName TString = ClassName.get(TypesPackage, "TString"); @@ -71,6 +74,7 @@ public class Names { ClassName.get(TensorflowPackage, "ConcreteFunction"); public static final ClassName Scope = ClassName.get(OpPackage, "Scope"); + public static final ClassName OpScope = ClassName.get(OpPackage, "OpScope"); public static final TypeName DeviceSpec = ClassName.get(TensorflowPackage, "DeviceSpec"); public static final ClassName Ops = ClassName.get(OpPackage, "Ops"); diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/op/generator/ClassGenerator.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/op/generator/ClassGenerator.java index 14d3f31a977..ff964525006 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/op/generator/ClassGenerator.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/op/generator/ClassGenerator.java @@ -34,6 +34,7 @@ import com.squareup.javapoet.WildcardTypeName; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -70,6 +71,7 @@ enum RenderMode { } private static final String OP_NAME_FIELD = "OP_NAME"; + private static final String INPUTS_CLASS_NAME = "Inputs"; /** The in-progress class builder for the top level op class. */ private final TypeSpec.Builder builder; @@ -214,12 +216,29 @@ private String fullClassName() { return fullPackage + "." + className; } + private ClassName className() { + return ClassName.get(fullPackage, className); + } + + private ClassName inputsClassName() { + return ClassName.get(fullPackage, className, INPUTS_CLASS_NAME); + } + + private TypeName maybeParameterize( + ClassName baseType, Collection parameters) { + if (parameters.isEmpty()) { + return baseType; + } else { + return ParameterizedTypeName.get(baseType, parameters.toArray(new TypeName[0])); + } + } + /** Build the class. 
*/ void buildClass() { builder.addModifiers(Modifier.PUBLIC); if (!isStateSelector) { builder.addModifiers(Modifier.FINAL); - builder.superclass(Names.RawOp); + addInputsMetadataAnnotation(); } if (isStateSubclass) { @@ -341,6 +360,7 @@ void buildClass() { buildConstructor(); buildInputsClass(); + builder.superclass(Names.RawOp); } } @@ -866,7 +886,7 @@ private void buildInterfaceImpl() { /** Add a constructor to get the outputs from an operation */ private void buildConstructor() { - MethodSpec.Builder ctor = MethodSpec.constructorBuilder().addModifiers(Modifier.PRIVATE); + MethodSpec.Builder ctor = MethodSpec.constructorBuilder().addModifiers(Modifier.PUBLIC); ctor.addParameter(Names.Operation, "operation"); @@ -881,7 +901,7 @@ private void buildConstructor() { } } CodeBlock.Builder body = CodeBlock.builder(); - body.addStatement("super(operation)"); + body.addStatement("super(operation, $L)", OP_NAME_FIELD); if (op.getOutputArgCount() > 0) { body.addStatement("int outputIdx = 0"); @@ -919,9 +939,9 @@ private void buildConstructor() { builder.addMethod(ctor.build()); } - private void buildInputsClass() { + private Set buildInputsClass() { TypeSpec.Builder inputsBuilder = - TypeSpec.classBuilder("Inputs").addModifiers(Modifier.PUBLIC, Modifier.STATIC); + TypeSpec.classBuilder(INPUTS_CLASS_NAME).addModifiers(Modifier.PUBLIC, Modifier.STATIC); MethodSpec.Builder ctor = MethodSpec.constructorBuilder().addModifiers(Modifier.PUBLIC); ctor.addParameter(Names.GraphOperation, "op"); @@ -1008,7 +1028,7 @@ private void buildInputsClass() { } } - TypeName outputClass = ClassName.get(fullPackage, className); + TypeName outputClass = className(); if (!this.builder.typeVariables.isEmpty()) { outputClass = ParameterizedTypeName.get( @@ -1029,6 +1049,25 @@ private void buildInputsClass() { inputsBuilder.addMethod(ctor.build()); inputsBuilder.addTypeVariables(typeVars); + addInputsMetadataAnnotation(inputsBuilder); this.builder.addType(inputsBuilder.build()); + return typeVars; + } + + /** Adds the GeneratedOpMetadata annotation to the op class. */ + private void addInputsMetadataAnnotation() { + builder.addAnnotation( + AnnotationSpec.builder(Names.OpMetadata) + .addMember("opType", "$L", className + ".OP_NAME") + .addMember("inputsClass", "$T.class", inputsClassName()) + .build()); + } + + /** Adds the GeneratedOpInputsMetadata annotation to the op input class. 
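Two effects of the generator changes above are worth spelling out. First, each generated op class and its nested Inputs class now annotate each other with @OpMetadata(opType = ..., inputsClass = ...) and @OpInputsMetadata(outputsClass = ...), which ties an Inputs class (the key taken by registerCustomGradient earlier in this patch) back to its op. Second, the generated constructors are now public and pass OP_NAME to the RawOp superclass, so wrapping the wrong kind of operation fails fast, as RawOpTest.wrongOpType asserts. A small illustrative sketch of that second point (mirroring the test; the exception message contents are not guaranteed):

import org.tensorflow.Graph;
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.op.math.Add;
import org.tensorflow.types.TInt32;

public final class OpTypeCheckSketch {

  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);
      Operand<TInt32> a = tf.constant(10);

      try {
        // 'a' is backed by a Const operation, not an Add, so the now-public
        // Add constructor (which passes Add's OP_NAME to RawOp) rejects it.
        Add<TInt32> bad = new Add<>(a.op());
      } catch (IllegalArgumentException expected) {
        System.out.println("rejected as expected: " + expected.getMessage());
      }
    }
  }
}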
*/ + private void addInputsMetadataAnnotation(TypeSpec.Builder inputsBuilder) { + inputsBuilder.addAnnotation( + AnnotationSpec.builder(Names.OpInputsMetadata) + .addMember("outputsClass", "$T.class", className()) + .build()); } } diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java index 70c7bb0a7de..99277e8fe24 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java @@ -155,6 +155,7 @@ public Set getSupportedAnnotationTypes() { } private static class OpsSpec { + private static final Comparator PARAMETER_SPEC_COMPARATOR = (o1, o2) -> { if (o1.parameters.size() > o2.parameters.size()) { @@ -495,7 +496,6 @@ private static TypeSpec buildTopClass(OpsSpec spec) { MethodSpec.Builder ctorBuilder = MethodSpec.constructorBuilder() .addParameter(Names.Scope, "scope") - .addModifiers(Modifier.PRIVATE) .addStatement("this.scope = scope", Names.Scope); TypeSpec.Builder opsBuilder = @@ -675,7 +675,7 @@ private static TypeSpec buildTopClass(OpsSpec spec) { .addModifiers(Modifier.PUBLIC, Modifier.STATIC) .addParameter(Names.ExecutionEnvironment, "env") .returns(Names.Ops) - .addStatement("return new Ops(env.baseScope())", Names.Scope) + .addStatement("return new Ops(env.baseScope())") .addJavadoc( "Creates an API for building operations in the provided execution environment\n") .build()); diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java index f775b1873b2..15bf224a5de 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/FrameworkOps.java @@ -18,6 +18,7 @@ import org.tensorflow.EagerSession; import org.tensorflow.ExecutionEnvironment; import org.tensorflow.op.Op; +import org.tensorflow.op.OpScope; import org.tensorflow.op.Ops; import org.tensorflow.op.Scope; @@ -71,7 +72,7 @@ private FrameworkOps(Ops core) { * @return the FrameworkOps */ public static FrameworkOps create(ExecutionEnvironment env) { - return new FrameworkOps(new Scope(env)); + return new FrameworkOps(new OpScope(env)); } /** @@ -83,7 +84,7 @@ public static FrameworkOps create(ExecutionEnvironment env) { * @return the FrameworkOps */ public static FrameworkOps create() { - return new FrameworkOps(new Scope(EagerSession.getDefault())); + return new FrameworkOps(new OpScope(EagerSession.getDefault())); } /** diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/Optimizer.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/Optimizer.java index b1366146836..a12e46f82e5 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/Optimizer.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/optimizers/Optimizer.java @@ -26,6 +26,7 @@ import org.tensorflow.Operation; import org.tensorflow.Output; import org.tensorflow.op.Op; +import org.tensorflow.op.OpScope; import org.tensorflow.op.Ops; import org.tensorflow.op.Scope; import org.tensorflow.op.core.NoOp; @@ -280,7 +281,7 @@ private Op applyDense(GradAndVar gradVarPair) { * @return A NoOp with a 
control dependency on each update operation. */ protected Op finish(List updateOperations, String name) { - Scope scope = new Scope(graph); + Scope scope = new OpScope(graph); scope = scope.withName(name); scope = scope.withControlDependencies(updateOperations); return NoOp.create(scope); From 910b337ce9b38f8bd29715173152a6506228f0ab Mon Sep 17 00:00:00 2001 From: Samuel Audet Date: Wed, 17 Nov 2021 23:25:16 +0900 Subject: [PATCH 02/21] Upgrade for TensorFlow 2.7.0 (#395) * Upgrade for TensorFlow 2.7.0 * Rebase and fix ops packages * Remove obsolete patch * Fix graph custom gradient support with TensorFlow 2.7.0 Also fix execution order plugins for javacpp-parser * Remove broken version of python3.exe from MSYS2 on GitHub Actions Co-authored-by: Karl Lessard --- .github/workflows/ci.yml | 2 +- tensorflow-core/tensorflow-core-api/WORKSPACE | 7 +- .../external/custom-grad-helpers.patch | 57 - .../external/tensorflow-proto.patch | 525 +++- tensorflow-core/tensorflow-core-api/pom.xml | 12 +- .../api_def/api_def_AnonymousHashTable.pbtxt | 3 + .../api_def_AssignVariableXlaConcatND.pbtxt | 6 + .../api_def_CollectiveAllToAllV3.pbtxt | 6 + .../api_def/api_def_CollectiveBcastRecv.pbtxt | 4 +- .../api_def_CollectiveBcastRecvV2.pbtxt | 2 +- .../api_def/api_def_CollectiveBcastSend.pbtxt | 4 +- .../api_def_CollectiveBcastSendV2.pbtxt | 2 +- .../api_def/api_def_CollectiveGather.pbtxt | 8 +- .../api_def/api_def_CollectiveGatherV2.pbtxt | 2 +- ...def_CollectiveInitializeCommunicator.pbtxt | 6 + .../api_def/api_def_CollectivePermute.pbtxt | 2 +- .../api_def/api_def_CollectiveReduce.pbtxt | 8 +- .../api_def/api_def_CollectiveReduceV2.pbtxt | 4 +- .../api_def/api_def_CollectiveReduceV3.pbtxt | 6 + .../api_def/api_def_GetElementAtIndex.pbtxt | 3 + .../bazel/api_def/api_def_GetOptions.pbtxt | 3 - .../api_def_IsTPUEmbeddingInitialized.pbtxt | 6 + ...PUEmbeddingAdagradMomentumParameters.pbtxt | 6 + ...mbeddingFrequencyEstimatorParameters.pbtxt | 2 +- ...ncyEstimatorParametersGradAccumDebug.pbtxt | 2 +- .../api_def_ReadVariableXlaSplitND.pbtxt | 6 + ...PUEmbeddingAdagradMomentumParameters.pbtxt | 6 + ...mbeddingFrequencyEstimatorParameters.pbtxt | 2 +- ...ncyEstimatorParametersGradAccumDebug.pbtxt | 2 +- .../bazel/api_def/api_def_SaveDataset.pbtxt | 5 +- .../bazel/api_def/api_def_SaveDatasetV2.pbtxt | 7 + .../api_def_StatelessRandomGetAlg.pbtxt | 2 +- ...api_def_StatelessRandomGetKeyCounter.pbtxt | 2 +- .../src/bazel/api_def/api_def_WindowOp.pbtxt | 6 + .../bazel/api_def/api_def_XlaAllReduce.pbtxt | 6 + .../bazel/api_def/api_def_XlaConcatND.pbtxt | 6 + .../api_def/api_def_XlaReduceScatter.pbtxt | 6 + .../api_def/api_def_XlaRngBitGenerator.pbtxt | 6 + .../bazel/api_def/api_def_XlaSplitND.pbtxt | 6 + .../api_def/api_def_XlaVariadicReduce.pbtxt | 4 +- .../api_def/api_def_XlaVariadicReduceV2.pbtxt | 6 + .../src/bazel/api_def/import/api_import.cc | 2 +- .../org/tensorflow/op/DataOps.java | 153 +- .../annotations/org/tensorflow/op/Ops.java | 63 +- .../org/tensorflow/op/TrainOps.java | 16 +- .../annotations/org/tensorflow/op/XlaOps.java | 97 +- .../internal/c_api/NativeOutput.java | 8 +- .../internal/c_api/NativeStatus.java | 59 +- .../org/tensorflow/internal/c_api/Node.java | 1 + .../internal/c_api/global/tensorflow.java | 99 +- .../tensorflow/op/collective/AllReduce.java | 302 -- ...dcastSend.java => CollectiveAllToAll.java} | 109 +- .../CollectiveBcastRecv.java} | 26 +- .../CollectiveBcastSend.java} | 26 +- .../{GatherV2.java => CollectiveGather.java} | 24 +- ... 
=> CollectiveInitializeCommunicator.java} | 103 +- .../CollectivePermute.java | 2 +- .../{Gather.java => CollectiveReduce.java} | 112 +- .../org/tensorflow/op/collective/Reduce.java | 299 -- .../tensorflow/op/collective/ReduceV2.java | 301 -- .../op/core/AnonymousHashTable.java | 123 + .../tensorflow/op/core/CollectiveGather.java | 224 -- .../tensorflow/op/core/GetElementAtIndex.java | 139 + .../op/{rawops => core}/GetOptions.java | 2 +- .../java/org/tensorflow/op/core/Reverse.java | 4 +- .../org/tensorflow/op/core/ScatterNd.java | 59 +- .../org/tensorflow/op/data/BatchDataset.java | 34 +- .../org/tensorflow/op/data/CacheDataset.java | 49 +- .../op/data/ConcatenateDataset.java | 49 +- .../op/data/DatasetToSingleElement.java | 49 +- .../org/tensorflow/op/data/FilterDataset.java | 49 +- .../op/data/FixedLengthRecordDataset.java | 49 +- .../tensorflow/op/data/FlatMapDataset.java | 49 +- .../tensorflow/op/data/GeneratorDataset.java | 49 +- .../op/data/GroupByWindowDataset.java | 49 +- .../tensorflow/op/data/InterleaveDataset.java | 50 +- .../data/LegacyParallelInterleaveDataset.java | 34 +- .../op/data/MapAndBatchDataset.java | 34 +- .../org/tensorflow/op/data/MapDataset.java | 34 +- .../tensorflow/op/data/OptionsDataset.java | 51 +- .../op/data/PaddedBatchDataset.java | 34 +- .../op/data/ParallelBatchDataset.java | 34 +- .../op/data/ParallelInterleaveDataset.java | 34 +- .../op/data/ParallelMapDataset.java | 34 +- .../tensorflow/op/data/PrefetchDataset.java | 34 +- .../org/tensorflow/op/data/RandomDataset.java | 49 +- .../org/tensorflow/op/data/RangeDataset.java | 50 +- .../org/tensorflow/op/data/ReduceDataset.java | 34 +- .../tensorflow/op/data/RegisterDataset.java | 50 +- .../org/tensorflow/op/data/RepeatDataset.java | 50 +- .../org/tensorflow/op/data/SaveDataset.java | 55 +- .../org/tensorflow/op/data/ScanDataset.java | 34 +- .../org/tensorflow/op/data/ShardDataset.java | 34 +- .../op/data/ShuffleAndRepeatDataset.java | 34 +- .../tensorflow/op/data/ShuffleDataset.java | 34 +- .../org/tensorflow/op/data/SkipDataset.java | 50 +- .../tensorflow/op/data/SnapshotDataset.java | 34 +- .../org/tensorflow/op/data/TakeDataset.java | 50 +- .../tensorflow/op/data/TakeWhileDataset.java | 49 +- .../org/tensorflow/op/data/TensorDataset.java | 49 +- .../op/data/TensorSliceDataset.java | 81 +- .../tensorflow/op/data/TextLineDataset.java | 49 +- .../tensorflow/op/data/TfRecordDataset.java | 49 +- .../tensorflow/op/data/UnbatchDataset.java | 49 +- .../org/tensorflow/op/data/UniqueDataset.java | 49 +- .../org/tensorflow/op/data/WindowDataset.java | 49 +- .../{core/Window.java => data/WindowOp.java} | 30 +- .../org/tensorflow/op/data/ZipDataset.java | 49 +- .../StatelessRandomGetAlg.java | 2 +- .../StatelessRandomGetKeyCounter.java | 2 +- ...encyEstimatorParametersGradAccumDebug.java | 231 -- ...encyEstimatorParametersGradAccumDebug.java | 242 -- .../op/tpu/ConfigureDistributedTPU.java | 34 +- .../op/tpu/IsTPUEmbeddingInitialized.java | 92 + ...EmbeddingADAMParametersGradAccumDebug.java | 237 -- ...ddingAdadeltaParametersGradAccumDebug.java | 237 -- ...PUEmbeddingAdagradMomentumParameters.java} | 52 +- ...EmbeddingFTRLParametersGradAccumDebug.java | 237 -- ...EmbeddingFrequencyEstimatorParameters.java | 4 +- ...ddingMomentumParametersGradAccumDebug.java | 229 -- ...oximalAdagradParametersGradAccumDebug.java | 229 -- ...gProximalYogiParametersGradAccumDebug.java | 232 -- ...eddingRMSPropParametersGradAccumDebug.java | 237 -- ...adientDescentParametersGradAccumDebug.java | 221 -- 
...EmbeddingADAMParametersGradAccumDebug.java | 252 -- ...ddingAdadeltaParametersGradAccumDebug.java | 252 -- ...PUEmbeddingAdagradMomentumParameters.java} | 50 +- ...EmbeddingFTRLParametersGradAccumDebug.java | 252 -- ...EmbeddingFrequencyEstimatorParameters.java | 4 +- ...ddingMomentumParametersGradAccumDebug.java | 240 -- ...oximalAdagradParametersGradAccumDebug.java | 240 -- ...gProximalYogiParametersGradAccumDebug.java | 248 -- ...eddingRMSPropParametersGradAccumDebug.java | 252 -- ...adientDescentParametersGradAccumDebug.java | 229 -- .../org/tensorflow/op/train/ApplyAdam.java | 8 +- .../op/train/ResourceApplyAdam.java | 8 +- .../java/org/tensorflow/op/xla/AllReduce.java | 133 + .../op/xla/AssignVariableConcatND.java | 244 ++ .../java/org/tensorflow/op/xla/ConcatND.java | 248 ++ .../op/xla/ReadVariableSplitND.java | 251 ++ .../org/tensorflow/op/xla/ReduceScatter.java | 141 + .../op/xla/RemoveDynamicDimensionSize.java | 8 +- .../tensorflow/op/xla/RngBitGenerator.java | 157 + .../java/org/tensorflow/op/xla/Sharding.java | 64 +- .../java/org/tensorflow/op/xla/SplitND.java | 247 ++ .../op/xla/SpmdFullToShardShape.java | 109 +- .../op/xla/SpmdShardToFullShape.java | 110 +- .../tensorflow/op/xla/XlaVariadicReduce.java | 70 +- .../proto/data/AutotuneOptions.java | 843 ++++++ .../proto/data/AutotuneOptionsOrBuilder.java | 30 + .../tensorflow/proto/data/DataService.java | 789 +++++ .../proto/data/DatasetMetadata.java | 543 ++++ .../proto/data/DatasetOptionsProtos.java | 117 +- .../proto/data/DistributeOptions.java | 8 + .../proto/data/OptimizationOptions.java | 563 +--- .../data/OptimizationOptionsOrBuilder.java | 28 - .../org/tensorflow/proto/data/Options.java | 231 ++ .../proto/data/OptionsOrBuilder.java | 25 + .../proto/data/ThreadingOptions.java | 8 + .../data/experimental/ServiceConfig.java | 850 +++++- .../proto/data/model/ModelProto.java | 1009 +++---- .../proto/data/model/ModelProtoOrBuilder.java | 66 +- .../proto/data/model/ModelProtos.java | 84 +- .../tensorflow/proto/framework/ApiDef.java | 4 +- .../proto/framework/ConfigProto.java | 97 +- .../proto/framework/ConfigProtos.java | 219 +- .../proto/framework/DeviceAttributes.java | 82 + .../framework/DeviceAttributesOrBuilder.java | 11 + .../framework/DeviceAttributesProtos.java | 15 +- .../proto/framework/FullTypeDef.java | 89 + .../proto/framework/FullTypeDefOrBuilder.java | 9 + .../proto/framework/FullTypeId.java | 141 +- .../proto/framework/FullTypeProtos.java | 36 +- .../tensorflow/proto/framework/NodeDef.java | 265 ++ .../proto/framework/NodeDefOrBuilder.java | 34 + .../tensorflow/proto/framework/NodeProto.java | 32 +- .../proto/framework/OptimizerOptions.java | 82 + .../framework/OptimizerOptionsOrBuilder.java | 11 + .../proto/framework/SavedObject.java | 436 +++ .../framework/SavedObjectGraphProtos.java | 150 +- .../proto/framework/SavedObjectOrBuilder.java | 57 + .../tensorflow/proto/framework/Status.java | 478 +++ .../proto/framework/StructProtos.java | 29 +- .../proto/framework/TypeSpecProto.java | 73 + .../framework/TypeSpecProtoOrBuilder.java | 9 + .../proto/profiler/ProfileOptions.java | 23 + .../profiler/ProfileOptionsOrBuilder.java | 4 + .../proto/profiler/ProfilerOptionsProtos.java | 21 +- .../src/gen/resources/ops.pb | Bin 1488903 -> 1485132 bytes .../src/gen/resources/ops.pbtxt | 2642 +++++++++-------- .../internal/c_api/presets/tensorflow.java | 4 +- .../tensorflow-core-api/tensorflow.bazelrc | 127 +- 192 files changed, 12432 insertions(+), 9256 deletions(-) delete mode 100644 
tensorflow-core/tensorflow-core-api/external/custom-grad-helpers.patch create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_AnonymousHashTable.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_AssignVariableXlaConcatND.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveAllToAllV3.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveInitializeCommunicator.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveReduceV3.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_GetElementAtIndex.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_IsTPUEmbeddingInitialized.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_LoadTPUEmbeddingAdagradMomentumParameters.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_ReadVariableXlaSplitND.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RetrieveTPUEmbeddingAdagradMomentumParameters.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SaveDatasetV2.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_WindowOp.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaAllReduce.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaConcatND.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaReduceScatter.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaRngBitGenerator.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaSplitND.pbtxt create mode 100644 tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaVariadicReduceV2.pbtxt delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/AllReduce.java rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/{BroadcastSend.java => CollectiveAllToAll.java} (52%) rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/{rawops/CollectiveBcastRecvV2.java => collective/CollectiveBcastRecv.java} (88%) rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/{rawops/CollectiveBcastSendV2.java => collective/CollectiveBcastSend.java} (87%) rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/{GatherV2.java => CollectiveGather.java} (90%) rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/{BroadcastRecv.java => CollectiveInitializeCommunicator.java} (59%) rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/{tpu => collective}/CollectivePermute.java (99%) rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/{Gather.java => CollectiveReduce.java} (54%) delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/Reduce.java delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/ReduceV2.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousHashTable.java delete mode 100644 
tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CollectiveGather.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetElementAtIndex.java rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/{rawops => core}/GetOptions.java (98%) rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/{core/Window.java => data/WindowOp.java} (82%) rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/{rawops => random}/StatelessRandomGetAlg.java (98%) rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/{rawops => random}/StatelessRandomGetKeyCounter.java (99%) delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/IsTPUEmbeddingInitialized.java delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingADAMParametersGradAccumDebug.java delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.java rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/{LoadTPUEmbeddingAdagradParametersGradAccumDebug.java => LoadTPUEmbeddingAdagradMomentumParameters.java} (75%) delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFTRLParametersGradAccumDebug.java rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/{rawops => tpu}/LoadTPUEmbeddingFrequencyEstimatorParameters.java (97%) delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMomentumParametersGradAccumDebug.java delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug.java delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalYogiParametersGradAccumDebug.java delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingRMSPropParametersGradAccumDebug.java delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.java delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingADAMParametersGradAccumDebug.java delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.java rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/{RetrieveTPUEmbeddingAdagradParametersGradAccumDebug.java => RetrieveTPUEmbeddingAdagradMomentumParameters.java} (77%) delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFTRLParametersGradAccumDebug.java rename tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/{rawops => tpu}/RetrieveTPUEmbeddingFrequencyEstimatorParameters.java (97%) delete mode 100644 
tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMomentumParametersGradAccumDebug.java delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug.java delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug.java delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.java delete mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/AllReduce.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/AssignVariableConcatND.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ConcatND.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReadVariableSplitND.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReduceScatter.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/RngBitGenerator.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SplitND.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/AutotuneOptions.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/AutotuneOptionsOrBuilder.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DataService.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DatasetMetadata.java create mode 100644 tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/Status.java diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9e001606a23..1c79ea46728 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -203,7 +203,7 @@ jobs: run: | set "PATH=C:\msys64\usr\bin;%PATH%" echo Removing broken stuff from WSL and MSYS2 - rm "C:/WINDOWS/system32/bash.EXE" "C:/msys64/usr/bin/python.exe" + rm "C:/WINDOWS/system32/bash.EXE" "C:/msys64/usr/bin/python.exe" "C:/msys64/usr/bin/python3.exe" python -m pip install numpy six echo Removing old versions of MSVC that interfere with Bazel bash.exe -lc "find 'C:/Program Files (x86)/Microsoft Visual Studio/2019/Enterprise/VC/' -iname '14.1*' -exec rm -Rf {} \;" diff --git a/tensorflow-core/tensorflow-core-api/WORKSPACE b/tensorflow-core/tensorflow-core-api/WORKSPACE index 013338cfbba..8be7695be78 100644 --- a/tensorflow-core/tensorflow-core-api/WORKSPACE +++ b/tensorflow-core/tensorflow-core-api/WORKSPACE @@ -12,17 +12,16 @@ http_archive( # ":tensorflow-macosx.patch", # ":tensorflow-windows.patch", # https://github.com/tensorflow/tensorflow/issues/25213 ":tensorflow-proto.patch", - ":custom-grad-helpers.patch", ":custom-grad-symbols.patch", ], patch_tool = "patch", patch_args = ["-p1"], patch_cmds = ["grep -rl 'java_package' tensorflow/core | xargs sed -i.bak 's/^\(.* java_package = \"org\.tensorflow\.\)\(.*\"\)/\\1proto.\\2'/"], urls = [ - "https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.6.0.tar.gz", + 
"https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.7.0.tar.gz", ], - sha256 = "41b32eeaddcbc02b0583660bcf508469550e4cd0f86b22d2abe72dfebeacde0f", - strip_prefix = "tensorflow-2.6.0" + sha256 = "bb124905c7fdacd81e7c842b287c169bbf377d29c74c9dacc04f96c9793747bb", + strip_prefix = "tensorflow-2.7.0" ) # START: Upstream TensorFlow dependencies diff --git a/tensorflow-core/tensorflow-core-api/external/custom-grad-helpers.patch b/tensorflow-core/tensorflow-core-api/external/custom-grad-helpers.patch deleted file mode 100644 index aedf53e2e56..00000000000 --- a/tensorflow-core/tensorflow-core-api/external/custom-grad-helpers.patch +++ /dev/null @@ -1,57 +0,0 @@ -diff --git a/tensorflow/c/c_api.cc b/tensorflow/c/c_api.cc -index f3bf7b98a1e6b..c9194c36c116b 100644 ---- a/tensorflow/c/c_api.cc -+++ b/tensorflow/c/c_api.cc -@@ -782,9 +782,9 @@ void TF_GraphGetTensorShape(TF_Graph* graph, TF_Output output, int64_t* dims, - - extern "C" { - --static TF_OperationDescription* TF_NewOperationLocked(TF_Graph* graph, -- const char* op_type, -- const char* oper_name) -+TF_OperationDescription* TF_NewOperationLocked(TF_Graph* graph, -+ const char* op_type, -+ const char* oper_name) - TF_EXCLUSIVE_LOCKS_REQUIRED(graph->mu) { - return new TF_OperationDescription(graph, op_type, oper_name); - } -@@ -1041,8 +1041,8 @@ void TF_SetAttrValueProto(TF_OperationDescription* desc, const char* attr_name, - status->status = Status::OK(); - } - --static TF_Operation* TF_FinishOperationLocked(TF_OperationDescription* desc, -- TF_Status* status) -+TF_Operation* TF_FinishOperationLocked(TF_OperationDescription* desc, -+ TF_Status* status) - TF_EXCLUSIVE_LOCKS_REQUIRED(desc->graph->mu) { - Node* ret = nullptr; - -diff --git a/tensorflow/c/c_api.h b/tensorflow/c/c_api.h -index 705cf85e0512f..fb746dd4af94f 100644 ---- a/tensorflow/c/c_api.h -+++ b/tensorflow/c/c_api.h -@@ -255,6 +255,12 @@ TF_CAPI_EXPORT extern void TF_GraphGetTensorShape(TF_Graph* graph, - int64_t* dims, int num_dims, - TF_Status* status); - -+// TF_NewOperation, but without locking the graph. -+// Should prefer TF_NewOperation when possible. -+TF_CAPI_EXPORT extern TF_OperationDescription* TF_NewOperationLocked(TF_Graph* graph, -+ const char* op_type, -+ const char* oper_name); -+ - // Operation will only be added to *graph when TF_FinishOperation() is - // called (assuming TF_FinishOperation() does not return an error). - // *graph must not be deleted until after TF_FinishOperation() is -@@ -406,6 +412,11 @@ TF_CAPI_EXPORT extern void TF_SetAttrValueProto(TF_OperationDescription* desc, - size_t proto_len, - TF_Status* status); - -+// TF_FinishOperation, but without locking the graph. -+// TF_FinishOperation should be preferred when possible. 
-+TF_CAPI_EXPORT extern TF_Operation* TF_FinishOperationLocked(TF_OperationDescription* desc, -+ TF_Status* status); -+ - // If this function succeeds: - // * *status is set to an OK value, - // * a TF_Operation is added to the graph, diff --git a/tensorflow-core/tensorflow-core-api/external/tensorflow-proto.patch b/tensorflow-core/tensorflow-core-api/external/tensorflow-proto.patch index 7823514e4bc..beef5d567fd 100644 --- a/tensorflow-core/tensorflow-core-api/external/tensorflow-proto.patch +++ b/tensorflow-core/tensorflow-core-api/external/tensorflow-proto.patch @@ -1,6 +1,92 @@ -diff -ruN tensorflow-2.6.0/tensorflow/core/protobuf/bfc_memory_map.proto tensorflow-2.6.0-proto/tensorflow/core/protobuf/bfc_memory_map.proto ---- tensorflow-2.6.0/tensorflow/core/protobuf/bfc_memory_map.proto 2021-08-10 04:10:27.000000000 +0900 -+++ tensorflow-2.6.0-proto/tensorflow/core/protobuf/bfc_memory_map.proto 2021-08-30 11:22:48.263351451 +0900 +diff -ruN tensorflow-2.7.0/tensorflow/core/framework/dataset_metadata.proto tensorflow-2.7.0-proto/tensorflow/core/framework/dataset_metadata.proto +--- tensorflow-2.7.0/tensorflow/core/framework/dataset_metadata.proto 2021-11-01 10:31:04.000000000 +0900 ++++ tensorflow-2.7.0-proto/tensorflow/core/framework/dataset_metadata.proto 2021-11-09 12:11:55.183453737 +0900 +@@ -2,6 +2,7 @@ + + package tensorflow.data; + ++option java_package = "org.tensorflow.data"; + option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/dataset_metadata_go_proto"; + + // next: 2 +diff -ruN tensorflow-2.7.0/tensorflow/core/framework/dataset_options.proto tensorflow-2.7.0-proto/tensorflow/core/framework/dataset_options.proto +--- tensorflow-2.7.0/tensorflow/core/framework/dataset_options.proto 2021-11-01 10:31:04.000000000 +0900 ++++ tensorflow-2.7.0-proto/tensorflow/core/framework/dataset_options.proto 2021-11-09 12:07:40.449571619 +0900 +@@ -4,6 +4,10 @@ + + option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/dataset_options_go_proto"; + ++option java_outer_classname = "DatasetOptionsProtos"; ++option java_multiple_files = true; ++option java_package = "org.tensorflow.data"; ++ + // Represents the type of auto-sharding we enable. + enum AutoShardPolicy { + // AUTO: Attempts FILE-based sharding, falling back to DATA-based sharding. +diff -ruN tensorflow-2.7.0/tensorflow/core/framework/model.proto tensorflow-2.7.0-proto/tensorflow/core/framework/model.proto +--- tensorflow-2.7.0/tensorflow/core/framework/model.proto 2021-11-01 10:31:04.000000000 +0900 ++++ tensorflow-2.7.0-proto/tensorflow/core/framework/model.proto 2021-11-09 12:07:40.450571622 +0900 +@@ -3,6 +3,9 @@ + package tensorflow.data.model; + + option cc_enable_arenas = true; ++option java_outer_classname = "ModelProtos"; ++option java_multiple_files = true; ++option java_package = "org.tensorflow.data.model"; + option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/model_go_proto"; + + // Class of a node in the performance model. 
+diff -ruN tensorflow-2.7.0/tensorflow/core/grappler/costs/op_performance_data.proto tensorflow-2.7.0-proto/tensorflow/core/grappler/costs/op_performance_data.proto +--- tensorflow-2.7.0/tensorflow/core/grappler/costs/op_performance_data.proto 2021-11-01 10:31:04.000000000 +0900 ++++ tensorflow-2.7.0-proto/tensorflow/core/grappler/costs/op_performance_data.proto 2021-11-09 12:07:40.450571622 +0900 +@@ -17,6 +17,9 @@ + + package tensorflow; + option cc_enable_arenas = true; ++option java_outer_classname = "OpPerformanceDataProtos"; ++option java_multiple_files = true; ++option java_package = "org.tensorflow.framework"; + + import "tensorflow/core/framework/tensor.proto"; + import "tensorflow/core/framework/tensor_shape.proto"; +diff -ruN tensorflow-2.7.0/tensorflow/core/lib/core/error_codes.proto tensorflow-2.7.0-proto/tensorflow/core/lib/core/error_codes.proto +--- tensorflow-2.7.0/tensorflow/core/lib/core/error_codes.proto 2021-11-01 10:31:04.000000000 +0900 ++++ tensorflow-2.7.0-proto/tensorflow/core/lib/core/error_codes.proto 2021-11-09 12:07:40.447571613 +0900 +@@ -1,3 +1,5 @@ + syntax = "proto3"; + ++option java_package = "org.tensorflow.framework"; ++ + import public "tensorflow/core/protobuf/error_codes.proto"; +diff -ruN tensorflow-2.7.0/tensorflow/core/profiler/profiler_options.proto tensorflow-2.7.0-proto/tensorflow/core/profiler/profiler_options.proto +--- tensorflow-2.7.0/tensorflow/core/profiler/profiler_options.proto 2021-11-01 10:31:04.000000000 +0900 ++++ tensorflow-2.7.0-proto/tensorflow/core/profiler/profiler_options.proto 2021-11-09 12:07:40.448571616 +0900 +@@ -1,6 +1,9 @@ + syntax = "proto3"; + + package tensorflow; ++option java_outer_classname = "ProfilerOptionsProtos"; ++option java_multiple_files = true; ++option java_package = "org.tensorflow.profiler"; + + // Next ID: 11 + message ProfileOptions { +diff -ruN tensorflow-2.7.0/tensorflow/core/profiler/protobuf/xplane.proto tensorflow-2.7.0-proto/tensorflow/core/profiler/protobuf/xplane.proto +--- tensorflow-2.7.0/tensorflow/core/profiler/protobuf/xplane.proto 2021-11-01 10:31:04.000000000 +0900 ++++ tensorflow-2.7.0-proto/tensorflow/core/profiler/protobuf/xplane.proto 2021-11-09 12:07:40.447571613 +0900 +@@ -3,6 +3,9 @@ + package tensorflow.profiler; + + option cc_enable_arenas = true; ++option java_outer_classname = "XPlaneProtos"; ++option java_multiple_files = true; ++option java_package = "org.tensorflow.profiler"; + + // A container of parallel XPlanes, generated by one or more profiling sources. 
+ // Next ID: 5 +diff -ruN tensorflow-2.7.0/tensorflow/core/protobuf/bfc_memory_map.proto tensorflow-2.7.0-proto/tensorflow/core/protobuf/bfc_memory_map.proto +--- tensorflow-2.7.0/tensorflow/core/protobuf/bfc_memory_map.proto 2021-11-01 10:31:04.000000000 +0900 ++++ tensorflow-2.7.0-proto/tensorflow/core/protobuf/bfc_memory_map.proto 2021-11-09 12:07:40.443571601 +0900 @@ -3,6 +3,9 @@ package tensorflow; @@ -11,23 +97,32 @@ diff -ruN tensorflow-2.6.0/tensorflow/core/protobuf/bfc_memory_map.proto tensorf // Some of the data from AllocatorStats message MemAllocatorStats { -diff -ruN tensorflow-2.6.0/tensorflow/core/protobuf/snapshot.proto tensorflow-2.6.0-proto/tensorflow/core/protobuf/snapshot.proto ---- tensorflow-2.6.0/tensorflow/core/protobuf/snapshot.proto 2021-08-10 04:10:27.000000000 +0900 -+++ tensorflow-2.6.0-proto/tensorflow/core/protobuf/snapshot.proto 2021-08-30 11:22:48.264351453 +0900 -@@ -8,6 +8,10 @@ +diff -ruN tensorflow-2.7.0/tensorflow/core/protobuf/composite_tensor_variant.proto tensorflow-2.7.0-proto/tensorflow/core/protobuf/composite_tensor_variant.proto +--- tensorflow-2.7.0/tensorflow/core/protobuf/composite_tensor_variant.proto 2021-11-01 10:31:04.000000000 +0900 ++++ tensorflow-2.7.0-proto/tensorflow/core/protobuf/composite_tensor_variant.proto 2021-11-09 12:07:40.451571625 +0900 +@@ -3,7 +3,7 @@ + package tensorflow; + import "tensorflow/core/protobuf/struct.proto"; +- ++option java_package = "org.tensorflow.framework"; option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto"; -+option java_outer_classname = "SnapshotProtos"; -+option java_multiple_files = true; -+option java_package = "org.tensorflow.data.experimental"; -+ - // Each SnapshotRecord represents one batch of pre-processed input data. A batch - // consists of a list of tensors that we encode as TensorProtos. This message - // doesn't store the structure of the batch. -diff -ruN tensorflow-2.6.0/tensorflow/core/protobuf/device_properties.proto tensorflow-2.6.0-proto/tensorflow/core/protobuf/device_properties.proto ---- tensorflow-2.6.0/tensorflow/core/protobuf/device_properties.proto 2021-08-10 04:10:27.000000000 +0900 -+++ tensorflow-2.6.0-proto/tensorflow/core/protobuf/device_properties.proto 2021-08-30 11:22:48.264351453 +0900 + // Metadata for CompositeTensorVariant, used when serializing as Variant. 
+diff -ruN tensorflow-2.7.0/tensorflow/core/protobuf/data_service.proto tensorflow-2.7.0-proto/tensorflow/core/protobuf/data_service.proto +--- tensorflow-2.7.0/tensorflow/core/protobuf/data_service.proto 2021-11-01 10:31:04.000000000 +0900 ++++ tensorflow-2.7.0-proto/tensorflow/core/protobuf/data_service.proto 2021-11-09 12:10:45.915184828 +0900 +@@ -2,6 +2,7 @@ + + package tensorflow.data; + ++option java_package = "org.tensorflow.data"; + option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto"; + + message ProcessingModeDef { +diff -ruN tensorflow-2.7.0/tensorflow/core/protobuf/device_properties.proto tensorflow-2.7.0-proto/tensorflow/core/protobuf/device_properties.proto +--- tensorflow-2.7.0/tensorflow/core/protobuf/device_properties.proto 2021-11-01 10:31:04.000000000 +0900 ++++ tensorflow-2.7.0-proto/tensorflow/core/protobuf/device_properties.proto 2021-11-09 12:07:40.444571604 +0900 @@ -19,6 +19,8 @@ option cc_enable_arenas = true; @@ -37,10 +132,10 @@ diff -ruN tensorflow-2.6.0/tensorflow/core/protobuf/device_properties.proto tens option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto"; message DeviceProperties { -diff -ruN tensorflow-2.6.0/tensorflow/core/protobuf/saved_object_graph.proto tensorflow-2.6.0-proto/tensorflow/core/protobuf/saved_object_graph.proto ---- tensorflow-2.6.0/tensorflow/core/protobuf/saved_object_graph.proto 2021-08-10 04:10:27.000000000 +0900 -+++ tensorflow-2.6.0-proto/tensorflow/core/protobuf/saved_object_graph.proto 2021-08-30 11:22:48.265351456 +0900 -@@ -11,6 +11,9 @@ +diff -ruN tensorflow-2.7.0/tensorflow/core/protobuf/saved_object_graph.proto tensorflow-2.7.0-proto/tensorflow/core/protobuf/saved_object_graph.proto +--- tensorflow-2.7.0/tensorflow/core/protobuf/saved_object_graph.proto 2021-11-01 10:31:04.000000000 +0900 ++++ tensorflow-2.7.0-proto/tensorflow/core/protobuf/saved_object_graph.proto 2021-11-09 12:07:40.445571607 +0900 +@@ -12,6 +12,9 @@ option cc_enable_arenas = true; option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto"; @@ -50,9 +145,275 @@ diff -ruN tensorflow-2.6.0/tensorflow/core/protobuf/saved_object_graph.proto ten // A SavedObjectGraph is part of object-based SavedModels in TF 2.0. 
It // describes the directed graph of Python objects (or equivalent in other -diff -ruN tensorflow-2.6.0/tensorflow/core/protobuf/struct.proto tensorflow-2.6.0-proto/tensorflow/core/protobuf/struct.proto ---- tensorflow-2.6.0/tensorflow/core/protobuf/struct.proto 2021-08-10 04:10:27.000000000 +0900 -+++ tensorflow-2.6.0-proto/tensorflow/core/protobuf/struct.proto 2021-08-30 11:22:48.265351456 +0900 +diff -ruN tensorflow-2.7.0/tensorflow/core/protobuf/saved_object_graph.proto.orig tensorflow-2.7.0-proto/tensorflow/core/protobuf/saved_object_graph.proto.orig +--- tensorflow-2.7.0/tensorflow/core/protobuf/saved_object_graph.proto.orig 1970-01-01 09:00:00.000000000 +0900 ++++ tensorflow-2.7.0-proto/tensorflow/core/protobuf/saved_object_graph.proto.orig 2021-11-01 10:31:04.000000000 +0900 +@@ -0,0 +1,225 @@ ++syntax = "proto3"; ++ ++package tensorflow; ++ ++import "google/protobuf/any.proto"; ++import "tensorflow/core/framework/tensor_shape.proto"; ++import "tensorflow/core/framework/types.proto"; ++import "tensorflow/core/framework/variable.proto"; ++import "tensorflow/core/framework/versions.proto"; ++import "tensorflow/core/protobuf/struct.proto"; ++import "tensorflow/core/protobuf/trackable_object_graph.proto"; ++ ++option cc_enable_arenas = true; ++option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto"; ++ ++// A SavedObjectGraph is part of object-based SavedModels in TF 2.0. It ++// describes the directed graph of Python objects (or equivalent in other ++// languages) that make up a model, with nodes[0] at the root. ++ ++// SavedObjectGraph shares some structure with TrackableObjectGraph, but ++// SavedObjectGraph belongs to the MetaGraph and contains pointers to functions ++// and type information, while TrackableObjectGraph lives in the checkpoint ++// and contains pointers only to variable values. ++ ++message SavedObjectGraph { ++ // Flattened list of objects in the object graph. ++ // ++ // The position of the object in this list indicates its id. ++ // Nodes[0] is considered the root node. ++ repeated SavedObject nodes = 1; ++ ++ // Information about captures and output structures in concrete functions. ++ // Referenced from SavedBareConcreteFunction and SavedFunction. ++ map concrete_functions = 2; ++} ++ ++message SavedObject { ++ // Objects which this object depends on: named edges in the dependency ++ // graph. ++ // ++ // Note: currently only valid if kind == "user_object" or "resource". ++ repeated TrackableObjectGraph.TrackableObject.ObjectReference children = 1; ++ ++ // Removed when forking SavedObject from TrackableObjectGraph. ++ reserved "attributes"; ++ reserved 2; ++ ++ // Slot variables owned by this object. This describes the three-way ++ // (optimizer, variable, slot variable) relationship; none of the three ++ // depend on the others directly. ++ // ++ // Note: currently only valid if kind == "user_object". ++ repeated TrackableObjectGraph.TrackableObject.SlotVariableReference ++ slot_variables = 3; ++ ++ oneof kind { ++ SavedUserObject user_object = 4; ++ SavedAsset asset = 5; ++ SavedFunction function = 6; ++ SavedVariable variable = 7; ++ SavedBareConcreteFunction bare_concrete_function = 8; ++ SavedConstant constant = 9; ++ SavedResource resource = 10; ++ CapturedTensor captured_tensor = 12; ++ } ++ ++ map saveable_objects = 11; ++ ++ // The fields below are filled when the user serializes a registered Trackable ++ // class. 
Registered classes may save additional metadata and supersede the ++ // default loading process where nodes are recreated from the proto. ++ // ++ // The name of the registered class of the form "{package}.{class_name}". ++ // This field is used to search for the registered class at loading time. ++ string registered_name = 13; ++ // The user-generated proto storing metadata for this object, to be passed to ++ // the registered classes's _deserialize_from_proto method when this object is ++ // loaded from the SavedModel. ++ google.protobuf.Any serialized_user_proto = 14; ++} ++ ++// A SavedUserObject is an object (in the object-oriented language of the ++// TensorFlow program) of some user- or framework-defined class other than ++// those handled specifically by the other kinds of SavedObjects. ++// ++// This object cannot be evaluated as a tensor, and therefore cannot be bound ++// to an input of a function. ++message SavedUserObject { ++ // Corresponds to a registration of the type to use in the loading program. ++ string identifier = 1; ++ // Version information from the producer of this SavedUserObject. ++ VersionDef version = 2; ++ // Metadata for deserializing this object. ++ // ++ // Deprecated! At the time of deprecation, Keras was the only user of this ++ // field, and its saving and loading code will be updated shortly. ++ // Please save your application-specific metadata to a separate file. ++ string metadata = 3 [deprecated = true]; ++} ++ ++// A SavedAsset points to an asset in the MetaGraph. ++// ++// When bound to a function this object evaluates to a tensor with the absolute ++// filename. Users should not depend on a particular part of the filename to ++// remain stable (e.g. basename could be changed). ++message SavedAsset { ++ // Index into `MetaGraphDef.asset_file_def[]` that describes the Asset. ++ // ++ // Only the field `AssetFileDef.filename` is used. Other fields, such as ++ // `AssetFileDef.tensor_info`, MUST be ignored. ++ int32 asset_file_def_index = 1; ++} ++ ++// A function with multiple signatures, possibly with non-Tensor arguments. ++message SavedFunction { ++ repeated string concrete_functions = 1; ++ FunctionSpec function_spec = 2; ++} ++ ++message CapturedTensor { ++ // Name of captured tensor ++ string name = 1; ++ ++ // Name of concrete function which contains the computed graph tensor. ++ string concrete_function = 2; ++} ++ ++// Stores low-level information about a concrete function. Referenced in either ++// a SavedFunction or a SavedBareConcreteFunction. ++message SavedConcreteFunction { ++ repeated int32 bound_inputs = 2; ++ ++ // Input in canonicalized form that was received to create this concrete ++ // function. ++ StructuredValue canonicalized_input_signature = 3; ++ // Output that was the return value of this function after replacing all ++ // Tensors with TensorSpecs. This can be an arbitrary nested function and will ++ // be used to reconstruct the full structure from pure tensors. ++ StructuredValue output_signature = 4; ++} ++ ++message SavedBareConcreteFunction { ++ // Identifies a SavedConcreteFunction. ++ string concrete_function_name = 1; ++ ++ // A sequence of unique strings, one per Tensor argument. ++ repeated string argument_keywords = 2; ++ // The prefix of `argument_keywords` which may be identified by position. ++ int64 allowed_positional_arguments = 3; ++ // The spec of the function that this ConcreteFunction is traced from. This ++ // allows the ConcreteFunction to be called with nest structure inputs. 
This ++ // field may not be populated. If this field is absent, the concrete function ++ // can only be called with flat inputs. ++ // TODO(b/169361281): support calling saved ConcreteFunction with structured ++ // inputs in C++ SavedModel API. ++ FunctionSpec function_spec = 4; ++} ++ ++message SavedConstant { ++ // An Operation name for a ConstantOp in this SavedObjectGraph's MetaGraph. ++ string operation = 1; ++} ++ ++// Represents a Variable that is initialized by loading the contents from the ++// checkpoint. ++message SavedVariable { ++ DataType dtype = 1; ++ TensorShapeProto shape = 2; ++ bool trainable = 3; ++ VariableSynchronization synchronization = 4; ++ VariableAggregation aggregation = 5; ++ string name = 6; ++ string device = 7; ++ // List of component variables for a distributed variable. ++ // ++ // When this field is non-empty, the SavedVariable will be assumed ++ // to be a distributed variable defined by the components listed here. ++ // ++ // This is only supported by experimental loaders at the moment. ++ repeated SavedVariable experimental_distributed_variable_components = 8; ++} ++ ++// Represents `FunctionSpec` used in `Function`. This represents a ++// function that has been wrapped as a TensorFlow `Function`. ++message FunctionSpec { ++ // Full arg spec from inspect.getfullargspec(). ++ StructuredValue fullargspec = 1; ++ // Whether this represents a class method. ++ bool is_method = 2; ++ // The input signature, if specified. ++ StructuredValue input_signature = 5; ++ ++ // Whether the function should be compiled by XLA. ++ // ++ // The public interface to `tf.function` uses an optional boolean to ++ // represent three distinct states for this field. Unfortunately, proto3 ++ // removes the ability to explicitly check for the presence or absence of a ++ // field, so we instead map to an enum. ++ // ++ // See `tf.function` for details. ++ enum JitCompile { ++ DEFAULT = 0; ++ ON = 1; ++ OFF = 2; ++ } ++ JitCompile jit_compile = 6; ++ ++ reserved 3, 4; ++} ++ ++// A SavedResource represents a TF object that holds state during its lifetime. ++// An object of this type can have a reference to a: ++// create_resource() and an initialize() function. ++message SavedResource { ++ // A device specification indicating a required placement for the resource ++ // creation function, e.g. "CPU". An empty string allows the user to select a ++ // device. ++ string device = 1; ++} ++ ++message SaveableObject { ++ // Node ids of concrete functions for saving and loading from a checkpoint. 
++ int32 save_function = 2; ++ int32 restore_function = 3; ++} +diff -ruN tensorflow-2.7.0/tensorflow/core/protobuf/service_config.proto tensorflow-2.7.0-proto/tensorflow/core/protobuf/service_config.proto +--- tensorflow-2.7.0/tensorflow/core/protobuf/service_config.proto 2021-11-01 10:31:04.000000000 +0900 ++++ tensorflow-2.7.0-proto/tensorflow/core/protobuf/service_config.proto 2021-11-09 12:07:40.449571619 +0900 +@@ -1,6 +1,7 @@ + syntax = "proto3"; + + package tensorflow.data.experimental; ++option java_package = "org.tensorflow.data.experimental"; + + option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto"; + +diff -ruN tensorflow-2.7.0/tensorflow/core/protobuf/snapshot.proto tensorflow-2.7.0-proto/tensorflow/core/protobuf/snapshot.proto +--- tensorflow-2.7.0/tensorflow/core/protobuf/snapshot.proto 2021-11-01 10:31:04.000000000 +0900 ++++ tensorflow-2.7.0-proto/tensorflow/core/protobuf/snapshot.proto 2021-11-09 12:07:40.444571604 +0900 +@@ -8,6 +8,10 @@ + + option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto"; + ++option java_outer_classname = "SnapshotProtos"; ++option java_multiple_files = true; ++option java_package = "org.tensorflow.data.experimental"; ++ + // Each SnapshotRecord represents one batch of pre-processed input data. A batch + // consists of a list of tensors that we encode as TensorProtos. This message + // doesn't store the structure of the batch. +diff -ruN tensorflow-2.7.0/tensorflow/core/protobuf/status.proto tensorflow-2.7.0-proto/tensorflow/core/protobuf/status.proto +--- tensorflow-2.7.0/tensorflow/core/protobuf/status.proto 2021-11-01 10:31:04.000000000 +0900 ++++ tensorflow-2.7.0-proto/tensorflow/core/protobuf/status.proto 2021-11-09 12:09:16.716853813 +0900 +@@ -2,6 +2,8 @@ + + package tensorflow; + ++option java_package = "org.tensorflow.framework"; ++ + // If included as a payload, this message flags the Status to be a "derived" + // Status. Used by StatusGroup to ignore certain Statuses when reporting + // errors to end users. +diff -ruN tensorflow-2.7.0/tensorflow/core/protobuf/struct.proto tensorflow-2.7.0-proto/tensorflow/core/protobuf/struct.proto +--- tensorflow-2.7.0/tensorflow/core/protobuf/struct.proto 2021-11-01 10:31:04.000000000 +0900 ++++ tensorflow-2.7.0-proto/tensorflow/core/protobuf/struct.proto 2021-11-09 12:07:40.445571607 +0900 @@ -7,6 +7,9 @@ import "tensorflow/core/framework/types.proto"; @@ -63,9 +424,9 @@ diff -ruN tensorflow-2.6.0/tensorflow/core/protobuf/struct.proto tensorflow-2.6. 
// `StructuredValue` represents a dynamically typed value representing various // data structures that are inspired by Python data structures typically used in -diff -ruN tensorflow-2.6.0/tensorflow/core/protobuf/trackable_object_graph.proto tensorflow-2.6.0-proto/tensorflow/core/protobuf/trackable_object_graph.proto ---- tensorflow-2.6.0/tensorflow/core/protobuf/trackable_object_graph.proto 2021-08-10 04:10:27.000000000 +0900 -+++ tensorflow-2.6.0-proto/tensorflow/core/protobuf/trackable_object_graph.proto 2021-08-30 11:22:48.266351458 +0900 +diff -ruN tensorflow-2.7.0/tensorflow/core/protobuf/trackable_object_graph.proto tensorflow-2.7.0-proto/tensorflow/core/protobuf/trackable_object_graph.proto +--- tensorflow-2.7.0/tensorflow/core/protobuf/trackable_object_graph.proto 2021-11-01 10:31:04.000000000 +0900 ++++ tensorflow-2.7.0-proto/tensorflow/core/protobuf/trackable_object_graph.proto 2021-11-09 12:07:40.446571610 +0900 @@ -4,6 +4,9 @@ option cc_enable_arenas = true; @@ -76,9 +437,9 @@ diff -ruN tensorflow-2.6.0/tensorflow/core/protobuf/trackable_object_graph.proto // A TensorBundle addition which saves extra information about the objects which // own variables, allowing for more robust checkpoint loading into modified -diff -ruN tensorflow-2.6.0/tensorflow/core/protobuf/transport_options.proto tensorflow-2.6.0-proto/tensorflow/core/protobuf/transport_options.proto ---- tensorflow-2.6.0/tensorflow/core/protobuf/transport_options.proto 2021-08-10 04:10:27.000000000 +0900 -+++ tensorflow-2.6.0-proto/tensorflow/core/protobuf/transport_options.proto 2021-08-30 11:22:48.266351458 +0900 +diff -ruN tensorflow-2.7.0/tensorflow/core/protobuf/transport_options.proto tensorflow-2.7.0-proto/tensorflow/core/protobuf/transport_options.proto +--- tensorflow-2.7.0/tensorflow/core/protobuf/transport_options.proto 2021-11-01 10:31:04.000000000 +0900 ++++ tensorflow-2.7.0-proto/tensorflow/core/protobuf/transport_options.proto 2021-11-09 12:07:40.446571610 +0900 @@ -3,6 +3,7 @@ package tensorflow; @@ -87,31 +448,9 @@ diff -ruN tensorflow-2.6.0/tensorflow/core/protobuf/transport_options.proto tens // Extra data needed on a non-RDMA RecvBufResponse. message RecvBufRespExtra { -diff -ruN tensorflow-2.6.0/tensorflow/core/lib/core/error_codes.proto tensorflow-2.6.0-proto/tensorflow/core/lib/core/error_codes.proto ---- tensorflow-2.6.0/tensorflow/core/lib/core/error_codes.proto 2021-08-10 04:10:27.000000000 +0900 -+++ tensorflow-2.6.0-proto/tensorflow/core/lib/core/error_codes.proto 2021-08-30 11:22:48.267351461 +0900 -@@ -1,3 +1,5 @@ - syntax = "proto3"; - -+option java_package = "org.tensorflow.framework"; -+ - import public "tensorflow/core/protobuf/error_codes.proto"; -diff -ruN tensorflow-2.6.0/tensorflow/core/profiler/protobuf/xplane.proto tensorflow-2.6.0-proto/tensorflow/core/profiler/protobuf/xplane.proto ---- tensorflow-2.6.0/tensorflow/core/profiler/protobuf/xplane.proto 2021-08-10 04:10:27.000000000 +0900 -+++ tensorflow-2.6.0-proto/tensorflow/core/profiler/protobuf/xplane.proto 2021-08-30 11:22:48.267351461 +0900 -@@ -3,6 +3,9 @@ - package tensorflow.profiler; - - option cc_enable_arenas = true; -+option java_outer_classname = "XPlaneProtos"; -+option java_multiple_files = true; -+option java_package = "org.tensorflow.profiler"; - - // A container of parallel XPlanes, generated by one or more profiling sources. 
- // Next ID: 5 -diff -ruN tensorflow-2.6.0/tensorflow/core/util/memmapped_file_system.proto tensorflow-2.6.0-proto/tensorflow/core/util/memmapped_file_system.proto ---- tensorflow-2.6.0/tensorflow/core/util/memmapped_file_system.proto 2021-08-10 04:10:27.000000000 +0900 -+++ tensorflow-2.6.0-proto/tensorflow/core/util/memmapped_file_system.proto 2021-08-30 11:22:48.268351463 +0900 +diff -ruN tensorflow-2.7.0/tensorflow/core/util/memmapped_file_system.proto tensorflow-2.7.0-proto/tensorflow/core/util/memmapped_file_system.proto +--- tensorflow-2.7.0/tensorflow/core/util/memmapped_file_system.proto 2021-11-01 10:31:04.000000000 +0900 ++++ tensorflow-2.7.0-proto/tensorflow/core/util/memmapped_file_system.proto 2021-11-09 12:07:40.448571616 +0900 @@ -17,6 +17,9 @@ package tensorflow; @@ -122,79 +461,3 @@ diff -ruN tensorflow-2.6.0/tensorflow/core/util/memmapped_file_system.proto tens // A message that describes one region of memmapped file. message MemmappedFileSystemDirectoryElement { -diff -ruN tensorflow-2.6.0/tensorflow/core/profiler/profiler_options.proto tensorflow-2.6.0-proto/tensorflow/core/profiler/profiler_options.proto ---- tensorflow-2.6.0/tensorflow/core/profiler/profiler_options.proto 2021-08-10 04:10:27.000000000 +0900 -+++ tensorflow-2.6.0-proto/tensorflow/core/profiler/profiler_options.proto 2021-08-30 11:22:48.268351463 +0900 -@@ -1,6 +1,9 @@ - syntax = "proto3"; - - package tensorflow; -+option java_outer_classname = "ProfilerOptionsProtos"; -+option java_multiple_files = true; -+option java_package = "org.tensorflow.profiler"; - - // Next ID: 11 - message ProfileOptions { -diff -ruN tensorflow-2.6.0/tensorflow/core/protobuf/service_config.proto tensorflow-2.6.0-proto/tensorflow/core/protobuf/service_config.proto ---- tensorflow-2.6.0/tensorflow/core/protobuf/service_config.proto 2021-08-10 04:10:27.000000000 +0900 -+++ tensorflow-2.6.0-proto/tensorflow/core/protobuf/service_config.proto 2021-08-30 11:22:48.269351466 +0900 -@@ -1,6 +1,7 @@ - syntax = "proto3"; - - package tensorflow.data.experimental; -+option java_package = "org.tensorflow.data.experimental"; - - option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto"; - -diff -ruN tensorflow-2.6.0/tensorflow/core/framework/dataset_options.proto tensorflow-2.6.0-proto/tensorflow/core/framework/dataset_options.proto ---- tensorflow-2.6.0/tensorflow/core/framework/dataset_options.proto 2021-08-10 04:10:27.000000000 +0900 -+++ tensorflow-2.6.0-proto/tensorflow/core/framework/dataset_options.proto 2021-08-30 11:22:48.269351466 +0900 -@@ -4,6 +4,10 @@ - - option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/dataset_options_go_proto"; - -+option java_outer_classname = "DatasetOptionsProtos"; -+option java_multiple_files = true; -+option java_package = "org.tensorflow.data"; -+ - // Represents the type of auto-sharding we enable. - enum AutoShardPolicy { - // AUTO: Attempts FILE-based sharding, falling back to DATA-based sharding. 
-diff -ruN tensorflow-2.6.0/tensorflow/core/framework/model.proto tensorflow-2.6.0-proto/tensorflow/core/framework/model.proto ---- tensorflow-2.6.0/tensorflow/core/framework/model.proto 2021-08-10 04:10:27.000000000 +0900 -+++ tensorflow-2.6.0-proto/tensorflow/core/framework/model.proto 2021-08-30 11:23:28.579451037 +0900 -@@ -3,6 +3,9 @@ - package tensorflow.data.model; - - option cc_enable_arenas = true; -+option java_outer_classname = "ModelProtos"; -+option java_multiple_files = true; -+option java_package = "org.tensorflow.data.model"; - option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/model_go_proto"; - - // Class of a node in the performance model. -diff -ruN tensorflow-2.6.0/tensorflow/core/grappler/costs/op_performance_data.proto tensorflow-2.6.0-proto/tensorflow/core/grappler/costs/op_performance_data.proto ---- tensorflow-2.6.0/tensorflow/core/grappler/costs/op_performance_data.proto 2021-08-10 04:10:27.000000000 +0900 -+++ tensorflow-2.6.0-proto/tensorflow/core/grappler/costs/op_performance_data.proto 2021-08-30 11:22:48.270351468 +0900 -@@ -17,6 +17,9 @@ - - package tensorflow; - option cc_enable_arenas = true; -+option java_outer_classname = "OpPerformanceDataProtos"; -+option java_multiple_files = true; -+option java_package = "org.tensorflow.framework"; - - import "tensorflow/core/framework/tensor.proto"; - import "tensorflow/core/framework/tensor_shape.proto"; -diff -ruN tensorflow-2.6.0/tensorflow/core/protobuf/composite_tensor_variant.proto tensorflow-2.6.0-proto/tensorflow/core/protobuf/composite_tensor_variant.proto ---- tensorflow-2.6.0/tensorflow/core/protobuf/composite_tensor_variant.proto 2021-08-10 04:10:27.000000000 +0900 -+++ tensorflow-2.6.0-proto/tensorflow/core/protobuf/composite_tensor_variant.proto 2021-08-30 15:43:37.086090343 +0900 -@@ -3,7 +3,7 @@ - package tensorflow; - - import "tensorflow/core/protobuf/struct.proto"; -- -+option java_package = "org.tensorflow.framework"; - option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto"; - - // Metadata for CompositeTensorVariant, used when serializing as Variant. 
diff --git a/tensorflow-core/tensorflow-core-api/pom.xml b/tensorflow-core/tensorflow-core-api/pom.xml index 5c35a6e2c62..dc990be8850 100644 --- a/tensorflow-core/tensorflow-core-api/pom.xml +++ b/tensorflow-core/tensorflow-core-api/pom.xml @@ -147,9 +147,13 @@ maven-resources-plugin 3.1.0 + javacpp-parser - generate-sources + initialize resources @@ -184,12 +188,14 @@ javacpp-parser - generate-sources + initialize compile diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_AnonymousHashTable.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_AnonymousHashTable.pbtxt new file mode 100644 index 00000000000..5508431f163 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_AnonymousHashTable.pbtxt @@ -0,0 +1,3 @@ +op { + graph_op_name: "AnonymousHashTable" +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_AssignVariableXlaConcatND.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_AssignVariableXlaConcatND.pbtxt new file mode 100644 index 00000000000..f45351cddeb --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_AssignVariableXlaConcatND.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "AssignVariableXlaConcatND" + endpoint { + name: "xla.AssignVariableConcatND" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveAllToAllV3.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveAllToAllV3.pbtxt new file mode 100644 index 00000000000..7b45fb28bc9 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveAllToAllV3.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "CollectiveAllToAllV3" + endpoint { + name: "collective.CollectiveAllToAll" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveBcastRecv.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveBcastRecv.pbtxt index 8ada333e446..48feac2efa0 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveBcastRecv.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveBcastRecv.pbtxt @@ -1,6 +1,4 @@ op { graph_op_name: "CollectiveBcastRecv" - endpoint { - name: "collective.BroadcastRecv" - } + visibility: SKIP } diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveBcastRecvV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveBcastRecvV2.pbtxt index bc995cab1bb..c7d78492cab 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveBcastRecvV2.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveBcastRecvV2.pbtxt @@ -1,6 +1,6 @@ op { graph_op_name: "CollectiveBcastRecvV2" endpoint { - name: "rawops.CollectiveBcastRecvV2" + name: "collective.CollectiveBcastRecv" } } diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveBcastSend.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveBcastSend.pbtxt index 18b4bef345e..3d444c00bf2 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveBcastSend.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveBcastSend.pbtxt @@ -1,6 +1,4 @@ op { graph_op_name: "CollectiveBcastSend" - endpoint { - name: "collective.BroadcastSend" - } + visibility: SKIP } diff --git 
a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveBcastSendV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveBcastSendV2.pbtxt index 226379d303e..9eb747f9a4b 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveBcastSendV2.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveBcastSendV2.pbtxt @@ -1,6 +1,6 @@ op { graph_op_name: "CollectiveBcastSendV2" endpoint { - name: "rawops.CollectiveBcastSendV2" + name: "collective.CollectiveBcastSend" } } diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveGather.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveGather.pbtxt index 06de25f3442..8479efea1a8 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveGather.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveGather.pbtxt @@ -1,10 +1,4 @@ op { graph_op_name: "CollectiveGather" - endpoint: { - name: "collective.Gather" - } - endpoint: { - name: "CollectiveGather" - deprecated: true - } + visibility: SKIP } \ No newline at end of file diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveGatherV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveGatherV2.pbtxt index 58f3ec99b53..a9179d98926 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveGatherV2.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveGatherV2.pbtxt @@ -1,6 +1,6 @@ op { graph_op_name: "CollectiveGatherV2" endpoint: { - name: "collective.GatherV2" + name: "collective.CollectiveGather" } } diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveInitializeCommunicator.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveInitializeCommunicator.pbtxt new file mode 100644 index 00000000000..de44c83cdf0 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveInitializeCommunicator.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "CollectiveInitializeCommunicator" + endpoint { + name: "collective.CollectiveInitializeCommunicator" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectivePermute.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectivePermute.pbtxt index 83b880f3fae..490242ba9c5 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectivePermute.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectivePermute.pbtxt @@ -1,6 +1,6 @@ op { graph_op_name: "CollectivePermute" endpoint { - name: "tpu.CollectivePermute" + name: "collective.CollectivePermute" } } diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveReduce.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveReduce.pbtxt index 0453fcdcdf6..e810cfb06da 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveReduce.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveReduce.pbtxt @@ -1,10 +1,4 @@ op { graph_op_name: "CollectiveReduce" - endpoint { - name: "collective.Reduce" - } - endpoint { - name: "collective.AllReduce" - deprecated: true - } + visibility: SKIP } diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveReduceV2.pbtxt 
b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveReduceV2.pbtxt index c3cbd7d2a39..4fe3c35b51e 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveReduceV2.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveReduceV2.pbtxt @@ -1,6 +1,4 @@ op { graph_op_name: "CollectiveReduceV2" - endpoint { - name: "collective.ReduceV2" - } + visibility: SKIP } diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveReduceV3.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveReduceV3.pbtxt new file mode 100644 index 00000000000..c7234159624 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_CollectiveReduceV3.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "CollectiveReduceV3" + endpoint { + name: "collective.CollectiveReduce" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_GetElementAtIndex.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_GetElementAtIndex.pbtxt new file mode 100644 index 00000000000..041f46e450e --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_GetElementAtIndex.pbtxt @@ -0,0 +1,3 @@ +op { + graph_op_name: "GetElementAtIndex" +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_GetOptions.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_GetOptions.pbtxt index 188a9290620..382e395959f 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_GetOptions.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_GetOptions.pbtxt @@ -1,6 +1,3 @@ op { graph_op_name: "GetOptions" - endpoint { - name: "rawops.GetOptions" - } } diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_IsTPUEmbeddingInitialized.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_IsTPUEmbeddingInitialized.pbtxt new file mode 100644 index 00000000000..e0e66156b85 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_IsTPUEmbeddingInitialized.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "IsTPUEmbeddingInitialized" + endpoint { + name: "tpu.IsTPUEmbeddingInitialized" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_LoadTPUEmbeddingAdagradMomentumParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_LoadTPUEmbeddingAdagradMomentumParameters.pbtxt new file mode 100644 index 00000000000..4b81cfb1c65 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_LoadTPUEmbeddingAdagradMomentumParameters.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "LoadTPUEmbeddingAdagradMomentumParameters" + endpoint { + name: "tpu.LoadTPUEmbeddingAdagradMomentumParameters" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_LoadTPUEmbeddingFrequencyEstimatorParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_LoadTPUEmbeddingFrequencyEstimatorParameters.pbtxt index 99f5e920acf..97111705c86 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_LoadTPUEmbeddingFrequencyEstimatorParameters.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_LoadTPUEmbeddingFrequencyEstimatorParameters.pbtxt @@ -1,6 +1,6 @@ op { graph_op_name: "LoadTPUEmbeddingFrequencyEstimatorParameters" endpoint { - name: "rawops.LoadTPUEmbeddingFrequencyEstimatorParameters" + name: 
"tpu.LoadTPUEmbeddingFrequencyEstimatorParameters" } } diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.pbtxt index 0ced843d210..1b4493b7fd5 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.pbtxt @@ -1,6 +1,6 @@ op { graph_op_name: "LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug" endpoint { - name: "rawops.LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug" + name: "tpu.LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug" } } diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_ReadVariableXlaSplitND.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_ReadVariableXlaSplitND.pbtxt new file mode 100644 index 00000000000..81374fce42a --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_ReadVariableXlaSplitND.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "ReadVariableXlaSplitND" + endpoint { + name: "xla.ReadVariableSplitND" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RetrieveTPUEmbeddingAdagradMomentumParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RetrieveTPUEmbeddingAdagradMomentumParameters.pbtxt new file mode 100644 index 00000000000..506117ac2b7 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RetrieveTPUEmbeddingAdagradMomentumParameters.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "RetrieveTPUEmbeddingAdagradMomentumParameters" + endpoint { + name: "tpu.RetrieveTPUEmbeddingAdagradMomentumParameters" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RetrieveTPUEmbeddingFrequencyEstimatorParameters.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RetrieveTPUEmbeddingFrequencyEstimatorParameters.pbtxt index b69d019664f..02da67a33d4 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RetrieveTPUEmbeddingFrequencyEstimatorParameters.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RetrieveTPUEmbeddingFrequencyEstimatorParameters.pbtxt @@ -1,6 +1,6 @@ op { graph_op_name: "RetrieveTPUEmbeddingFrequencyEstimatorParameters" endpoint { - name: "rawops.RetrieveTPUEmbeddingFrequencyEstimatorParameters" + name: "tpu.RetrieveTPUEmbeddingFrequencyEstimatorParameters" } } diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.pbtxt index 734b2cb441e..5f54c810a4f 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.pbtxt @@ -1,6 +1,6 @@ op { graph_op_name: "RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug" endpoint { - name: "rawops.RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug" + name: 
"tpu.RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug" } } diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SaveDataset.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SaveDataset.pbtxt index 20b5562a385..8c4d87ac61c 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SaveDataset.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SaveDataset.pbtxt @@ -1,7 +1,4 @@ op { graph_op_name: "SaveDataset" - visibility: VISIBLE - endpoint { - name: "data.SaveDataset" - } + visibility: SKIP } diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SaveDatasetV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SaveDatasetV2.pbtxt new file mode 100644 index 00000000000..a0723dc3d7b --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_SaveDatasetV2.pbtxt @@ -0,0 +1,7 @@ +op { + graph_op_name: "SaveDatasetV2" + visibility: VISIBLE + endpoint { + name: "data.SaveDataset" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_StatelessRandomGetAlg.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_StatelessRandomGetAlg.pbtxt index 276b6f4422e..af3865f9438 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_StatelessRandomGetAlg.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_StatelessRandomGetAlg.pbtxt @@ -1,6 +1,6 @@ op { graph_op_name: "StatelessRandomGetAlg" endpoint { - name: "rawops.StatelessRandomGetAlg" + name: "random.StatelessRandomGetAlg" } } diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_StatelessRandomGetKeyCounter.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_StatelessRandomGetKeyCounter.pbtxt index e0e2f305b7f..24ae0b187e4 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_StatelessRandomGetKeyCounter.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_StatelessRandomGetKeyCounter.pbtxt @@ -1,6 +1,6 @@ op { graph_op_name: "StatelessRandomGetKeyCounter" endpoint { - name: "rawops.StatelessRandomGetKeyCounter" + name: "random.StatelessRandomGetKeyCounter" } } diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_WindowOp.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_WindowOp.pbtxt new file mode 100644 index 00000000000..d5683040e7b --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_WindowOp.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "WindowOp" + endpoint { + name: "data.WindowOp" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaAllReduce.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaAllReduce.pbtxt new file mode 100644 index 00000000000..3bd3a004edb --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaAllReduce.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "XlaAllReduce" + endpoint { + name: "xla.AllReduce" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaConcatND.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaConcatND.pbtxt new file mode 100644 index 00000000000..e85cc4435f1 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaConcatND.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "XlaConcatND" + endpoint { + name: "xla.ConcatND" + } +} diff --git 
a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaReduceScatter.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaReduceScatter.pbtxt new file mode 100644 index 00000000000..3d84f44b6cd --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaReduceScatter.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "XlaReduceScatter" + endpoint { + name: "xla.ReduceScatter" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaRngBitGenerator.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaRngBitGenerator.pbtxt new file mode 100644 index 00000000000..7246acf05fb --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaRngBitGenerator.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "XlaRngBitGenerator" + endpoint { + name: "xla.RngBitGenerator" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaSplitND.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaSplitND.pbtxt new file mode 100644 index 00000000000..850412bf28c --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaSplitND.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "XlaSplitND" + endpoint { + name: "xla.SplitND" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaVariadicReduce.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaVariadicReduce.pbtxt index 7bd52b37d87..6e10bd1147d 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaVariadicReduce.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaVariadicReduce.pbtxt @@ -1,6 +1,4 @@ op { graph_op_name: "XlaVariadicReduce" - endpoint: { - name: "xla.XlaVariadicReduce" - } + visibility: SKIP } diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaVariadicReduceV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaVariadicReduceV2.pbtxt new file mode 100644 index 00000000000..0be02caac52 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_XlaVariadicReduceV2.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "XlaVariadicReduceV2" + endpoint: { + name: "xla.XlaVariadicReduce" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/import/api_import.cc b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/import/api_import.cc index 429761e1ce7..e8850f7867d 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/import/api_import.cc +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/import/api_import.cc @@ -177,7 +177,7 @@ int main(int argc, char* argv[]) { python_api_map.UpdateDocs(); // Load golden API member names with their module path - string golden_api_path = tf_src_dir + "/tensorflow/tools/api/golden/v1/*.pbtxt"; + string golden_api_path = tf_src_dir + "/tensorflow/tools/api/golden/v2/*.pbtxt"; vector> golden_api_names; vector golden_api_files; TF_CHECK_OK(env->GetMatchingPaths(golden_api_path, &golden_api_files)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataOps.java index d69c44cc0c7..5d4583c01c4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataOps.java @@ -275,12 +275,13 @@ public 
CSVDataset cSVDataset(Operand filenames, Operand compre * @param cache The cache value * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of CacheDataset */ public CacheDataset cacheDataset(Operand inputDataset, Operand filename, Operand cache, List> outputTypes, - List outputShapes) { - return CacheDataset.create(scope, inputDataset, filename, cache, outputTypes, outputShapes); + List outputShapes, CacheDataset.Options... options) { + return CacheDataset.create(scope, inputDataset, filename, cache, outputTypes, outputShapes, options); } /** @@ -326,12 +327,13 @@ public ChooseFastestDataset chooseFastestDataset(Iterable inputDataset, Operand anotherDataset, List> outputTypes, - List outputShapes) { - return ConcatenateDataset.create(scope, inputDataset, anotherDataset, outputTypes, outputShapes); + List outputShapes, ConcatenateDataset.Options... options) { + return ConcatenateDataset.create(scope, inputDataset, anotherDataset, outputTypes, outputShapes, options); } /** @@ -401,11 +403,13 @@ public DatasetToGraph datasetToGraph(Operand inputDataset, * @param dataset A handle to a dataset that contains a single element. * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of DatasetToSingleElement */ public DatasetToSingleElement datasetToSingleElement(Operand dataset, - List> outputTypes, List outputShapes) { - return DatasetToSingleElement.create(scope, dataset, outputTypes, outputShapes); + List> outputTypes, List outputShapes, + DatasetToSingleElement.Options... options) { + return DatasetToSingleElement.create(scope, dataset, outputTypes, outputShapes, options); } /** @@ -515,12 +519,14 @@ public FilterByLastComponentDataset filterByLastComponentDataset( * @param predicate A function returning a scalar boolean. * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of FilterDataset */ public FilterDataset filterDataset(Operand inputDataset, Iterable> otherArguments, ConcreteFunction predicate, - List> outputTypes, List outputShapes) { - return FilterDataset.create(scope, inputDataset, otherArguments, predicate, outputTypes, outputShapes); + List> outputTypes, List outputShapes, + FilterDataset.Options... options) { + return FilterDataset.create(scope, inputDataset, otherArguments, predicate, outputTypes, outputShapes, options); } /** @@ -547,12 +553,14 @@ public FinalizeDataset finalizeDataset(Operand inputDataset, * @param footerBytes The footerBytes value * @param bufferSize The bufferSize value * @param compressionType The compressionType value + * @param options carries optional attribute values * @return a new instance of FixedLengthRecordDataset */ public FixedLengthRecordDataset fixedLengthRecordDataset(Operand filenames, Operand headerBytes, Operand recordBytes, Operand footerBytes, - Operand bufferSize, Operand compressionType) { - return FixedLengthRecordDataset.create(scope, filenames, headerBytes, recordBytes, footerBytes, bufferSize, compressionType); + Operand bufferSize, Operand compressionType, + FixedLengthRecordDataset.Options... 
options) { + return FixedLengthRecordDataset.create(scope, filenames, headerBytes, recordBytes, footerBytes, bufferSize, compressionType, options); } /** @@ -568,12 +576,14 @@ public FixedLengthRecordDataset fixedLengthRecordDataset(Operand filena * {@code output_types} and {@code output_shapes}. * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of FlatMapDataset */ public FlatMapDataset flatMapDataset(Operand inputDataset, Iterable> otherArguments, ConcreteFunction f, - List> outputTypes, List outputShapes) { - return FlatMapDataset.create(scope, inputDataset, otherArguments, f, outputTypes, outputShapes); + List> outputTypes, List outputShapes, + FlatMapDataset.Options... options) { + return FlatMapDataset.create(scope, inputDataset, otherArguments, f, outputTypes, outputShapes, options); } /** @@ -587,13 +597,15 @@ public FlatMapDataset flatMapDataset(Operand inputDataset, * @param finalizeFunc The value of the finalizeFunc attribute * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of GeneratorDataset */ public GeneratorDataset generatorDataset(Iterable> initFuncOtherArgs, Iterable> nextFuncOtherArgs, Iterable> finalizeFuncOtherArgs, ConcreteFunction initFunc, ConcreteFunction nextFunc, ConcreteFunction finalizeFunc, - List> outputTypes, List outputShapes) { - return GeneratorDataset.create(scope, initFuncOtherArgs, nextFuncOtherArgs, finalizeFuncOtherArgs, initFunc, nextFunc, finalizeFunc, outputTypes, outputShapes); + List> outputTypes, List outputShapes, + GeneratorDataset.Options... options) { + return GeneratorDataset.create(scope, initFuncOtherArgs, nextFuncOtherArgs, finalizeFuncOtherArgs, initFunc, nextFunc, finalizeFunc, outputTypes, outputShapes, options); } /** @@ -643,14 +655,16 @@ public GroupByReducerDataset groupByReducerDataset(Operand inpu * @param windowSizeFunc The value of the windowSizeFunc attribute * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of GroupByWindowDataset */ public GroupByWindowDataset groupByWindowDataset(Operand inputDataset, Iterable> keyFuncOtherArguments, Iterable> reduceFuncOtherArguments, Iterable> windowSizeFuncOtherArguments, ConcreteFunction keyFunc, ConcreteFunction reduceFunc, ConcreteFunction windowSizeFunc, - List> outputTypes, List outputShapes) { - return GroupByWindowDataset.create(scope, inputDataset, keyFuncOtherArguments, reduceFuncOtherArguments, windowSizeFuncOtherArguments, keyFunc, reduceFunc, windowSizeFunc, outputTypes, outputShapes); + List> outputTypes, List outputShapes, + GroupByWindowDataset.Options... 
options) { + return GroupByWindowDataset.create(scope, inputDataset, keyFuncOtherArguments, reduceFuncOtherArguments, windowSizeFuncOtherArguments, keyFunc, reduceFunc, windowSizeFunc, outputTypes, outputShapes, options); } /** @@ -697,12 +711,14 @@ public InitializeTableFromDataset initializeTableFromDataset(Operand inputDataset, Iterable> otherArguments, Operand cycleLength, Operand blockLength, - ConcreteFunction f, List> outputTypes, List outputShapes) { - return InterleaveDataset.create(scope, inputDataset, otherArguments, cycleLength, blockLength, f, outputTypes, outputShapes); + ConcreteFunction f, List> outputTypes, List outputShapes, + InterleaveDataset.Options... options) { + return InterleaveDataset.create(scope, inputDataset, otherArguments, cycleLength, blockLength, f, outputTypes, outputShapes, options); } /** @@ -1076,12 +1092,13 @@ public OptionalNone optionalNone() { * @param serializedOptions A {@code tf.string} scalar {@code tf.Tensor} of serialized {@code tf.data.Options} protocol buffer. * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of OptionsDataset */ public OptionsDataset optionsDataset(Operand inputDataset, - String serializedOptions, List> outputTypes, - List outputShapes) { - return OptionsDataset.create(scope, inputDataset, serializedOptions, outputTypes, outputShapes); + String serializedOptions, List> outputTypes, List outputShapes, + OptionsDataset.Options... options) { + return OptionsDataset.create(scope, inputDataset, serializedOptions, outputTypes, outputShapes, options); } /** @@ -1284,11 +1301,13 @@ public PrivateThreadPoolDataset privateThreadPoolDataset(Operand seed, Operand seed2, - List> outputTypes, List outputShapes) { - return RandomDataset.create(scope, seed, seed2, outputTypes, outputShapes); + List> outputTypes, List outputShapes, + RandomDataset.Options... options) { + return RandomDataset.create(scope, seed, seed2, outputTypes, outputShapes, options); } /** @@ -1299,11 +1318,13 @@ public RandomDataset randomDataset(Operand seed, Operand seed2, * @param step corresponds to step in python's xrange(). * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of RangeDataset */ public RangeDataset rangeDataset(Operand start, Operand stop, - Operand step, List> outputTypes, List outputShapes) { - return RangeDataset.create(scope, start, stop, step, outputTypes, outputShapes); + Operand step, List> outputTypes, List outputShapes, + RangeDataset.Options... options) { + return RangeDataset.create(scope, start, stop, step, outputTypes, outputShapes, options); } /** @@ -1354,11 +1375,12 @@ public ReduceDataset reduceDataset(Operand inputDataset, * @param address The address value * @param protocol The protocol value * @param externalStatePolicy The value of the externalStatePolicy attribute + * @param options carries optional attribute values * @return a new instance of RegisterDataset */ public RegisterDataset registerDataset(Operand dataset, Operand address, - Operand protocol, Long externalStatePolicy) { - return RegisterDataset.create(scope, dataset, address, protocol, externalStatePolicy); + Operand protocol, Long externalStatePolicy, RegisterDataset.Options... 
options) { + return RegisterDataset.create(scope, dataset, address, protocol, externalStatePolicy, options); } /** @@ -1369,11 +1391,13 @@ public RegisterDataset registerDataset(Operand dataset, Operand * be repeated. A value of {@code -1} indicates that it should be repeated infinitely. * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of RepeatDataset */ public RepeatDataset repeatDataset(Operand inputDataset, Operand count, - List> outputTypes, List outputShapes) { - return RepeatDataset.create(scope, inputDataset, count, outputTypes, outputShapes); + List> outputTypes, List outputShapes, + RepeatDataset.Options... options) { + return RepeatDataset.create(scope, inputDataset, count, outputTypes, outputShapes, options); } /** @@ -1400,19 +1424,22 @@ public SamplingDataset samplingDataset(Operand inputDataset, } /** - * The SaveDataset operation + * The SaveDatasetV2 operation * * @param inputDataset The inputDataset value * @param path The path value * @param shardFuncOtherArgs The shardFuncOtherArgs value * @param shardFunc The value of the shardFunc attribute + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute * @param options carries optional attribute values * @return a new instance of SaveDataset */ public SaveDataset saveDataset(Operand inputDataset, Operand path, Iterable> shardFuncOtherArgs, ConcreteFunction shardFunc, + List> outputTypes, List outputShapes, SaveDataset.Options... options) { - return SaveDataset.create(scope, inputDataset, path, shardFuncOtherArgs, shardFunc, options); + return SaveDataset.create(scope, inputDataset, path, shardFuncOtherArgs, shardFunc, outputTypes, outputShapes, options); } /** @@ -1531,11 +1558,13 @@ public ShuffleDataset shuffleDataset(Operand inputDataset, * that should be skipped. If count is -1, skips everything. * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of SkipDataset */ public SkipDataset skipDataset(Operand inputDataset, Operand count, - List> outputTypes, List outputShapes) { - return SkipDataset.create(scope, inputDataset, count, outputTypes, outputShapes); + List> outputTypes, List outputShapes, + SkipDataset.Options... options) { + return SkipDataset.create(scope, inputDataset, count, outputTypes, outputShapes, options); } /** @@ -1636,11 +1665,13 @@ public SqlDataset sqlDataset(Operand driverName, Operand dataS * is taken. * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of TakeDataset */ public TakeDataset takeDataset(Operand inputDataset, Operand count, - List> outputTypes, List outputShapes) { - return TakeDataset.create(scope, inputDataset, count, outputTypes, outputShapes); + List> outputTypes, List outputShapes, + TakeDataset.Options... 
options) { + return TakeDataset.create(scope, inputDataset, count, outputTypes, outputShapes, options); } /** @@ -1658,12 +1689,14 @@ public TakeDataset takeDataset(Operand inputDataset, Operand inputDataset, Iterable> otherArguments, ConcreteFunction predicate, - List> outputTypes, List outputShapes) { - return TakeWhileDataset.create(scope, inputDataset, otherArguments, predicate, outputTypes, outputShapes); + List> outputTypes, List outputShapes, + TakeWhileDataset.Options... options) { + return TakeWhileDataset.create(scope, inputDataset, otherArguments, predicate, outputTypes, outputShapes, options); } /** @@ -1671,10 +1704,12 @@ public TakeWhileDataset takeWhileDataset(Operand inputDataset, * * @param components The components value * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of TensorDataset */ - public TensorDataset tensorDataset(Iterable> components, List outputShapes) { - return TensorDataset.create(scope, components, outputShapes); + public TensorDataset tensorDataset(Iterable> components, List outputShapes, + TensorDataset.Options... options) { + return TensorDataset.create(scope, components, outputShapes, options); } /** @@ -1682,11 +1717,12 @@ public TensorDataset tensorDataset(Iterable> components, List * * @param components The components value * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of TensorSliceDataset */ public TensorSliceDataset tensorSliceDataset(Iterable> components, - List outputShapes) { - return TensorSliceDataset.create(scope, components, outputShapes); + List outputShapes, TensorSliceDataset.Options... options) { + return TensorSliceDataset.create(scope, components, outputShapes, options); } /** @@ -1697,11 +1733,13 @@ public TensorSliceDataset tensorSliceDataset(Iterable> components, * @param compressionType A scalar containing either (i) the empty string (no * compression), (ii) "ZLIB", or (iii) "GZIP". * @param bufferSize A scalar containing the number of bytes to buffer. + * @param options carries optional attribute values * @return a new instance of TextLineDataset */ public TextLineDataset textLineDataset(Operand filenames, - Operand compressionType, Operand bufferSize) { - return TextLineDataset.create(scope, filenames, compressionType, bufferSize); + Operand compressionType, Operand bufferSize, + TextLineDataset.Options... options) { + return TextLineDataset.create(scope, filenames, compressionType, bufferSize, options); } /** @@ -1713,11 +1751,13 @@ public TextLineDataset textLineDataset(Operand filenames, * compression), (ii) "ZLIB", or (iii) "GZIP". * @param bufferSize A scalar representing the number of bytes to buffer. A value of * 0 means no buffering will be performed. + * @param options carries optional attribute values * @return a new instance of TfRecordDataset */ public TfRecordDataset tfRecordDataset(Operand filenames, - Operand compressionType, Operand bufferSize) { - return TfRecordDataset.create(scope, filenames, compressionType, bufferSize); + Operand compressionType, Operand bufferSize, + TfRecordDataset.Options... 
options) { + return TfRecordDataset.create(scope, filenames, compressionType, bufferSize, options); } /** @@ -1741,11 +1781,13 @@ public ThreadPoolDataset threadPoolDataset(Operand inputDataset * @param inputDataset The inputDataset value * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of UnbatchDataset */ public UnbatchDataset unbatchDataset(Operand inputDataset, - List> outputTypes, List outputShapes) { - return UnbatchDataset.create(scope, inputDataset, outputTypes, outputShapes); + List> outputTypes, List outputShapes, + UnbatchDataset.Options... options) { + return UnbatchDataset.create(scope, inputDataset, outputTypes, outputShapes, options); } /** @@ -1754,11 +1796,13 @@ public UnbatchDataset unbatchDataset(Operand inputDataset, * @param inputDataset The inputDataset value * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of UniqueDataset */ public UniqueDataset uniqueDataset(Operand inputDataset, - List> outputTypes, List outputShapes) { - return UniqueDataset.create(scope, inputDataset, outputTypes, outputShapes); + List> outputTypes, List outputShapes, + UniqueDataset.Options... options) { + return UniqueDataset.create(scope, inputDataset, outputTypes, outputShapes, options); } /** @@ -1822,13 +1866,14 @@ public UnwrapDatasetVariant unwrapDatasetVariant(Operand inputH * dropped if its size is smaller than {@code window_size}. * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of WindowDataset */ public WindowDataset windowDataset(Operand inputDataset, Operand sizeOutput, Operand shift, Operand stride, Operand dropRemainder, List> outputTypes, - List outputShapes) { - return WindowDataset.create(scope, inputDataset, sizeOutput, shift, stride, dropRemainder, outputTypes, outputShapes); + List outputShapes, WindowDataset.Options... options) { + return WindowDataset.create(scope, inputDataset, sizeOutput, shift, stride, dropRemainder, outputTypes, outputShapes, options); } /** @@ -1851,11 +1896,13 @@ public WrapDatasetVariant wrapDatasetVariant(Operand inputHandl * @param inputDatasets List of {@code N} variant Tensors representing datasets to be zipped together. * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of ZipDataset */ public ZipDataset zipDataset(Iterable> inputDatasets, - List> outputTypes, List outputShapes) { - return ZipDataset.create(scope, inputDatasets, outputTypes, outputShapes); + List> outputTypes, List outputShapes, + ZipDataset.Options... 
options) { + return ZipDataset.create(scope, inputDatasets, outputTypes, outputShapes, options); } /** diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java index 4397483d606..fc6b447ab31 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java @@ -377,10 +377,10 @@ public final class Ops { public final SignalOps signal; - public final TrainOps train; - public final QuantizationOps quantization; + public final TrainOps train; + private final Scope scope; Ops(Scope scope) { @@ -403,8 +403,8 @@ public final class Ops { math = new MathOps(this); audio = new AudioOps(this); signal = new SignalOps(this); - train = new TrainOps(this); quantization = new QuantizationOps(this); + train = new TrainOps(this); } /** @@ -4798,9 +4798,7 @@ public ResourceStridedSliceAssign resourceStridedSliceAssign /** * Reverses specific dimensions of a tensor. - * NOTE {@code tf.reverse} has now changed behavior in preparation for 1.0. - * {@code tf.reverse_v2} is currently an alias that will be deprecated before TF 1.0. - *

    Given a {@code tensor}, and a {@code int32} tensor {@code axis} representing the set of + * Given a {@code tensor}, and a {@code int32} tensor {@code axis} representing the set of * dimensions of {@code tensor} to reverse. This operation reverses each dimension * {@code i} for which there exists {@code j} s.t. {@code axis[j] == i}. *

    {@code tensor} can have up to 8 dimensions. The number of dimensions specified @@ -5126,34 +5124,37 @@ public ScatterMul scatterMul(Operand ref, } /** - * Scatter {@code updates} into a new tensor according to {@code indices}. - * Creates a new tensor by applying sparse {@code updates} to individual values or - * slices within a tensor (initially zero for numeric, empty for string) of - * the given {@code shape} according to indices. This operator is the inverse of the - * {@code tf.gather_nd} operator which extracts values or slices from a given tensor. - *

    This operation is similar to tensor_scatter_add, except that the tensor is - * zero-initialized. Calling {@code tf.scatter_nd(indices, values, shape)} is identical - * to {@code tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)} - *

    If {@code indices} contains duplicates, then their updates are accumulated (summed). + * Scatters {@code updates} into a tensor of shape {@code shape} according to {@code indices}. + * Update the input tensor by scattering sparse {@code updates} according to individual values at the specified {@code indices}. + * This op returns an {@code output} tensor with the {@code shape} you specify. This op is the + * inverse of the {@code tf.gather_nd} operator which extracts values or slices from a + * given tensor. + *

    This operation is similar to {@code tf.tensor_scatter_add}, except that the tensor is + * zero-initialized. Calling {@code tf.scatter_nd(indices, values, shape)} + * is identical to calling + * {@code tf.tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)}. + *

    If {@code indices} contains duplicates, the duplicate {@code values} are accumulated + * (summed). *

    WARNING: The order in which updates are applied is nondeterministic, so the - * output will be nondeterministic if {@code indices} contains duplicates -- because - * of some numerical approximation issues, numbers summed in different order - * may yield different results. - *

    {@code indices} is an integer tensor containing indices into a new tensor of shape - * {@code shape}. The last dimension of {@code indices} can be at most the rank of {@code shape}: + * output will be nondeterministic if {@code indices} contains duplicates; + * numbers summed in different order may yield different results because of some + * numerical approximation issues. + *

    {@code indices} is an integer tensor of shape {@code shape}. The last dimension + * of {@code indices} can be at most the rank of {@code shape}: *

        *  indices.shape[-1] <= shape.rank
        *  
    - *

    The last dimension of {@code indices} corresponds to indices into elements + *

    The last dimension of {@code indices} corresponds to indices of elements * (if {@code indices.shape[-1] = shape.rank}) or slices * (if {@code indices.shape[-1] < shape.rank}) along dimension {@code indices.shape[-1]} of - * {@code shape}. {@code updates} is a tensor with shape + * {@code shape}. + *

    {@code updates} is a tensor with shape: *

        *  indices.shape[:-1] + shape[indices.shape[-1]:]
        *  
    - *

    The simplest form of scatter is to insert individual elements in a tensor by - * index. For example, say we want to insert 4 scattered elements in a rank-1 - * tensor with 8 elements. + *

    The simplest form of the scatter op is to insert individual elements in + * a tensor by index. Consider an example where you want to insert 4 scattered + * elements in a rank-1 tensor with 8 elements. *

    * *
    @@ -5169,9 +5170,9 @@ public ScatterMul scatterMul(Operand ref, *
        *  [0, 11, 0, 10, 9, 0, 0, 12]
        *  
    - *

    We can also, insert entire slices of a higher rank tensor all at once. For - * example, if we wanted to insert two slices in the first dimension of a - * rank-3 tensor with two matrices of new values. + *

    You can also insert entire slices of a higher rank tensor all at once. For + * example, you can insert two slices in the first dimension of a rank-3 tensor + * with two matrices of new values. *

    * *
    @@ -5197,9 +5198,9 @@ public ScatterMul scatterMul(Operand ref, * On GPU, if an out of bound index is found, the index is ignored. * * @param data type for {@code output} output - * @param indices Index tensor. - * @param updates Updates to scatter into output. - * @param shape 1-D. The shape of the resulting tensor. + * @param indices Tensor of indices. + * @param updates Values to scatter into the output tensor. + * @param shape 1-D. The shape of the output tensor. * @param data type for {@code ScatterNd} output and operands * @param data type for {@code ScatterNd} output and operands * @return a new instance of ScatterNd diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/TrainOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/TrainOps.java index 7d610592959..5f19200059a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/TrainOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/TrainOps.java @@ -234,10 +234,10 @@ public ApplyAdagradDa applyAdagradDa(Operand var, /** * Update '*var' according to the Adam algorithm. - * $$lr_t := \text{learning_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$ - * $$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$ - * $$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$ - * $$variable := variable - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$ + * $$\text{lr}t := \mathrm{lr} \cdot \frac{\sqrt{1 - \beta_2^t}}{1 - \beta_1^t}$$ + * $$m_t := \beta_1 \cdot m{t-1} + (1 - \beta_1) \cdot g$$ + * $$v_t := \beta_2 \cdot v_{t-1} + (1 - \beta_2) \cdot g^2$$ + * $$\text{var} := \begin{cases} \text{var} - (m_t \beta_1 + g \cdot (1 - \beta_1))\cdot\text{lr}_t/(\sqrt{v_t} + \epsilon), &\text{if use_nesterov}\\ \text{var} - m_t \cdot \text{lr}_t /(\sqrt{v_t} + \epsilon), &\text{otherwise} \end{cases}$$ * * @param data type for {@code out} output * @param var Should be from a Variable(). @@ -691,10 +691,10 @@ public ResourceApplyAdagradDa resourceApplyAdagradDa( /** * Update '*var' according to the Adam algorithm. - * $$\text{lr}t := \mathrm{learning_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ - * $$m_t := \beta_1 * m{t-1} + (1 - \beta_1) * g$$ - * $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$ - * $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{v_t} + \epsilon)$$ + * $$\text{lr}t := \mathrm{lr} \cdot \frac{\sqrt{1 - \beta_2^t}}{1 - \beta_1^t}$$ + * $$m_t := \beta_1 \cdot m{t-1} + (1 - \beta_1) \cdot g$$ + * $$v_t := \beta_2 \cdot v_{t-1} + (1 - \beta_2) \cdot g^2$$ + * $$\text{var} := \begin{cases} \text{var} - (m_t \beta_1 + g \cdot (1 - \beta_1))\cdot\text{lr}_t/(\sqrt{v_t} + \epsilon), &\text{if use_nesterov}\\ \text{var} - m_t \cdot \text{lr}_t /(\sqrt{v_t} + \epsilon), &\text{otherwise} \end{cases}$$ * * @param var Should be from a Variable(). * @param m Should be from a Variable(). 
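As a minimal sketch of the behaviour the rewritten Javadoc above describes (the zero-initialized, duplicate-summing ScatterNd in Ops.java and the Adam update with optional Nesterov momentum in TrainOps.java), the following plain-Java illustration restates both with made-up class and method names and plain arrays; the actual ops are executed by TensorFlow's native kernels, and the scatter inputs below follow the upstream rank-1 example whose expected output appears in the Javadoc.

import java.util.Arrays;

/** Plain-Java illustration of the semantics documented above; all names are illustrative only. */
final class DocFormulaSketch {

  /** 1-D ScatterNd: the output is zero-initialized and duplicate indices are accumulated (summed). */
  static int[] scatterNd1d(int[] indices, int[] updates, int outputSize) {
    int[] output = new int[outputSize];   // "initially zero", as the Javadoc states
    for (int i = 0; i < indices.length; i++) {
      output[indices[i]] += updates[i];   // duplicate indices are summed
    }
    return output;
  }

  /** One Adam step, following the formulas in the ApplyAdam/ResourceApplyAdam Javadoc. */
  static void adamStep(double[] var, double[] m, double[] v, double[] grad, double lr,
      double beta1, double beta2, double epsilon, long t, boolean useNesterov) {
    // lr_t := lr * sqrt(1 - beta2^t) / (1 - beta1^t)
    double lrT = lr * Math.sqrt(1.0 - Math.pow(beta2, t)) / (1.0 - Math.pow(beta1, t));
    for (int i = 0; i < var.length; i++) {
      double g = grad[i];
      m[i] = beta1 * m[i] + (1.0 - beta1) * g;       // m_t
      v[i] = beta2 * v[i] + (1.0 - beta2) * g * g;   // v_t
      // With use_nesterov, the update uses m_t * beta1 + (1 - beta1) * g; otherwise just m_t.
      double update = useNesterov ? m[i] * beta1 + (1.0 - beta1) * g : m[i];
      var[i] -= lrT * update / (Math.sqrt(v[i]) + epsilon);
    }
  }

  public static void main(String[] args) {
    // Rank-1 scatter example from the Javadoc; prints [0, 11, 0, 10, 9, 0, 0, 12].
    System.out.println(Arrays.toString(
        scatterNd1d(new int[] {4, 3, 1, 7}, new int[] {9, 10, 11, 12}, 8)));
  }
}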
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/XlaOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/XlaOps.java index 8c087f20307..fb1c5e66b71 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/XlaOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/XlaOps.java @@ -21,6 +21,7 @@ import org.tensorflow.ConcreteFunction; import org.tensorflow.Operand; import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.xla.AllReduce; import org.tensorflow.op.xla.BroadcastHelper; import org.tensorflow.op.xla.ClusterOutput; import org.tensorflow.op.xla.Conv; @@ -35,9 +36,11 @@ import org.tensorflow.op.xla.Pad; import org.tensorflow.op.xla.Recv; import org.tensorflow.op.xla.Reduce; +import org.tensorflow.op.xla.ReduceScatter; import org.tensorflow.op.xla.ReduceWindow; import org.tensorflow.op.xla.RemoveDynamicDimensionSize; import org.tensorflow.op.xla.ReplicaId; +import org.tensorflow.op.xla.RngBitGenerator; import org.tensorflow.op.xla.Scatter; import org.tensorflow.op.xla.SelectAndScatter; import org.tensorflow.op.xla.SelfAdjointEig; @@ -75,6 +78,22 @@ public final class XlaOps { this.ops = ops; } + /** + * Wraps the XLA AllReduce operator + * documented at https://www.tensorflow.org/xla/operation_semantics#allreduce. + * + * @param data type for {@code output} output + * @param input Array or a non-empty tuple of arrays to reduce across replicas. + * @param groupAssignment Groups between which the reductions are performed. + * @param reduceOp Reduction computation. + * @param data type for {@code XlaAllReduce} output and operands + * @return a new instance of AllReduce + */ + public AllReduce allReduce(Operand input, + Operand groupAssignment, String reduceOp) { + return AllReduce.create(scope, input, groupAssignment, reduceOp); + } + /** * Helper operator for performing XLA-style broadcasts * Broadcasts {@code lhs} and {@code rhs} to the same rank, by adding size 1 dimensions to @@ -346,6 +365,23 @@ public Reduce reduce(Operand input, Operand initValue return Reduce.create(scope, input, initValue, dimensionsToReduce, reducer); } + /** + * Wraps the XLA ReduceScatter operator + * documented at https://www.tensorflow.org/xla/operation_semantics#reducescatter. + * + * @param data type for {@code output} output + * @param input Array or a non-empty tuple of arrays to reduce across replicas. + * @param groupAssignment Groups between which the reductions are performed. + * @param scatterDimension Dimension to scatter. + * @param reduceOp Reduction computation. + * @param data type for {@code XlaReduceScatter} output and operands + * @return a new instance of ReduceScatter + */ + public ReduceScatter reduceScatter(Operand input, + Operand groupAssignment, Operand scatterDimension, String reduceOp) { + return ReduceScatter.create(scope, input, groupAssignment, scatterDimension, reduceOp); + } + /** * Wraps the XLA ReduceWindow operator, documented at * https://www.tensorflow.org/performance/xla/operation_semantics#reducewindow . @@ -371,11 +407,9 @@ public ReduceWindow reduceWindow(Operand } /** - * Inverse of XlaSetDynamicDimensionSize. Make an xla bounded - *
    -   *      dynamic dimension into a static dimension. The bound of the size of
    -   *      dimension `dim_index` becomes the static dimension size.
    -   *  
    + * Inverse of XlaSetDynamicDimensionSize. + * Make an xla bounded dynamic dimension into a static dimension. The bound of the + * size of dimension {@code dim_index} becomes the static dimension size. * * @param data type for {@code output} output * @param input The input value @@ -397,6 +431,26 @@ public ReplicaId replicaId() { return ReplicaId.create(scope); } + /** + * Stateless PRNG bit generator. + * Wraps the XLA RngBitGenerator operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#rngbitgenerator. + * + * @param data type for {@code output} output + * @param algorithm The PRNG algorithm to use, one of + * tf.random.Algorithm.{PHILOX, THREEFRY, AUTO_SELECT}. + * @param initialState Initial state for the PRNG algorithm. For THREEFRY, it should be + * a u64[2] and for PHILOX a u64[3]. + * @param shape The output shape of the generated data. + * @param dtype The type of the tensor. + * @param data type for {@code XlaRngBitGenerator} output and operands + * @return a new instance of RngBitGenerator + */ + public RngBitGenerator rngBitGenerator(Operand algorithm, + Operand initialState, Operand shape, Class dtype) { + return RngBitGenerator.create(scope, algorithm, initialState, shape, dtype); + } + /** * Wraps the XLA Scatter operator documented at * https://www.tensorflow.org/xla/operation_semantics#scatter. @@ -500,7 +554,9 @@ public SetDynamicDimensionSize setDynamicDimensionSize(Oper } /** - * An op which shards the input based on the given sharding attribute. + * An op which shards the input based on the given sharding attribute. It can + * selectively annotate a subset of tensor dimensions by skipping unspecified_dims, + * and the sharding annotation should be replicated in those dims. * * @param data type for {@code output} output * @param input The input value @@ -533,34 +589,39 @@ public Sort sort(Operand input) { * partitioned) with the same sharding used by manual partitioning, and outputs a * shard-shaped tensor to be consumed by later manually-partitioned ops. If the * shape is not evenly partitionable, the padding region will be masked with 0s. + * The conversion can happen partially in subgroups, by specifying the dim + * attribute, where only that dim will be converted. * * @param data type for {@code output} output * @param input The input value * @param manualSharding The value of the manualSharding attribute + * @param options carries optional attribute values * @param data type for {@code XlaSpmdFullToShardShape} output and operands * @return a new instance of SpmdFullToShardShape */ public SpmdFullToShardShape spmdFullToShardShape(Operand input, - String manualSharding) { - return SpmdFullToShardShape.create(scope, input, manualSharding); + String manualSharding, SpmdFullToShardShape.Options... options) { + return SpmdFullToShardShape.create(scope, input, manualSharding, options); } /** * An op used by XLA SPMD partitioner to switch from manual partitioning to * automatic partitioning. It converts the shard-shaped, manually partitioned input * into full-shaped tensor to be partitioned automatically with the same sharding - * used by manual partitioning. + * used by manual partitioning. The conversion can happen partially in subgroups, + * by specifying the dim attribute, where only that dim will be converted. 
* * @param data type for {@code output} output * @param input The input value * @param manualSharding The value of the manualSharding attribute * @param fullShape The value of the fullShape attribute + * @param options carries optional attribute values * @param data type for {@code XlaSpmdShardToFullShape} output and operands * @return a new instance of SpmdShardToFullShape */ public SpmdShardToFullShape spmdShardToFullShape(Operand input, - String manualSharding, Shape fullShape) { - return SpmdShardToFullShape.create(scope, input, manualSharding, fullShape); + String manualSharding, Shape fullShape, SpmdShardToFullShape.Options... options) { + return SpmdShardToFullShape.create(scope, input, manualSharding, fullShape, options); } /** @@ -691,18 +752,18 @@ public XlaSetBound xlaSetBound(Operand input, Operand bound) { * Wraps the variadic XLA Reduce operator. * Semantics are documented at * https://www.tensorflow.org/performance/xla/operation_semantics#variadic_reduce. + *

    This is an expanded version of XlaVariadicReduce, with support for + * operands of different dtypes, and improved shape inference. * - * @param data type for {@code output} output - * @param input the input tensor(s) - * @param initValue scalar initial value(s) for the reduction + * @param inputs the input tensor(s) + * @param initValues scalar initial value(s) for the reduction * @param dimensionsToReduce dimension numbers over which to reduce * @param reducer a reducer function to apply - * @param data type for {@code XlaVariadicReduce} output and operands * @return a new instance of XlaVariadicReduce */ - public XlaVariadicReduce xlaVariadicReduce(Iterable> input, - Iterable> initValue, List dimensionsToReduce, ConcreteFunction reducer) { - return XlaVariadicReduce.create(scope, input, initValue, dimensionsToReduce, reducer); + public XlaVariadicReduce xlaVariadicReduce(Iterable> inputs, + Iterable> initValues, List dimensionsToReduce, ConcreteFunction reducer) { + return XlaVariadicReduce.create(scope, inputs, initValues, dimensionsToReduce, reducer); } /** diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOutput.java index 55bcac60f5b..cd3020eef5d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOutput.java @@ -29,10 +29,10 @@ public class NativeOutput extends Pointer { private native void allocate(); public NativeOutput(Node n) { super((Pointer)null); allocate(n); } private native void allocate(Node n); - public NativeOutput(Node n, @Cast("tensorflow::int32") int index) { super((Pointer)null); allocate(n, index); } - private native void allocate(Node n, @Cast("tensorflow::int32") int index); - public NativeOutput(@Const @ByRef NativeOperation op, @Cast("tensorflow::int32") int index) { super((Pointer)null); allocate(op, index); } - private native void allocate(@Const @ByRef NativeOperation op, @Cast("tensorflow::int32") int index); + public NativeOutput(Node n, int index) { super((Pointer)null); allocate(n, index); } + private native void allocate(Node n, int index); + public NativeOutput(@Const @ByRef NativeOperation op, int index) { super((Pointer)null); allocate(op, index); } + private native void allocate(@Const @ByRef NativeOperation op, int index); public native @ByVal NativeOperation op(); public native Node node(); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeStatus.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeStatus.java index 7e21aae659b..193d3f86312 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeStatus.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeStatus.java @@ -56,10 +56,18 @@ public class NativeStatus extends Pointer { * {@code if (overall_status.ok()) overall_status = new_status} * Use: * {@code overall_status.Update(new_status);} */ + + /// public native void Update(@Const @ByRef NativeStatus new_status); /** \brief Return a string representation of this status suitable for - * printing. Returns the string {@code "OK"} for success. */ + * printing. Returns the string {@code "OK"} for success. 
+ * + * By default, it returns combination of the error code name, the message and + * any associated payload messages. This string is designed simply to be + * human readable and its exact format should not be load bearing. Do not + * depend on the exact format of the result of {@code ToString()} which is subject + * to change. */ public native @StdString BytePointer ToString(); // Ignores any errors. This method does nothing except potentially suppress @@ -67,11 +75,44 @@ public class NativeStatus extends Pointer { // the floor. public native void IgnoreError(); + //---------------------------------------------------------------------------- + // Payload Management APIs (Cloned from absl::Status) + //---------------------------------------------------------------------------- + // A payload may be attached to a status to provide additional context to an + // error that may not be satisfied by an existing `tensorflow::error::Code`. + // Typically, this payload serves one of several purposes: + // + // * It may provide more fine-grained semantic information about the error + // to facilitate actionable remedies. + // * It may provide human-readable contexual information that is more + // appropriate to display to an end user. + // + // A payload consists of a [key,value] pair, where the key is a string + // referring to a unique "type URL" and the value is an object of type + // `absl::Cord` to hold the contextual data. + // + // The "type URL" should be unique and follow the format of a URL + // (https://en.wikipedia.org/wiki/URL) and, ideally, provide some + // documentation or schema on how to interpret its associated data. For + // example, the default type URL for a protobuf message type is + // "type.googleapis.com/packagename.messagename". Other custom wire formats + // should define the format of type URL in a similar practice so as to + // minimize the chance of conflict between type URLs. + // Users should ensure that the type URL can be mapped to a concrete + // C++ type if they want to deserialize the payload and read it effectively. + // + // To attach a payload to a status object, call `Status::SetPayload()`, + // passing it the type URL and an `absl::Cord` of associated data. Similarly, + // to extract the payload from a status, call `Status::GetPayload()`. You + // may attach multiple payloads (with differing type URLs) to any given + // status object, provided that the status is currently exhibiting an error + // code (i.e. is not OK). + // TODO(b/197552541): Use absl::Cord for payload value type. + // The Payload-related APIs are cloned from absl::Status. // // Returns the payload of a status given its unique `type_url` key, if - // present. Returns an empty StringPiece if the status is ok, or if the key is - // not present. + // present. // Sets the payload for a non-ok status using a `type_url` key, overwriting @@ -84,11 +125,11 @@ public class NativeStatus extends Pointer { // the payload was present. - // Returns all the payload information. - // Returns an empty result if status is ok. - - - // Copies all the payloads using the input and discards existing payloads. - // Does nothing if status is ok or 'payloads' is empty. + // Iterates over the stored payloads and calls the + // `visitor(type_key, payload)` callable for each one. + // + // The order of calls to `visitor()` is not specified and may change at + // any time and any mutation on the same Status object during visitation is + // forbidden and could result in undefined behavior. 
} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Node.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Node.java index f3baf914963..0bb4543d41c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Node.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Node.java @@ -67,6 +67,7 @@ public class Node extends Pointer { // Sets 'original_node_names' field of this node's DebugInfo proto to // 'names'. + // Read only access to attributes diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/global/tensorflow.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/global/tensorflow.java index 3128389e255..0e153289dff 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/global/tensorflow.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/global/tensorflow.java @@ -65,7 +65,7 @@ public class tensorflow extends org.tensorflow.internal.c_api.presets.tensorflow // #endif // #if TF_TSTRING_LITTLE_ENDIAN -// #define TF_le32toh(x) TF_swap32(x) +// #define TF_le32toh(x) x // #else // TF_TSTRING_LITTLE_ENDIAN // #endif // TF_TSTRING_LITTLE_ENDIAN @@ -584,6 +584,14 @@ public static native void TF_SetStatus(TF_Status s, @Cast("TF_Code") int code, public static native void TF_SetStatus(TF_Status s, @Cast("TF_Code") int code, String msg); +// Record as a payload in *s. The previous payload having the +// same key (if any) is overwritten. Payload will not be added if the Status +// is OK. +public static native void TF_SetPayload(TF_Status s, @Cast("const char*") BytePointer key, + @Cast("const char*") BytePointer value); +public static native void TF_SetPayload(TF_Status s, String key, + String value); + // Convert from an I/O error code (e.g., errno) to a TF_Status value. // Any previous information is lost. Prefer to use this instead of TF_SetStatus // when the error comes from I/O operations. @@ -957,6 +965,10 @@ public static native TF_Buffer TF_NewBufferFromString(@Const Pointer proto, public static native void TF_DeleteBuffer(TF_Buffer arg0); public static native @ByVal TF_Buffer TF_GetBuffer(TF_Buffer buffer); + +// Parsing a serialized TensorProto into a TF_Tensor. +public static native void TF_TensorFromProto(@Const TF_Buffer from, + TF_Tensor to, TF_Status status); // Targeting ../TF_StringView.java @@ -1091,14 +1103,16 @@ public static native void TF_GraphGetTensorShape(TF_Graph graph, @Cast("int64_t*") long[] dims, int num_dims, TF_Status status); -// TF_NewOperation, but without locking the graph. -// Should prefer TF_NewOperation when possible. -public static native TF_OperationDescription TF_NewOperationLocked(TF_Graph graph, - @Cast("const char*") BytePointer op_type, - @Cast("const char*") BytePointer oper_name); -public static native TF_OperationDescription TF_NewOperationLocked(TF_Graph graph, - String op_type, - String oper_name); +// Creates a new operation - see `TF_NewOperation` for more details. +// +// The lock for `graph` must be held when calling this function. +// +// Unless implementing advanced behavior, like custom gradient functions, you +// most likely need to call `TF_NewOperation` instead. 
+public static native TF_OperationDescription TF_NewOperationLocked( + TF_Graph graph, @Cast("const char*") BytePointer op_type, @Cast("const char*") BytePointer oper_name); +public static native TF_OperationDescription TF_NewOperationLocked( + TF_Graph graph, String op_type, String oper_name); // Operation will only be added to *graph when TF_FinishOperation() is // called (assuming TF_FinishOperation() does not return an error). @@ -1439,10 +1453,14 @@ public static native void TF_SetAttrValueProto(TF_OperationDescription desc, @Cast("size_t") long proto_len, TF_Status status); -// TF_FinishOperation, but without locking the graph. -// TF_FinishOperation should be preferred when possible. -public static native TF_Operation TF_FinishOperationLocked(TF_OperationDescription desc, - TF_Status status); +// Adds this operation to the graph - see `TF_FinishOperation` for more details. +// +// The lock for `graph` must be held when calling this function. +// +// Unless implementing advanced behavior, like custom gradient functions, you +// most likely need to call `TF_FinishOperation` instead. +public static native TF_Operation TF_FinishOperationLocked( + TF_OperationDescription desc, TF_Status status); // If this function succeeds: // * *status is set to an OK value, @@ -1970,6 +1988,26 @@ public static native void TF_OperationGetAttrValueProto( TF_Operation oper, String attr_name, TF_Buffer output_attr_value, TF_Status status); +// Get the number of attributes the operation has. +public static native int TF_OperationGetNumAttrs(TF_Operation oper); + +// Get the length of the name of the ith attribute, or -1 if there is not an +// ith attribute. +public static native int TF_OperationGetAttrNameLength(TF_Operation oper, + int i); + +// Get the name of the ith attribute. output should have the size of +// TF_OperationGetAttrNameLength(oper, i). +public static native void TF_OperationGetAttrName(TF_Operation oper, int i, + @Cast("char*") BytePointer output, + TF_Status status); +public static native void TF_OperationGetAttrName(TF_Operation oper, int i, + @Cast("char*") ByteBuffer output, + TF_Status status); +public static native void TF_OperationGetAttrName(TF_Operation oper, int i, + @Cast("char*") byte[] output, + TF_Status status); + // Returns the operation in the graph with `oper_name`. Returns nullptr if // no operation found. public static native TF_Operation TF_GraphOperationByName( @@ -3650,15 +3688,15 @@ public static native void TF_OpKernelConstruction_GetAttrStringList( // compute function. public static native TF_Tensor TF_AllocateOutput(TF_OpKernelContext context, int index, @Cast("TF_DataType") int dtype, - @Cast("int64_t*") LongPointer dims, int num_dims, + @Cast("const int64_t*") LongPointer dims, int num_dims, @Cast("size_t") long len, TF_Status status); public static native TF_Tensor TF_AllocateOutput(TF_OpKernelContext context, int index, @Cast("TF_DataType") int dtype, - @Cast("int64_t*") LongBuffer dims, int num_dims, + @Cast("const int64_t*") LongBuffer dims, int num_dims, @Cast("size_t") long len, TF_Status status); public static native TF_Tensor TF_AllocateOutput(TF_OpKernelContext context, int index, @Cast("TF_DataType") int dtype, - @Cast("int64_t*") long[] dims, int num_dims, + @Cast("const int64_t*") long[] dims, int num_dims, @Cast("size_t") long len, TF_Status status); // Tries to forward one of the inputs given in input_indices to @@ -3669,16 +3707,19 @@ public static native TF_Tensor TF_AllocateOutput(TF_OpKernelContext context, // -1. 
public static native TF_Tensor TF_ForwardInputOrAllocateOutput( TF_OpKernelContext context, IntPointer candidate_input_indices, - int num_candidate_input_indices, int output_index, @Cast("int64_t*") LongPointer output_dims, - int output_num_dims, IntPointer forwarded_input, TF_Status status); + int num_candidate_input_indices, int output_index, + @Cast("const int64_t*") LongPointer output_dims, int output_num_dims, IntPointer forwarded_input, + TF_Status status); public static native TF_Tensor TF_ForwardInputOrAllocateOutput( TF_OpKernelContext context, IntBuffer candidate_input_indices, - int num_candidate_input_indices, int output_index, @Cast("int64_t*") LongBuffer output_dims, - int output_num_dims, IntBuffer forwarded_input, TF_Status status); + int num_candidate_input_indices, int output_index, + @Cast("const int64_t*") LongBuffer output_dims, int output_num_dims, IntBuffer forwarded_input, + TF_Status status); public static native TF_Tensor TF_ForwardInputOrAllocateOutput( TF_OpKernelContext context, int[] candidate_input_indices, - int num_candidate_input_indices, int output_index, @Cast("int64_t*") long[] output_dims, - int output_num_dims, int[] forwarded_input, TF_Status status); + int num_candidate_input_indices, int output_index, + @Cast("const int64_t*") long[] output_dims, int output_num_dims, int[] forwarded_input, + TF_Status status); // Allocates a temporary Tensor of the specified type and shape. The // Tensor must not be used after kernel construction is @@ -3686,14 +3727,14 @@ public static native TF_Tensor TF_ForwardInputOrAllocateOutput( // // num_dims must equal the size of array dims public static native TF_Tensor TF_AllocateTemp( - TF_OpKernelContext context, @Cast("TF_DataType") int dtype, @Cast("int64_t*") LongPointer dims, int num_dims, - TF_AllocatorAttributes alloc_attrs, TF_Status status); + TF_OpKernelContext context, @Cast("TF_DataType") int dtype, @Cast("const int64_t*") LongPointer dims, + int num_dims, TF_AllocatorAttributes alloc_attrs, TF_Status status); public static native TF_Tensor TF_AllocateTemp( - TF_OpKernelContext context, @Cast("TF_DataType") int dtype, @Cast("int64_t*") LongBuffer dims, int num_dims, - TF_AllocatorAttributes alloc_attrs, TF_Status status); + TF_OpKernelContext context, @Cast("TF_DataType") int dtype, @Cast("const int64_t*") LongBuffer dims, + int num_dims, TF_AllocatorAttributes alloc_attrs, TF_Status status); public static native TF_Tensor TF_AllocateTemp( - TF_OpKernelContext context, @Cast("TF_DataType") int dtype, @Cast("int64_t*") long[] dims, int num_dims, - TF_AllocatorAttributes alloc_attrs, TF_Status status); + TF_OpKernelContext context, @Cast("TF_DataType") int dtype, @Cast("const int64_t*") long[] dims, + int num_dims, TF_AllocatorAttributes alloc_attrs, TF_Status status); // #ifdef __cplusplus /* end extern "C" */ // #endif @@ -4959,9 +5000,11 @@ public static native void TFE_OpSetAttrValueProto(@Const TFE_Op op, // #include // #include // #include +// #include // #include // #include +// #include "absl/types/optional.h" // #include "tensorflow/core/platform/logging.h" // #include "tensorflow/core/platform/macros.h" // #include "tensorflow/core/platform/stack_frame.h" diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/AllReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/AllReduce.java deleted file mode 100644 index 53e72874745..00000000000 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/AllReduce.java +++ /dev/null @@ -1,302 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.collective; - -import java.util.Arrays; -import java.util.List; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.framework.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * Mutually reduces multiple tensors of identical type and shape. - * - * @param data type for {@code data} output - * - * @deprecated use {@link org.tensorflow.op.collective.Reduce} instead - */ -@OpMetadata( - opType = AllReduce.OP_NAME, - inputsClass = AllReduce.Inputs.class -) -@Deprecated -public final class AllReduce extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "CollectiveReduce"; - - private Output data; - - public AllReduce(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - data = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new CollectiveReduce operation. - * - * @param scope current scope - * @param input The input value - * @param groupSize The value of the groupSize attribute - * @param groupKey The value of the groupKey attribute - * @param instanceKey The value of the instanceKey attribute - * @param mergeOp The value of the mergeOp attribute - * @param finalOp The value of the finalOp attribute - * @param subdivOffsets The value of the subdivOffsets attribute - * @param options carries optional attribute values - * @param data type for {@code CollectiveReduce} output and operands - * @return a new instance of AllReduce - */ - @Endpoint( - describeByClass = true - ) - public static AllReduce create(Scope scope, Operand input, - Long groupSize, Long groupKey, Long instanceKey, String mergeOp, String finalOp, - List subdivOffsets, Options... 
options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "AllReduce"); - opBuilder.addInput(input.asOutput()); - opBuilder.setAttr("group_size", groupSize); - opBuilder.setAttr("group_key", groupKey); - opBuilder.setAttr("instance_key", instanceKey); - opBuilder.setAttr("merge_op", mergeOp); - opBuilder.setAttr("final_op", finalOp); - long[] subdivOffsetsArray = new long[subdivOffsets.size()]; - for (int i = 0 ; i < subdivOffsetsArray.length ; i++) { - subdivOffsetsArray[i] = subdivOffsets.get(i); - } - opBuilder.setAttr("subdiv_offsets", subdivOffsetsArray); - if (options != null) { - for (Options opts : options) { - if (opts.waitFor != null) { - long[] waitForArray = new long[opts.waitFor.size()]; - for (int i = 0 ; i < waitForArray.length ; i++) { - waitForArray[i] = opts.waitFor.get(i); - } - opBuilder.setAttr("wait_for", waitForArray); - } - if (opts.communicationHint != null) { - opBuilder.setAttr("communication_hint", opts.communicationHint); - } - if (opts.timeoutSeconds != null) { - opBuilder.setAttr("timeout_seconds", opts.timeoutSeconds); - } - } - } - return new AllReduce<>(opBuilder.build()); - } - - /** - * Sets the waitFor option. - * - * @param waitFor the waitFor option - * @return this Options instance. - */ - public static Options waitFor(List waitFor) { - return new Options().waitFor(waitFor); - } - - /** - * Sets the waitFor option. - * - * @param waitFor the waitFor option - * @return this Options instance. - */ - public static Options waitFor(Long... waitFor) { - return new Options().waitFor(waitFor); - } - - /** - * Sets the communicationHint option. - * - * @param communicationHint the communicationHint option - * @return this Options instance. - */ - public static Options communicationHint(String communicationHint) { - return new Options().communicationHint(communicationHint); - } - - /** - * Sets the timeoutSeconds option. - * - * @param timeoutSeconds the timeoutSeconds option - * @return this Options instance. - */ - public static Options timeoutSeconds(Float timeoutSeconds) { - return new Options().timeoutSeconds(timeoutSeconds); - } - - /** - * Gets data. - * - * @return data. - */ - public Output data() { - return data; - } - - @Override - public Output asOutput() { - return data; - } - - /** - * Optional attributes for {@link org.tensorflow.op.collective.AllReduce} - */ - public static class Options { - private List waitFor; - - private String communicationHint; - - private Float timeoutSeconds; - - private Options() { - } - - /** - * Sets the waitFor option. - * - * @param waitFor the waitFor option - * @return this Options instance. - */ - public Options waitFor(List waitFor) { - this.waitFor = waitFor; - return this; - } - - /** - * Sets the waitFor option. - * - * @param waitFor the waitFor option - * @return this Options instance. - */ - public Options waitFor(Long... waitFor) { - this.waitFor = Arrays.asList(waitFor); - return this; - } - - /** - * Sets the communicationHint option. - * - * @param communicationHint the communicationHint option - * @return this Options instance. - */ - public Options communicationHint(String communicationHint) { - this.communicationHint = communicationHint; - return this; - } - - /** - * Sets the timeoutSeconds option. - * - * @param timeoutSeconds the timeoutSeconds option - * @return this Options instance. 
- */ - public Options timeoutSeconds(Float timeoutSeconds) { - this.timeoutSeconds = timeoutSeconds; - return this; - } - } - - @OpInputsMetadata( - outputsClass = AllReduce.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The input input - */ - public final Operand input; - - /** - * The T attribute - */ - public final DataType T; - - /** - * The groupSize attribute - */ - public final long groupSize; - - /** - * The groupKey attribute - */ - public final long groupKey; - - /** - * The instanceKey attribute - */ - public final long instanceKey; - - /** - * The mergeOp attribute - */ - public final String mergeOp; - - /** - * The finalOp attribute - */ - public final String finalOp; - - /** - * The subdivOffsets attribute - */ - public final long[] subdivOffsets; - - /** - * The waitFor attribute - */ - public final long[] waitFor; - - /** - * The communicationHint attribute - */ - public final String communicationHint; - - /** - * The timeoutSeconds attribute - */ - public final float timeoutSeconds; - - public Inputs(GraphOperation op) { - super(new AllReduce<>(op), op, Arrays.asList("T", "group_size", "group_key", "instance_key", "merge_op", "final_op", "subdiv_offsets", "wait_for", "communication_hint", "timeout_seconds")); - int inputIndex = 0; - input = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - groupSize = op.attributes().getAttrInt("group_size"); - groupKey = op.attributes().getAttrInt("group_key"); - instanceKey = op.attributes().getAttrInt("instance_key"); - mergeOp = op.attributes().getAttrString("merge_op"); - finalOp = op.attributes().getAttrString("final_op"); - subdivOffsets = op.attributes().getAttrIntList("subdiv_offsets"); - waitFor = op.attributes().getAttrIntList("wait_for"); - communicationHint = op.attributes().getAttrString("communication_hint"); - timeoutSeconds = op.attributes().getAttrFloat("timeout_seconds"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/BroadcastSend.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveAllToAll.java similarity index 52% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/BroadcastSend.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveAllToAll.java index cc18c528581..4cd48666b57 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/BroadcastSend.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveAllToAll.java @@ -23,7 +23,6 @@ import org.tensorflow.Operation; import org.tensorflow.OperationBuilder; import org.tensorflow.Output; -import org.tensorflow.ndarray.Shape; import org.tensorflow.op.RawOp; import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; @@ -31,76 +30,61 @@ import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; /** - * Broadcasts a tensor value to one or more other devices. + * Mutually exchanges multiple tensors of identical type and shape. 
* * @param data type for {@code data} output */ @OpMetadata( - opType = BroadcastSend.OP_NAME, - inputsClass = BroadcastSend.Inputs.class + opType = CollectiveAllToAll.OP_NAME, + inputsClass = CollectiveAllToAll.Inputs.class ) -public final class BroadcastSend extends RawOp implements Operand { +public final class CollectiveAllToAll extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "CollectiveBcastSend"; + public static final String OP_NAME = "CollectiveAllToAllV3"; private Output data; - public BroadcastSend(Operation operation) { + public CollectiveAllToAll(Operation operation) { super(operation, OP_NAME); int outputIdx = 0; data = operation.output(outputIdx++); } /** - * Factory method to create a class wrapping a new CollectiveBcastSend operation. + * Factory method to create a class wrapping a new CollectiveAllToAllV3 operation. * * @param scope current scope * @param input The input value - * @param groupSize The value of the groupSize attribute - * @param groupKey The value of the groupKey attribute - * @param instanceKey The value of the instanceKey attribute - * @param shape The value of the shape attribute + * @param communicator The communicator value + * @param groupAssignment The groupAssignment value * @param options carries optional attribute values - * @param data type for {@code CollectiveBcastSend} output and operands - * @return a new instance of BroadcastSend + * @param data type for {@code CollectiveAllToAllV3} output and operands + * @return a new instance of CollectiveAllToAll */ @Endpoint( describeByClass = true ) - public static BroadcastSend create(Scope scope, Operand input, - Long groupSize, Long groupKey, Long instanceKey, Shape shape, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BroadcastSend"); + public static CollectiveAllToAll create(Scope scope, Operand input, + Operand communicator, Operand groupAssignment, Options... options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "CollectiveAllToAll"); opBuilder.addInput(input.asOutput()); - opBuilder.setAttr("group_size", groupSize); - opBuilder.setAttr("group_key", groupKey); - opBuilder.setAttr("instance_key", instanceKey); - opBuilder.setAttr("shape", shape); + opBuilder.addInput(communicator.asOutput()); + opBuilder.addInput(groupAssignment.asOutput()); if (options != null) { for (Options opts : options) { - if (opts.communicationHint != null) { - opBuilder.setAttr("communication_hint", opts.communicationHint); - } if (opts.timeoutSeconds != null) { opBuilder.setAttr("timeout_seconds", opts.timeoutSeconds); } } } - return new BroadcastSend<>(opBuilder.build()); - } - - /** - * Sets the communicationHint option. - * - * @param communicationHint the communicationHint option - * @return this Options instance. - */ - public static Options communicationHint(String communicationHint) { - return new Options().communicationHint(communicationHint); + return new CollectiveAllToAll<>(opBuilder.build()); } /** @@ -128,27 +112,14 @@ public Output asOutput() { } /** - * Optional attributes for {@link org.tensorflow.op.collective.BroadcastSend} + * Optional attributes for {@link org.tensorflow.op.collective.CollectiveAllToAll} */ public static class Options { - private String communicationHint; - private Float timeoutSeconds; private Options() { } - /** - * Sets the communicationHint option. - * - * @param communicationHint the communicationHint option - * @return this Options instance. 
- */ - public Options communicationHint(String communicationHint) { - this.communicationHint = communicationHint; - return this; - } - /** * Sets the timeoutSeconds option. * @@ -162,43 +133,28 @@ public Options timeoutSeconds(Float timeoutSeconds) { } @OpInputsMetadata( - outputsClass = BroadcastSend.class + outputsClass = CollectiveAllToAll.class ) - public static class Inputs extends RawOpInputs> { + public static class Inputs extends RawOpInputs> { /** * The input input */ public final Operand input; /** - * The T attribute - */ - public final DataType T; - - /** - * The groupSize attribute - */ - public final long groupSize; - - /** - * The groupKey attribute + * The communicator input */ - public final long groupKey; + public final Operand communicator; /** - * The instanceKey attribute + * The groupAssignment input */ - public final long instanceKey; + public final Operand groupAssignment; /** - * The shape attribute - */ - public final Shape shape; - - /** - * The communicationHint attribute + * The T attribute */ - public final String communicationHint; + public final DataType T; /** * The timeoutSeconds attribute @@ -206,15 +162,12 @@ public static class Inputs extends RawOpInputs public final float timeoutSeconds; public Inputs(GraphOperation op) { - super(new BroadcastSend<>(op), op, Arrays.asList("T", "group_size", "group_key", "instance_key", "shape", "communication_hint", "timeout_seconds")); + super(new CollectiveAllToAll<>(op), op, Arrays.asList("T", "timeout_seconds")); int inputIndex = 0; input = (Operand) op.input(inputIndex++); + communicator = (Operand) op.input(inputIndex++); + groupAssignment = (Operand) op.input(inputIndex++); T = op.attributes().getAttrType("T"); - groupSize = op.attributes().getAttrInt("group_size"); - groupKey = op.attributes().getAttrInt("group_key"); - instanceKey = op.attributes().getAttrInt("instance_key"); - shape = op.attributes().getAttrShape("shape"); - communicationHint = op.attributes().getAttrString("communication_hint"); timeoutSeconds = op.attributes().getAttrFloat("timeout_seconds"); } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastRecvV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveBcastRecv.java similarity index 88% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastRecvV2.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveBcastRecv.java index e189877adb8..daf3fbd2a2f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastRecvV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveBcastRecv.java @@ -15,7 +15,7 @@ // This class has been generated, DO NOT EDIT! 
-package org.tensorflow.op.rawops; +package org.tensorflow.op.collective; import java.util.Arrays; import org.tensorflow.GraphOperation; @@ -41,10 +41,10 @@ * @param data type for {@code data} output */ @OpMetadata( - opType = CollectiveBcastRecvV2.OP_NAME, - inputsClass = CollectiveBcastRecvV2.Inputs.class + opType = CollectiveBcastRecv.OP_NAME, + inputsClass = CollectiveBcastRecv.Inputs.class ) -public final class CollectiveBcastRecvV2 extends RawOp implements Operand { +public final class CollectiveBcastRecv extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine */ @@ -52,7 +52,7 @@ public final class CollectiveBcastRecvV2 extends RawOp implemen private Output data; - public CollectiveBcastRecvV2(Operation operation) { + public CollectiveBcastRecv(Operation operation) { super(operation, OP_NAME); int outputIdx = 0; data = operation.output(outputIdx++); @@ -69,15 +69,15 @@ public CollectiveBcastRecvV2(Operation operation) { * @param T The value of the T attribute * @param options carries optional attribute values * @param data type for {@code CollectiveBcastRecvV2} output and operands - * @return a new instance of CollectiveBcastRecvV2 + * @return a new instance of CollectiveBcastRecv */ @Endpoint( describeByClass = true ) - public static CollectiveBcastRecvV2 create(Scope scope, + public static CollectiveBcastRecv create(Scope scope, Operand groupSize, Operand groupKey, Operand instanceKey, Operand shape, Class T, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "CollectiveBcastRecvV2"); + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "CollectiveBcastRecv"); opBuilder.addInput(groupSize.asOutput()); opBuilder.addInput(groupKey.asOutput()); opBuilder.addInput(instanceKey.asOutput()); @@ -93,7 +93,7 @@ public static CollectiveBcastRecvV2 create(Scope scope, } } } - return new CollectiveBcastRecvV2<>(opBuilder.build()); + return new CollectiveBcastRecv<>(opBuilder.build()); } /** @@ -131,7 +131,7 @@ public Output asOutput() { } /** - * Optional attributes for {@link org.tensorflow.op.rawops.CollectiveBcastRecvV2} + * Optional attributes for {@link org.tensorflow.op.collective.CollectiveBcastRecv} */ public static class Options { private String communicationHint; @@ -165,9 +165,9 @@ public Options timeoutSeconds(Float timeoutSeconds) { } @OpInputsMetadata( - outputsClass = CollectiveBcastRecvV2.class + outputsClass = CollectiveBcastRecv.class ) - public static class Inputs extends RawOpInputs> { + public static class Inputs extends RawOpInputs> { /** * The groupSize input */ @@ -209,7 +209,7 @@ public static class Inputs extends RawOpInputs> { public final float timeoutSeconds; public Inputs(GraphOperation op) { - super(new CollectiveBcastRecvV2<>(op), op, Arrays.asList("T", "Tshape", "communication_hint", "timeout_seconds")); + super(new CollectiveBcastRecv<>(op), op, Arrays.asList("T", "Tshape", "communication_hint", "timeout_seconds")); int inputIndex = 0; groupSize = (Operand) op.input(inputIndex++); groupKey = (Operand) op.input(inputIndex++); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastSendV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveBcastSend.java similarity index 87% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastSendV2.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveBcastSend.java index 
68a56685261..49c8fed8b0f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/CollectiveBcastSendV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveBcastSend.java @@ -15,7 +15,7 @@ // This class has been generated, DO NOT EDIT! -package org.tensorflow.op.rawops; +package org.tensorflow.op.collective; import java.util.Arrays; import org.tensorflow.GraphOperation; @@ -39,10 +39,10 @@ * @param data type for {@code data} output */ @OpMetadata( - opType = CollectiveBcastSendV2.OP_NAME, - inputsClass = CollectiveBcastSendV2.Inputs.class + opType = CollectiveBcastSend.OP_NAME, + inputsClass = CollectiveBcastSend.Inputs.class ) -public final class CollectiveBcastSendV2 extends RawOp implements Operand { +public final class CollectiveBcastSend extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine */ @@ -50,7 +50,7 @@ public final class CollectiveBcastSendV2 extends RawOp implemen private Output data; - public CollectiveBcastSendV2(Operation operation) { + public CollectiveBcastSend(Operation operation) { super(operation, OP_NAME); int outputIdx = 0; data = operation.output(outputIdx++); @@ -66,15 +66,15 @@ public CollectiveBcastSendV2(Operation operation) { * @param instanceKey The instanceKey value * @param options carries optional attribute values * @param data type for {@code CollectiveBcastSendV2} output and operands - * @return a new instance of CollectiveBcastSendV2 + * @return a new instance of CollectiveBcastSend */ @Endpoint( describeByClass = true ) - public static CollectiveBcastSendV2 create(Scope scope, Operand input, + public static CollectiveBcastSend create(Scope scope, Operand input, Operand groupSize, Operand groupKey, Operand instanceKey, Options... 
options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "CollectiveBcastSendV2"); + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "CollectiveBcastSend"); opBuilder.addInput(input.asOutput()); opBuilder.addInput(groupSize.asOutput()); opBuilder.addInput(groupKey.asOutput()); @@ -89,7 +89,7 @@ public static CollectiveBcastSendV2 create(Scope scope, Ope } } } - return new CollectiveBcastSendV2<>(opBuilder.build()); + return new CollectiveBcastSend<>(opBuilder.build()); } /** @@ -127,7 +127,7 @@ public Output asOutput() { } /** - * Optional attributes for {@link org.tensorflow.op.rawops.CollectiveBcastSendV2} + * Optional attributes for {@link org.tensorflow.op.collective.CollectiveBcastSend} */ public static class Options { private String communicationHint; @@ -161,9 +161,9 @@ public Options timeoutSeconds(Float timeoutSeconds) { } @OpInputsMetadata( - outputsClass = CollectiveBcastSendV2.class + outputsClass = CollectiveBcastSend.class ) - public static class Inputs extends RawOpInputs> { + public static class Inputs extends RawOpInputs> { /** * The input input */ @@ -200,7 +200,7 @@ public static class Inputs extends RawOpInputs(op), op, Arrays.asList("T", "communication_hint", "timeout_seconds")); + super(new CollectiveBcastSend<>(op), op, Arrays.asList("T", "communication_hint", "timeout_seconds")); int inputIndex = 0; input = (Operand) op.input(inputIndex++); groupSize = (Operand) op.input(inputIndex++); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/GatherV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveGather.java similarity index 90% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/GatherV2.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveGather.java index 2ee7eda0773..ff5931d704d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/GatherV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveGather.java @@ -41,10 +41,10 @@ * @param data type for {@code data} output */ @OpMetadata( - opType = GatherV2.OP_NAME, - inputsClass = GatherV2.Inputs.class + opType = CollectiveGather.OP_NAME, + inputsClass = CollectiveGather.Inputs.class ) -public final class GatherV2 extends RawOp implements Operand { +public final class CollectiveGather extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine */ @@ -52,7 +52,7 @@ public final class GatherV2 extends RawOp implements Operand< private Output data; - public GatherV2(Operation operation) { + public CollectiveGather(Operation operation) { super(operation, OP_NAME); int outputIdx = 0; data = operation.output(outputIdx++); @@ -69,15 +69,15 @@ public GatherV2(Operation operation) { * @param orderingToken The orderingToken value * @param options carries optional attribute values * @param data type for {@code CollectiveGatherV2} output and operands - * @return a new instance of GatherV2 + * @return a new instance of CollectiveGather */ @Endpoint( describeByClass = true ) - public static GatherV2 create(Scope scope, Operand input, + public static CollectiveGather create(Scope scope, Operand input, Operand groupSize, Operand groupKey, Operand instanceKey, Iterable> orderingToken, Options... 
options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "GatherV2"); + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "CollectiveGather"); opBuilder.addInput(input.asOutput()); opBuilder.addInput(groupSize.asOutput()); opBuilder.addInput(groupKey.asOutput()); @@ -96,7 +96,7 @@ public static GatherV2 create(Scope scope, Operand inp } } } - return new GatherV2<>(opBuilder.build()); + return new CollectiveGather<>(opBuilder.build()); } /** @@ -144,7 +144,7 @@ public Output asOutput() { } /** - * Optional attributes for {@link org.tensorflow.op.collective.GatherV2} + * Optional attributes for {@link org.tensorflow.op.collective.CollectiveGather} */ public static class Options { private String communicationHint; @@ -191,9 +191,9 @@ public Options NorderingToken(Long NorderingToken) { } @OpInputsMetadata( - outputsClass = GatherV2.class + outputsClass = CollectiveGather.class ) - public static class Inputs extends RawOpInputs> { + public static class Inputs extends RawOpInputs> { /** * The input input */ @@ -235,7 +235,7 @@ public static class Inputs extends RawOpInputs> { public final float timeoutSeconds; public Inputs(GraphOperation op) { - super(new GatherV2<>(op), op, Arrays.asList("T", "communication_hint", "timeout_seconds")); + super(new CollectiveGather<>(op), op, Arrays.asList("T", "communication_hint", "timeout_seconds")); int inputIndex = 0; input = (Operand) op.input(inputIndex++); groupSize = (Operand) op.input(inputIndex++); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/BroadcastRecv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveInitializeCommunicator.java similarity index 59% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/BroadcastRecv.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveInitializeCommunicator.java index 94dbc145772..b8c068c9f2f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/BroadcastRecv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveInitializeCommunicator.java @@ -23,64 +23,56 @@ import org.tensorflow.Operation; import org.tensorflow.OperationBuilder; import org.tensorflow.Output; -import org.tensorflow.ndarray.Shape; -import org.tensorflow.op.Operands; import org.tensorflow.op.RawOp; import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.framework.DataType; +import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TType; /** - * Receives a tensor value broadcast from another device. - * - * @param data type for {@code data} output + * Initializes a group for collective operations. 
*/ @OpMetadata( - opType = BroadcastRecv.OP_NAME, - inputsClass = BroadcastRecv.Inputs.class + opType = CollectiveInitializeCommunicator.OP_NAME, + inputsClass = CollectiveInitializeCommunicator.Inputs.class ) -public final class BroadcastRecv extends RawOp implements Operand { +public final class CollectiveInitializeCommunicator extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "CollectiveBcastRecv"; + public static final String OP_NAME = "CollectiveInitializeCommunicator"; - private Output data; + private Output communicator; - public BroadcastRecv(Operation operation) { + @SuppressWarnings("unchecked") + public CollectiveInitializeCommunicator(Operation operation) { super(operation, OP_NAME); int outputIdx = 0; - data = operation.output(outputIdx++); + communicator = operation.output(outputIdx++); } /** - * Factory method to create a class wrapping a new CollectiveBcastRecv operation. + * Factory method to create a class wrapping a new CollectiveInitializeCommunicator operation. * * @param scope current scope - * @param T The value of the T attribute - * @param groupSize The value of the groupSize attribute - * @param groupKey The value of the groupKey attribute - * @param instanceKey The value of the instanceKey attribute - * @param shape The value of the shape attribute + * @param groupKey The groupKey value + * @param rank The rank value + * @param groupSize The groupSize value * @param options carries optional attribute values - * @param data type for {@code CollectiveBcastRecv} output and operands - * @return a new instance of BroadcastRecv + * @return a new instance of CollectiveInitializeCommunicator */ @Endpoint( describeByClass = true ) - public static BroadcastRecv create(Scope scope, Class T, Long groupSize, - Long groupKey, Long instanceKey, Shape shape, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "BroadcastRecv"); - opBuilder.setAttr("T", Operands.toDataType(T)); - opBuilder.setAttr("group_size", groupSize); - opBuilder.setAttr("group_key", groupKey); - opBuilder.setAttr("instance_key", instanceKey); - opBuilder.setAttr("shape", shape); + public static CollectiveInitializeCommunicator create(Scope scope, Operand groupKey, + Operand rank, Operand groupSize, Options... options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "CollectiveInitializeCommunicator"); + opBuilder.addInput(groupKey.asOutput()); + opBuilder.addInput(rank.asOutput()); + opBuilder.addInput(groupSize.asOutput()); if (options != null) { for (Options opts : options) { if (opts.communicationHint != null) { @@ -91,7 +83,7 @@ public static BroadcastRecv create(Scope scope, Class T, } } } - return new BroadcastRecv<>(opBuilder.build()); + return new CollectiveInitializeCommunicator(opBuilder.build()); } /** @@ -115,21 +107,22 @@ public static Options timeoutSeconds(Float timeoutSeconds) { } /** - * Gets data. + * Gets communicator. * - * @return data. + * @return communicator. 
*/ - public Output data() { - return data; + public Output communicator() { + return communicator; } @Override - public Output asOutput() { - return data; + @SuppressWarnings("unchecked") + public Output asOutput() { + return (Output) communicator; } /** - * Optional attributes for {@link org.tensorflow.op.collective.BroadcastRecv} + * Optional attributes for {@link org.tensorflow.op.collective.CollectiveInitializeCommunicator} */ public static class Options { private String communicationHint; @@ -163,33 +156,23 @@ public Options timeoutSeconds(Float timeoutSeconds) { } @OpInputsMetadata( - outputsClass = BroadcastRecv.class + outputsClass = CollectiveInitializeCommunicator.class ) - public static class Inputs extends RawOpInputs> { + public static class Inputs extends RawOpInputs { /** - * The T attribute + * The groupKey input */ - public final DataType T; + public final Operand groupKey; /** - * The groupSize attribute + * The rank input */ - public final long groupSize; + public final Operand rank; /** - * The groupKey attribute + * The groupSize input */ - public final long groupKey; - - /** - * The instanceKey attribute - */ - public final long instanceKey; - - /** - * The shape attribute - */ - public final Shape shape; + public final Operand groupSize; /** * The communicationHint attribute @@ -202,13 +185,11 @@ public static class Inputs extends RawOpInputs> { public final float timeoutSeconds; public Inputs(GraphOperation op) { - super(new BroadcastRecv<>(op), op, Arrays.asList("T", "group_size", "group_key", "instance_key", "shape", "communication_hint", "timeout_seconds")); + super(new CollectiveInitializeCommunicator(op), op, Arrays.asList("communication_hint", "timeout_seconds")); int inputIndex = 0; - T = op.attributes().getAttrType("T"); - groupSize = op.attributes().getAttrInt("group_size"); - groupKey = op.attributes().getAttrInt("group_key"); - instanceKey = op.attributes().getAttrInt("instance_key"); - shape = op.attributes().getAttrShape("shape"); + groupKey = (Operand) op.input(inputIndex++); + rank = (Operand) op.input(inputIndex++); + groupSize = (Operand) op.input(inputIndex++); communicationHint = op.attributes().getAttrString("communication_hint"); timeoutSeconds = op.attributes().getAttrFloat("timeout_seconds"); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CollectivePermute.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectivePermute.java similarity index 99% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CollectivePermute.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectivePermute.java index c98049a5299..7742374fef0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CollectivePermute.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectivePermute.java @@ -15,7 +15,7 @@ // This class has been generated, DO NOT EDIT! 
-package org.tensorflow.op.tpu; +package org.tensorflow.op.collective; import java.util.Arrays; import org.tensorflow.GraphOperation; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/Gather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveReduce.java similarity index 54% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/Gather.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveReduce.java index 9140ca240ca..28273d9a76a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/Gather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveReduce.java @@ -23,7 +23,6 @@ import org.tensorflow.Operation; import org.tensorflow.OperationBuilder; import org.tensorflow.Output; -import org.tensorflow.ndarray.Shape; import org.tensorflow.op.RawOp; import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.Scope; @@ -31,76 +30,64 @@ import org.tensorflow.op.annotation.OpInputsMetadata; import org.tensorflow.op.annotation.OpMetadata; import org.tensorflow.proto.framework.DataType; +import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; +import org.tensorflow.types.family.TType; /** - * Mutually accumulates multiple tensors of identical type and shape. + * Mutually reduces multiple tensors of identical type and shape. * * @param data type for {@code data} output */ @OpMetadata( - opType = Gather.OP_NAME, - inputsClass = Gather.Inputs.class + opType = CollectiveReduce.OP_NAME, + inputsClass = CollectiveReduce.Inputs.class ) -public final class Gather extends RawOp implements Operand { +public final class CollectiveReduce extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "CollectiveGather"; + public static final String OP_NAME = "CollectiveReduceV3"; private Output data; - public Gather(Operation operation) { + public CollectiveReduce(Operation operation) { super(operation, OP_NAME); int outputIdx = 0; data = operation.output(outputIdx++); } /** - * Factory method to create a class wrapping a new CollectiveGather operation. + * Factory method to create a class wrapping a new CollectiveReduceV3 operation. * * @param scope current scope * @param input The input value - * @param groupSize The value of the groupSize attribute - * @param groupKey The value of the groupKey attribute - * @param instanceKey The value of the instanceKey attribute - * @param shape The value of the shape attribute + * @param communicator The communicator value + * @param groupAssignment The groupAssignment value + * @param reduction The value of the reduction attribute * @param options carries optional attribute values - * @param data type for {@code CollectiveGather} output and operands - * @return a new instance of Gather + * @param data type for {@code CollectiveReduceV3} output and operands + * @return a new instance of CollectiveReduce */ @Endpoint( describeByClass = true ) - public static Gather create(Scope scope, Operand input, Long groupSize, - Long groupKey, Long instanceKey, Shape shape, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "Gather"); + public static CollectiveReduce create(Scope scope, Operand input, + Operand communicator, Operand groupAssignment, String reduction, + Options... 
options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "CollectiveReduce"); opBuilder.addInput(input.asOutput()); - opBuilder.setAttr("group_size", groupSize); - opBuilder.setAttr("group_key", groupKey); - opBuilder.setAttr("instance_key", instanceKey); - opBuilder.setAttr("shape", shape); + opBuilder.addInput(communicator.asOutput()); + opBuilder.addInput(groupAssignment.asOutput()); + opBuilder.setAttr("reduction", reduction); if (options != null) { for (Options opts : options) { - if (opts.communicationHint != null) { - opBuilder.setAttr("communication_hint", opts.communicationHint); - } if (opts.timeoutSeconds != null) { opBuilder.setAttr("timeout_seconds", opts.timeoutSeconds); } } } - return new Gather<>(opBuilder.build()); - } - - /** - * Sets the communicationHint option. - * - * @param communicationHint the communicationHint option - * @return this Options instance. - */ - public static Options communicationHint(String communicationHint) { - return new Options().communicationHint(communicationHint); + return new CollectiveReduce<>(opBuilder.build()); } /** @@ -128,27 +115,14 @@ public Output asOutput() { } /** - * Optional attributes for {@link org.tensorflow.op.collective.Gather} + * Optional attributes for {@link org.tensorflow.op.collective.CollectiveReduce} */ public static class Options { - private String communicationHint; - private Float timeoutSeconds; private Options() { } - /** - * Sets the communicationHint option. - * - * @param communicationHint the communicationHint option - * @return this Options instance. - */ - public Options communicationHint(String communicationHint) { - this.communicationHint = communicationHint; - return this; - } - /** * Sets the timeoutSeconds option. * @@ -162,43 +136,33 @@ public Options timeoutSeconds(Float timeoutSeconds) { } @OpInputsMetadata( - outputsClass = Gather.class + outputsClass = CollectiveReduce.class ) - public static class Inputs extends RawOpInputs> { + public static class Inputs extends RawOpInputs> { /** * The input input */ public final Operand input; /** - * The T attribute - */ - public final DataType T; - - /** - * The groupSize attribute - */ - public final long groupSize; - - /** - * The groupKey attribute + * The communicator input */ - public final long groupKey; + public final Operand communicator; /** - * The instanceKey attribute + * The groupAssignment input */ - public final long instanceKey; + public final Operand groupAssignment; /** - * The shape attribute + * The T attribute */ - public final Shape shape; + public final DataType T; /** - * The communicationHint attribute + * The reduction attribute */ - public final String communicationHint; + public final String reduction; /** * The timeoutSeconds attribute @@ -206,15 +170,13 @@ public static class Inputs extends RawOpInputs> { public final float timeoutSeconds; public Inputs(GraphOperation op) { - super(new Gather<>(op), op, Arrays.asList("T", "group_size", "group_key", "instance_key", "shape", "communication_hint", "timeout_seconds")); + super(new CollectiveReduce<>(op), op, Arrays.asList("T", "reduction", "timeout_seconds")); int inputIndex = 0; input = (Operand) op.input(inputIndex++); + communicator = (Operand) op.input(inputIndex++); + groupAssignment = (Operand) op.input(inputIndex++); T = op.attributes().getAttrType("T"); - groupSize = op.attributes().getAttrInt("group_size"); - groupKey = op.attributes().getAttrInt("group_key"); - instanceKey = op.attributes().getAttrInt("instance_key"); - shape = 
op.attributes().getAttrShape("shape"); - communicationHint = op.attributes().getAttrString("communication_hint"); + reduction = op.attributes().getAttrString("reduction"); timeoutSeconds = op.attributes().getAttrFloat("timeout_seconds"); } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/Reduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/Reduce.java deleted file mode 100644 index 7d503474d02..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/Reduce.java +++ /dev/null @@ -1,299 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.collective; - -import java.util.Arrays; -import java.util.List; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.framework.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * Mutually reduces multiple tensors of identical type and shape. - * - * @param data type for {@code data} output - */ -@OpMetadata( - opType = Reduce.OP_NAME, - inputsClass = Reduce.Inputs.class -) -public final class Reduce extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "CollectiveReduce"; - - private Output data; - - public Reduce(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - data = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new CollectiveReduce operation. - * - * @param scope current scope - * @param input The input value - * @param groupSize The value of the groupSize attribute - * @param groupKey The value of the groupKey attribute - * @param instanceKey The value of the instanceKey attribute - * @param mergeOp The value of the mergeOp attribute - * @param finalOp The value of the finalOp attribute - * @param subdivOffsets The value of the subdivOffsets attribute - * @param options carries optional attribute values - * @param data type for {@code CollectiveReduce} output and operands - * @return a new instance of Reduce - */ - @Endpoint( - describeByClass = true - ) - public static Reduce create(Scope scope, Operand input, Long groupSize, - Long groupKey, Long instanceKey, String mergeOp, String finalOp, List subdivOffsets, - Options... 
options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "Reduce"); - opBuilder.addInput(input.asOutput()); - opBuilder.setAttr("group_size", groupSize); - opBuilder.setAttr("group_key", groupKey); - opBuilder.setAttr("instance_key", instanceKey); - opBuilder.setAttr("merge_op", mergeOp); - opBuilder.setAttr("final_op", finalOp); - long[] subdivOffsetsArray = new long[subdivOffsets.size()]; - for (int i = 0 ; i < subdivOffsetsArray.length ; i++) { - subdivOffsetsArray[i] = subdivOffsets.get(i); - } - opBuilder.setAttr("subdiv_offsets", subdivOffsetsArray); - if (options != null) { - for (Options opts : options) { - if (opts.waitFor != null) { - long[] waitForArray = new long[opts.waitFor.size()]; - for (int i = 0 ; i < waitForArray.length ; i++) { - waitForArray[i] = opts.waitFor.get(i); - } - opBuilder.setAttr("wait_for", waitForArray); - } - if (opts.communicationHint != null) { - opBuilder.setAttr("communication_hint", opts.communicationHint); - } - if (opts.timeoutSeconds != null) { - opBuilder.setAttr("timeout_seconds", opts.timeoutSeconds); - } - } - } - return new Reduce<>(opBuilder.build()); - } - - /** - * Sets the waitFor option. - * - * @param waitFor the waitFor option - * @return this Options instance. - */ - public static Options waitFor(List waitFor) { - return new Options().waitFor(waitFor); - } - - /** - * Sets the waitFor option. - * - * @param waitFor the waitFor option - * @return this Options instance. - */ - public static Options waitFor(Long... waitFor) { - return new Options().waitFor(waitFor); - } - - /** - * Sets the communicationHint option. - * - * @param communicationHint the communicationHint option - * @return this Options instance. - */ - public static Options communicationHint(String communicationHint) { - return new Options().communicationHint(communicationHint); - } - - /** - * Sets the timeoutSeconds option. - * - * @param timeoutSeconds the timeoutSeconds option - * @return this Options instance. - */ - public static Options timeoutSeconds(Float timeoutSeconds) { - return new Options().timeoutSeconds(timeoutSeconds); - } - - /** - * Gets data. - * - * @return data. - */ - public Output data() { - return data; - } - - @Override - public Output asOutput() { - return data; - } - - /** - * Optional attributes for {@link org.tensorflow.op.collective.Reduce} - */ - public static class Options { - private List waitFor; - - private String communicationHint; - - private Float timeoutSeconds; - - private Options() { - } - - /** - * Sets the waitFor option. - * - * @param waitFor the waitFor option - * @return this Options instance. - */ - public Options waitFor(List waitFor) { - this.waitFor = waitFor; - return this; - } - - /** - * Sets the waitFor option. - * - * @param waitFor the waitFor option - * @return this Options instance. - */ - public Options waitFor(Long... waitFor) { - this.waitFor = Arrays.asList(waitFor); - return this; - } - - /** - * Sets the communicationHint option. - * - * @param communicationHint the communicationHint option - * @return this Options instance. - */ - public Options communicationHint(String communicationHint) { - this.communicationHint = communicationHint; - return this; - } - - /** - * Sets the timeoutSeconds option. - * - * @param timeoutSeconds the timeoutSeconds option - * @return this Options instance. 
- */ - public Options timeoutSeconds(Float timeoutSeconds) { - this.timeoutSeconds = timeoutSeconds; - return this; - } - } - - @OpInputsMetadata( - outputsClass = Reduce.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The input input - */ - public final Operand input; - - /** - * The T attribute - */ - public final DataType T; - - /** - * The groupSize attribute - */ - public final long groupSize; - - /** - * The groupKey attribute - */ - public final long groupKey; - - /** - * The instanceKey attribute - */ - public final long instanceKey; - - /** - * The mergeOp attribute - */ - public final String mergeOp; - - /** - * The finalOp attribute - */ - public final String finalOp; - - /** - * The subdivOffsets attribute - */ - public final long[] subdivOffsets; - - /** - * The waitFor attribute - */ - public final long[] waitFor; - - /** - * The communicationHint attribute - */ - public final String communicationHint; - - /** - * The timeoutSeconds attribute - */ - public final float timeoutSeconds; - - public Inputs(GraphOperation op) { - super(new Reduce<>(op), op, Arrays.asList("T", "group_size", "group_key", "instance_key", "merge_op", "final_op", "subdiv_offsets", "wait_for", "communication_hint", "timeout_seconds")); - int inputIndex = 0; - input = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - groupSize = op.attributes().getAttrInt("group_size"); - groupKey = op.attributes().getAttrInt("group_key"); - instanceKey = op.attributes().getAttrInt("instance_key"); - mergeOp = op.attributes().getAttrString("merge_op"); - finalOp = op.attributes().getAttrString("final_op"); - subdivOffsets = op.attributes().getAttrIntList("subdiv_offsets"); - waitFor = op.attributes().getAttrIntList("wait_for"); - communicationHint = op.attributes().getAttrString("communication_hint"); - timeoutSeconds = op.attributes().getAttrFloat("timeout_seconds"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/ReduceV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/ReduceV2.java deleted file mode 100644 index 0c09c73f020..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/ReduceV2.java +++ /dev/null @@ -1,301 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.collective; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.Operands; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.framework.DataType; -import org.tensorflow.types.TInt32; -import org.tensorflow.types.family.TNumber; -import org.tensorflow.types.family.TType; - -/** - * Mutually reduces multiple tensors of identical type and shape. - * - * @param data type for {@code data} output - */ -@OpMetadata( - opType = ReduceV2.OP_NAME, - inputsClass = ReduceV2.Inputs.class -) -public final class ReduceV2 extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "CollectiveReduceV2"; - - private Output data; - - public ReduceV2(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - data = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new CollectiveReduceV2 operation. - * - * @param scope current scope - * @param input The input value - * @param groupSize The groupSize value - * @param groupKey The groupKey value - * @param instanceKey The instanceKey value - * @param orderingToken The orderingToken value - * @param mergeOp The value of the mergeOp attribute - * @param finalOp The value of the finalOp attribute - * @param options carries optional attribute values - * @param data type for {@code CollectiveReduceV2} output and operands - * @return a new instance of ReduceV2 - */ - @Endpoint( - describeByClass = true - ) - public static ReduceV2 create(Scope scope, Operand input, - Operand groupSize, Operand groupKey, Operand instanceKey, - Iterable> orderingToken, String mergeOp, String finalOp, - Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "ReduceV2"); - opBuilder.addInput(input.asOutput()); - opBuilder.addInput(groupSize.asOutput()); - opBuilder.addInput(groupKey.asOutput()); - opBuilder.addInput(instanceKey.asOutput()); - opBuilder.addInputList(Operands.asOutputs(orderingToken)); - opBuilder.setAttr("merge_op", mergeOp); - opBuilder.setAttr("final_op", finalOp); - if (options != null) { - for (Options opts : options) { - if (opts.communicationHint != null) { - opBuilder.setAttr("communication_hint", opts.communicationHint); - } - if (opts.timeoutSeconds != null) { - opBuilder.setAttr("timeout_seconds", opts.timeoutSeconds); - } - if (opts.NorderingToken != null) { - opBuilder.setAttr("Nordering_token", opts.NorderingToken); - } - if (opts.maxSubdivsPerDevice != null) { - opBuilder.setAttr("max_subdivs_per_device", opts.maxSubdivsPerDevice); - } - } - } - return new ReduceV2<>(opBuilder.build()); - } - - /** - * Sets the communicationHint option. - * - * @param communicationHint the communicationHint option - * @return this Options instance. - */ - public static Options communicationHint(String communicationHint) { - return new Options().communicationHint(communicationHint); - } - - /** - * Sets the timeoutSeconds option. - * - * @param timeoutSeconds the timeoutSeconds option - * @return this Options instance. 
- */ - public static Options timeoutSeconds(Float timeoutSeconds) { - return new Options().timeoutSeconds(timeoutSeconds); - } - - /** - * Sets the NorderingToken option. - * - * @param NorderingToken the NorderingToken option - * @return this Options instance. - */ - public static Options NorderingToken(Long NorderingToken) { - return new Options().NorderingToken(NorderingToken); - } - - /** - * Sets the maxSubdivsPerDevice option. - * - * @param maxSubdivsPerDevice the maxSubdivsPerDevice option - * @return this Options instance. - */ - public static Options maxSubdivsPerDevice(Long maxSubdivsPerDevice) { - return new Options().maxSubdivsPerDevice(maxSubdivsPerDevice); - } - - /** - * Gets data. - * - * @return data. - */ - public Output data() { - return data; - } - - @Override - public Output asOutput() { - return data; - } - - /** - * Optional attributes for {@link org.tensorflow.op.collective.ReduceV2} - */ - public static class Options { - private String communicationHint; - - private Float timeoutSeconds; - - private Long NorderingToken; - - private Long maxSubdivsPerDevice; - - private Options() { - } - - /** - * Sets the communicationHint option. - * - * @param communicationHint the communicationHint option - * @return this Options instance. - */ - public Options communicationHint(String communicationHint) { - this.communicationHint = communicationHint; - return this; - } - - /** - * Sets the timeoutSeconds option. - * - * @param timeoutSeconds the timeoutSeconds option - * @return this Options instance. - */ - public Options timeoutSeconds(Float timeoutSeconds) { - this.timeoutSeconds = timeoutSeconds; - return this; - } - - /** - * Sets the NorderingToken option. - * - * @param NorderingToken the NorderingToken option - * @return this Options instance. - */ - public Options NorderingToken(Long NorderingToken) { - this.NorderingToken = NorderingToken; - return this; - } - - /** - * Sets the maxSubdivsPerDevice option. - * - * @param maxSubdivsPerDevice the maxSubdivsPerDevice option - * @return this Options instance. 
- */ - public Options maxSubdivsPerDevice(Long maxSubdivsPerDevice) { - this.maxSubdivsPerDevice = maxSubdivsPerDevice; - return this; - } - } - - @OpInputsMetadata( - outputsClass = ReduceV2.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The input input - */ - public final Operand input; - - /** - * The groupSize input - */ - public final Operand groupSize; - - /** - * The groupKey input - */ - public final Operand groupKey; - - /** - * The instanceKey input - */ - public final Operand instanceKey; - - /** - * The orderingToken input - */ - public final Iterable> orderingToken; - - /** - * The T attribute - */ - public final DataType T; - - /** - * The mergeOp attribute - */ - public final String mergeOp; - - /** - * The finalOp attribute - */ - public final String finalOp; - - /** - * The communicationHint attribute - */ - public final String communicationHint; - - /** - * The timeoutSeconds attribute - */ - public final float timeoutSeconds; - - /** - * The maxSubdivsPerDevice attribute - */ - public final long maxSubdivsPerDevice; - - public Inputs(GraphOperation op) { - super(new ReduceV2<>(op), op, Arrays.asList("T", "merge_op", "final_op", "communication_hint", "timeout_seconds", "max_subdivs_per_device")); - int inputIndex = 0; - input = (Operand) op.input(inputIndex++); - groupSize = (Operand) op.input(inputIndex++); - groupKey = (Operand) op.input(inputIndex++); - instanceKey = (Operand) op.input(inputIndex++); - int orderingTokenLength = op.inputListLength("ordering_token"); - orderingToken = Arrays.asList((Operand[]) op.inputList(inputIndex, orderingTokenLength)); - inputIndex += orderingTokenLength; - T = op.attributes().getAttrType("T"); - mergeOp = op.attributes().getAttrString("merge_op"); - finalOp = op.attributes().getAttrString("final_op"); - communicationHint = op.attributes().getAttrString("communication_hint"); - timeoutSeconds = op.attributes().getAttrFloat("timeout_seconds"); - maxSubdivsPerDevice = op.attributes().getAttrInt("max_subdivs_per_device"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousHashTable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousHashTable.java new file mode 100644 index 00000000000..0a14318ca21 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousHashTable.java @@ -0,0 +1,123 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.core; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.proto.framework.DataType; +import org.tensorflow.types.family.TType; + +/** + * Creates a uninitialized anonymous hash table. + * This op creates a new anonymous hash table (as a resource) everytime + * it is executed, with the specified dtype of its keys and values, + * returning the resource handle. Before using the table you will have + * to initialize it. After initialization the table will be + * immutable. The table is anonymous in the sense that it can only be + * accessed by the returned resource handle (e.g. it cannot be looked up + * by a name in a resource manager). The table will be automatically + * deleted when all resource handles pointing to it are gone. + */ +@OpMetadata( + opType = AnonymousHashTable.OP_NAME, + inputsClass = AnonymousHashTable.Inputs.class +) +public final class AnonymousHashTable extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "AnonymousHashTable"; + + private Output tableHandle; + + @SuppressWarnings("unchecked") + public AnonymousHashTable(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + tableHandle = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new AnonymousHashTable operation. + * + * @param scope current scope + * @param keyDtype Type of the table keys. + * @param valueDtype Type of the table values. + * @param data type for {@code AnonymousHashTable} output and operands + * @param data type for {@code AnonymousHashTable} output and operands + * @return a new instance of AnonymousHashTable + */ + @Endpoint( + describeByClass = true + ) + public static AnonymousHashTable create(Scope scope, + Class keyDtype, Class valueDtype) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "AnonymousHashTable"); + opBuilder.setAttr("key_dtype", Operands.toDataType(keyDtype)); + opBuilder.setAttr("value_dtype", Operands.toDataType(valueDtype)); + return new AnonymousHashTable(opBuilder.build()); + } + + /** + * Gets tableHandle. + * The resource handle to the newly created hash-table resource. + * @return tableHandle. + */ + public Output tableHandle() { + return tableHandle; + } + + @Override + @SuppressWarnings("unchecked") + public Output asOutput() { + return (Output) tableHandle; + } + + @OpInputsMetadata( + outputsClass = AnonymousHashTable.class + ) + public static class Inputs extends RawOpInputs { + /** + * Type of the table keys. + */ + public final DataType keyDtype; + + /** + * Type of the table values. 
+ */ + public final DataType valueDtype; + + public Inputs(GraphOperation op) { + super(new AnonymousHashTable(op), op, Arrays.asList("key_dtype", "value_dtype")); + int inputIndex = 0; + keyDtype = op.attributes().getAttrType("key_dtype"); + valueDtype = op.attributes().getAttrType("value_dtype"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CollectiveGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CollectiveGather.java deleted file mode 100644 index d30e8e38019..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CollectiveGather.java +++ /dev/null @@ -1,224 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.core; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.ndarray.Shape; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.proto.framework.DataType; -import org.tensorflow.types.family.TNumber; - -/** - * Mutually accumulates multiple tensors of identical type and shape. - * - * @param data type for {@code data} output - * - * @deprecated use {@link org.tensorflow.op.collective.Gather} instead - */ -@OpMetadata( - opType = CollectiveGather.OP_NAME, - inputsClass = CollectiveGather.Inputs.class -) -@Deprecated -public final class CollectiveGather extends RawOp implements Operand { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "CollectiveGather"; - - private Output data; - - public CollectiveGather(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - data = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new CollectiveGather operation. - * - * @param scope current scope - * @param input The input value - * @param groupSize The value of the groupSize attribute - * @param groupKey The value of the groupKey attribute - * @param instanceKey The value of the instanceKey attribute - * @param shape The value of the shape attribute - * @param options carries optional attribute values - * @param data type for {@code CollectiveGather} output and operands - * @return a new instance of CollectiveGather - */ - @Endpoint( - describeByClass = true - ) - public static CollectiveGather create(Scope scope, Operand input, - Long groupSize, Long groupKey, Long instanceKey, Shape shape, Options... 
options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "CollectiveGather"); - opBuilder.addInput(input.asOutput()); - opBuilder.setAttr("group_size", groupSize); - opBuilder.setAttr("group_key", groupKey); - opBuilder.setAttr("instance_key", instanceKey); - opBuilder.setAttr("shape", shape); - if (options != null) { - for (Options opts : options) { - if (opts.communicationHint != null) { - opBuilder.setAttr("communication_hint", opts.communicationHint); - } - if (opts.timeoutSeconds != null) { - opBuilder.setAttr("timeout_seconds", opts.timeoutSeconds); - } - } - } - return new CollectiveGather<>(opBuilder.build()); - } - - /** - * Sets the communicationHint option. - * - * @param communicationHint the communicationHint option - * @return this Options instance. - */ - public static Options communicationHint(String communicationHint) { - return new Options().communicationHint(communicationHint); - } - - /** - * Sets the timeoutSeconds option. - * - * @param timeoutSeconds the timeoutSeconds option - * @return this Options instance. - */ - public static Options timeoutSeconds(Float timeoutSeconds) { - return new Options().timeoutSeconds(timeoutSeconds); - } - - /** - * Gets data. - * - * @return data. - */ - public Output data() { - return data; - } - - @Override - public Output asOutput() { - return data; - } - - /** - * Optional attributes for {@link org.tensorflow.op.core.CollectiveGather} - */ - public static class Options { - private String communicationHint; - - private Float timeoutSeconds; - - private Options() { - } - - /** - * Sets the communicationHint option. - * - * @param communicationHint the communicationHint option - * @return this Options instance. - */ - public Options communicationHint(String communicationHint) { - this.communicationHint = communicationHint; - return this; - } - - /** - * Sets the timeoutSeconds option. - * - * @param timeoutSeconds the timeoutSeconds option - * @return this Options instance. 
- */ - public Options timeoutSeconds(Float timeoutSeconds) { - this.timeoutSeconds = timeoutSeconds; - return this; - } - } - - @OpInputsMetadata( - outputsClass = CollectiveGather.class - ) - public static class Inputs extends RawOpInputs> { - /** - * The input input - */ - public final Operand input; - - /** - * The T attribute - */ - public final DataType T; - - /** - * The groupSize attribute - */ - public final long groupSize; - - /** - * The groupKey attribute - */ - public final long groupKey; - - /** - * The instanceKey attribute - */ - public final long instanceKey; - - /** - * The shape attribute - */ - public final Shape shape; - - /** - * The communicationHint attribute - */ - public final String communicationHint; - - /** - * The timeoutSeconds attribute - */ - public final float timeoutSeconds; - - public Inputs(GraphOperation op) { - super(new CollectiveGather<>(op), op, Arrays.asList("T", "group_size", "group_key", "instance_key", "shape", "communication_hint", "timeout_seconds")); - int inputIndex = 0; - input = (Operand) op.input(inputIndex++); - T = op.attributes().getAttrType("T"); - groupSize = op.attributes().getAttrInt("group_size"); - groupKey = op.attributes().getAttrInt("group_key"); - instanceKey = op.attributes().getAttrInt("instance_key"); - shape = op.attributes().getAttrShape("shape"); - communicationHint = op.attributes().getAttrString("communication_hint"); - timeoutSeconds = op.attributes().getAttrFloat("timeout_seconds"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetElementAtIndex.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetElementAtIndex.java new file mode 100644 index 00000000000..8d23fc0bfde --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetElementAtIndex.java @@ -0,0 +1,139 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.core; + +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.proto.framework.DataType; +import org.tensorflow.types.TInt64; +import org.tensorflow.types.family.TType; + +/** + * Gets the element at the specified index in a dataset. 
+ */ +@OpMetadata( + opType = GetElementAtIndex.OP_NAME, + inputsClass = GetElementAtIndex.Inputs.class +) +public final class GetElementAtIndex extends RawOp implements Iterable> { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "GetElementAtIndex"; + + private List> components; + + @SuppressWarnings("unchecked") + public GetElementAtIndex(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + int componentsLength = operation.outputListLength("components"); + components = Arrays.asList(operation.outputList(outputIdx, componentsLength)); + outputIdx += componentsLength; + } + + /** + * Factory method to create a class wrapping a new GetElementAtIndex operation. + * + * @param scope current scope + * @param dataset The dataset value + * @param index The index value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of GetElementAtIndex + */ + @Endpoint( + describeByClass = true + ) + public static GetElementAtIndex create(Scope scope, Operand dataset, + Operand index, List> outputTypes, List outputShapes) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "GetElementAtIndex"); + opBuilder.addInput(dataset.asOutput()); + opBuilder.addInput(index.asOutput()); + opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); + Shape[] outputShapesArray = new Shape[outputShapes.size()]; + for (int i = 0 ; i < outputShapesArray.length ; i++) { + outputShapesArray[i] = outputShapes.get(i); + } + opBuilder.setAttr("output_shapes", outputShapesArray); + return new GetElementAtIndex(opBuilder.build()); + } + + /** + * Gets components. + * + * @return components. + */ + public List> components() { + return components; + } + + @Override + @SuppressWarnings({"rawtypes", "unchecked"}) + public Iterator> iterator() { + return (Iterator) components.iterator(); + } + + @OpInputsMetadata( + outputsClass = GetElementAtIndex.class + ) + public static class Inputs extends RawOpInputs { + /** + * The dataset input + */ + public final Operand dataset; + + /** + * The index input + */ + public final Operand index; + + /** + * The outputTypes attribute + */ + public final DataType[] outputTypes; + + /** + * The outputShapes attribute + */ + public final Shape[] outputShapes; + + public Inputs(GraphOperation op) { + super(new GetElementAtIndex(op), op, Arrays.asList("output_types", "output_shapes")); + int inputIndex = 0; + dataset = (Operand) op.input(inputIndex++); + index = (Operand) op.input(inputIndex++); + outputTypes = op.attributes().getAttrTypeList("output_types"); + outputShapes = op.attributes().getAttrShapeList("output_shapes"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/GetOptions.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetOptions.java similarity index 98% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/GetOptions.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetOptions.java index 60f0af859fd..52a20880245 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/GetOptions.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetOptions.java @@ -15,7 +15,7 @@ // This class has been generated, DO NOT EDIT! 
-package org.tensorflow.op.rawops; +package org.tensorflow.op.core; import java.util.Arrays; import org.tensorflow.GraphOperation; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reverse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reverse.java index 5bdcfc6b3dd..0094a4567be 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reverse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reverse.java @@ -36,9 +36,7 @@ /** * Reverses specific dimensions of a tensor. - * NOTE {@code tf.reverse} has now changed behavior in preparation for 1.0. - * {@code tf.reverse_v2} is currently an alias that will be deprecated before TF 1.0. - *

    Given a {@code tensor}, and a {@code int32} tensor {@code axis} representing the set of + * Given a {@code tensor}, and a {@code int32} tensor {@code axis} representing the set of * dimensions of {@code tensor} to reverse. This operation reverses each dimension * {@code i} for which there exists {@code j} s.t. {@code axis[j] == i}. *

    {@code tensor} can have up to 8 dimensions. The number of dimensions specified diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNd.java index 24b251c016e..e44348c83a6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNd.java @@ -35,34 +35,37 @@ import org.tensorflow.types.family.TType; /** - * Scatter {@code updates} into a new tensor according to {@code indices}. - * Creates a new tensor by applying sparse {@code updates} to individual values or - * slices within a tensor (initially zero for numeric, empty for string) of - * the given {@code shape} according to indices. This operator is the inverse of the - * {@code tf.gather_nd} operator which extracts values or slices from a given tensor. - *

    This operation is similar to tensor_scatter_add, except that the tensor is - * zero-initialized. Calling {@code tf.scatter_nd(indices, values, shape)} is identical - * to {@code tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)} - *

    If {@code indices} contains duplicates, then their updates are accumulated (summed). + * Scatters {@code updates} into a tensor of shape {@code shape} according to {@code indices}. + * Update the input tensor by scattering sparse {@code updates} according to individual values at the specified {@code indices}. + * This op returns an {@code output} tensor with the {@code shape} you specify. This op is the + * inverse of the {@code tf.gather_nd} operator which extracts values or slices from a + * given tensor. + *

    This operation is similar to {@code tf.tensor_scatter_add}, except that the tensor is + * zero-initialized. Calling {@code tf.scatter_nd(indices, values, shape)} + * is identical to calling + * {@code tf.tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)}. + *

    If {@code indices} contains duplicates, the duplicate {@code values} are accumulated + * (summed). *

    WARNING: The order in which updates are applied is nondeterministic, so the - * output will be nondeterministic if {@code indices} contains duplicates -- because - * of some numerical approximation issues, numbers summed in different order - * may yield different results. - *

    {@code indices} is an integer tensor containing indices into a new tensor of shape - * {@code shape}. The last dimension of {@code indices} can be at most the rank of {@code shape}: + * output will be nondeterministic if {@code indices} contains duplicates; + * numbers summed in different order may yield different results because of some + * numerical approximation issues. + *

    {@code indices} is an integer tensor of shape {@code shape}. The last dimension + * of {@code indices} can be at most the rank of {@code shape}: *

      * indices.shape[-1] <= shape.rank
      * 
    - *

    The last dimension of {@code indices} corresponds to indices into elements + *

    The last dimension of {@code indices} corresponds to indices of elements * (if {@code indices.shape[-1] = shape.rank}) or slices * (if {@code indices.shape[-1] < shape.rank}) along dimension {@code indices.shape[-1]} of - * {@code shape}. {@code updates} is a tensor with shape + * {@code shape}. + *

    {@code updates} is a tensor with shape: *

      * indices.shape[:-1] + shape[indices.shape[-1]:]
      * 
    - *

    The simplest form of scatter is to insert individual elements in a tensor by - * index. For example, say we want to insert 4 scattered elements in a rank-1 - * tensor with 8 elements. + *

    The simplest form of the scatter op is to insert individual elements in + * a tensor by index. Consider an example where you want to insert 4 scattered + * elements in a rank-1 tensor with 8 elements. *

    * *
    @@ -78,9 +81,9 @@ *
      * [0, 11, 0, 10, 9, 0, 0, 12]
      * 
    - *

    We can also, insert entire slices of a higher rank tensor all at once. For - * example, if we wanted to insert two slices in the first dimension of a - * rank-3 tensor with two matrices of new values. + *

    You can also insert entire slices of a higher rank tensor all at once. For + * example, you can insert two slices in the first dimension of a rank-3 tensor + * with two matrices of new values. *

    * *
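
    The rank-1 example in the rewritten Javadoc above can be exercised directly from the
    generated Java bindings. The sketch below is illustrative only: it assumes the standard
    Ops endpoint tf.scatterNd, and it fills in the canonical input values from the upstream
    TensorFlow documentation (indices [[4], [3], [1], [7]], updates [9, 10, 11, 12],
    shape [8]); this hunk only shows the expected result, so those inputs are an assumption.

    import org.tensorflow.Graph;
    import org.tensorflow.Operand;
    import org.tensorflow.op.Ops;
    import org.tensorflow.types.TInt32;

    final class ScatterNdSketch {
      static void build() {
        try (Graph g = new Graph()) {
          Ops tf = Ops.create(g);
          // Scatter four updates into a rank-1 tensor of 8 elements (input values assumed
          // from the canonical TensorFlow example; only the result appears in this diff).
          Operand<TInt32> indices = tf.constant(new int[][] {{4}, {3}, {1}, {7}});
          Operand<TInt32> updates = tf.constant(new int[] {9, 10, 11, 12});
          Operand<TInt32> shape = tf.constant(new int[] {8});
          Operand<TInt32> scattered = tf.scatterNd(indices, updates, shape);
          // Fetching `scattered` in a Session would yield [0, 11, 0, 10, 9, 0, 0, 12].
        }
      }
    }
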
    @@ -130,9 +133,9 @@ public ScatterNd(Operation operation) { * Factory method to create a class wrapping a new ScatterNd operation. * * @param scope current scope - * @param indices Index tensor. - * @param updates Updates to scatter into output. - * @param shape 1-D. The shape of the resulting tensor. + * @param indices Tensor of indices. + * @param updates Values to scatter into the output tensor. + * @param shape 1-D. The shape of the output tensor. * @param data type for {@code ScatterNd} output and operands * @param data type for {@code ScatterNd} output and operands * @return a new instance of ScatterNd @@ -169,17 +172,17 @@ public Output asOutput() { ) public static class Inputs extends RawOpInputs> { /** - * Index tensor. + * Tensor of indices. */ public final Operand indices; /** - * Updates to scatter into output. + * Values to scatter into the output tensor. */ public final Operand updates; /** - * 1-D. The shape of the resulting tensor. + * 1-D. The shape of the output tensor. */ public final Operand shape; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/BatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/BatchDataset.java index fbd817a8f20..77353b9bb3d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/BatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/BatchDataset.java @@ -97,6 +97,9 @@ public static BatchDataset create(Scope scope, Operand inputDat if (opts.parallelCopy != null) { opBuilder.setAttr("parallel_copy", opts.parallelCopy); } + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } } } return new BatchDataset(opBuilder.build()); @@ -112,6 +115,16 @@ public static Options parallelCopy(Boolean parallelCopy) { return new Options().parallelCopy(parallelCopy); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -133,6 +146,8 @@ public Output asOutput() { public static class Options { private Boolean parallelCopy; + private String metadata; + private Options() { } @@ -146,6 +161,17 @@ public Options parallelCopy(Boolean parallelCopy) { this.parallelCopy = parallelCopy; return this; } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } } @OpInputsMetadata( @@ -183,8 +209,13 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new BatchDataset(op), op, Arrays.asList("parallel_copy", "output_types", "output_shapes")); + super(new BatchDataset(op), op, Arrays.asList("parallel_copy", "output_types", "output_shapes", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); batchSize = (Operand) op.input(inputIndex++); @@ -192,6 +223,7 @@ public Inputs(GraphOperation op) { parallelCopy = op.attributes().getAttrBool("parallel_copy"); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CacheDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CacheDataset.java index 842112ec012..f2aae60173a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CacheDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/CacheDataset.java @@ -71,6 +71,7 @@ public CacheDataset(Operation operation) { * @param cache The cache value * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of CacheDataset */ @Endpoint( @@ -78,7 +79,7 @@ public CacheDataset(Operation operation) { ) public static CacheDataset create(Scope scope, Operand inputDataset, Operand filename, Operand cache, - List> outputTypes, List outputShapes) { + List> outputTypes, List outputShapes, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "CacheDataset"); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(filename.asOutput()); @@ -89,9 +90,26 @@ public static CacheDataset create(Scope scope, Operand inputDat outputShapesArray[i] = outputShapes.get(i); } opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } + } + } return new CacheDataset(opBuilder.build()); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -107,6 +125,27 @@ public Output asOutput() { return (Output) handle; } + /** + * Optional attributes for {@link org.tensorflow.op.data.CacheDataset} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + @OpInputsMetadata( outputsClass = CacheDataset.class ) @@ -136,14 +175,20 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new CacheDataset(op), op, Arrays.asList("output_types", "output_shapes")); + super(new CacheDataset(op), op, Arrays.asList("output_types", "output_shapes", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); filename = (Operand) op.input(inputIndex++); cache = (Operand) op.input(inputIndex++); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ConcatenateDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ConcatenateDataset.java index 1a58173cecd..0ecef8a4073 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ConcatenateDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ConcatenateDataset.java @@ -69,6 +69,7 @@ public ConcatenateDataset(Operation operation) { * @param anotherDataset The anotherDataset value * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of ConcatenateDataset */ @Endpoint( @@ -76,7 +77,7 @@ public ConcatenateDataset(Operation operation) { ) public static ConcatenateDataset create(Scope scope, Operand inputDataset, Operand anotherDataset, List> outputTypes, - List outputShapes) { + List outputShapes, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "ConcatenateDataset"); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(anotherDataset.asOutput()); @@ -86,9 +87,26 @@ public static ConcatenateDataset create(Scope scope, Operand in outputShapesArray[i] = outputShapes.get(i); } opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } + } + } return new ConcatenateDataset(opBuilder.build()); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -104,6 +122,27 @@ public Output asOutput() { return (Output) handle; } + /** + * Optional attributes for {@link org.tensorflow.op.data.ConcatenateDataset} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + @OpInputsMetadata( outputsClass = ConcatenateDataset.class ) @@ -128,13 +167,19 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new ConcatenateDataset(op), op, Arrays.asList("output_types", "output_shapes")); + super(new ConcatenateDataset(op), op, Arrays.asList("output_types", "output_shapes", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); anotherDataset = (Operand) op.input(inputIndex++); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToSingleElement.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToSingleElement.java index bf038dd2493..841bb8e94a4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToSingleElement.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetToSingleElement.java @@ -71,13 +71,14 @@ public DatasetToSingleElement(Operation operation) { * @param dataset A handle to a dataset that contains a single element. * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of DatasetToSingleElement */ @Endpoint( describeByClass = true ) public static DatasetToSingleElement create(Scope scope, Operand dataset, - List> outputTypes, List outputShapes) { + List> outputTypes, List outputShapes, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "DatasetToSingleElement"); opBuilder.addInput(dataset.asOutput()); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); @@ -86,9 +87,26 @@ public static DatasetToSingleElement create(Scope scope, Operand> iterator() { return (Iterator) components.iterator(); } + /** + * Optional attributes for {@link org.tensorflow.op.data.DatasetToSingleElement} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + @OpInputsMetadata( outputsClass = DatasetToSingleElement.class ) @@ -123,12 +162,18 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new DatasetToSingleElement(op), op, Arrays.asList("output_types", "output_shapes")); + super(new DatasetToSingleElement(op), op, Arrays.asList("output_types", "output_shapes", "metadata")); int inputIndex = 0; dataset = (Operand) op.input(inputIndex++); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FilterDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FilterDataset.java index 473ffa03ea3..dd6e6e6aa25 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FilterDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FilterDataset.java @@ -78,6 +78,7 @@ public FilterDataset(Operation operation) { * @param predicate A function returning a scalar boolean. * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of FilterDataset */ @Endpoint( @@ -85,7 +86,7 @@ public FilterDataset(Operation operation) { ) public static FilterDataset create(Scope scope, Operand inputDataset, Iterable> otherArguments, ConcreteFunction predicate, - List> outputTypes, List outputShapes) { + List> outputTypes, List outputShapes, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "FilterDataset"); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInputList(Operands.asOutputs(otherArguments)); @@ -96,9 +97,26 @@ public static FilterDataset create(Scope scope, Operand inputDa outputShapesArray[i] = outputShapes.get(i); } opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } + } + } return new FilterDataset(opBuilder.build()); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -114,6 +132,27 @@ public Output asOutput() { return (Output) handle; } + /** + * Optional attributes for {@link org.tensorflow.op.data.FilterDataset} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + @OpInputsMetadata( outputsClass = FilterDataset.class ) @@ -144,8 +183,13 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new FilterDataset(op), op, Arrays.asList("Targuments", "output_types", "output_shapes")); + super(new FilterDataset(op), op, Arrays.asList("Targuments", "output_types", "output_shapes", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); int otherArgumentsLength = op.inputListLength("other_arguments"); @@ -154,6 +198,7 @@ public Inputs(GraphOperation op) { Targuments = op.attributes().getAttrTypeList("Targuments"); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FixedLengthRecordDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FixedLengthRecordDataset.java index 5613dfd373c..59487ccabe8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FixedLengthRecordDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FixedLengthRecordDataset.java @@ -69,6 +69,7 @@ public FixedLengthRecordDataset(Operation operation) { * @param footerBytes The footerBytes value * @param bufferSize The bufferSize value * @param compressionType The compressionType value + * @param options carries optional attribute values * @return a new instance of FixedLengthRecordDataset */ @Endpoint( @@ -76,7 +77,7 @@ public FixedLengthRecordDataset(Operation operation) { ) public static FixedLengthRecordDataset create(Scope scope, Operand filenames, Operand headerBytes, Operand recordBytes, Operand footerBytes, - Operand bufferSize, Operand compressionType) { + Operand bufferSize, Operand compressionType, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "FixedLengthRecordDataset"); opBuilder.addInput(filenames.asOutput()); opBuilder.addInput(headerBytes.asOutput()); @@ -84,9 +85,26 @@ public static FixedLengthRecordDataset create(Scope scope, Operand file opBuilder.addInput(footerBytes.asOutput()); opBuilder.addInput(bufferSize.asOutput()); opBuilder.addInput(compressionType.asOutput()); + if (options != null) { + for (Options opts : options) { + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } + } + } return new FixedLengthRecordDataset(opBuilder.build()); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -102,6 +120,27 @@ public Output asOutput() { return (Output) handle; } + /** + * Optional attributes for {@link org.tensorflow.op.data.FixedLengthRecordDataset} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + @OpInputsMetadata( outputsClass = FixedLengthRecordDataset.class ) @@ -136,8 +175,13 @@ public static class Inputs extends RawOpInputs { */ public final Operand compressionType; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new FixedLengthRecordDataset(op), op, Arrays.asList()); + super(new FixedLengthRecordDataset(op), op, Arrays.asList("metadata")); int inputIndex = 0; filenames = (Operand) op.input(inputIndex++); headerBytes = (Operand) op.input(inputIndex++); @@ -145,6 +189,7 @@ public Inputs(GraphOperation op) { footerBytes = (Operand) op.input(inputIndex++); bufferSize = (Operand) op.input(inputIndex++); compressionType = (Operand) op.input(inputIndex++); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FlatMapDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FlatMapDataset.java index 02fda22a75d..792c7eba2e2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FlatMapDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/FlatMapDataset.java @@ -76,6 +76,7 @@ public FlatMapDataset(Operation operation) { * {@code output_types} and {@code output_shapes}. * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of FlatMapDataset */ @Endpoint( @@ -83,7 +84,7 @@ public FlatMapDataset(Operation operation) { ) public static FlatMapDataset create(Scope scope, Operand inputDataset, Iterable> otherArguments, ConcreteFunction f, - List> outputTypes, List outputShapes) { + List> outputTypes, List outputShapes, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "FlatMapDataset"); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInputList(Operands.asOutputs(otherArguments)); @@ -94,9 +95,26 @@ public static FlatMapDataset create(Scope scope, Operand inputD outputShapesArray[i] = outputShapes.get(i); } opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } + } + } return new FlatMapDataset(opBuilder.build()); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -112,6 +130,27 @@ public Output asOutput() { return (Output) handle; } + /** + * Optional attributes for {@link org.tensorflow.op.data.FlatMapDataset} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + @OpInputsMetadata( outputsClass = FlatMapDataset.class ) @@ -141,8 +180,13 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new FlatMapDataset(op), op, Arrays.asList("Targuments", "output_types", "output_shapes")); + super(new FlatMapDataset(op), op, Arrays.asList("Targuments", "output_types", "output_shapes", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); int otherArgumentsLength = op.inputListLength("other_arguments"); @@ -151,6 +195,7 @@ public Inputs(GraphOperation op) { Targuments = op.attributes().getAttrTypeList("Targuments"); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/GeneratorDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/GeneratorDataset.java index 3b5d55c07b9..fe406328fa5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/GeneratorDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/GeneratorDataset.java @@ -74,6 +74,7 @@ public GeneratorDataset(Operation operation) { * @param finalizeFunc The value of the finalizeFunc attribute * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of GeneratorDataset */ @Endpoint( @@ -82,7 +83,7 @@ public GeneratorDataset(Operation operation) { public static GeneratorDataset create(Scope scope, Iterable> initFuncOtherArgs, Iterable> nextFuncOtherArgs, Iterable> finalizeFuncOtherArgs, ConcreteFunction initFunc, ConcreteFunction nextFunc, ConcreteFunction finalizeFunc, - List> outputTypes, List outputShapes) { + List> outputTypes, List outputShapes, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "GeneratorDataset"); opBuilder.addInputList(Operands.asOutputs(initFuncOtherArgs)); opBuilder.addInputList(Operands.asOutputs(nextFuncOtherArgs)); @@ -96,9 +97,26 @@ public static GeneratorDataset create(Scope scope, Iterable> initFunc outputShapesArray[i] = outputShapes.get(i); } opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } + } + } return new GeneratorDataset(opBuilder.build()); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -114,6 +132,27 @@ public Output asOutput() { return (Output) handle; } + /** + * Optional attributes for {@link org.tensorflow.op.data.GeneratorDataset} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + @OpInputsMetadata( outputsClass = GeneratorDataset.class ) @@ -158,8 +197,13 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new GeneratorDataset(op), op, Arrays.asList("Tinit_func_args", "Tnext_func_args", "Tfinalize_func_args", "output_types", "output_shapes")); + super(new GeneratorDataset(op), op, Arrays.asList("Tinit_func_args", "Tnext_func_args", "Tfinalize_func_args", "output_types", "output_shapes", "metadata")); int inputIndex = 0; int initFuncOtherArgsLength = op.inputListLength("init_func_other_args"); initFuncOtherArgs = Arrays.asList((Operand[]) op.inputList(inputIndex, initFuncOtherArgsLength)); @@ -175,6 +219,7 @@ public Inputs(GraphOperation op) { TfinalizeFuncArgs = op.attributes().getAttrTypeList("Tfinalize_func_args"); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/GroupByWindowDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/GroupByWindowDataset.java index 13065f77464..d637688841e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/GroupByWindowDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/GroupByWindowDataset.java @@ -77,6 +77,7 @@ public GroupByWindowDataset(Operation operation) { * @param windowSizeFunc The value of the windowSizeFunc attribute * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of GroupByWindowDataset */ @Endpoint( @@ -86,7 +87,7 @@ public static GroupByWindowDataset create(Scope scope, Operand Iterable> keyFuncOtherArguments, Iterable> reduceFuncOtherArguments, Iterable> windowSizeFuncOtherArguments, ConcreteFunction keyFunc, ConcreteFunction reduceFunc, ConcreteFunction windowSizeFunc, - List> outputTypes, List outputShapes) { + List> outputTypes, List outputShapes, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "GroupByWindowDataset"); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInputList(Operands.asOutputs(keyFuncOtherArguments)); @@ -101,9 +102,26 @@ public static GroupByWindowDataset create(Scope scope, Operand outputShapesArray[i] = outputShapes.get(i); } opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } + } + } return new GroupByWindowDataset(opBuilder.build()); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -119,6 +137,27 @@ public Output asOutput() { return (Output) handle; } + /** + * Optional attributes for {@link org.tensorflow.op.data.GroupByWindowDataset} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. 
+ * + * @param metadata the metadata option + * @return this Options instance. + */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + @OpInputsMetadata( outputsClass = GroupByWindowDataset.class ) @@ -168,8 +207,13 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new GroupByWindowDataset(op), op, Arrays.asList("Tkey_func_other_arguments", "Treduce_func_other_arguments", "Twindow_size_func_other_arguments", "output_types", "output_shapes")); + super(new GroupByWindowDataset(op), op, Arrays.asList("Tkey_func_other_arguments", "Treduce_func_other_arguments", "Twindow_size_func_other_arguments", "output_types", "output_shapes", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); int keyFuncOtherArgumentsLength = op.inputListLength("key_func_other_arguments"); @@ -186,6 +230,7 @@ public Inputs(GraphOperation op) { TwindowSizeFuncOtherArguments = op.attributes().getAttrTypeList("Twindow_size_func_other_arguments"); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/InterleaveDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/InterleaveDataset.java index 4cc5b8d7fdf..0cbb0ceb8dd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/InterleaveDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/InterleaveDataset.java @@ -81,6 +81,7 @@ public InterleaveDataset(Operation operation) { * {@code output_types} and {@code output_shapes}. * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of InterleaveDataset */ @Endpoint( @@ -88,7 +89,8 @@ public InterleaveDataset(Operation operation) { ) public static InterleaveDataset create(Scope scope, Operand inputDataset, Iterable> otherArguments, Operand cycleLength, Operand blockLength, - ConcreteFunction f, List> outputTypes, List outputShapes) { + ConcreteFunction f, List> outputTypes, List outputShapes, + Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "InterleaveDataset"); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInputList(Operands.asOutputs(otherArguments)); @@ -101,9 +103,26 @@ public static InterleaveDataset create(Scope scope, Operand inp outputShapesArray[i] = outputShapes.get(i); } opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } + } + } return new InterleaveDataset(opBuilder.build()); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. 
* @@ -119,6 +138,27 @@ public Output asOutput() { return (Output) handle; } + /** + * Optional attributes for {@link org.tensorflow.op.data.InterleaveDataset} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + @OpInputsMetadata( outputsClass = InterleaveDataset.class ) @@ -158,8 +198,13 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new InterleaveDataset(op), op, Arrays.asList("Targuments", "output_types", "output_shapes")); + super(new InterleaveDataset(op), op, Arrays.asList("Targuments", "output_types", "output_shapes", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); int otherArgumentsLength = op.inputListLength("other_arguments"); @@ -170,6 +215,7 @@ public Inputs(GraphOperation op) { Targuments = op.attributes().getAttrTypeList("Targuments"); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LegacyParallelInterleaveDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LegacyParallelInterleaveDataset.java index e80cc1d2319..a9c6af2baeb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LegacyParallelInterleaveDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LegacyParallelInterleaveDataset.java @@ -115,6 +115,9 @@ public static LegacyParallelInterleaveDataset create(Scope scope, if (opts.deterministic != null) { opBuilder.setAttr("deterministic", opts.deterministic); } + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } } } return new LegacyParallelInterleaveDataset(opBuilder.build()); @@ -130,6 +133,16 @@ public static Options deterministic(String deterministic) { return new Options().deterministic(deterministic); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -151,6 +164,8 @@ public Output asOutput() { public static class Options { private String deterministic; + private String metadata; + private Options() { } @@ -164,6 +179,17 @@ public Options deterministic(String deterministic) { this.deterministic = deterministic; return this; } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } } @OpInputsMetadata( @@ -220,8 +246,13 @@ public static class Inputs extends RawOpInputs */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new LegacyParallelInterleaveDataset(op), op, Arrays.asList("deterministic", "Targuments", "output_types", "output_shapes")); + super(new LegacyParallelInterleaveDataset(op), op, Arrays.asList("deterministic", "Targuments", "output_types", "output_shapes", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); int otherArgumentsLength = op.inputListLength("other_arguments"); @@ -235,6 +266,7 @@ public Inputs(GraphOperation op) { Targuments = op.attributes().getAttrTypeList("Targuments"); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MapAndBatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MapAndBatchDataset.java index 6d3f2f0cb84..d7e76fba2b1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MapAndBatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MapAndBatchDataset.java @@ -114,6 +114,9 @@ public static MapAndBatchDataset create(Scope scope, Operand in if (opts.preserveCardinality != null) { opBuilder.setAttr("preserve_cardinality", opts.preserveCardinality); } + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } } } return new MapAndBatchDataset(opBuilder.build()); @@ -129,6 +132,16 @@ public static Options preserveCardinality(Boolean preserveCardinality) { return new Options().preserveCardinality(preserveCardinality); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -150,6 +163,8 @@ public Output asOutput() { public static class Options { private Boolean preserveCardinality; + private String metadata; + private Options() { } @@ -163,6 +178,17 @@ public Options preserveCardinality(Boolean preserveCardinality) { this.preserveCardinality = preserveCardinality; return this; } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } } @OpInputsMetadata( @@ -220,8 +246,13 @@ public static class Inputs extends RawOpInputs { */ public final boolean preserveCardinality; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new MapAndBatchDataset(op), op, Arrays.asList("Targuments", "output_types", "output_shapes", "preserve_cardinality")); + super(new MapAndBatchDataset(op), op, Arrays.asList("Targuments", "output_types", "output_shapes", "preserve_cardinality", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); int otherArgumentsLength = op.inputListLength("other_arguments"); @@ -234,6 +265,7 @@ public Inputs(GraphOperation op) { outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); preserveCardinality = op.attributes().getAttrBool("preserve_cardinality"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MapDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MapDataset.java index 41ed07c9da3..c5b60833b21 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MapDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/MapDataset.java @@ -98,6 +98,9 @@ public static MapDataset create(Scope scope, Operand inputDatas if (opts.preserveCardinality != null) { opBuilder.setAttr("preserve_cardinality", opts.preserveCardinality); } + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } } } return new MapDataset(opBuilder.build()); @@ -123,6 +126,16 @@ public static Options preserveCardinality(Boolean preserveCardinality) { return new Options().preserveCardinality(preserveCardinality); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -146,6 +159,8 @@ public static class Options { private Boolean preserveCardinality; + private String metadata; + private Options() { } @@ -170,6 +185,17 @@ public Options preserveCardinality(Boolean preserveCardinality) { this.preserveCardinality = preserveCardinality; return this; } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } } @OpInputsMetadata( @@ -211,8 +237,13 @@ public static class Inputs extends RawOpInputs { */ public final boolean preserveCardinality; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new MapDataset(op), op, Arrays.asList("Targuments", "output_types", "output_shapes", "use_inter_op_parallelism", "preserve_cardinality")); + super(new MapDataset(op), op, Arrays.asList("Targuments", "output_types", "output_shapes", "use_inter_op_parallelism", "preserve_cardinality", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); int otherArgumentsLength = op.inputListLength("other_arguments"); @@ -223,6 +254,7 @@ public Inputs(GraphOperation op) { outputShapes = op.attributes().getAttrShapeList("output_shapes"); useInterOpParallelism = op.attributes().getAttrBool("use_inter_op_parallelism"); preserveCardinality = op.attributes().getAttrBool("preserve_cardinality"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionsDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionsDataset.java index 63b202eb184..d93b7a8dfd9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionsDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/OptionsDataset.java @@ -69,14 +69,15 @@ public OptionsDataset(Operation operation) { * @param serializedOptions A {@code tf.string} scalar {@code tf.Tensor} of serialized {@code tf.data.Options} protocol buffer. * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of OptionsDataset */ @Endpoint( describeByClass = true ) public static OptionsDataset create(Scope scope, Operand inputDataset, - String serializedOptions, List> outputTypes, - List outputShapes) { + String serializedOptions, List> outputTypes, List outputShapes, + Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "OptionsDataset"); opBuilder.addInput(inputDataset.asOutput()); opBuilder.setAttr("serialized_options", serializedOptions); @@ -86,9 +87,26 @@ public static OptionsDataset create(Scope scope, Operand inputD outputShapesArray[i] = outputShapes.get(i); } opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } + } + } return new OptionsDataset(opBuilder.build()); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -104,6 +122,27 @@ public Output asOutput() { return (Output) handle; } + /** + * Optional attributes for {@link org.tensorflow.op.data.OptionsDataset} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + @OpInputsMetadata( outputsClass = OptionsDataset.class ) @@ -128,13 +167,19 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new OptionsDataset(op), op, Arrays.asList("serialized_options", "output_types", "output_shapes")); + super(new OptionsDataset(op), op, Arrays.asList("serialized_options", "output_types", "output_shapes", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); serializedOptions = op.attributes().getAttrString("serialized_options"); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PaddedBatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PaddedBatchDataset.java index 5941ac19a16..d054f43fb6c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PaddedBatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PaddedBatchDataset.java @@ -105,6 +105,9 @@ public static PaddedBatchDataset create(Scope scope, Operand in if (opts.parallelCopy != null) { opBuilder.setAttr("parallel_copy", opts.parallelCopy); } + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } } } return new PaddedBatchDataset(opBuilder.build()); @@ -120,6 +123,16 @@ public static Options parallelCopy(Boolean parallelCopy) { return new Options().parallelCopy(parallelCopy); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -141,6 +154,8 @@ public Output asOutput() { public static class Options { private Boolean parallelCopy; + private String metadata; + private Options() { } @@ -154,6 +169,17 @@ public Options parallelCopy(Boolean parallelCopy) { this.parallelCopy = parallelCopy; return this; } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } } @OpInputsMetadata( @@ -206,8 +232,13 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new PaddedBatchDataset(op), op, Arrays.asList("parallel_copy", "Toutput_types", "output_shapes")); + super(new PaddedBatchDataset(op), op, Arrays.asList("parallel_copy", "Toutput_types", "output_shapes", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); batchSize = (Operand) op.input(inputIndex++); @@ -221,6 +252,7 @@ public Inputs(GraphOperation op) { parallelCopy = op.attributes().getAttrBool("parallel_copy"); ToutputTypes = op.attributes().getAttrTypeList("Toutput_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelBatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelBatchDataset.java index 08e3928b4b6..f88c7b95a17 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelBatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelBatchDataset.java @@ -101,6 +101,9 @@ public static ParallelBatchDataset create(Scope scope, Operand if (opts.deterministic != null) { opBuilder.setAttr("deterministic", opts.deterministic); } + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } } } return new ParallelBatchDataset(opBuilder.build()); @@ -126,6 +129,16 @@ public static Options deterministic(String deterministic) { return new Options().deterministic(deterministic); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -149,6 +162,8 @@ public static class Options { private String deterministic; + private String metadata; + private Options() { } @@ -173,6 +188,17 @@ public Options deterministic(String deterministic) { this.deterministic = deterministic; return this; } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } } @OpInputsMetadata( @@ -219,8 +245,13 @@ public static class Inputs extends RawOpInputs { */ public final String deterministic; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new ParallelBatchDataset(op), op, Arrays.asList("parallel_copy", "output_types", "output_shapes", "deterministic")); + super(new ParallelBatchDataset(op), op, Arrays.asList("parallel_copy", "output_types", "output_shapes", "deterministic", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); batchSize = (Operand) op.input(inputIndex++); @@ -230,6 +261,7 @@ public Inputs(GraphOperation op) { outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); deterministic = op.attributes().getAttrString("deterministic"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelInterleaveDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelInterleaveDataset.java index edbe27c6f73..f4d5eec5b10 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelInterleaveDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelInterleaveDataset.java @@ -129,6 +129,9 @@ public static ParallelInterleaveDataset create(Scope scope, Operand asOutput() { public static class Options { private String deterministic; + private String metadata; + private Options() { } @@ -186,6 +201,17 @@ public Options deterministic(String deterministic) { this.deterministic = deterministic; return this; } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } } @OpInputsMetadata( @@ -259,8 +285,13 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new ParallelInterleaveDataset(op), op, Arrays.asList("deterministic", "Targuments", "output_types", "output_shapes")); + super(new ParallelInterleaveDataset(op), op, Arrays.asList("deterministic", "Targuments", "output_types", "output_shapes", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); int otherArgumentsLength = op.inputListLength("other_arguments"); @@ -275,6 +306,7 @@ public Inputs(GraphOperation op) { Targuments = op.attributes().getAttrTypeList("Targuments"); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelMapDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelMapDataset.java index 1768a7c488d..b3756654dc4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelMapDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ParallelMapDataset.java @@ -107,6 +107,9 @@ public static ParallelMapDataset create(Scope scope, Operand in if (opts.preserveCardinality != null) { opBuilder.setAttr("preserve_cardinality", opts.preserveCardinality); } + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } } } return new ParallelMapDataset(opBuilder.build()); @@ -142,6 +145,16 @@ public static Options preserveCardinality(Boolean preserveCardinality) { return new Options().preserveCardinality(preserveCardinality); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -167,6 +180,8 @@ public static class Options { private Boolean preserveCardinality; + private String metadata; + private Options() { } @@ -202,6 +217,17 @@ public Options preserveCardinality(Boolean preserveCardinality) { this.preserveCardinality = preserveCardinality; return this; } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } } @OpInputsMetadata( @@ -254,8 +280,13 @@ public static class Inputs extends RawOpInputs { */ public final boolean preserveCardinality; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new ParallelMapDataset(op), op, Arrays.asList("Targuments", "output_types", "output_shapes", "use_inter_op_parallelism", "deterministic", "preserve_cardinality")); + super(new ParallelMapDataset(op), op, Arrays.asList("Targuments", "output_types", "output_shapes", "use_inter_op_parallelism", "deterministic", "preserve_cardinality", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); int otherArgumentsLength = op.inputListLength("other_arguments"); @@ -268,6 +299,7 @@ public Inputs(GraphOperation op) { useInterOpParallelism = op.attributes().getAttrBool("use_inter_op_parallelism"); deterministic = op.attributes().getAttrString("deterministic"); preserveCardinality = op.attributes().getAttrBool("preserve_cardinality"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PrefetchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PrefetchDataset.java index 8334e1aa890..9a13ed3e2d1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PrefetchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/PrefetchDataset.java @@ -100,6 +100,9 @@ public static PrefetchDataset create(Scope scope, Operand input if (opts.bufferSizeMin != null) { opBuilder.setAttr("buffer_size_min", opts.bufferSizeMin); } + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } } } return new PrefetchDataset(opBuilder.build()); @@ -135,6 +138,16 @@ public static Options bufferSizeMin(Long bufferSizeMin) { return new Options().bufferSizeMin(bufferSizeMin); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -160,6 +173,8 @@ public static class Options { private Long bufferSizeMin; + private String metadata; + private Options() { } @@ -195,6 +210,17 @@ public Options bufferSizeMin(Long bufferSizeMin) { this.bufferSizeMin = bufferSizeMin; return this; } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } } @OpInputsMetadata( @@ -237,8 +263,13 @@ public static class Inputs extends RawOpInputs { */ public final long bufferSizeMin; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new PrefetchDataset(op), op, Arrays.asList("output_types", "output_shapes", "slack_period", "legacy_autotune", "buffer_size_min")); + super(new PrefetchDataset(op), op, Arrays.asList("output_types", "output_shapes", "slack_period", "legacy_autotune", "buffer_size_min", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); bufferSize = (Operand) op.input(inputIndex++); @@ -247,6 +278,7 @@ public Inputs(GraphOperation op) { slackPeriod = op.attributes().getAttrInt("slack_period"); legacyAutotune = op.attributes().getAttrBool("legacy_autotune"); bufferSizeMin = op.attributes().getAttrInt("buffer_size_min"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RandomDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RandomDataset.java index 9bab5a5f5b3..384354654a0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RandomDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RandomDataset.java @@ -80,13 +80,14 @@ public RandomDataset(Operation operation) { * @param seed2 A second scalar seed to avoid seed collision. * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of RandomDataset */ @Endpoint( describeByClass = true ) public static RandomDataset create(Scope scope, Operand seed, Operand seed2, - List> outputTypes, List outputShapes) { + List> outputTypes, List outputShapes, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RandomDataset"); opBuilder.addInput(seed.asOutput()); opBuilder.addInput(seed2.asOutput()); @@ -96,9 +97,26 @@ public static RandomDataset create(Scope scope, Operand seed, Operand asOutput() { return (Output) handle; } + /** + * Optional attributes for {@link org.tensorflow.op.data.RandomDataset} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + @OpInputsMetadata( outputsClass = RandomDataset.class ) @@ -140,13 +179,19 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new RandomDataset(op), op, Arrays.asList("output_types", "output_shapes")); + super(new RandomDataset(op), op, Arrays.asList("output_types", "output_shapes", "metadata")); int inputIndex = 0; seed = (Operand) op.input(inputIndex++); seed2 = (Operand) op.input(inputIndex++); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RangeDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RangeDataset.java index fe3b9afedfe..0276ef3c7d5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RangeDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RangeDataset.java @@ -71,13 +71,15 @@ public RangeDataset(Operation operation) { * @param step corresponds to step in python's xrange(). * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of RangeDataset */ @Endpoint( describeByClass = true ) public static RangeDataset create(Scope scope, Operand start, Operand stop, - Operand step, List> outputTypes, List outputShapes) { + Operand step, List> outputTypes, List outputShapes, + Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RangeDataset"); opBuilder.addInput(start.asOutput()); opBuilder.addInput(stop.asOutput()); @@ -88,9 +90,26 @@ public static RangeDataset create(Scope scope, Operand start, Operand asOutput() { return (Output) handle; } + /** + * Optional attributes for {@link org.tensorflow.op.data.RangeDataset} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + @OpInputsMetadata( outputsClass = RangeDataset.class ) @@ -135,14 +175,20 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new RangeDataset(op), op, Arrays.asList("output_types", "output_shapes")); + super(new RangeDataset(op), op, Arrays.asList("output_types", "output_shapes", "metadata")); int inputIndex = 0; start = (Operand) op.input(inputIndex++); stop = (Operand) op.input(inputIndex++); step = (Operand) op.input(inputIndex++); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ReduceDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ReduceDataset.java index 26d4cdb1210..997e2af4b1c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ReduceDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ReduceDataset.java @@ -103,6 +103,9 @@ public static ReduceDataset create(Scope scope, Operand inputDa if (opts.useInterOpParallelism != null) { opBuilder.setAttr("use_inter_op_parallelism", opts.useInterOpParallelism); } + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } } } return new ReduceDataset(opBuilder.build()); @@ -118,6 +121,16 @@ public static Options useInterOpParallelism(Boolean useInterOpParallelism) { return new Options().useInterOpParallelism(useInterOpParallelism); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets components. * @@ -139,6 +152,8 @@ public Iterator> iterator() { public static class Options { private Boolean useInterOpParallelism; + private String metadata; + private Options() { } @@ -152,6 +167,17 @@ public Options useInterOpParallelism(Boolean useInterOpParallelism) { this.useInterOpParallelism = useInterOpParallelism; return this; } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } } @OpInputsMetadata( @@ -199,8 +225,13 @@ public static class Inputs extends RawOpInputs { */ public final boolean useInterOpParallelism; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new ReduceDataset(op), op, Arrays.asList("Tstate", "Targuments", "output_types", "output_shapes", "use_inter_op_parallelism")); + super(new ReduceDataset(op), op, Arrays.asList("Tstate", "Targuments", "output_types", "output_shapes", "use_inter_op_parallelism", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); int initialStateLength = op.inputListLength("initial_state"); @@ -214,6 +245,7 @@ public Inputs(GraphOperation op) { outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); useInterOpParallelism = op.attributes().getAttrBool("use_inter_op_parallelism"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RegisterDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RegisterDataset.java index 73adca2a8a1..466a00ae942 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RegisterDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RegisterDataset.java @@ -66,21 +66,40 @@ public RegisterDataset(Operation operation) { * @param address The address value * @param protocol The protocol value * @param externalStatePolicy The value of the externalStatePolicy attribute + * @param options carries optional attribute values * @return a new instance of RegisterDataset */ @Endpoint( describeByClass = true ) public static RegisterDataset create(Scope scope, Operand dataset, - Operand address, Operand protocol, Long externalStatePolicy) { + Operand address, Operand protocol, Long externalStatePolicy, + Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RegisterDataset"); opBuilder.addInput(dataset.asOutput()); opBuilder.addInput(address.asOutput()); opBuilder.addInput(protocol.asOutput()); opBuilder.setAttr("external_state_policy", externalStatePolicy); + if (options != null) { + for (Options opts : options) { + if (opts.elementSpec != null) { + opBuilder.setAttr("element_spec", opts.elementSpec); + } + } + } return new RegisterDataset(opBuilder.build()); } + /** + * Sets the elementSpec option. + * + * @param elementSpec the elementSpec option + * @return this Options instance. + */ + public static Options elementSpec(String elementSpec) { + return new Options().elementSpec(elementSpec); + } + /** * Gets datasetId. * @@ -95,6 +114,27 @@ public Output asOutput() { return datasetId; } + /** + * Optional attributes for {@link org.tensorflow.op.data.RegisterDataset} + */ + public static class Options { + private String elementSpec; + + private Options() { + } + + /** + * Sets the elementSpec option. + * + * @param elementSpec the elementSpec option + * @return this Options instance. 
+ */ + public Options elementSpec(String elementSpec) { + this.elementSpec = elementSpec; + return this; + } + } + @OpInputsMetadata( outputsClass = RegisterDataset.class ) @@ -119,13 +159,19 @@ public static class Inputs extends RawOpInputs { */ public final long externalStatePolicy; + /** + * The elementSpec attribute + */ + public final String elementSpec; + public Inputs(GraphOperation op) { - super(new RegisterDataset(op), op, Arrays.asList("external_state_policy")); + super(new RegisterDataset(op), op, Arrays.asList("external_state_policy", "element_spec")); int inputIndex = 0; dataset = (Operand) op.input(inputIndex++); address = (Operand) op.input(inputIndex++); protocol = (Operand) op.input(inputIndex++); externalStatePolicy = op.attributes().getAttrInt("external_state_policy"); + elementSpec = op.attributes().getAttrString("element_spec"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RepeatDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RepeatDataset.java index 5139087b24f..5a192b6a443 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RepeatDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RepeatDataset.java @@ -71,13 +71,15 @@ public RepeatDataset(Operation operation) { * be repeated. A value of {@code -1} indicates that it should be repeated infinitely. * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of RepeatDataset */ @Endpoint( describeByClass = true ) public static RepeatDataset create(Scope scope, Operand inputDataset, - Operand count, List> outputTypes, List outputShapes) { + Operand count, List> outputTypes, List outputShapes, + Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RepeatDataset"); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(count.asOutput()); @@ -87,9 +89,26 @@ public static RepeatDataset create(Scope scope, Operand inputDa outputShapesArray[i] = outputShapes.get(i); } opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } + } + } return new RepeatDataset(opBuilder.build()); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -105,6 +124,27 @@ public Output asOutput() { return (Output) handle; } + /** + * Optional attributes for {@link org.tensorflow.op.data.RepeatDataset} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + @OpInputsMetadata( outputsClass = RepeatDataset.class ) @@ -130,13 +170,19 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new RepeatDataset(op), op, Arrays.asList("output_types", "output_shapes")); + super(new RepeatDataset(op), op, Arrays.asList("output_types", "output_shapes", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); count = (Operand) op.input(inputIndex++); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SaveDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SaveDataset.java index 709cf5a7037..4ada7941ad5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SaveDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SaveDataset.java @@ -18,11 +18,14 @@ package org.tensorflow.op.data; import java.util.Arrays; +import java.util.List; import org.tensorflow.ConcreteFunction; import org.tensorflow.GraphOperation; import org.tensorflow.Operand; import org.tensorflow.Operation; import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Operands; import org.tensorflow.op.RawOp; import org.tensorflow.op.RawOpInputs; @@ -36,7 +39,7 @@ import org.tensorflow.types.family.TType; /** - * The SaveDataset operation + * The SaveDatasetV2 operation */ @OpMetadata( opType = SaveDataset.OP_NAME, @@ -45,24 +48,31 @@ @Operator( group = "data" ) -public final class SaveDataset extends RawOp { +public final class SaveDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "SaveDataset"; + public static final String OP_NAME = "SaveDatasetV2"; + private Output handle; + + @SuppressWarnings("unchecked") public SaveDataset(Operation operation) { super(operation, OP_NAME); + int outputIdx = 0; + handle = operation.output(outputIdx++); } /** - * Factory method to create a class wrapping a new SaveDataset operation. + * Factory method to create a class wrapping a new SaveDatasetV2 operation. * * @param scope current scope * @param inputDataset The inputDataset value * @param path The path value * @param shardFuncOtherArgs The shardFuncOtherArgs value * @param shardFunc The value of the shardFunc attribute + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute * @param options carries optional attribute values * @return a new instance of SaveDataset */ @@ -71,12 +81,18 @@ public SaveDataset(Operation operation) { ) public static SaveDataset create(Scope scope, Operand inputDataset, Operand path, Iterable> shardFuncOtherArgs, ConcreteFunction shardFunc, - Options... options) { + List> outputTypes, List outputShapes, Options... 
options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "SaveDataset"); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(path.asOutput()); opBuilder.addInputList(Operands.asOutputs(shardFuncOtherArgs)); opBuilder.setAttr("shard_func", shardFunc); + opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); + Shape[] outputShapesArray = new Shape[outputShapes.size()]; + for (int i = 0 ; i < outputShapesArray.length ; i++) { + outputShapesArray[i] = outputShapes.get(i); + } + opBuilder.setAttr("output_shapes", outputShapesArray); if (options != null) { for (Options opts : options) { if (opts.compression != null) { @@ -110,6 +126,21 @@ public static Options useShardFunc(Boolean useShardFunc) { return new Options().useShardFunc(useShardFunc); } + /** + * Gets handle. + * + * @return handle. + */ + public Output handle() { + return handle; + } + + @Override + @SuppressWarnings("unchecked") + public Output asOutput() { + return (Output) handle; + } + /** * Optional attributes for {@link org.tensorflow.op.data.SaveDataset} */ @@ -178,8 +209,18 @@ public static class Inputs extends RawOpInputs { */ public final DataType[] TshardFuncArgs; + /** + * The outputTypes attribute + */ + public final DataType[] outputTypes; + + /** + * The outputShapes attribute + */ + public final Shape[] outputShapes; + public Inputs(GraphOperation op) { - super(new SaveDataset(op), op, Arrays.asList("compression", "use_shard_func", "Tshard_func_args")); + super(new SaveDataset(op), op, Arrays.asList("compression", "use_shard_func", "Tshard_func_args", "output_types", "output_shapes")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); path = (Operand) op.input(inputIndex++); @@ -189,6 +230,8 @@ public Inputs(GraphOperation op) { compression = op.attributes().getAttrString("compression"); useShardFunc = op.attributes().getAttrBool("use_shard_func"); TshardFuncArgs = op.attributes().getAttrTypeList("Tshard_func_args"); + outputTypes = op.attributes().getAttrTypeList("output_types"); + outputShapes = op.attributes().getAttrShapeList("output_shapes"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ScanDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ScanDataset.java index dcb6a1d8309..82bfa42bb7b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ScanDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ScanDataset.java @@ -100,6 +100,9 @@ public static ScanDataset create(Scope scope, Operand inputData if (opts.useDefaultDevice != null) { opBuilder.setAttr("use_default_device", opts.useDefaultDevice); } + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } } } return new ScanDataset(opBuilder.build()); @@ -125,6 +128,16 @@ public static Options useDefaultDevice(Boolean useDefaultDevice) { return new Options().useDefaultDevice(useDefaultDevice); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -148,6 +161,8 @@ public static class Options { private Boolean useDefaultDevice; + private String metadata; + private Options() { } @@ -172,6 +187,17 @@ public Options useDefaultDevice(Boolean useDefaultDevice) { this.useDefaultDevice = useDefaultDevice; return this; } + + /** + * Sets the metadata option. 
+ * + * @param metadata the metadata option + * @return this Options instance. + */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } } @OpInputsMetadata( @@ -223,8 +249,13 @@ public static class Inputs extends RawOpInputs { */ public final boolean useDefaultDevice; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new ScanDataset(op), op, Arrays.asList("Tstate", "Targuments", "output_types", "output_shapes", "preserve_cardinality", "use_default_device")); + super(new ScanDataset(op), op, Arrays.asList("Tstate", "Targuments", "output_types", "output_shapes", "preserve_cardinality", "use_default_device", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); int initialStateLength = op.inputListLength("initial_state"); @@ -239,6 +270,7 @@ public Inputs(GraphOperation op) { outputShapes = op.attributes().getAttrShapeList("output_shapes"); preserveCardinality = op.attributes().getAttrBool("preserve_cardinality"); useDefaultDevice = op.attributes().getAttrBool("use_default_device"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShardDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShardDataset.java index 951878e72d4..a8e82658e87 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShardDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShardDataset.java @@ -95,6 +95,9 @@ public static ShardDataset create(Scope scope, Operand inputDat if (opts.requireNonEmpty != null) { opBuilder.setAttr("require_non_empty", opts.requireNonEmpty); } + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } } } return new ShardDataset(opBuilder.build()); @@ -110,6 +113,16 @@ public static Options requireNonEmpty(Boolean requireNonEmpty) { return new Options().requireNonEmpty(requireNonEmpty); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -131,6 +144,8 @@ public Output asOutput() { public static class Options { private Boolean requireNonEmpty; + private String metadata; + private Options() { } @@ -144,6 +159,17 @@ public Options requireNonEmpty(Boolean requireNonEmpty) { this.requireNonEmpty = requireNonEmpty; return this; } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } } @OpInputsMetadata( @@ -180,8 +206,13 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new ShardDataset(op), op, Arrays.asList("require_non_empty", "output_types", "output_shapes")); + super(new ShardDataset(op), op, Arrays.asList("require_non_empty", "output_types", "output_shapes", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); numShards = (Operand) op.input(inputIndex++); @@ -189,6 +220,7 @@ public Inputs(GraphOperation op) { requireNonEmpty = op.attributes().getAttrBool("require_non_empty"); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShuffleAndRepeatDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShuffleAndRepeatDataset.java index 318f305bd8b..ee0709d05e6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShuffleAndRepeatDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShuffleAndRepeatDataset.java @@ -102,6 +102,9 @@ public static ShuffleAndRepeatDataset create(Scope scope, Operand asOutput() { public static class Options { private Boolean reshuffleEachIteration; + private String metadata; + private Options() { } @@ -151,6 +166,17 @@ public Options reshuffleEachIteration(Boolean reshuffleEachIteration) { this.reshuffleEachIteration = reshuffleEachIteration; return this; } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } } @OpInputsMetadata( @@ -202,8 +228,13 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new ShuffleAndRepeatDataset(op), op, Arrays.asList("reshuffle_each_iteration", "output_types", "output_shapes")); + super(new ShuffleAndRepeatDataset(op), op, Arrays.asList("reshuffle_each_iteration", "output_types", "output_shapes", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); bufferSize = (Operand) op.input(inputIndex++); @@ -214,6 +245,7 @@ public Inputs(GraphOperation op) { reshuffleEachIteration = op.attributes().getAttrBool("reshuffle_each_iteration"); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShuffleDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShuffleDataset.java index 92cf83d4c4d..67cb08d486a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShuffleDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ShuffleDataset.java @@ -100,6 +100,9 @@ public static ShuffleDataset create(Scope scope, Operand inputD if (opts.reshuffleEachIteration != null) { opBuilder.setAttr("reshuffle_each_iteration", opts.reshuffleEachIteration); } + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } } } return new ShuffleDataset(opBuilder.build()); @@ -115,6 +118,16 @@ public static Options reshuffleEachIteration(Boolean reshuffleEachIteration) { return new Options().reshuffleEachIteration(reshuffleEachIteration); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -136,6 +149,8 @@ public Output asOutput() { public static class Options { private Boolean reshuffleEachIteration; + private String metadata; + private Options() { } @@ -149,6 +164,17 @@ public Options reshuffleEachIteration(Boolean reshuffleEachIteration) { this.reshuffleEachIteration = reshuffleEachIteration; return this; } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } } @OpInputsMetadata( @@ -195,8 +221,13 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new ShuffleDataset(op), op, Arrays.asList("reshuffle_each_iteration", "output_types", "output_shapes")); + super(new ShuffleDataset(op), op, Arrays.asList("reshuffle_each_iteration", "output_types", "output_shapes", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); bufferSize = (Operand) op.input(inputIndex++); @@ -206,6 +237,7 @@ public Inputs(GraphOperation op) { reshuffleEachIteration = op.attributes().getAttrBool("reshuffle_each_iteration"); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SkipDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SkipDataset.java index 2734d6ff08d..db0dd1e15d5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SkipDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SkipDataset.java @@ -71,13 +71,15 @@ public SkipDataset(Operation operation) { * that should be skipped. If count is -1, skips everything. * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of SkipDataset */ @Endpoint( describeByClass = true ) public static SkipDataset create(Scope scope, Operand inputDataset, - Operand count, List> outputTypes, List outputShapes) { + Operand count, List> outputTypes, List outputShapes, + Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "SkipDataset"); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(count.asOutput()); @@ -87,9 +89,26 @@ public static SkipDataset create(Scope scope, Operand inputData outputShapesArray[i] = outputShapes.get(i); } opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } + } + } return new SkipDataset(opBuilder.build()); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -105,6 +124,27 @@ public Output asOutput() { return (Output) handle; } + /** + * Optional attributes for {@link org.tensorflow.op.data.SkipDataset} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + @OpInputsMetadata( outputsClass = SkipDataset.class ) @@ -130,13 +170,19 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new SkipDataset(op), op, Arrays.asList("output_types", "output_shapes")); + super(new SkipDataset(op), op, Arrays.asList("output_types", "output_shapes", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); count = (Operand) op.input(inputIndex++); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotDataset.java index 323fa9514b3..609648b07db 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SnapshotDataset.java @@ -120,6 +120,9 @@ public static SnapshotDataset create(Scope scope, Operand input if (opts.hash != null) { opBuilder.setAttr("hash", opts.hash); } + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } } } return new SnapshotDataset(opBuilder.build()); @@ -175,6 +178,16 @@ public static Options hash(Long hash) { return new Options().hash(hash); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -204,6 +217,8 @@ public static class Options { private Long hash; + private String metadata; + private Options() { } @@ -261,6 +276,17 @@ public Options hash(Long hash) { this.hash = hash; return this; } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } } @OpInputsMetadata( @@ -332,8 +358,13 @@ public static class Inputs extends RawOpInputs { */ public final DataType[] TshardFuncArgs; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new SnapshotDataset(op), op, Arrays.asList("output_types", "output_shapes", "compression", "reader_prefix", "writer_prefix", "hash_valid", "hash", "Treader_func_args", "Tshard_func_args")); + super(new SnapshotDataset(op), op, Arrays.asList("output_types", "output_shapes", "compression", "reader_prefix", "writer_prefix", "hash_valid", "hash", "Treader_func_args", "Tshard_func_args", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); path = (Operand) op.input(inputIndex++); @@ -352,6 +383,7 @@ public Inputs(GraphOperation op) { hash = op.attributes().getAttrInt("hash"); TreaderFuncArgs = op.attributes().getAttrTypeList("Treader_func_args"); TshardFuncArgs = op.attributes().getAttrTypeList("Tshard_func_args"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TakeDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TakeDataset.java index 83cdbb86ee1..67ea88c4395 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TakeDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TakeDataset.java @@ -72,13 +72,15 @@ public TakeDataset(Operation operation) { * is taken. * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of TakeDataset */ @Endpoint( describeByClass = true ) public static TakeDataset create(Scope scope, Operand inputDataset, - Operand count, List> outputTypes, List outputShapes) { + Operand count, List> outputTypes, List outputShapes, + Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "TakeDataset"); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(count.asOutput()); @@ -88,9 +90,26 @@ public static TakeDataset create(Scope scope, Operand inputData outputShapesArray[i] = outputShapes.get(i); } opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } + } + } return new TakeDataset(opBuilder.build()); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -106,6 +125,27 @@ public Output asOutput() { return (Output) handle; } + /** + * Optional attributes for {@link org.tensorflow.op.data.TakeDataset} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + @OpInputsMetadata( outputsClass = TakeDataset.class ) @@ -132,13 +172,19 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new TakeDataset(op), op, Arrays.asList("output_types", "output_shapes")); + super(new TakeDataset(op), op, Arrays.asList("output_types", "output_shapes", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); count = (Operand) op.input(inputIndex++); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TakeWhileDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TakeWhileDataset.java index 372da7e0af7..fabeac00065 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TakeWhileDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TakeWhileDataset.java @@ -78,6 +78,7 @@ public TakeWhileDataset(Operation operation) { * @param predicate A function returning a scalar boolean. * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of TakeWhileDataset */ @Endpoint( @@ -85,7 +86,7 @@ public TakeWhileDataset(Operation operation) { ) public static TakeWhileDataset create(Scope scope, Operand inputDataset, Iterable> otherArguments, ConcreteFunction predicate, - List> outputTypes, List outputShapes) { + List> outputTypes, List outputShapes, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "TakeWhileDataset"); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInputList(Operands.asOutputs(otherArguments)); @@ -96,9 +97,26 @@ public static TakeWhileDataset create(Scope scope, Operand inpu outputShapesArray[i] = outputShapes.get(i); } opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } + } + } return new TakeWhileDataset(opBuilder.build()); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -114,6 +132,27 @@ public Output asOutput() { return (Output) handle; } + /** + * Optional attributes for {@link org.tensorflow.op.data.TakeWhileDataset} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + @OpInputsMetadata( outputsClass = TakeWhileDataset.class ) @@ -144,8 +183,13 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new TakeWhileDataset(op), op, Arrays.asList("Targuments", "output_types", "output_shapes")); + super(new TakeWhileDataset(op), op, Arrays.asList("Targuments", "output_types", "output_shapes", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); int otherArgumentsLength = op.inputListLength("other_arguments"); @@ -154,6 +198,7 @@ public Inputs(GraphOperation op) { Targuments = op.attributes().getAttrTypeList("Targuments"); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TensorDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TensorDataset.java index 59f597498a6..d008d80095f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TensorDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TensorDataset.java @@ -67,13 +67,14 @@ public TensorDataset(Operation operation) { * @param scope current scope * @param components The components value * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of TensorDataset */ @Endpoint( describeByClass = true ) public static TensorDataset create(Scope scope, Iterable> components, - List outputShapes) { + List outputShapes, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "TensorDataset"); opBuilder.addInputList(Operands.asOutputs(components)); Shape[] outputShapesArray = new Shape[outputShapes.size()]; @@ -81,9 +82,26 @@ public static TensorDataset create(Scope scope, Iterable> components, outputShapesArray[i] = outputShapes.get(i); } opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } + } + } return new TensorDataset(opBuilder.build()); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -99,6 +117,27 @@ public Output asOutput() { return (Output) handle; } + /** + * Optional attributes for {@link org.tensorflow.op.data.TensorDataset} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + @OpInputsMetadata( outputsClass = TensorDataset.class ) @@ -118,14 +157,20 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new TensorDataset(op), op, Arrays.asList("Toutput_types", "output_shapes")); + super(new TensorDataset(op), op, Arrays.asList("Toutput_types", "output_shapes", "metadata")); int inputIndex = 0; int componentsLength = op.inputListLength("components"); components = Arrays.asList((Operand[]) op.inputList(inputIndex, componentsLength)); inputIndex += componentsLength; ToutputTypes = op.attributes().getAttrTypeList("Toutput_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TensorSliceDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TensorSliceDataset.java index 1e74a18e2b7..c6f1711d264 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TensorSliceDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TensorSliceDataset.java @@ -67,13 +67,14 @@ public TensorSliceDataset(Operation operation) { * @param scope current scope * @param components The components value * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of TensorSliceDataset */ @Endpoint( describeByClass = true ) public static TensorSliceDataset create(Scope scope, Iterable> components, - List outputShapes) { + List outputShapes, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "TensorSliceDataset"); opBuilder.addInputList(Operands.asOutputs(components)); Shape[] outputShapesArray = new Shape[outputShapes.size()]; @@ -81,9 +82,39 @@ public static TensorSliceDataset create(Scope scope, Iterable> compon outputShapesArray[i] = outputShapes.get(i); } opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.isFiles != null) { + opBuilder.setAttr("is_files", opts.isFiles); + } + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } + } + } return new TensorSliceDataset(opBuilder.build()); } + /** + * Sets the isFiles option. + * + * @param isFiles the isFiles option + * @return this Options instance. + */ + public static Options isFiles(Boolean isFiles) { + return new Options().isFiles(isFiles); + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -99,6 +130,40 @@ public Output asOutput() { return (Output) handle; } + /** + * Optional attributes for {@link org.tensorflow.op.data.TensorSliceDataset} + */ + public static class Options { + private Boolean isFiles; + + private String metadata; + + private Options() { + } + + /** + * Sets the isFiles option. + * + * @param isFiles the isFiles option + * @return this Options instance. + */ + public Options isFiles(Boolean isFiles) { + this.isFiles = isFiles; + return this; + } + + /** + * Sets the metadata option. 
+ * + * @param metadata the metadata option + * @return this Options instance. + */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + @OpInputsMetadata( outputsClass = TensorSliceDataset.class ) @@ -118,14 +183,26 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The isFiles attribute + */ + public final boolean isFiles; + + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new TensorSliceDataset(op), op, Arrays.asList("Toutput_types", "output_shapes")); + super(new TensorSliceDataset(op), op, Arrays.asList("Toutput_types", "output_shapes", "is_files", "metadata")); int inputIndex = 0; int componentsLength = op.inputListLength("components"); components = Arrays.asList((Operand[]) op.inputList(inputIndex, componentsLength)); inputIndex += componentsLength; ToutputTypes = op.attributes().getAttrTypeList("Toutput_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + isFiles = op.attributes().getAttrBool("is_files"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TextLineDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TextLineDataset.java index e58a0eaad0a..106a2440243 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TextLineDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TextLineDataset.java @@ -68,20 +68,38 @@ public TextLineDataset(Operation operation) { * @param compressionType A scalar containing either (i) the empty string (no * compression), (ii) "ZLIB", or (iii) "GZIP". * @param bufferSize A scalar containing the number of bytes to buffer. + * @param options carries optional attribute values * @return a new instance of TextLineDataset */ @Endpoint( describeByClass = true ) public static TextLineDataset create(Scope scope, Operand filenames, - Operand compressionType, Operand bufferSize) { + Operand compressionType, Operand bufferSize, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "TextLineDataset"); opBuilder.addInput(filenames.asOutput()); opBuilder.addInput(compressionType.asOutput()); opBuilder.addInput(bufferSize.asOutput()); + if (options != null) { + for (Options opts : options) { + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } + } + } return new TextLineDataset(opBuilder.build()); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -97,6 +115,27 @@ public Output asOutput() { return (Output) handle; } + /** + * Optional attributes for {@link org.tensorflow.op.data.TextLineDataset} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
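[Editor's note, not part of the generated sources: TensorSliceDataset is the one op in this patch that gains two optional attributes (is_files and metadata). Because each Options setter returns this, both can be chained off a static factory. A sketch under the assumption that the tf.array(String...) constant helper is available; file names and the metadata value are hypothetical.]

import java.util.Arrays;
import java.util.List;
import org.tensorflow.Graph;
import org.tensorflow.ndarray.Shape;
import org.tensorflow.op.Ops;
import org.tensorflow.op.data.TensorSliceDataset;

public class TensorSliceDatasetOptionsSketch {
  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);
      // Each element sliced from the 1-D filename vector is a scalar string.
      List<Shape> outputShapes = Arrays.asList(Shape.scalar());
      // The components are file names, so is_files is set alongside the new
      // metadata attribute; chaining works because the instance setter
      // returns the same Options object created by the static factory.
      TensorSliceDataset files = TensorSliceDataset.create(
          tf.scope(),
          Arrays.asList(tf.array("a.tfrecord", "b.tfrecord")),
          outputShapes,
          TensorSliceDataset.isFiles(true).metadata("input_files"));
    }
  }
}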
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + @OpInputsMetadata( outputsClass = TextLineDataset.class ) @@ -118,12 +157,18 @@ public static class Inputs extends RawOpInputs { */ public final Operand bufferSize; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new TextLineDataset(op), op, Arrays.asList()); + super(new TextLineDataset(op), op, Arrays.asList("metadata")); int inputIndex = 0; filenames = (Operand) op.input(inputIndex++); compressionType = (Operand) op.input(inputIndex++); bufferSize = (Operand) op.input(inputIndex++); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TfRecordDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TfRecordDataset.java index d46b73555f4..fc84dd70094 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TfRecordDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/TfRecordDataset.java @@ -69,20 +69,38 @@ public TfRecordDataset(Operation operation) { * compression), (ii) "ZLIB", or (iii) "GZIP". * @param bufferSize A scalar representing the number of bytes to buffer. A value of * 0 means no buffering will be performed. + * @param options carries optional attribute values * @return a new instance of TfRecordDataset */ @Endpoint( describeByClass = true ) public static TfRecordDataset create(Scope scope, Operand filenames, - Operand compressionType, Operand bufferSize) { + Operand compressionType, Operand bufferSize, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "TfRecordDataset"); opBuilder.addInput(filenames.asOutput()); opBuilder.addInput(compressionType.asOutput()); opBuilder.addInput(bufferSize.asOutput()); + if (options != null) { + for (Options opts : options) { + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } + } + } return new TfRecordDataset(opBuilder.build()); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -98,6 +116,27 @@ public Output asOutput() { return (Output) handle; } + /** + * Optional attributes for {@link org.tensorflow.op.data.TfRecordDataset} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + @OpInputsMetadata( outputsClass = TfRecordDataset.class ) @@ -120,12 +159,18 @@ public static class Inputs extends RawOpInputs { */ public final Operand bufferSize; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new TfRecordDataset(op), op, Arrays.asList()); + super(new TfRecordDataset(op), op, Arrays.asList("metadata")); int inputIndex = 0; filenames = (Operand) op.input(inputIndex++); compressionType = (Operand) op.input(inputIndex++); bufferSize = (Operand) op.input(inputIndex++); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UnbatchDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UnbatchDataset.java index 5256a74b953..88e05e591ac 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UnbatchDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UnbatchDataset.java @@ -68,13 +68,14 @@ public UnbatchDataset(Operation operation) { * @param inputDataset The inputDataset value * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of UnbatchDataset */ @Endpoint( describeByClass = true ) public static UnbatchDataset create(Scope scope, Operand inputDataset, - List> outputTypes, List outputShapes) { + List> outputTypes, List outputShapes, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "UnbatchDataset"); opBuilder.addInput(inputDataset.asOutput()); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); @@ -83,9 +84,26 @@ public static UnbatchDataset create(Scope scope, Operand inputD outputShapesArray[i] = outputShapes.get(i); } opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } + } + } return new UnbatchDataset(opBuilder.build()); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -101,6 +119,27 @@ public Output asOutput() { return (Output) handle; } + /** + * Optional attributes for {@link org.tensorflow.op.data.UnbatchDataset} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + @OpInputsMetadata( outputsClass = UnbatchDataset.class ) @@ -120,12 +159,18 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new UnbatchDataset(op), op, Arrays.asList("output_types", "output_shapes")); + super(new UnbatchDataset(op), op, Arrays.asList("output_types", "output_shapes", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UniqueDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UniqueDataset.java index 61cb6ac4538..f6e4f04fdd9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UniqueDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/UniqueDataset.java @@ -68,13 +68,14 @@ public UniqueDataset(Operation operation) { * @param inputDataset The inputDataset value * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of UniqueDataset */ @Endpoint( describeByClass = true ) public static UniqueDataset create(Scope scope, Operand inputDataset, - List> outputTypes, List outputShapes) { + List> outputTypes, List outputShapes, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "UniqueDataset"); opBuilder.addInput(inputDataset.asOutput()); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); @@ -83,9 +84,26 @@ public static UniqueDataset create(Scope scope, Operand inputDa outputShapesArray[i] = outputShapes.get(i); } opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } + } + } return new UniqueDataset(opBuilder.build()); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -101,6 +119,27 @@ public Output asOutput() { return (Output) handle; } + /** + * Optional attributes for {@link org.tensorflow.op.data.UniqueDataset} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
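[Editor's note, not part of the generated sources: the extended Inputs classes in this patch read the new attribute back with op.attributes().getAttrString("metadata"). The same accessor can be used directly on the underlying GraphOperation after an op is built. A hypothetical round-trip sketch using TensorDataset, whose change appears earlier in this patch.]

import java.util.Arrays;
import java.util.List;
import org.tensorflow.Graph;
import org.tensorflow.GraphOperation;
import org.tensorflow.ndarray.Shape;
import org.tensorflow.op.Ops;
import org.tensorflow.op.data.TensorDataset;

public class MetadataReadbackSketch {
  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);
      // A single-element dataset whose one component is a length-3 vector.
      List<Shape> outputShapes = Arrays.asList(Shape.of(3));
      TensorDataset ds = TensorDataset.create(
          tf.scope(),
          Arrays.asList(tf.constant(new long[] {1, 2, 3})),
          outputShapes,
          TensorDataset.metadata("training_inputs"));
      // In a Graph the backing Operation is a GraphOperation, so the attribute
      // can be inspected with the accessor the generated Inputs classes use.
      GraphOperation op = (GraphOperation) ds.op();
      String metadata = op.attributes().getAttrString("metadata");
      System.out.println("metadata attr: " + metadata);
    }
  }
}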
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + @OpInputsMetadata( outputsClass = UniqueDataset.class ) @@ -120,12 +159,18 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new UniqueDataset(op), op, Arrays.asList("output_types", "output_shapes")); + super(new UniqueDataset(op), op, Arrays.asList("output_types", "output_shapes", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WindowDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WindowDataset.java index 384bcf89c8e..583865b3598 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WindowDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WindowDataset.java @@ -115,6 +115,7 @@ public WindowDataset(Operation operation) { * dropped if its size is smaller than {@code window_size}. * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of WindowDataset */ @Endpoint( @@ -123,7 +124,7 @@ public WindowDataset(Operation operation) { public static WindowDataset create(Scope scope, Operand inputDataset, Operand sizeOutput, Operand shift, Operand stride, Operand dropRemainder, List> outputTypes, - List outputShapes) { + List outputShapes, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "WindowDataset"); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(sizeOutput.asOutput()); @@ -136,9 +137,26 @@ public static WindowDataset create(Scope scope, Operand inputDa outputShapesArray[i] = outputShapes.get(i); } opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } + } + } return new WindowDataset(opBuilder.build()); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -154,6 +172,27 @@ public Output asOutput() { return (Output) handle; } + /** + * Optional attributes for {@link org.tensorflow.op.data.WindowDataset} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + @OpInputsMetadata( outputsClass = WindowDataset.class ) @@ -199,8 +238,13 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new WindowDataset(op), op, Arrays.asList("output_types", "output_shapes")); + super(new WindowDataset(op), op, Arrays.asList("output_types", "output_shapes", "metadata")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); sizeOutput = (Operand) op.input(inputIndex++); @@ -209,6 +253,7 @@ public Inputs(GraphOperation op) { dropRemainder = (Operand) op.input(inputIndex++); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Window.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WindowOp.java similarity index 82% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Window.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WindowOp.java index ab241b515ae..8943faa9728 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Window.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/WindowOp.java @@ -15,7 +15,7 @@ // This class has been generated, DO NOT EDIT! -package org.tensorflow.op.core; +package org.tensorflow.op.data; import java.util.Arrays; import java.util.List; @@ -36,42 +36,42 @@ import org.tensorflow.types.family.TType; /** - * The Window operation + * The WindowOp operation */ @OpMetadata( - opType = Window.OP_NAME, - inputsClass = Window.Inputs.class + opType = WindowOp.OP_NAME, + inputsClass = WindowOp.Inputs.class ) -public final class Window extends RawOp implements Operand { +public final class WindowOp extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "Window"; + public static final String OP_NAME = "WindowOp"; private Output handle; @SuppressWarnings("unchecked") - public Window(Operation operation) { + public WindowOp(Operation operation) { super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } /** - * Factory method to create a class wrapping a new Window operation. + * Factory method to create a class wrapping a new WindowOp operation. 
* * @param scope current scope * @param inputs The inputs value * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute - * @return a new instance of Window + * @return a new instance of WindowOp */ @Endpoint( describeByClass = true ) - public static Window create(Scope scope, Iterable> inputs, + public static WindowOp create(Scope scope, Iterable> inputs, List> outputTypes, List outputShapes) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "Window"); + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "WindowOp"); opBuilder.addInputList(Operands.asOutputs(inputs)); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); Shape[] outputShapesArray = new Shape[outputShapes.size()]; @@ -79,7 +79,7 @@ public static Window create(Scope scope, Iterable> inputs, outputShapesArray[i] = outputShapes.get(i); } opBuilder.setAttr("output_shapes", outputShapesArray); - return new Window(opBuilder.build()); + return new WindowOp(opBuilder.build()); } /** @@ -98,9 +98,9 @@ public Output asOutput() { } @OpInputsMetadata( - outputsClass = Window.class + outputsClass = WindowOp.class ) - public static class Inputs extends RawOpInputs { + public static class Inputs extends RawOpInputs { /** * The inputs input */ @@ -122,7 +122,7 @@ public static class Inputs extends RawOpInputs { public final DataType[] Tinputs; public Inputs(GraphOperation op) { - super(new Window(op), op, Arrays.asList("output_types", "output_shapes", "Tinputs")); + super(new WindowOp(op), op, Arrays.asList("output_types", "output_shapes", "Tinputs")); int inputIndex = 0; int inputsLength = op.inputListLength("inputs"); inputs = Arrays.asList((Operand[]) op.inputList(inputIndex, inputsLength)); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ZipDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ZipDataset.java index 89192eb8559..1f859f61ee4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ZipDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ZipDataset.java @@ -72,13 +72,14 @@ public ZipDataset(Operation operation) { * @param inputDatasets List of {@code N} variant Tensors representing datasets to be zipped together. * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of ZipDataset */ @Endpoint( describeByClass = true ) public static ZipDataset create(Scope scope, Iterable> inputDatasets, - List> outputTypes, List outputShapes) { + List> outputTypes, List outputShapes, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "ZipDataset"); opBuilder.addInputList(Operands.asOutputs(inputDatasets)); opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes)); @@ -87,9 +88,26 @@ public static ZipDataset create(Scope scope, Iterable> outputShapesArray[i] = outputShapes.get(i); } opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } + } + } return new ZipDataset(opBuilder.build()); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets handle. * @@ -105,6 +123,27 @@ public Output asOutput() { return (Output) handle; } + /** + * Optional attributes for {@link org.tensorflow.op.data.ZipDataset} + */ + public static class Options { + private String metadata; + + private Options() { + } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } + } + @OpInputsMetadata( outputsClass = ZipDataset.class ) @@ -124,14 +163,20 @@ public static class Inputs extends RawOpInputs { */ public final Shape[] outputShapes; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new ZipDataset(op), op, Arrays.asList("output_types", "output_shapes")); + super(new ZipDataset(op), op, Arrays.asList("output_types", "output_shapes", "metadata")); int inputIndex = 0; int inputDatasetsLength = op.inputListLength("input_datasets"); inputDatasets = Arrays.asList((Operand[]) op.inputList(inputIndex, inputDatasetsLength)); inputIndex += inputDatasetsLength; outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetAlg.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetAlg.java similarity index 98% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetAlg.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetAlg.java index eef87bc6798..451f0698abe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetAlg.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetAlg.java @@ -15,7 +15,7 @@ // This class has been generated, DO NOT EDIT! -package org.tensorflow.op.rawops; +package org.tensorflow.op.random; import java.util.Arrays; import org.tensorflow.GraphOperation; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetKeyCounter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetKeyCounter.java similarity index 99% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetKeyCounter.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetKeyCounter.java index 184f9765210..e9afa920907 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/StatelessRandomGetKeyCounter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGetKeyCounter.java @@ -15,7 +15,7 @@ // This class has been generated, DO NOT EDIT! 
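[Editor's note, not part of the generated sources: besides the attribute additions, this patch relocates some generated wrappers. Window in org.tensorflow.op.core becomes WindowOp in org.tensorflow.op.data (its OP_NAME constant changes from "Window" to "WindowOp"), and StatelessRandomGetAlg / StatelessRandomGetKeyCounter move from org.tensorflow.op.rawops to org.tensorflow.op.random. Existing callers mainly need updated imports; a small sketch of the import-level change.]

// Old generated locations (before this patch):
//   import org.tensorflow.op.core.Window;
//   import org.tensorflow.op.rawops.StatelessRandomGetAlg;
import org.tensorflow.op.data.WindowOp;
import org.tensorflow.op.random.StatelessRandomGetAlg;

public class RelocatedOpsSketch {
  // The wrapper's registered op name also changes: "Window" -> "WindowOp".
  static final String WINDOW_OP_NAME = WindowOp.OP_NAME;
  // StatelessRandomGetAlg is unchanged apart from its Java package.
  static final Class<?> GET_ALG = StatelessRandomGetAlg.class;
}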
-package org.tensorflow.op.rawops; +package org.tensorflow.op.random; import java.util.Arrays; import org.tensorflow.GraphOperation; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java deleted file mode 100644 index 20dff3115d1..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java +++ /dev/null @@ -1,231 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.rawops; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; - -/** - * Load frequency estimator embedding parameters with debug support. - * An op that loads optimization parameters into HBM for embedding. Must be - * preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - * embedding table configuration. For example, this op is used to install - * parameters that are loaded from a checkpoint before a training loop is - * executed. - */ -@OpMetadata( - opType = LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.OP_NAME, - inputsClass = LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.Inputs.class -) -public final class LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug"; - - public LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug(Operation operation) { - super(operation, OP_NAME); - } - - /** - * Factory method to create a class wrapping a new LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug operation. - * - * @param scope current scope - * @param parameters Value of parameters used in the frequency estimator optimization algorithm. - * @param lastHitStep Value of last_hit_step used in the frequency estimator optimization algorithm. - * @param gradientAccumulators Value of gradient_accumulators used in the frequency estimator optimization - * algorithm. 
- * @param numShards The value of the numShards attribute - * @param shardId The value of the shardId attribute - * @param options carries optional attribute values - * @return a new instance of LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug - */ - @Endpoint( - describeByClass = true - ) - public static LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug create(Scope scope, - Operand parameters, Operand lastHitStep, - Operand gradientAccumulators, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug"); - opBuilder.addInput(parameters.asOutput()); - opBuilder.addInput(lastHitStep.asOutput()); - opBuilder.addInput(gradientAccumulators.asOutput()); - opBuilder.setAttr("num_shards", numShards); - opBuilder.setAttr("shard_id", shardId); - if (options != null) { - for (Options opts : options) { - if (opts.tableId != null) { - opBuilder.setAttr("table_id", opts.tableId); - } - if (opts.tableName != null) { - opBuilder.setAttr("table_name", opts.tableName); - } - if (opts.config != null) { - opBuilder.setAttr("config", opts.config); - } - } - } - return new LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug(opBuilder.build()); - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public static Options tableId(Long tableId) { - return new Options().tableId(tableId); - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public static Options tableName(String tableName) { - return new Options().tableName(tableName); - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public static Options config(String config) { - return new Options().config(config); - } - - /** - * Optional attributes for {@link org.tensorflow.op.rawops.LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug} - */ - public static class Options { - private Long tableId; - - private String tableName; - - private String config; - - private Options() { - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public Options tableId(Long tableId) { - this.tableId = tableId; - return this; - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public Options tableName(String tableName) { - this.tableName = tableName; - return this; - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public Options config(String config) { - this.config = config; - return this; - } - } - - @OpInputsMetadata( - outputsClass = LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.class - ) - public static class Inputs extends RawOpInputs { - /** - * Value of parameters used in the frequency estimator optimization algorithm. - */ - public final Operand parameters; - - /** - * Value of last_hit_step used in the frequency estimator optimization algorithm. - */ - public final Operand lastHitStep; - - /** - * Value of gradient_accumulators used in the frequency estimator optimization - * algorithm. 
- */ - public final Operand gradientAccumulators; - - /** - * The tableId attribute - */ - public final long tableId; - - /** - * The tableName attribute - */ - public final String tableName; - - /** - * The numShards attribute - */ - public final long numShards; - - /** - * The shardId attribute - */ - public final long shardId; - - /** - * The config attribute - */ - public final String config; - - public Inputs(GraphOperation op) { - super(new LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug(op), op, Arrays.asList("table_id", "table_name", "num_shards", "shard_id", "config")); - int inputIndex = 0; - parameters = (Operand) op.input(inputIndex++); - lastHitStep = (Operand) op.input(inputIndex++); - gradientAccumulators = (Operand) op.input(inputIndex++); - tableId = op.attributes().getAttrInt("table_id"); - tableName = op.attributes().getAttrString("table_name"); - numShards = op.attributes().getAttrInt("num_shards"); - shardId = op.attributes().getAttrInt("shard_id"); - config = op.attributes().getAttrString("config"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java deleted file mode 100644 index 15fb4ba7a5c..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.java +++ /dev/null @@ -1,242 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.rawops; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; - -/** - * Retrieve frequency estimator embedding parameters with debug support. - * An op that retrieves optimization parameters from embedding to host - * memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up - * the correct embedding table configuration. For example, this op is - * used to retrieve updated parameters before saving a checkpoint. 
- */ -@OpMetadata( - opType = RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.OP_NAME, - inputsClass = RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.Inputs.class -) -public final class RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug"; - - private Output parameters; - - private Output lastHitStep; - - private Output gradientAccumulators; - - public RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - parameters = operation.output(outputIdx++); - lastHitStep = operation.output(outputIdx++); - gradientAccumulators = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug operation. - * - * @param scope current scope - * @param numShards The value of the numShards attribute - * @param shardId The value of the shardId attribute - * @param options carries optional attribute values - * @return a new instance of RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug - */ - @Endpoint( - describeByClass = true - ) - public static RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug create(Scope scope, - Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug"); - opBuilder.setAttr("num_shards", numShards); - opBuilder.setAttr("shard_id", shardId); - if (options != null) { - for (Options opts : options) { - if (opts.tableId != null) { - opBuilder.setAttr("table_id", opts.tableId); - } - if (opts.tableName != null) { - opBuilder.setAttr("table_name", opts.tableName); - } - if (opts.config != null) { - opBuilder.setAttr("config", opts.config); - } - } - } - return new RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug(opBuilder.build()); - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public static Options tableId(Long tableId) { - return new Options().tableId(tableId); - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public static Options tableName(String tableName) { - return new Options().tableName(tableName); - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public static Options config(String config) { - return new Options().config(config); - } - - /** - * Gets parameters. - * Parameter parameters updated by the frequency estimator optimization algorithm. - * @return parameters. - */ - public Output parameters() { - return parameters; - } - - /** - * Gets lastHitStep. - * Parameter last_hit_step updated by the frequency estimator optimization - * algorithm. - * @return lastHitStep. - */ - public Output lastHitStep() { - return lastHitStep; - } - - /** - * Gets gradientAccumulators. - * Parameter gradient_accumulators updated by the frequency estimator optimization - * algorithm. - * @return gradientAccumulators. 
- */ - public Output gradientAccumulators() { - return gradientAccumulators; - } - - /** - * Optional attributes for {@link org.tensorflow.op.rawops.RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug} - */ - public static class Options { - private Long tableId; - - private String tableName; - - private String config; - - private Options() { - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public Options tableId(Long tableId) { - this.tableId = tableId; - return this; - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public Options tableName(String tableName) { - this.tableName = tableName; - return this; - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public Options config(String config) { - this.config = config; - return this; - } - } - - @OpInputsMetadata( - outputsClass = RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.class - ) - public static class Inputs extends RawOpInputs { - /** - * The tableId attribute - */ - public final long tableId; - - /** - * The tableName attribute - */ - public final String tableName; - - /** - * The numShards attribute - */ - public final long numShards; - - /** - * The shardId attribute - */ - public final long shardId; - - /** - * The config attribute - */ - public final String config; - - public Inputs(GraphOperation op) { - super(new RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug(op), op, Arrays.asList("table_id", "table_name", "num_shards", "shard_id", "config")); - int inputIndex = 0; - tableId = op.attributes().getAttrInt("table_id"); - tableName = op.attributes().getAttrString("table_name"); - numShards = op.attributes().getAttrInt("num_shards"); - shardId = op.attributes().getAttrInt("shard_id"); - config = op.attributes().getAttrString("config"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureDistributedTPU.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureDistributedTPU.java index 6cc6432a70d..b55e8249078 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureDistributedTPU.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConfigureDistributedTPU.java @@ -81,6 +81,9 @@ public static ConfigureDistributedTPU create(Scope scope, Options... options) { if (opts.compilationFailureClosesChips != null) { opBuilder.setAttr("compilation_failure_closes_chips", opts.compilationFailureClosesChips); } + if (opts.tpuCancellationClosesChips != null) { + opBuilder.setAttr("tpu_cancellation_closes_chips", opts.tpuCancellationClosesChips); + } } } return new ConfigureDistributedTPU(opBuilder.build()); @@ -137,6 +140,16 @@ public static Options compilationFailureClosesChips(Boolean compilationFailureCl return new Options().compilationFailureClosesChips(compilationFailureClosesChips); } + /** + * Sets the tpuCancellationClosesChips option. + * + * @param tpuCancellationClosesChips the tpuCancellationClosesChips option + * @return this Options instance. + */ + public static Options tpuCancellationClosesChips(Long tpuCancellationClosesChips) { + return new Options().tpuCancellationClosesChips(tpuCancellationClosesChips); + } + /** * Gets topology. 
* A serialized tensorflow.tpu.TopologyProto that describes the TPU @@ -166,6 +179,8 @@ public static class Options { private Boolean compilationFailureClosesChips; + private Long tpuCancellationClosesChips; + private Options() { } @@ -224,6 +239,17 @@ public Options compilationFailureClosesChips(Boolean compilationFailureClosesChi this.compilationFailureClosesChips = compilationFailureClosesChips; return this; } + + /** + * Sets the tpuCancellationClosesChips option. + * + * @param tpuCancellationClosesChips the tpuCancellationClosesChips option + * @return this Options instance. + */ + public Options tpuCancellationClosesChips(Long tpuCancellationClosesChips) { + this.tpuCancellationClosesChips = tpuCancellationClosesChips; + return this; + } } @OpInputsMetadata( @@ -256,14 +282,20 @@ public static class Inputs extends RawOpInputs { */ public final boolean compilationFailureClosesChips; + /** + * The tpuCancellationClosesChips attribute + */ + public final long tpuCancellationClosesChips; + public Inputs(GraphOperation op) { - super(new ConfigureDistributedTPU(op), op, Arrays.asList("embedding_config", "tpu_embedding_config", "is_global_init", "enable_whole_mesh_compilations", "compilation_failure_closes_chips")); + super(new ConfigureDistributedTPU(op), op, Arrays.asList("embedding_config", "tpu_embedding_config", "is_global_init", "enable_whole_mesh_compilations", "compilation_failure_closes_chips", "tpu_cancellation_closes_chips")); int inputIndex = 0; embeddingConfig = op.attributes().getAttrString("embedding_config"); tpuEmbeddingConfig = op.attributes().getAttrString("tpu_embedding_config"); isGlobalInit = op.attributes().getAttrBool("is_global_init"); enableWholeMeshCompilations = op.attributes().getAttrBool("enable_whole_mesh_compilations"); compilationFailureClosesChips = op.attributes().getAttrBool("compilation_failure_closes_chips"); + tpuCancellationClosesChips = op.attributes().getAttrInt("tpu_cancellation_closes_chips"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/IsTPUEmbeddingInitialized.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/IsTPUEmbeddingInitialized.java new file mode 100644 index 00000000000..b70ad115492 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/IsTPUEmbeddingInitialized.java @@ -0,0 +1,92 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.tpu; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.types.TBool; + +/** + * Whether TPU Embedding is initialized in a distributed TPU system. + */ +@OpMetadata( + opType = IsTPUEmbeddingInitialized.OP_NAME, + inputsClass = IsTPUEmbeddingInitialized.Inputs.class +) +public final class IsTPUEmbeddingInitialized extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "IsTPUEmbeddingInitialized"; + + private Output isTpuEmbeddingInitialized; + + public IsTPUEmbeddingInitialized(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + isTpuEmbeddingInitialized = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new IsTPUEmbeddingInitialized operation. + * + * @param scope current scope + * @return a new instance of IsTPUEmbeddingInitialized + */ + @Endpoint( + describeByClass = true + ) + public static IsTPUEmbeddingInitialized create(Scope scope) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "IsTPUEmbeddingInitialized"); + return new IsTPUEmbeddingInitialized(opBuilder.build()); + } + + /** + * Gets isTpuEmbeddingInitialized. + * + * @return isTpuEmbeddingInitialized. + */ + public Output isTpuEmbeddingInitialized() { + return isTpuEmbeddingInitialized; + } + + @Override + public Output asOutput() { + return isTpuEmbeddingInitialized; + } + + @OpInputsMetadata( + outputsClass = IsTPUEmbeddingInitialized.class + ) + public static class Inputs extends RawOpInputs { + public Inputs(GraphOperation op) { + super(new IsTPUEmbeddingInitialized(op), op, Arrays.asList()); + int inputIndex = 0; + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingADAMParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingADAMParametersGradAccumDebug.java deleted file mode 100644 index 954bd247d65..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingADAMParametersGradAccumDebug.java +++ /dev/null @@ -1,237 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
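[Editor's note, not part of the generated sources: two TPU-side additions appear above, the new tpu_cancellation_closes_chips attribute on ConfigureDistributedTPU and the new IsTPUEmbeddingInitialized op. A graph-construction sketch; the attribute value is hypothetical and actually running the graph requires a distributed TPU system.]

import org.tensorflow.Graph;
import org.tensorflow.op.Ops;
import org.tensorflow.op.tpu.ConfigureDistributedTPU;
import org.tensorflow.op.tpu.IsTPUEmbeddingInitialized;

public class TpuConfigSketch {
  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);
      // Configure the distributed TPU system, passing the new int64
      // tpu_cancellation_closes_chips attribute via its Options setter.
      ConfigureDistributedTPU configure = ConfigureDistributedTPU.create(
          tf.scope(),
          ConfigureDistributedTPU.tpuCancellationClosesChips(1L));
      // The new op exposes a boolean output that can be fetched in a Session
      // to check whether TPU embedding has been initialized.
      IsTPUEmbeddingInitialized initialized =
          IsTPUEmbeddingInitialized.create(tf.scope());
    }
  }
}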
- -package org.tensorflow.op.tpu; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; - -/** - * Load ADAM embedding parameters with debug support. - * An op that loads optimization parameters into HBM for embedding. Must be - * preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - * embedding table configuration. For example, this op is used to install - * parameters that are loaded from a checkpoint before a training loop is - * executed. - */ -@OpMetadata( - opType = LoadTPUEmbeddingADAMParametersGradAccumDebug.OP_NAME, - inputsClass = LoadTPUEmbeddingADAMParametersGradAccumDebug.Inputs.class -) -public final class LoadTPUEmbeddingADAMParametersGradAccumDebug extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "LoadTPUEmbeddingADAMParametersGradAccumDebug"; - - public LoadTPUEmbeddingADAMParametersGradAccumDebug(Operation operation) { - super(operation, OP_NAME); - } - - /** - * Factory method to create a class wrapping a new LoadTPUEmbeddingADAMParametersGradAccumDebug operation. - * - * @param scope current scope - * @param parameters Value of parameters used in the ADAM optimization algorithm. - * @param momenta Value of momenta used in the ADAM optimization algorithm. - * @param velocities Value of velocities used in the ADAM optimization algorithm. - * @param gradientAccumulators Value of gradient_accumulators used in the ADAM optimization algorithm. - * @param numShards The value of the numShards attribute - * @param shardId The value of the shardId attribute - * @param options carries optional attribute values - * @return a new instance of LoadTPUEmbeddingADAMParametersGradAccumDebug - */ - @Endpoint( - describeByClass = true - ) - public static LoadTPUEmbeddingADAMParametersGradAccumDebug create(Scope scope, - Operand parameters, Operand momenta, Operand velocities, - Operand gradientAccumulators, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "LoadTPUEmbeddingADAMParametersGradAccumDebug"); - opBuilder.addInput(parameters.asOutput()); - opBuilder.addInput(momenta.asOutput()); - opBuilder.addInput(velocities.asOutput()); - opBuilder.addInput(gradientAccumulators.asOutput()); - opBuilder.setAttr("num_shards", numShards); - opBuilder.setAttr("shard_id", shardId); - if (options != null) { - for (Options opts : options) { - if (opts.tableId != null) { - opBuilder.setAttr("table_id", opts.tableId); - } - if (opts.tableName != null) { - opBuilder.setAttr("table_name", opts.tableName); - } - if (opts.config != null) { - opBuilder.setAttr("config", opts.config); - } - } - } - return new LoadTPUEmbeddingADAMParametersGradAccumDebug(opBuilder.build()); - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public static Options tableId(Long tableId) { - return new Options().tableId(tableId); - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. 
- */ - public static Options tableName(String tableName) { - return new Options().tableName(tableName); - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public static Options config(String config) { - return new Options().config(config); - } - - /** - * Optional attributes for {@link org.tensorflow.op.tpu.LoadTPUEmbeddingADAMParametersGradAccumDebug} - */ - public static class Options { - private Long tableId; - - private String tableName; - - private String config; - - private Options() { - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public Options tableId(Long tableId) { - this.tableId = tableId; - return this; - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public Options tableName(String tableName) { - this.tableName = tableName; - return this; - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public Options config(String config) { - this.config = config; - return this; - } - } - - @OpInputsMetadata( - outputsClass = LoadTPUEmbeddingADAMParametersGradAccumDebug.class - ) - public static class Inputs extends RawOpInputs { - /** - * Value of parameters used in the ADAM optimization algorithm. - */ - public final Operand parameters; - - /** - * Value of momenta used in the ADAM optimization algorithm. - */ - public final Operand momenta; - - /** - * Value of velocities used in the ADAM optimization algorithm. - */ - public final Operand velocities; - - /** - * Value of gradient_accumulators used in the ADAM optimization algorithm. - */ - public final Operand gradientAccumulators; - - /** - * The tableId attribute - */ - public final long tableId; - - /** - * The tableName attribute - */ - public final String tableName; - - /** - * The numShards attribute - */ - public final long numShards; - - /** - * The shardId attribute - */ - public final long shardId; - - /** - * The config attribute - */ - public final String config; - - public Inputs(GraphOperation op) { - super(new LoadTPUEmbeddingADAMParametersGradAccumDebug(op), op, Arrays.asList("table_id", "table_name", "num_shards", "shard_id", "config")); - int inputIndex = 0; - parameters = (Operand) op.input(inputIndex++); - momenta = (Operand) op.input(inputIndex++); - velocities = (Operand) op.input(inputIndex++); - gradientAccumulators = (Operand) op.input(inputIndex++); - tableId = op.attributes().getAttrInt("table_id"); - tableName = op.attributes().getAttrString("table_name"); - numShards = op.attributes().getAttrInt("num_shards"); - shardId = op.attributes().getAttrInt("shard_id"); - config = op.attributes().getAttrString("config"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.java deleted file mode 100644 index 0aaa8ccf6d3..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.java +++ /dev/null @@ -1,237 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.tpu; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; - -/** - * Load Adadelta parameters with debug support. - * An op that loads optimization parameters into HBM for embedding. Must be - * preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - * embedding table configuration. For example, this op is used to install - * parameters that are loaded from a checkpoint before a training loop is - * executed. - */ -@OpMetadata( - opType = LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.OP_NAME, - inputsClass = LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.Inputs.class -) -public final class LoadTPUEmbeddingAdadeltaParametersGradAccumDebug extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "LoadTPUEmbeddingAdadeltaParametersGradAccumDebug"; - - public LoadTPUEmbeddingAdadeltaParametersGradAccumDebug(Operation operation) { - super(operation, OP_NAME); - } - - /** - * Factory method to create a class wrapping a new LoadTPUEmbeddingAdadeltaParametersGradAccumDebug operation. - * - * @param scope current scope - * @param parameters Value of parameters used in the Adadelta optimization algorithm. - * @param accumulators Value of accumulators used in the Adadelta optimization algorithm. - * @param updates Value of updates used in the Adadelta optimization algorithm. - * @param gradientAccumulators Value of gradient_accumulators used in the Adadelta optimization algorithm. - * @param numShards The value of the numShards attribute - * @param shardId The value of the shardId attribute - * @param options carries optional attribute values - * @return a new instance of LoadTPUEmbeddingAdadeltaParametersGradAccumDebug - */ - @Endpoint( - describeByClass = true - ) - public static LoadTPUEmbeddingAdadeltaParametersGradAccumDebug create(Scope scope, - Operand parameters, Operand accumulators, Operand updates, - Operand gradientAccumulators, Long numShards, Long shardId, Options... 
options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "LoadTPUEmbeddingAdadeltaParametersGradAccumDebug"); - opBuilder.addInput(parameters.asOutput()); - opBuilder.addInput(accumulators.asOutput()); - opBuilder.addInput(updates.asOutput()); - opBuilder.addInput(gradientAccumulators.asOutput()); - opBuilder.setAttr("num_shards", numShards); - opBuilder.setAttr("shard_id", shardId); - if (options != null) { - for (Options opts : options) { - if (opts.tableId != null) { - opBuilder.setAttr("table_id", opts.tableId); - } - if (opts.tableName != null) { - opBuilder.setAttr("table_name", opts.tableName); - } - if (opts.config != null) { - opBuilder.setAttr("config", opts.config); - } - } - } - return new LoadTPUEmbeddingAdadeltaParametersGradAccumDebug(opBuilder.build()); - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public static Options tableId(Long tableId) { - return new Options().tableId(tableId); - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public static Options tableName(String tableName) { - return new Options().tableName(tableName); - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public static Options config(String config) { - return new Options().config(config); - } - - /** - * Optional attributes for {@link org.tensorflow.op.tpu.LoadTPUEmbeddingAdadeltaParametersGradAccumDebug} - */ - public static class Options { - private Long tableId; - - private String tableName; - - private String config; - - private Options() { - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public Options tableId(Long tableId) { - this.tableId = tableId; - return this; - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public Options tableName(String tableName) { - this.tableName = tableName; - return this; - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public Options config(String config) { - this.config = config; - return this; - } - } - - @OpInputsMetadata( - outputsClass = LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.class - ) - public static class Inputs extends RawOpInputs { - /** - * Value of parameters used in the Adadelta optimization algorithm. - */ - public final Operand parameters; - - /** - * Value of accumulators used in the Adadelta optimization algorithm. - */ - public final Operand accumulators; - - /** - * Value of updates used in the Adadelta optimization algorithm. - */ - public final Operand updates; - - /** - * Value of gradient_accumulators used in the Adadelta optimization algorithm. 
- */ - public final Operand gradientAccumulators; - - /** - * The tableId attribute - */ - public final long tableId; - - /** - * The tableName attribute - */ - public final String tableName; - - /** - * The numShards attribute - */ - public final long numShards; - - /** - * The shardId attribute - */ - public final long shardId; - - /** - * The config attribute - */ - public final String config; - - public Inputs(GraphOperation op) { - super(new LoadTPUEmbeddingAdadeltaParametersGradAccumDebug(op), op, Arrays.asList("table_id", "table_name", "num_shards", "shard_id", "config")); - int inputIndex = 0; - parameters = (Operand) op.input(inputIndex++); - accumulators = (Operand) op.input(inputIndex++); - updates = (Operand) op.input(inputIndex++); - gradientAccumulators = (Operand) op.input(inputIndex++); - tableId = op.attributes().getAttrInt("table_id"); - tableName = op.attributes().getAttrString("table_name"); - numShards = op.attributes().getAttrInt("num_shards"); - shardId = op.attributes().getAttrInt("shard_id"); - config = op.attributes().getAttrString("config"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradMomentumParameters.java similarity index 75% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradParametersGradAccumDebug.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradMomentumParameters.java index e373ad25faa..0ee03fd5365 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingAdagradMomentumParameters.java @@ -31,7 +31,7 @@ import org.tensorflow.types.TFloat32; /** - * Load Adagrad embedding parameters with debug support. + * Load Adagrad Momentum embedding parameters. * An op that loads optimization parameters into HBM for embedding. Must be * preceded by a ConfigureTPUEmbeddingHost op that sets up the correct * embedding table configuration. For example, this op is used to install @@ -39,41 +39,41 @@ * executed. */ @OpMetadata( - opType = LoadTPUEmbeddingAdagradParametersGradAccumDebug.OP_NAME, - inputsClass = LoadTPUEmbeddingAdagradParametersGradAccumDebug.Inputs.class + opType = LoadTPUEmbeddingAdagradMomentumParameters.OP_NAME, + inputsClass = LoadTPUEmbeddingAdagradMomentumParameters.Inputs.class ) -public final class LoadTPUEmbeddingAdagradParametersGradAccumDebug extends RawOp { +public final class LoadTPUEmbeddingAdagradMomentumParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "LoadTPUEmbeddingAdagradParametersGradAccumDebug"; + public static final String OP_NAME = "LoadTPUEmbeddingAdagradMomentumParameters"; - public LoadTPUEmbeddingAdagradParametersGradAccumDebug(Operation operation) { + public LoadTPUEmbeddingAdagradMomentumParameters(Operation operation) { super(operation, OP_NAME); } /** - * Factory method to create a class wrapping a new LoadTPUEmbeddingAdagradParametersGradAccumDebug operation. + * Factory method to create a class wrapping a new LoadTPUEmbeddingAdagradMomentumParameters operation. 
* * @param scope current scope - * @param parameters Value of parameters used in the Adagrad optimization algorithm. - * @param accumulators Value of accumulators used in the Adagrad optimization algorithm. - * @param gradientAccumulators Value of gradient_accumulators used in the Adagrad optimization algorithm. + * @param parameters Value of parameters used in the Adagrad Momentum optimization algorithm. + * @param accumulators Value of accumulators used in the Adagrad Momentum optimization algorithm. + * @param momenta Value of momenta used in the Adagrad Momentum optimization algorithm. * @param numShards The value of the numShards attribute * @param shardId The value of the shardId attribute * @param options carries optional attribute values - * @return a new instance of LoadTPUEmbeddingAdagradParametersGradAccumDebug + * @return a new instance of LoadTPUEmbeddingAdagradMomentumParameters */ @Endpoint( describeByClass = true ) - public static LoadTPUEmbeddingAdagradParametersGradAccumDebug create(Scope scope, - Operand parameters, Operand accumulators, - Operand gradientAccumulators, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "LoadTPUEmbeddingAdagradParametersGradAccumDebug"); + public static LoadTPUEmbeddingAdagradMomentumParameters create(Scope scope, + Operand parameters, Operand accumulators, Operand momenta, + Long numShards, Long shardId, Options... options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "LoadTPUEmbeddingAdagradMomentumParameters"); opBuilder.addInput(parameters.asOutput()); opBuilder.addInput(accumulators.asOutput()); - opBuilder.addInput(gradientAccumulators.asOutput()); + opBuilder.addInput(momenta.asOutput()); opBuilder.setAttr("num_shards", numShards); opBuilder.setAttr("shard_id", shardId); if (options != null) { @@ -89,7 +89,7 @@ public static LoadTPUEmbeddingAdagradParametersGradAccumDebug create(Scope scope } } } - return new LoadTPUEmbeddingAdagradParametersGradAccumDebug(opBuilder.build()); + return new LoadTPUEmbeddingAdagradMomentumParameters(opBuilder.build()); } /** @@ -123,7 +123,7 @@ public static Options config(String config) { } /** - * Optional attributes for {@link org.tensorflow.op.tpu.LoadTPUEmbeddingAdagradParametersGradAccumDebug} + * Optional attributes for {@link org.tensorflow.op.tpu.LoadTPUEmbeddingAdagradMomentumParameters} */ public static class Options { private Long tableId; @@ -170,23 +170,23 @@ public Options config(String config) { } @OpInputsMetadata( - outputsClass = LoadTPUEmbeddingAdagradParametersGradAccumDebug.class + outputsClass = LoadTPUEmbeddingAdagradMomentumParameters.class ) - public static class Inputs extends RawOpInputs { + public static class Inputs extends RawOpInputs { /** - * Value of parameters used in the Adagrad optimization algorithm. + * Value of parameters used in the Adagrad Momentum optimization algorithm. */ public final Operand parameters; /** - * Value of accumulators used in the Adagrad optimization algorithm. + * Value of accumulators used in the Adagrad Momentum optimization algorithm. */ public final Operand accumulators; /** - * Value of gradient_accumulators used in the Adagrad optimization algorithm. + * Value of momenta used in the Adagrad Momentum optimization algorithm. 
*/ - public final Operand gradientAccumulators; + public final Operand momenta; /** * The tableId attribute @@ -214,11 +214,11 @@ public static class Inputs extends RawOpInputs) op.input(inputIndex++); accumulators = (Operand) op.input(inputIndex++); - gradientAccumulators = (Operand) op.input(inputIndex++); + momenta = (Operand) op.input(inputIndex++); tableId = op.attributes().getAttrInt("table_id"); tableName = op.attributes().getAttrString("table_name"); numShards = op.attributes().getAttrInt("num_shards"); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFTRLParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFTRLParametersGradAccumDebug.java deleted file mode 100644 index 40369244b28..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFTRLParametersGradAccumDebug.java +++ /dev/null @@ -1,237 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.tpu; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; - -/** - * Load FTRL embedding parameters with debug support. - * An op that loads optimization parameters into HBM for embedding. Must be - * preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - * embedding table configuration. For example, this op is used to install - * parameters that are loaded from a checkpoint before a training loop is - * executed. - */ -@OpMetadata( - opType = LoadTPUEmbeddingFTRLParametersGradAccumDebug.OP_NAME, - inputsClass = LoadTPUEmbeddingFTRLParametersGradAccumDebug.Inputs.class -) -public final class LoadTPUEmbeddingFTRLParametersGradAccumDebug extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "LoadTPUEmbeddingFTRLParametersGradAccumDebug"; - - public LoadTPUEmbeddingFTRLParametersGradAccumDebug(Operation operation) { - super(operation, OP_NAME); - } - - /** - * Factory method to create a class wrapping a new LoadTPUEmbeddingFTRLParametersGradAccumDebug operation. - * - * @param scope current scope - * @param parameters Value of parameters used in the FTRL optimization algorithm. - * @param accumulators Value of accumulators used in the FTRL optimization algorithm. - * @param linears Value of linears used in the FTRL optimization algorithm. 
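
The rename above (LoadTPUEmbeddingAdagradParametersGradAccumDebug to LoadTPUEmbeddingAdagradMomentumParameters) keeps the factory-plus-Options calling pattern shared by every Load* wrapper in this patch. A minimal sketch of that pattern, assuming float32 operands params, accums and momenta already built in the same graph scope tf; the shard count, shard id and table name below are illustrative placeholders, not values from this patch.

// Sketch only: `tf`, `params`, `accums` and `momenta` are assumed to exist already;
// 8 shards, shard id 0 and "table_0" are placeholder values.
LoadTPUEmbeddingAdagradMomentumParameters.create(
    tf.scope(),
    params,    // Operand<TFloat32> parameters
    accums,    // Operand<TFloat32> accumulators
    momenta,   // Operand<TFloat32> momenta
    8L,        // num_shards
    0L,        // shard_id
    LoadTPUEmbeddingAdagradMomentumParameters.tableName("table_0"));
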
- * @param gradientAccumulators Value of gradient_accumulators used in the FTRL optimization algorithm. - * @param numShards The value of the numShards attribute - * @param shardId The value of the shardId attribute - * @param options carries optional attribute values - * @return a new instance of LoadTPUEmbeddingFTRLParametersGradAccumDebug - */ - @Endpoint( - describeByClass = true - ) - public static LoadTPUEmbeddingFTRLParametersGradAccumDebug create(Scope scope, - Operand parameters, Operand accumulators, Operand linears, - Operand gradientAccumulators, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "LoadTPUEmbeddingFTRLParametersGradAccumDebug"); - opBuilder.addInput(parameters.asOutput()); - opBuilder.addInput(accumulators.asOutput()); - opBuilder.addInput(linears.asOutput()); - opBuilder.addInput(gradientAccumulators.asOutput()); - opBuilder.setAttr("num_shards", numShards); - opBuilder.setAttr("shard_id", shardId); - if (options != null) { - for (Options opts : options) { - if (opts.tableId != null) { - opBuilder.setAttr("table_id", opts.tableId); - } - if (opts.tableName != null) { - opBuilder.setAttr("table_name", opts.tableName); - } - if (opts.config != null) { - opBuilder.setAttr("config", opts.config); - } - } - } - return new LoadTPUEmbeddingFTRLParametersGradAccumDebug(opBuilder.build()); - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public static Options tableId(Long tableId) { - return new Options().tableId(tableId); - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public static Options tableName(String tableName) { - return new Options().tableName(tableName); - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public static Options config(String config) { - return new Options().config(config); - } - - /** - * Optional attributes for {@link org.tensorflow.op.tpu.LoadTPUEmbeddingFTRLParametersGradAccumDebug} - */ - public static class Options { - private Long tableId; - - private String tableName; - - private String config; - - private Options() { - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public Options tableId(Long tableId) { - this.tableId = tableId; - return this; - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public Options tableName(String tableName) { - this.tableName = tableName; - return this; - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public Options config(String config) { - this.config = config; - return this; - } - } - - @OpInputsMetadata( - outputsClass = LoadTPUEmbeddingFTRLParametersGradAccumDebug.class - ) - public static class Inputs extends RawOpInputs { - /** - * Value of parameters used in the FTRL optimization algorithm. - */ - public final Operand parameters; - - /** - * Value of accumulators used in the FTRL optimization algorithm. - */ - public final Operand accumulators; - - /** - * Value of linears used in the FTRL optimization algorithm. - */ - public final Operand linears; - - /** - * Value of gradient_accumulators used in the FTRL optimization algorithm. 
- */ - public final Operand gradientAccumulators; - - /** - * The tableId attribute - */ - public final long tableId; - - /** - * The tableName attribute - */ - public final String tableName; - - /** - * The numShards attribute - */ - public final long numShards; - - /** - * The shardId attribute - */ - public final long shardId; - - /** - * The config attribute - */ - public final String config; - - public Inputs(GraphOperation op) { - super(new LoadTPUEmbeddingFTRLParametersGradAccumDebug(op), op, Arrays.asList("table_id", "table_name", "num_shards", "shard_id", "config")); - int inputIndex = 0; - parameters = (Operand) op.input(inputIndex++); - accumulators = (Operand) op.input(inputIndex++); - linears = (Operand) op.input(inputIndex++); - gradientAccumulators = (Operand) op.input(inputIndex++); - tableId = op.attributes().getAttrInt("table_id"); - tableName = op.attributes().getAttrString("table_name"); - numShards = op.attributes().getAttrInt("num_shards"); - shardId = op.attributes().getAttrInt("shard_id"); - config = op.attributes().getAttrString("config"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFrequencyEstimatorParameters.java similarity index 97% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParameters.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFrequencyEstimatorParameters.java index e3a751e5fb0..37897d3c047 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/LoadTPUEmbeddingFrequencyEstimatorParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingFrequencyEstimatorParameters.java @@ -15,7 +15,7 @@ // This class has been generated, DO NOT EDIT! -package org.tensorflow.op.rawops; +package org.tensorflow.op.tpu; import java.util.Arrays; import org.tensorflow.GraphOperation; @@ -121,7 +121,7 @@ public static Options config(String config) { } /** - * Optional attributes for {@link org.tensorflow.op.rawops.LoadTPUEmbeddingFrequencyEstimatorParameters} + * Optional attributes for {@link org.tensorflow.op.tpu.LoadTPUEmbeddingFrequencyEstimatorParameters} */ public static class Options { private Long tableId; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMomentumParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMomentumParametersGradAccumDebug.java deleted file mode 100644 index 56967a766c6..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingMomentumParametersGradAccumDebug.java +++ /dev/null @@ -1,229 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.tpu; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; - -/** - * Load Momentum embedding parameters with debug support. - * An op that loads optimization parameters into HBM for embedding. Must be - * preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - * embedding table configuration. For example, this op is used to install - * parameters that are loaded from a checkpoint before a training loop is - * executed. - */ -@OpMetadata( - opType = LoadTPUEmbeddingMomentumParametersGradAccumDebug.OP_NAME, - inputsClass = LoadTPUEmbeddingMomentumParametersGradAccumDebug.Inputs.class -) -public final class LoadTPUEmbeddingMomentumParametersGradAccumDebug extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "LoadTPUEmbeddingMomentumParametersGradAccumDebug"; - - public LoadTPUEmbeddingMomentumParametersGradAccumDebug(Operation operation) { - super(operation, OP_NAME); - } - - /** - * Factory method to create a class wrapping a new LoadTPUEmbeddingMomentumParametersGradAccumDebug operation. - * - * @param scope current scope - * @param parameters Value of parameters used in the Momentum optimization algorithm. - * @param momenta Value of momenta used in the Momentum optimization algorithm. - * @param gradientAccumulators Value of gradient_accumulators used in the Momentum optimization algorithm. - * @param numShards The value of the numShards attribute - * @param shardId The value of the shardId attribute - * @param options carries optional attribute values - * @return a new instance of LoadTPUEmbeddingMomentumParametersGradAccumDebug - */ - @Endpoint( - describeByClass = true - ) - public static LoadTPUEmbeddingMomentumParametersGradAccumDebug create(Scope scope, - Operand parameters, Operand momenta, - Operand gradientAccumulators, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "LoadTPUEmbeddingMomentumParametersGradAccumDebug"); - opBuilder.addInput(parameters.asOutput()); - opBuilder.addInput(momenta.asOutput()); - opBuilder.addInput(gradientAccumulators.asOutput()); - opBuilder.setAttr("num_shards", numShards); - opBuilder.setAttr("shard_id", shardId); - if (options != null) { - for (Options opts : options) { - if (opts.tableId != null) { - opBuilder.setAttr("table_id", opts.tableId); - } - if (opts.tableName != null) { - opBuilder.setAttr("table_name", opts.tableName); - } - if (opts.config != null) { - opBuilder.setAttr("config", opts.config); - } - } - } - return new LoadTPUEmbeddingMomentumParametersGradAccumDebug(opBuilder.build()); - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public static Options tableId(Long tableId) { - return new Options().tableId(tableId); - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. 
- */ - public static Options tableName(String tableName) { - return new Options().tableName(tableName); - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public static Options config(String config) { - return new Options().config(config); - } - - /** - * Optional attributes for {@link org.tensorflow.op.tpu.LoadTPUEmbeddingMomentumParametersGradAccumDebug} - */ - public static class Options { - private Long tableId; - - private String tableName; - - private String config; - - private Options() { - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public Options tableId(Long tableId) { - this.tableId = tableId; - return this; - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public Options tableName(String tableName) { - this.tableName = tableName; - return this; - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public Options config(String config) { - this.config = config; - return this; - } - } - - @OpInputsMetadata( - outputsClass = LoadTPUEmbeddingMomentumParametersGradAccumDebug.class - ) - public static class Inputs extends RawOpInputs { - /** - * Value of parameters used in the Momentum optimization algorithm. - */ - public final Operand parameters; - - /** - * Value of momenta used in the Momentum optimization algorithm. - */ - public final Operand momenta; - - /** - * Value of gradient_accumulators used in the Momentum optimization algorithm. - */ - public final Operand gradientAccumulators; - - /** - * The tableId attribute - */ - public final long tableId; - - /** - * The tableName attribute - */ - public final String tableName; - - /** - * The numShards attribute - */ - public final long numShards; - - /** - * The shardId attribute - */ - public final long shardId; - - /** - * The config attribute - */ - public final String config; - - public Inputs(GraphOperation op) { - super(new LoadTPUEmbeddingMomentumParametersGradAccumDebug(op), op, Arrays.asList("table_id", "table_name", "num_shards", "shard_id", "config")); - int inputIndex = 0; - parameters = (Operand) op.input(inputIndex++); - momenta = (Operand) op.input(inputIndex++); - gradientAccumulators = (Operand) op.input(inputIndex++); - tableId = op.attributes().getAttrInt("table_id"); - tableName = op.attributes().getAttrString("table_name"); - numShards = op.attributes().getAttrInt("num_shards"); - shardId = op.attributes().getAttrInt("shard_id"); - config = op.attributes().getAttrString("config"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug.java deleted file mode 100644 index 2096552062b..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug.java +++ /dev/null @@ -1,229 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.tpu; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; - -/** - * Load proximal Adagrad embedding parameters with debug support. - * An op that loads optimization parameters into HBM for embedding. Must be - * preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - * embedding table configuration. For example, this op is used to install - * parameters that are loaded from a checkpoint before a training loop is - * executed. - */ -@OpMetadata( - opType = LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug.OP_NAME, - inputsClass = LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug.Inputs.class -) -public final class LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug"; - - public LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug(Operation operation) { - super(operation, OP_NAME); - } - - /** - * Factory method to create a class wrapping a new LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug operation. - * - * @param scope current scope - * @param parameters Value of parameters used in the proximal Adagrad optimization algorithm. - * @param accumulators Value of accumulators used in the proximal Adagrad optimization algorithm. - * @param gradientAccumulators Value of gradient_accumulators used in the proximal Adagrad optimization algorithm. - * @param numShards The value of the numShards attribute - * @param shardId The value of the shardId attribute - * @param options carries optional attribute values - * @return a new instance of LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug - */ - @Endpoint( - describeByClass = true - ) - public static LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug create(Scope scope, - Operand parameters, Operand accumulators, - Operand gradientAccumulators, Long numShards, Long shardId, Options... 
options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug"); - opBuilder.addInput(parameters.asOutput()); - opBuilder.addInput(accumulators.asOutput()); - opBuilder.addInput(gradientAccumulators.asOutput()); - opBuilder.setAttr("num_shards", numShards); - opBuilder.setAttr("shard_id", shardId); - if (options != null) { - for (Options opts : options) { - if (opts.tableId != null) { - opBuilder.setAttr("table_id", opts.tableId); - } - if (opts.tableName != null) { - opBuilder.setAttr("table_name", opts.tableName); - } - if (opts.config != null) { - opBuilder.setAttr("config", opts.config); - } - } - } - return new LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug(opBuilder.build()); - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public static Options tableId(Long tableId) { - return new Options().tableId(tableId); - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public static Options tableName(String tableName) { - return new Options().tableName(tableName); - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public static Options config(String config) { - return new Options().config(config); - } - - /** - * Optional attributes for {@link org.tensorflow.op.tpu.LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug} - */ - public static class Options { - private Long tableId; - - private String tableName; - - private String config; - - private Options() { - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public Options tableId(Long tableId) { - this.tableId = tableId; - return this; - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public Options tableName(String tableName) { - this.tableName = tableName; - return this; - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public Options config(String config) { - this.config = config; - return this; - } - } - - @OpInputsMetadata( - outputsClass = LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug.class - ) - public static class Inputs extends RawOpInputs { - /** - * Value of parameters used in the proximal Adagrad optimization algorithm. - */ - public final Operand parameters; - - /** - * Value of accumulators used in the proximal Adagrad optimization algorithm. - */ - public final Operand accumulators; - - /** - * Value of gradient_accumulators used in the proximal Adagrad optimization algorithm. 
- */ - public final Operand gradientAccumulators; - - /** - * The tableId attribute - */ - public final long tableId; - - /** - * The tableName attribute - */ - public final String tableName; - - /** - * The numShards attribute - */ - public final long numShards; - - /** - * The shardId attribute - */ - public final long shardId; - - /** - * The config attribute - */ - public final String config; - - public Inputs(GraphOperation op) { - super(new LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug(op), op, Arrays.asList("table_id", "table_name", "num_shards", "shard_id", "config")); - int inputIndex = 0; - parameters = (Operand) op.input(inputIndex++); - accumulators = (Operand) op.input(inputIndex++); - gradientAccumulators = (Operand) op.input(inputIndex++); - tableId = op.attributes().getAttrInt("table_id"); - tableName = op.attributes().getAttrString("table_name"); - numShards = op.attributes().getAttrInt("num_shards"); - shardId = op.attributes().getAttrInt("shard_id"); - config = op.attributes().getAttrString("config"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalYogiParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalYogiParametersGradAccumDebug.java deleted file mode 100644 index 94a1f0926ab..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingProximalYogiParametersGradAccumDebug.java +++ /dev/null @@ -1,232 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.tpu; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; - -/** - * The LoadTPUEmbeddingProximalYogiParametersGradAccumDebug operation - */ -@OpMetadata( - opType = LoadTPUEmbeddingProximalYogiParametersGradAccumDebug.OP_NAME, - inputsClass = LoadTPUEmbeddingProximalYogiParametersGradAccumDebug.Inputs.class -) -public final class LoadTPUEmbeddingProximalYogiParametersGradAccumDebug extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "LoadTPUEmbeddingProximalYogiParametersGradAccumDebug"; - - public LoadTPUEmbeddingProximalYogiParametersGradAccumDebug(Operation operation) { - super(operation, OP_NAME); - } - - /** - * Factory method to create a class wrapping a new LoadTPUEmbeddingProximalYogiParametersGradAccumDebug operation. 
- * - * @param scope current scope - * @param parameters The parameters value - * @param v The v value - * @param m The m value - * @param gradientAccumulators The gradientAccumulators value - * @param numShards The value of the numShards attribute - * @param shardId The value of the shardId attribute - * @param options carries optional attribute values - * @return a new instance of LoadTPUEmbeddingProximalYogiParametersGradAccumDebug - */ - @Endpoint( - describeByClass = true - ) - public static LoadTPUEmbeddingProximalYogiParametersGradAccumDebug create(Scope scope, - Operand parameters, Operand v, Operand m, - Operand gradientAccumulators, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "LoadTPUEmbeddingProximalYogiParametersGradAccumDebug"); - opBuilder.addInput(parameters.asOutput()); - opBuilder.addInput(v.asOutput()); - opBuilder.addInput(m.asOutput()); - opBuilder.addInput(gradientAccumulators.asOutput()); - opBuilder.setAttr("num_shards", numShards); - opBuilder.setAttr("shard_id", shardId); - if (options != null) { - for (Options opts : options) { - if (opts.tableId != null) { - opBuilder.setAttr("table_id", opts.tableId); - } - if (opts.tableName != null) { - opBuilder.setAttr("table_name", opts.tableName); - } - if (opts.config != null) { - opBuilder.setAttr("config", opts.config); - } - } - } - return new LoadTPUEmbeddingProximalYogiParametersGradAccumDebug(opBuilder.build()); - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public static Options tableId(Long tableId) { - return new Options().tableId(tableId); - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public static Options tableName(String tableName) { - return new Options().tableName(tableName); - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public static Options config(String config) { - return new Options().config(config); - } - - /** - * Optional attributes for {@link org.tensorflow.op.tpu.LoadTPUEmbeddingProximalYogiParametersGradAccumDebug} - */ - public static class Options { - private Long tableId; - - private String tableName; - - private String config; - - private Options() { - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public Options tableId(Long tableId) { - this.tableId = tableId; - return this; - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public Options tableName(String tableName) { - this.tableName = tableName; - return this; - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. 
- */ - public Options config(String config) { - this.config = config; - return this; - } - } - - @OpInputsMetadata( - outputsClass = LoadTPUEmbeddingProximalYogiParametersGradAccumDebug.class - ) - public static class Inputs extends RawOpInputs { - /** - * The parameters input - */ - public final Operand parameters; - - /** - * The v input - */ - public final Operand v; - - /** - * The m input - */ - public final Operand m; - - /** - * The gradientAccumulators input - */ - public final Operand gradientAccumulators; - - /** - * The tableId attribute - */ - public final long tableId; - - /** - * The tableName attribute - */ - public final String tableName; - - /** - * The numShards attribute - */ - public final long numShards; - - /** - * The shardId attribute - */ - public final long shardId; - - /** - * The config attribute - */ - public final String config; - - public Inputs(GraphOperation op) { - super(new LoadTPUEmbeddingProximalYogiParametersGradAccumDebug(op), op, Arrays.asList("table_id", "table_name", "num_shards", "shard_id", "config")); - int inputIndex = 0; - parameters = (Operand) op.input(inputIndex++); - v = (Operand) op.input(inputIndex++); - m = (Operand) op.input(inputIndex++); - gradientAccumulators = (Operand) op.input(inputIndex++); - tableId = op.attributes().getAttrInt("table_id"); - tableName = op.attributes().getAttrString("table_name"); - numShards = op.attributes().getAttrInt("num_shards"); - shardId = op.attributes().getAttrInt("shard_id"); - config = op.attributes().getAttrString("config"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingRMSPropParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingRMSPropParametersGradAccumDebug.java deleted file mode 100644 index c6d8c3a4800..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingRMSPropParametersGradAccumDebug.java +++ /dev/null @@ -1,237 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.tpu; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; - -/** - * Load RMSProp embedding parameters with debug support. - * An op that loads optimization parameters into HBM for embedding. Must be - * preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - * embedding table configuration. 
For example, this op is used to install - * parameters that are loaded from a checkpoint before a training loop is - * executed. - */ -@OpMetadata( - opType = LoadTPUEmbeddingRMSPropParametersGradAccumDebug.OP_NAME, - inputsClass = LoadTPUEmbeddingRMSPropParametersGradAccumDebug.Inputs.class -) -public final class LoadTPUEmbeddingRMSPropParametersGradAccumDebug extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "LoadTPUEmbeddingRMSPropParametersGradAccumDebug"; - - public LoadTPUEmbeddingRMSPropParametersGradAccumDebug(Operation operation) { - super(operation, OP_NAME); - } - - /** - * Factory method to create a class wrapping a new LoadTPUEmbeddingRMSPropParametersGradAccumDebug operation. - * - * @param scope current scope - * @param parameters Value of parameters used in the RMSProp optimization algorithm. - * @param ms Value of ms used in the RMSProp optimization algorithm. - * @param mom Value of mom used in the RMSProp optimization algorithm. - * @param gradientAccumulators Value of gradient_accumulators used in the RMSProp optimization algorithm. - * @param numShards The value of the numShards attribute - * @param shardId The value of the shardId attribute - * @param options carries optional attribute values - * @return a new instance of LoadTPUEmbeddingRMSPropParametersGradAccumDebug - */ - @Endpoint( - describeByClass = true - ) - public static LoadTPUEmbeddingRMSPropParametersGradAccumDebug create(Scope scope, - Operand parameters, Operand ms, Operand mom, - Operand gradientAccumulators, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "LoadTPUEmbeddingRMSPropParametersGradAccumDebug"); - opBuilder.addInput(parameters.asOutput()); - opBuilder.addInput(ms.asOutput()); - opBuilder.addInput(mom.asOutput()); - opBuilder.addInput(gradientAccumulators.asOutput()); - opBuilder.setAttr("num_shards", numShards); - opBuilder.setAttr("shard_id", shardId); - if (options != null) { - for (Options opts : options) { - if (opts.tableId != null) { - opBuilder.setAttr("table_id", opts.tableId); - } - if (opts.tableName != null) { - opBuilder.setAttr("table_name", opts.tableName); - } - if (opts.config != null) { - opBuilder.setAttr("config", opts.config); - } - } - } - return new LoadTPUEmbeddingRMSPropParametersGradAccumDebug(opBuilder.build()); - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public static Options tableId(Long tableId) { - return new Options().tableId(tableId); - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public static Options tableName(String tableName) { - return new Options().tableName(tableName); - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public static Options config(String config) { - return new Options().config(config); - } - - /** - * Optional attributes for {@link org.tensorflow.op.tpu.LoadTPUEmbeddingRMSPropParametersGradAccumDebug} - */ - public static class Options { - private Long tableId; - - private String tableName; - - private String config; - - private Options() { - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. 
- */ - public Options tableId(Long tableId) { - this.tableId = tableId; - return this; - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public Options tableName(String tableName) { - this.tableName = tableName; - return this; - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public Options config(String config) { - this.config = config; - return this; - } - } - - @OpInputsMetadata( - outputsClass = LoadTPUEmbeddingRMSPropParametersGradAccumDebug.class - ) - public static class Inputs extends RawOpInputs { - /** - * Value of parameters used in the RMSProp optimization algorithm. - */ - public final Operand parameters; - - /** - * Value of ms used in the RMSProp optimization algorithm. - */ - public final Operand ms; - - /** - * Value of mom used in the RMSProp optimization algorithm. - */ - public final Operand mom; - - /** - * Value of gradient_accumulators used in the RMSProp optimization algorithm. - */ - public final Operand gradientAccumulators; - - /** - * The tableId attribute - */ - public final long tableId; - - /** - * The tableName attribute - */ - public final String tableName; - - /** - * The numShards attribute - */ - public final long numShards; - - /** - * The shardId attribute - */ - public final long shardId; - - /** - * The config attribute - */ - public final String config; - - public Inputs(GraphOperation op) { - super(new LoadTPUEmbeddingRMSPropParametersGradAccumDebug(op), op, Arrays.asList("table_id", "table_name", "num_shards", "shard_id", "config")); - int inputIndex = 0; - parameters = (Operand) op.input(inputIndex++); - ms = (Operand) op.input(inputIndex++); - mom = (Operand) op.input(inputIndex++); - gradientAccumulators = (Operand) op.input(inputIndex++); - tableId = op.attributes().getAttrInt("table_id"); - tableName = op.attributes().getAttrString("table_name"); - numShards = op.attributes().getAttrInt("num_shards"); - shardId = op.attributes().getAttrInt("shard_id"); - config = op.attributes().getAttrString("config"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.java deleted file mode 100644 index 01307c9da5e..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.java +++ /dev/null @@ -1,221 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.tpu; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operand; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; - -/** - * Load SGD embedding parameters. - * An op that loads optimization parameters into HBM for embedding. Must be - * preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - * embedding table configuration. For example, this op is used to install - * parameters that are loaded from a checkpoint before a training loop is - * executed. - */ -@OpMetadata( - opType = LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.OP_NAME, - inputsClass = LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.Inputs.class -) -public final class LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug"; - - public LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug(Operation operation) { - super(operation, OP_NAME); - } - - /** - * Factory method to create a class wrapping a new LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug operation. - * - * @param scope current scope - * @param parameters Value of parameters used in the stochastic gradient descent optimization algorithm. - * @param gradientAccumulators Value of gradient_accumulators used in the Adadelta optimization algorithm. - * @param numShards The value of the numShards attribute - * @param shardId The value of the shardId attribute - * @param options carries optional attribute values - * @return a new instance of LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug - */ - @Endpoint( - describeByClass = true - ) - public static LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug create( - Scope scope, Operand parameters, Operand gradientAccumulators, - Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug"); - opBuilder.addInput(parameters.asOutput()); - opBuilder.addInput(gradientAccumulators.asOutput()); - opBuilder.setAttr("num_shards", numShards); - opBuilder.setAttr("shard_id", shardId); - if (options != null) { - for (Options opts : options) { - if (opts.tableId != null) { - opBuilder.setAttr("table_id", opts.tableId); - } - if (opts.tableName != null) { - opBuilder.setAttr("table_name", opts.tableName); - } - if (opts.config != null) { - opBuilder.setAttr("config", opts.config); - } - } - } - return new LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug(opBuilder.build()); - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public static Options tableId(Long tableId) { - return new Options().tableId(tableId); - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. 
- */ - public static Options tableName(String tableName) { - return new Options().tableName(tableName); - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public static Options config(String config) { - return new Options().config(config); - } - - /** - * Optional attributes for {@link org.tensorflow.op.tpu.LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug} - */ - public static class Options { - private Long tableId; - - private String tableName; - - private String config; - - private Options() { - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public Options tableId(Long tableId) { - this.tableId = tableId; - return this; - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public Options tableName(String tableName) { - this.tableName = tableName; - return this; - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public Options config(String config) { - this.config = config; - return this; - } - } - - @OpInputsMetadata( - outputsClass = LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.class - ) - public static class Inputs extends RawOpInputs { - /** - * Value of parameters used in the stochastic gradient descent optimization algorithm. - */ - public final Operand parameters; - - /** - * Value of gradient_accumulators used in the Adadelta optimization algorithm. - */ - public final Operand gradientAccumulators; - - /** - * The tableId attribute - */ - public final long tableId; - - /** - * The tableName attribute - */ - public final String tableName; - - /** - * The numShards attribute - */ - public final long numShards; - - /** - * The shardId attribute - */ - public final long shardId; - - /** - * The config attribute - */ - public final String config; - - public Inputs(GraphOperation op) { - super(new LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug(op), op, Arrays.asList("table_id", "table_name", "num_shards", "shard_id", "config")); - int inputIndex = 0; - parameters = (Operand) op.input(inputIndex++); - gradientAccumulators = (Operand) op.input(inputIndex++); - tableId = op.attributes().getAttrInt("table_id"); - tableName = op.attributes().getAttrString("table_name"); - numShards = op.attributes().getAttrInt("num_shards"); - shardId = op.attributes().getAttrInt("shard_id"); - config = op.attributes().getAttrString("config"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingADAMParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingADAMParametersGradAccumDebug.java deleted file mode 100644 index e598b5ec412..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingADAMParametersGradAccumDebug.java +++ /dev/null @@ -1,252 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.tpu; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; - -/** - * Retrieve ADAM embedding parameters with debug support. - * An op that retrieves optimization parameters from embedding to host - * memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up - * the correct embedding table configuration. For example, this op is - * used to retrieve updated parameters before saving a checkpoint. - */ -@OpMetadata( - opType = RetrieveTPUEmbeddingADAMParametersGradAccumDebug.OP_NAME, - inputsClass = RetrieveTPUEmbeddingADAMParametersGradAccumDebug.Inputs.class -) -public final class RetrieveTPUEmbeddingADAMParametersGradAccumDebug extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RetrieveTPUEmbeddingADAMParametersGradAccumDebug"; - - private Output parameters; - - private Output momenta; - - private Output velocities; - - private Output gradientAccumulators; - - public RetrieveTPUEmbeddingADAMParametersGradAccumDebug(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - parameters = operation.output(outputIdx++); - momenta = operation.output(outputIdx++); - velocities = operation.output(outputIdx++); - gradientAccumulators = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RetrieveTPUEmbeddingADAMParametersGradAccumDebug operation. - * - * @param scope current scope - * @param numShards The value of the numShards attribute - * @param shardId The value of the shardId attribute - * @param options carries optional attribute values - * @return a new instance of RetrieveTPUEmbeddingADAMParametersGradAccumDebug - */ - @Endpoint( - describeByClass = true - ) - public static RetrieveTPUEmbeddingADAMParametersGradAccumDebug create(Scope scope, Long numShards, - Long shardId, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RetrieveTPUEmbeddingADAMParametersGradAccumDebug"); - opBuilder.setAttr("num_shards", numShards); - opBuilder.setAttr("shard_id", shardId); - if (options != null) { - for (Options opts : options) { - if (opts.tableId != null) { - opBuilder.setAttr("table_id", opts.tableId); - } - if (opts.tableName != null) { - opBuilder.setAttr("table_name", opts.tableName); - } - if (opts.config != null) { - opBuilder.setAttr("config", opts.config); - } - } - } - return new RetrieveTPUEmbeddingADAMParametersGradAccumDebug(opBuilder.build()); - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. 
- */ - public static Options tableId(Long tableId) { - return new Options().tableId(tableId); - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public static Options tableName(String tableName) { - return new Options().tableName(tableName); - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public static Options config(String config) { - return new Options().config(config); - } - - /** - * Gets parameters. - * Parameter parameters updated by the ADAM optimization algorithm. - * @return parameters. - */ - public Output parameters() { - return parameters; - } - - /** - * Gets momenta. - * Parameter momenta updated by the ADAM optimization algorithm. - * @return momenta. - */ - public Output momenta() { - return momenta; - } - - /** - * Gets velocities. - * Parameter velocities updated by the ADAM optimization algorithm. - * @return velocities. - */ - public Output velocities() { - return velocities; - } - - /** - * Gets gradientAccumulators. - * Parameter gradient_accumulators updated by the ADAM optimization algorithm. - * @return gradientAccumulators. - */ - public Output gradientAccumulators() { - return gradientAccumulators; - } - - /** - * Optional attributes for {@link org.tensorflow.op.tpu.RetrieveTPUEmbeddingADAMParametersGradAccumDebug} - */ - public static class Options { - private Long tableId; - - private String tableName; - - private String config; - - private Options() { - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public Options tableId(Long tableId) { - this.tableId = tableId; - return this; - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public Options tableName(String tableName) { - this.tableName = tableName; - return this; - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. 
- */ - public Options config(String config) { - this.config = config; - return this; - } - } - - @OpInputsMetadata( - outputsClass = RetrieveTPUEmbeddingADAMParametersGradAccumDebug.class - ) - public static class Inputs extends RawOpInputs { - /** - * The tableId attribute - */ - public final long tableId; - - /** - * The tableName attribute - */ - public final String tableName; - - /** - * The numShards attribute - */ - public final long numShards; - - /** - * The shardId attribute - */ - public final long shardId; - - /** - * The config attribute - */ - public final String config; - - public Inputs(GraphOperation op) { - super(new RetrieveTPUEmbeddingADAMParametersGradAccumDebug(op), op, Arrays.asList("table_id", "table_name", "num_shards", "shard_id", "config")); - int inputIndex = 0; - tableId = op.attributes().getAttrInt("table_id"); - tableName = op.attributes().getAttrString("table_name"); - numShards = op.attributes().getAttrInt("num_shards"); - shardId = op.attributes().getAttrInt("shard_id"); - config = op.attributes().getAttrString("config"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.java deleted file mode 100644 index 054fdbfc2cf..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.java +++ /dev/null @@ -1,252 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.tpu; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; - -/** - * Retrieve Adadelta embedding parameters with debug support. - * An op that retrieves optimization parameters from embedding to host - * memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up - * the correct embedding table configuration. For example, this op is - * used to retrieve updated parameters before saving a checkpoint. 
- */ -@OpMetadata( - opType = RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.OP_NAME, - inputsClass = RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.Inputs.class -) -public final class RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug"; - - private Output parameters; - - private Output accumulators; - - private Output updates; - - private Output gradientAccumulators; - - public RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - parameters = operation.output(outputIdx++); - accumulators = operation.output(outputIdx++); - updates = operation.output(outputIdx++); - gradientAccumulators = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug operation. - * - * @param scope current scope - * @param numShards The value of the numShards attribute - * @param shardId The value of the shardId attribute - * @param options carries optional attribute values - * @return a new instance of RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug - */ - @Endpoint( - describeByClass = true - ) - public static RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug create(Scope scope, - Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug"); - opBuilder.setAttr("num_shards", numShards); - opBuilder.setAttr("shard_id", shardId); - if (options != null) { - for (Options opts : options) { - if (opts.tableId != null) { - opBuilder.setAttr("table_id", opts.tableId); - } - if (opts.tableName != null) { - opBuilder.setAttr("table_name", opts.tableName); - } - if (opts.config != null) { - opBuilder.setAttr("config", opts.config); - } - } - } - return new RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug(opBuilder.build()); - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public static Options tableId(Long tableId) { - return new Options().tableId(tableId); - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public static Options tableName(String tableName) { - return new Options().tableName(tableName); - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public static Options config(String config) { - return new Options().config(config); - } - - /** - * Gets parameters. - * Parameter parameters updated by the Adadelta optimization algorithm. - * @return parameters. - */ - public Output parameters() { - return parameters; - } - - /** - * Gets accumulators. - * Parameter accumulators updated by the Adadelta optimization algorithm. - * @return accumulators. - */ - public Output accumulators() { - return accumulators; - } - - /** - * Gets updates. - * Parameter updates updated by the Adadelta optimization algorithm. - * @return updates. - */ - public Output updates() { - return updates; - } - - /** - * Gets gradientAccumulators. - * Parameter gradient_accumulators updated by the Adadelta optimization algorithm. - * @return gradientAccumulators. 
- */ - public Output gradientAccumulators() { - return gradientAccumulators; - } - - /** - * Optional attributes for {@link org.tensorflow.op.tpu.RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug} - */ - public static class Options { - private Long tableId; - - private String tableName; - - private String config; - - private Options() { - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public Options tableId(Long tableId) { - this.tableId = tableId; - return this; - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public Options tableName(String tableName) { - this.tableName = tableName; - return this; - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public Options config(String config) { - this.config = config; - return this; - } - } - - @OpInputsMetadata( - outputsClass = RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.class - ) - public static class Inputs extends RawOpInputs { - /** - * The tableId attribute - */ - public final long tableId; - - /** - * The tableName attribute - */ - public final String tableName; - - /** - * The numShards attribute - */ - public final long numShards; - - /** - * The shardId attribute - */ - public final long shardId; - - /** - * The config attribute - */ - public final String config; - - public Inputs(GraphOperation op) { - super(new RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug(op), op, Arrays.asList("table_id", "table_name", "num_shards", "shard_id", "config")); - int inputIndex = 0; - tableId = op.attributes().getAttrInt("table_id"); - tableName = op.attributes().getAttrString("table_name"); - numShards = op.attributes().getAttrInt("num_shards"); - shardId = op.attributes().getAttrInt("shard_id"); - config = op.attributes().getAttrString("config"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradMomentumParameters.java similarity index 77% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradParametersGradAccumDebug.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradMomentumParameters.java index cf2c37361ec..c8f9664bc6a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradParametersGradAccumDebug.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingAdagradMomentumParameters.java @@ -31,51 +31,51 @@ import org.tensorflow.types.TFloat32; /** - * Retrieve Adagrad embedding parameters with debug support. + * Retrieve Adagrad Momentum embedding parameters. * An op that retrieves optimization parameters from embedding to host * memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up * the correct embedding table configuration. For example, this op is * used to retrieve updated parameters before saving a checkpoint. 
*/ @OpMetadata( - opType = RetrieveTPUEmbeddingAdagradParametersGradAccumDebug.OP_NAME, - inputsClass = RetrieveTPUEmbeddingAdagradParametersGradAccumDebug.Inputs.class + opType = RetrieveTPUEmbeddingAdagradMomentumParameters.OP_NAME, + inputsClass = RetrieveTPUEmbeddingAdagradMomentumParameters.Inputs.class ) -public final class RetrieveTPUEmbeddingAdagradParametersGradAccumDebug extends RawOp { +public final class RetrieveTPUEmbeddingAdagradMomentumParameters extends RawOp { /** * The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "RetrieveTPUEmbeddingAdagradParametersGradAccumDebug"; + public static final String OP_NAME = "RetrieveTPUEmbeddingAdagradMomentumParameters"; private Output parameters; private Output accumulators; - private Output gradientAccumulators; + private Output momenta; - public RetrieveTPUEmbeddingAdagradParametersGradAccumDebug(Operation operation) { + public RetrieveTPUEmbeddingAdagradMomentumParameters(Operation operation) { super(operation, OP_NAME); int outputIdx = 0; parameters = operation.output(outputIdx++); accumulators = operation.output(outputIdx++); - gradientAccumulators = operation.output(outputIdx++); + momenta = operation.output(outputIdx++); } /** - * Factory method to create a class wrapping a new RetrieveTPUEmbeddingAdagradParametersGradAccumDebug operation. + * Factory method to create a class wrapping a new RetrieveTPUEmbeddingAdagradMomentumParameters operation. * * @param scope current scope * @param numShards The value of the numShards attribute * @param shardId The value of the shardId attribute * @param options carries optional attribute values - * @return a new instance of RetrieveTPUEmbeddingAdagradParametersGradAccumDebug + * @return a new instance of RetrieveTPUEmbeddingAdagradMomentumParameters */ @Endpoint( describeByClass = true ) - public static RetrieveTPUEmbeddingAdagradParametersGradAccumDebug create(Scope scope, - Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RetrieveTPUEmbeddingAdagradParametersGradAccumDebug"); + public static RetrieveTPUEmbeddingAdagradMomentumParameters create(Scope scope, Long numShards, + Long shardId, Options... options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RetrieveTPUEmbeddingAdagradMomentumParameters"); opBuilder.setAttr("num_shards", numShards); opBuilder.setAttr("shard_id", shardId); if (options != null) { @@ -91,7 +91,7 @@ public static RetrieveTPUEmbeddingAdagradParametersGradAccumDebug create(Scope s } } } - return new RetrieveTPUEmbeddingAdagradParametersGradAccumDebug(opBuilder.build()); + return new RetrieveTPUEmbeddingAdagradMomentumParameters(opBuilder.build()); } /** @@ -126,7 +126,7 @@ public static Options config(String config) { /** * Gets parameters. - * Parameter parameters updated by the Adagrad optimization algorithm. + * Parameter parameters updated by the Adagrad Momentum optimization algorithm. * @return parameters. */ public Output parameters() { @@ -135,7 +135,7 @@ public Output parameters() { /** * Gets accumulators. - * Parameter accumulators updated by the Adagrad optimization algorithm. + * Parameter accumulators updated by the Adagrad Momentum optimization algorithm. * @return accumulators. */ public Output accumulators() { @@ -143,16 +143,16 @@ public Output accumulators() { } /** - * Gets gradientAccumulators. - * Parameter gradient_accumulators updated by the Adagrad optimization algorithm. - * @return gradientAccumulators. + * Gets momenta. 
+ * Parameter momenta updated by the Adagrad Momentum optimization algorithm. + * @return momenta. */ - public Output gradientAccumulators() { - return gradientAccumulators; + public Output momenta() { + return momenta; } /** - * Optional attributes for {@link org.tensorflow.op.tpu.RetrieveTPUEmbeddingAdagradParametersGradAccumDebug} + * Optional attributes for {@link org.tensorflow.op.tpu.RetrieveTPUEmbeddingAdagradMomentumParameters} */ public static class Options { private Long tableId; @@ -199,9 +199,9 @@ public Options config(String config) { } @OpInputsMetadata( - outputsClass = RetrieveTPUEmbeddingAdagradParametersGradAccumDebug.class + outputsClass = RetrieveTPUEmbeddingAdagradMomentumParameters.class ) - public static class Inputs extends RawOpInputs { + public static class Inputs extends RawOpInputs { /** * The tableId attribute */ @@ -228,7 +228,7 @@ public static class Inputs extends RawOpInputs parameters; - - private Output accumulators; - - private Output linears; - - private Output gradientAccumulators; - - public RetrieveTPUEmbeddingFTRLParametersGradAccumDebug(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - parameters = operation.output(outputIdx++); - accumulators = operation.output(outputIdx++); - linears = operation.output(outputIdx++); - gradientAccumulators = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RetrieveTPUEmbeddingFTRLParametersGradAccumDebug operation. - * - * @param scope current scope - * @param numShards The value of the numShards attribute - * @param shardId The value of the shardId attribute - * @param options carries optional attribute values - * @return a new instance of RetrieveTPUEmbeddingFTRLParametersGradAccumDebug - */ - @Endpoint( - describeByClass = true - ) - public static RetrieveTPUEmbeddingFTRLParametersGradAccumDebug create(Scope scope, Long numShards, - Long shardId, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RetrieveTPUEmbeddingFTRLParametersGradAccumDebug"); - opBuilder.setAttr("num_shards", numShards); - opBuilder.setAttr("shard_id", shardId); - if (options != null) { - for (Options opts : options) { - if (opts.tableId != null) { - opBuilder.setAttr("table_id", opts.tableId); - } - if (opts.tableName != null) { - opBuilder.setAttr("table_name", opts.tableName); - } - if (opts.config != null) { - opBuilder.setAttr("config", opts.config); - } - } - } - return new RetrieveTPUEmbeddingFTRLParametersGradAccumDebug(opBuilder.build()); - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public static Options tableId(Long tableId) { - return new Options().tableId(tableId); - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public static Options tableName(String tableName) { - return new Options().tableName(tableName); - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public static Options config(String config) { - return new Options().config(config); - } - - /** - * Gets parameters. - * Parameter parameters updated by the FTRL optimization algorithm. - * @return parameters. - */ - public Output parameters() { - return parameters; - } - - /** - * Gets accumulators. - * Parameter accumulators updated by the FTRL optimization algorithm. - * @return accumulators. 
- */ - public Output accumulators() { - return accumulators; - } - - /** - * Gets linears. - * Parameter linears updated by the FTRL optimization algorithm. - * @return linears. - */ - public Output linears() { - return linears; - } - - /** - * Gets gradientAccumulators. - * Parameter gradient_accumulators updated by the FTRL optimization algorithm. - * @return gradientAccumulators. - */ - public Output gradientAccumulators() { - return gradientAccumulators; - } - - /** - * Optional attributes for {@link org.tensorflow.op.tpu.RetrieveTPUEmbeddingFTRLParametersGradAccumDebug} - */ - public static class Options { - private Long tableId; - - private String tableName; - - private String config; - - private Options() { - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public Options tableId(Long tableId) { - this.tableId = tableId; - return this; - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public Options tableName(String tableName) { - this.tableName = tableName; - return this; - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public Options config(String config) { - this.config = config; - return this; - } - } - - @OpInputsMetadata( - outputsClass = RetrieveTPUEmbeddingFTRLParametersGradAccumDebug.class - ) - public static class Inputs extends RawOpInputs { - /** - * The tableId attribute - */ - public final long tableId; - - /** - * The tableName attribute - */ - public final String tableName; - - /** - * The numShards attribute - */ - public final long numShards; - - /** - * The shardId attribute - */ - public final long shardId; - - /** - * The config attribute - */ - public final String config; - - public Inputs(GraphOperation op) { - super(new RetrieveTPUEmbeddingFTRLParametersGradAccumDebug(op), op, Arrays.asList("table_id", "table_name", "num_shards", "shard_id", "config")); - int inputIndex = 0; - tableId = op.attributes().getAttrInt("table_id"); - tableName = op.attributes().getAttrString("table_name"); - numShards = op.attributes().getAttrInt("num_shards"); - shardId = op.attributes().getAttrInt("shard_id"); - config = op.attributes().getAttrString("config"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParameters.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFrequencyEstimatorParameters.java similarity index 97% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParameters.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFrequencyEstimatorParameters.java index e50431dc817..9a32a72afbc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/rawops/RetrieveTPUEmbeddingFrequencyEstimatorParameters.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingFrequencyEstimatorParameters.java @@ -15,7 +15,7 @@ // This class has been generated, DO NOT EDIT! 
-package org.tensorflow.op.rawops; +package org.tensorflow.op.tpu; import java.util.Arrays; import org.tensorflow.GraphOperation; @@ -141,7 +141,7 @@ public Output lastHitStep() { } /** - * Optional attributes for {@link org.tensorflow.op.rawops.RetrieveTPUEmbeddingFrequencyEstimatorParameters} + * Optional attributes for {@link org.tensorflow.op.tpu.RetrieveTPUEmbeddingFrequencyEstimatorParameters} */ public static class Options { private Long tableId; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMomentumParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMomentumParametersGradAccumDebug.java deleted file mode 100644 index aa51de4d404..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingMomentumParametersGradAccumDebug.java +++ /dev/null @@ -1,240 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.tpu; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; - -/** - * Retrieve Momentum embedding parameters with debug support. - * An op that retrieves optimization parameters from embedding to host - * memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up - * the correct embedding table configuration. For example, this op is - * used to retrieve updated parameters before saving a checkpoint. - */ -@OpMetadata( - opType = RetrieveTPUEmbeddingMomentumParametersGradAccumDebug.OP_NAME, - inputsClass = RetrieveTPUEmbeddingMomentumParametersGradAccumDebug.Inputs.class -) -public final class RetrieveTPUEmbeddingMomentumParametersGradAccumDebug extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RetrieveTPUEmbeddingMomentumParametersGradAccumDebug"; - - private Output parameters; - - private Output momenta; - - private Output gradientAccumulators; - - public RetrieveTPUEmbeddingMomentumParametersGradAccumDebug(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - parameters = operation.output(outputIdx++); - momenta = operation.output(outputIdx++); - gradientAccumulators = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RetrieveTPUEmbeddingMomentumParametersGradAccumDebug operation. 
- * - * @param scope current scope - * @param numShards The value of the numShards attribute - * @param shardId The value of the shardId attribute - * @param options carries optional attribute values - * @return a new instance of RetrieveTPUEmbeddingMomentumParametersGradAccumDebug - */ - @Endpoint( - describeByClass = true - ) - public static RetrieveTPUEmbeddingMomentumParametersGradAccumDebug create(Scope scope, - Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RetrieveTPUEmbeddingMomentumParametersGradAccumDebug"); - opBuilder.setAttr("num_shards", numShards); - opBuilder.setAttr("shard_id", shardId); - if (options != null) { - for (Options opts : options) { - if (opts.tableId != null) { - opBuilder.setAttr("table_id", opts.tableId); - } - if (opts.tableName != null) { - opBuilder.setAttr("table_name", opts.tableName); - } - if (opts.config != null) { - opBuilder.setAttr("config", opts.config); - } - } - } - return new RetrieveTPUEmbeddingMomentumParametersGradAccumDebug(opBuilder.build()); - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public static Options tableId(Long tableId) { - return new Options().tableId(tableId); - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public static Options tableName(String tableName) { - return new Options().tableName(tableName); - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public static Options config(String config) { - return new Options().config(config); - } - - /** - * Gets parameters. - * Parameter parameters updated by the Momentum optimization algorithm. - * @return parameters. - */ - public Output parameters() { - return parameters; - } - - /** - * Gets momenta. - * Parameter momenta updated by the Momentum optimization algorithm. - * @return momenta. - */ - public Output momenta() { - return momenta; - } - - /** - * Gets gradientAccumulators. - * Parameter gradient_accumulators updated by the Momentum optimization algorithm. - * @return gradientAccumulators. - */ - public Output gradientAccumulators() { - return gradientAccumulators; - } - - /** - * Optional attributes for {@link org.tensorflow.op.tpu.RetrieveTPUEmbeddingMomentumParametersGradAccumDebug} - */ - public static class Options { - private Long tableId; - - private String tableName; - - private String config; - - private Options() { - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public Options tableId(Long tableId) { - this.tableId = tableId; - return this; - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public Options tableName(String tableName) { - this.tableName = tableName; - return this; - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. 
- */ - public Options config(String config) { - this.config = config; - return this; - } - } - - @OpInputsMetadata( - outputsClass = RetrieveTPUEmbeddingMomentumParametersGradAccumDebug.class - ) - public static class Inputs extends RawOpInputs { - /** - * The tableId attribute - */ - public final long tableId; - - /** - * The tableName attribute - */ - public final String tableName; - - /** - * The numShards attribute - */ - public final long numShards; - - /** - * The shardId attribute - */ - public final long shardId; - - /** - * The config attribute - */ - public final String config; - - public Inputs(GraphOperation op) { - super(new RetrieveTPUEmbeddingMomentumParametersGradAccumDebug(op), op, Arrays.asList("table_id", "table_name", "num_shards", "shard_id", "config")); - int inputIndex = 0; - tableId = op.attributes().getAttrInt("table_id"); - tableName = op.attributes().getAttrString("table_name"); - numShards = op.attributes().getAttrInt("num_shards"); - shardId = op.attributes().getAttrInt("shard_id"); - config = op.attributes().getAttrString("config"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug.java deleted file mode 100644 index 3565cbce042..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug.java +++ /dev/null @@ -1,240 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.tpu; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; - -/** - * Retrieve proximal Adagrad embedding parameters with debug support. - * An op that retrieves optimization parameters from embedding to host - * memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up - * the correct embedding table configuration. For example, this op is - * used to retrieve updated parameters before saving a checkpoint. 
- */ -@OpMetadata( - opType = RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug.OP_NAME, - inputsClass = RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug.Inputs.class -) -public final class RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug"; - - private Output parameters; - - private Output accumulators; - - private Output gradientAccumulators; - - public RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - parameters = operation.output(outputIdx++); - accumulators = operation.output(outputIdx++); - gradientAccumulators = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug operation. - * - * @param scope current scope - * @param numShards The value of the numShards attribute - * @param shardId The value of the shardId attribute - * @param options carries optional attribute values - * @return a new instance of RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug - */ - @Endpoint( - describeByClass = true - ) - public static RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug create(Scope scope, - Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug"); - opBuilder.setAttr("num_shards", numShards); - opBuilder.setAttr("shard_id", shardId); - if (options != null) { - for (Options opts : options) { - if (opts.tableId != null) { - opBuilder.setAttr("table_id", opts.tableId); - } - if (opts.tableName != null) { - opBuilder.setAttr("table_name", opts.tableName); - } - if (opts.config != null) { - opBuilder.setAttr("config", opts.config); - } - } - } - return new RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug(opBuilder.build()); - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public static Options tableId(Long tableId) { - return new Options().tableId(tableId); - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public static Options tableName(String tableName) { - return new Options().tableName(tableName); - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public static Options config(String config) { - return new Options().config(config); - } - - /** - * Gets parameters. - * Parameter parameters updated by the proximal Adagrad optimization algorithm. - * @return parameters. - */ - public Output parameters() { - return parameters; - } - - /** - * Gets accumulators. - * Parameter accumulators updated by the proximal Adagrad optimization algorithm. - * @return accumulators. - */ - public Output accumulators() { - return accumulators; - } - - /** - * Gets gradientAccumulators. - * Parameter gradient_accumulators updated by the proximal Adagrad optimization algorithm. - * @return gradientAccumulators. 
- */ - public Output gradientAccumulators() { - return gradientAccumulators; - } - - /** - * Optional attributes for {@link org.tensorflow.op.tpu.RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug} - */ - public static class Options { - private Long tableId; - - private String tableName; - - private String config; - - private Options() { - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public Options tableId(Long tableId) { - this.tableId = tableId; - return this; - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public Options tableName(String tableName) { - this.tableName = tableName; - return this; - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public Options config(String config) { - this.config = config; - return this; - } - } - - @OpInputsMetadata( - outputsClass = RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug.class - ) - public static class Inputs extends RawOpInputs { - /** - * The tableId attribute - */ - public final long tableId; - - /** - * The tableName attribute - */ - public final String tableName; - - /** - * The numShards attribute - */ - public final long numShards; - - /** - * The shardId attribute - */ - public final long shardId; - - /** - * The config attribute - */ - public final String config; - - public Inputs(GraphOperation op) { - super(new RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug(op), op, Arrays.asList("table_id", "table_name", "num_shards", "shard_id", "config")); - int inputIndex = 0; - tableId = op.attributes().getAttrInt("table_id"); - tableName = op.attributes().getAttrString("table_name"); - numShards = op.attributes().getAttrInt("num_shards"); - shardId = op.attributes().getAttrInt("shard_id"); - config = op.attributes().getAttrString("config"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug.java deleted file mode 100644 index 61c55552cd7..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug.java +++ /dev/null @@ -1,248 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.tpu; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; - -/** - * The RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug operation - */ -@OpMetadata( - opType = RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug.OP_NAME, - inputsClass = RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug.Inputs.class -) -public final class RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug"; - - private Output parameters; - - private Output v; - - private Output m; - - private Output gradientAccumulators; - - public RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - parameters = operation.output(outputIdx++); - v = operation.output(outputIdx++); - m = operation.output(outputIdx++); - gradientAccumulators = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug operation. - * - * @param scope current scope - * @param numShards The value of the numShards attribute - * @param shardId The value of the shardId attribute - * @param options carries optional attribute values - * @return a new instance of RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug - */ - @Endpoint( - describeByClass = true - ) - public static RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug create(Scope scope, - Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug"); - opBuilder.setAttr("num_shards", numShards); - opBuilder.setAttr("shard_id", shardId); - if (options != null) { - for (Options opts : options) { - if (opts.tableId != null) { - opBuilder.setAttr("table_id", opts.tableId); - } - if (opts.tableName != null) { - opBuilder.setAttr("table_name", opts.tableName); - } - if (opts.config != null) { - opBuilder.setAttr("config", opts.config); - } - } - } - return new RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug(opBuilder.build()); - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public static Options tableId(Long tableId) { - return new Options().tableId(tableId); - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public static Options tableName(String tableName) { - return new Options().tableName(tableName); - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public static Options config(String config) { - return new Options().config(config); - } - - /** - * Gets parameters. - * - * @return parameters. - */ - public Output parameters() { - return parameters; - } - - /** - * Gets v. - * - * @return v. - */ - public Output v() { - return v; - } - - /** - * Gets m. 
- * - * @return m. - */ - public Output m() { - return m; - } - - /** - * Gets gradientAccumulators. - * - * @return gradientAccumulators. - */ - public Output gradientAccumulators() { - return gradientAccumulators; - } - - /** - * Optional attributes for {@link org.tensorflow.op.tpu.RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug} - */ - public static class Options { - private Long tableId; - - private String tableName; - - private String config; - - private Options() { - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public Options tableId(Long tableId) { - this.tableId = tableId; - return this; - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public Options tableName(String tableName) { - this.tableName = tableName; - return this; - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public Options config(String config) { - this.config = config; - return this; - } - } - - @OpInputsMetadata( - outputsClass = RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug.class - ) - public static class Inputs extends RawOpInputs { - /** - * The tableId attribute - */ - public final long tableId; - - /** - * The tableName attribute - */ - public final String tableName; - - /** - * The numShards attribute - */ - public final long numShards; - - /** - * The shardId attribute - */ - public final long shardId; - - /** - * The config attribute - */ - public final String config; - - public Inputs(GraphOperation op) { - super(new RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug(op), op, Arrays.asList("table_id", "table_name", "num_shards", "shard_id", "config")); - int inputIndex = 0; - tableId = op.attributes().getAttrInt("table_id"); - tableName = op.attributes().getAttrString("table_name"); - numShards = op.attributes().getAttrInt("num_shards"); - shardId = op.attributes().getAttrInt("shard_id"); - config = op.attributes().getAttrString("config"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.java deleted file mode 100644 index ee671e633d7..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.java +++ /dev/null @@ -1,252 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! 
- -package org.tensorflow.op.tpu; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; - -/** - * Retrieve RMSProp embedding parameters with debug support. - * An op that retrieves optimization parameters from embedding to host - * memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up - * the correct embedding table configuration. For example, this op is - * used to retrieve updated parameters before saving a checkpoint. - */ -@OpMetadata( - opType = RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.OP_NAME, - inputsClass = RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.Inputs.class -) -public final class RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug"; - - private Output parameters; - - private Output ms; - - private Output mom; - - private Output gradientAccumulators; - - public RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug(Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - parameters = operation.output(outputIdx++); - ms = operation.output(outputIdx++); - mom = operation.output(outputIdx++); - gradientAccumulators = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug operation. - * - * @param scope current scope - * @param numShards The value of the numShards attribute - * @param shardId The value of the shardId attribute - * @param options carries optional attribute values - * @return a new instance of RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug - */ - @Endpoint( - describeByClass = true - ) - public static RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug create(Scope scope, - Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug"); - opBuilder.setAttr("num_shards", numShards); - opBuilder.setAttr("shard_id", shardId); - if (options != null) { - for (Options opts : options) { - if (opts.tableId != null) { - opBuilder.setAttr("table_id", opts.tableId); - } - if (opts.tableName != null) { - opBuilder.setAttr("table_name", opts.tableName); - } - if (opts.config != null) { - opBuilder.setAttr("config", opts.config); - } - } - } - return new RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug(opBuilder.build()); - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public static Options tableId(Long tableId) { - return new Options().tableId(tableId); - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public static Options tableName(String tableName) { - return new Options().tableName(tableName); - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. 
- */ - public static Options config(String config) { - return new Options().config(config); - } - - /** - * Gets parameters. - * Parameter parameters updated by the RMSProp optimization algorithm. - * @return parameters. - */ - public Output parameters() { - return parameters; - } - - /** - * Gets ms. - * Parameter ms updated by the RMSProp optimization algorithm. - * @return ms. - */ - public Output ms() { - return ms; - } - - /** - * Gets mom. - * Parameter mom updated by the RMSProp optimization algorithm. - * @return mom. - */ - public Output mom() { - return mom; - } - - /** - * Gets gradientAccumulators. - * Parameter gradient_accumulators updated by the RMSProp optimization algorithm. - * @return gradientAccumulators. - */ - public Output gradientAccumulators() { - return gradientAccumulators; - } - - /** - * Optional attributes for {@link org.tensorflow.op.tpu.RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug} - */ - public static class Options { - private Long tableId; - - private String tableName; - - private String config; - - private Options() { - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public Options tableId(Long tableId) { - this.tableId = tableId; - return this; - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public Options tableName(String tableName) { - this.tableName = tableName; - return this; - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public Options config(String config) { - this.config = config; - return this; - } - } - - @OpInputsMetadata( - outputsClass = RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.class - ) - public static class Inputs extends RawOpInputs { - /** - * The tableId attribute - */ - public final long tableId; - - /** - * The tableName attribute - */ - public final String tableName; - - /** - * The numShards attribute - */ - public final long numShards; - - /** - * The shardId attribute - */ - public final long shardId; - - /** - * The config attribute - */ - public final String config; - - public Inputs(GraphOperation op) { - super(new RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug(op), op, Arrays.asList("table_id", "table_name", "num_shards", "shard_id", "config")); - int inputIndex = 0; - tableId = op.attributes().getAttrInt("table_id"); - tableName = op.attributes().getAttrString("table_name"); - numShards = op.attributes().getAttrInt("num_shards"); - shardId = op.attributes().getAttrInt("shard_id"); - config = op.attributes().getAttrString("config"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.java deleted file mode 100644 index 6941a062872..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.java +++ /dev/null @@ -1,229 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -=======================================================================*/ - -// This class has been generated, DO NOT EDIT! - -package org.tensorflow.op.tpu; - -import java.util.Arrays; -import org.tensorflow.GraphOperation; -import org.tensorflow.Operation; -import org.tensorflow.OperationBuilder; -import org.tensorflow.Output; -import org.tensorflow.op.RawOp; -import org.tensorflow.op.RawOpInputs; -import org.tensorflow.op.Scope; -import org.tensorflow.op.annotation.Endpoint; -import org.tensorflow.op.annotation.OpInputsMetadata; -import org.tensorflow.op.annotation.OpMetadata; -import org.tensorflow.types.TFloat32; - -/** - * Retrieve SGD embedding parameters with debug support. - * An op that retrieves optimization parameters from embedding to host - * memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up - * the correct embedding table configuration. For example, this op is - * used to retrieve updated parameters before saving a checkpoint. - */ -@OpMetadata( - opType = RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.OP_NAME, - inputsClass = RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.Inputs.class -) -public final class RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug extends RawOp { - /** - * The name of this op, as known by TensorFlow core engine - */ - public static final String OP_NAME = "RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug"; - - private Output parameters; - - private Output gradientAccumulators; - - public RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug( - Operation operation) { - super(operation, OP_NAME); - int outputIdx = 0; - parameters = operation.output(outputIdx++); - gradientAccumulators = operation.output(outputIdx++); - } - - /** - * Factory method to create a class wrapping a new RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug operation. - * - * @param scope current scope - * @param numShards The value of the numShards attribute - * @param shardId The value of the shardId attribute - * @param options carries optional attribute values - * @return a new instance of RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug - */ - @Endpoint( - describeByClass = true - ) - public static RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug create( - Scope scope, Long numShards, Long shardId, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug"); - opBuilder.setAttr("num_shards", numShards); - opBuilder.setAttr("shard_id", shardId); - if (options != null) { - for (Options opts : options) { - if (opts.tableId != null) { - opBuilder.setAttr("table_id", opts.tableId); - } - if (opts.tableName != null) { - opBuilder.setAttr("table_name", opts.tableName); - } - if (opts.config != null) { - opBuilder.setAttr("config", opts.config); - } - } - } - return new RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug(opBuilder.build()); - } - - /** - * Sets the tableId option. 
- * - * @param tableId the tableId option - * @return this Options instance. - */ - public static Options tableId(Long tableId) { - return new Options().tableId(tableId); - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public static Options tableName(String tableName) { - return new Options().tableName(tableName); - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public static Options config(String config) { - return new Options().config(config); - } - - /** - * Gets parameters. - * Parameter parameters updated by the stochastic gradient descent optimization algorithm. - * @return parameters. - */ - public Output parameters() { - return parameters; - } - - /** - * Gets gradientAccumulators. - * Parameter gradient_accumulators updated by the Adadelta optimization algorithm. - * @return gradientAccumulators. - */ - public Output gradientAccumulators() { - return gradientAccumulators; - } - - /** - * Optional attributes for {@link org.tensorflow.op.tpu.RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug} - */ - public static class Options { - private Long tableId; - - private String tableName; - - private String config; - - private Options() { - } - - /** - * Sets the tableId option. - * - * @param tableId the tableId option - * @return this Options instance. - */ - public Options tableId(Long tableId) { - this.tableId = tableId; - return this; - } - - /** - * Sets the tableName option. - * - * @param tableName the tableName option - * @return this Options instance. - */ - public Options tableName(String tableName) { - this.tableName = tableName; - return this; - } - - /** - * Sets the config option. - * - * @param config the config option - * @return this Options instance. - */ - public Options config(String config) { - this.config = config; - return this; - } - } - - @OpInputsMetadata( - outputsClass = RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.class - ) - public static class Inputs extends RawOpInputs { - /** - * The tableId attribute - */ - public final long tableId; - - /** - * The tableName attribute - */ - public final String tableName; - - /** - * The numShards attribute - */ - public final long numShards; - - /** - * The shardId attribute - */ - public final long shardId; - - /** - * The config attribute - */ - public final String config; - - public Inputs(GraphOperation op) { - super(new RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug(op), op, Arrays.asList("table_id", "table_name", "num_shards", "shard_id", "config")); - int inputIndex = 0; - tableId = op.attributes().getAttrInt("table_id"); - tableName = op.attributes().getAttrString("table_name"); - numShards = op.attributes().getAttrInt("num_shards"); - shardId = op.attributes().getAttrInt("shard_id"); - config = op.attributes().getAttrString("config"); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdam.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdam.java index 689acdd1364..d32dec983bb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdam.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdam.java @@ -35,10 +35,10 @@ /** * Update '*var' according to the Adam algorithm. 
- * $$lr_t := \text{learning_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$ - * $$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$ - * $$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$ - * $$variable := variable - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$ + * $$\text{lr}_t := \mathrm{lr} \cdot \frac{\sqrt{1 - \beta_2^t}}{1 - \beta_1^t}$$ + * $$m_t := \beta_1 \cdot m_{t-1} + (1 - \beta_1) \cdot g$$ + * $$v_t := \beta_2 \cdot v_{t-1} + (1 - \beta_2) \cdot g^2$$ + * $$\text{var} := \begin{cases} \text{var} - (m_t \beta_1 + g \cdot (1 - \beta_1))\cdot\text{lr}_t/(\sqrt{v_t} + \epsilon), &\text{if use_nesterov}\\ \text{var} - m_t \cdot \text{lr}_t /(\sqrt{v_t} + \epsilon), &\text{otherwise} \end{cases}$$ * * @param data type for {@code out} output */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdam.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdam.java index a0fa6645311..cd5c1a243e6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdam.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceApplyAdam.java @@ -34,10 +34,10 @@ /** * Update '*var' according to the Adam algorithm. - * $$\text{lr}t := \mathrm{learning_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ - * $$m_t := \beta_1 * m{t-1} + (1 - \beta_1) * g$$ - * $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$ - * $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{v_t} + \epsilon)$$ + * $$\text{lr}_t := \mathrm{lr} \cdot \frac{\sqrt{1 - \beta_2^t}}{1 - \beta_1^t}$$ + * $$m_t := \beta_1 \cdot m_{t-1} + (1 - \beta_1) \cdot g$$ + * $$v_t := \beta_2 \cdot v_{t-1} + (1 - \beta_2) \cdot g^2$$ + * $$\text{var} := \begin{cases} \text{var} - (m_t \beta_1 + g \cdot (1 - \beta_1))\cdot\text{lr}_t/(\sqrt{v_t} + \epsilon), &\text{if use_nesterov}\\ \text{var} - m_t \cdot \text{lr}_t /(\sqrt{v_t} + \epsilon), &\text{otherwise} \end{cases}$$ */ @OpMetadata( opType = ResourceApplyAdam.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/AllReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/AllReduce.java new file mode 100644 index 00000000000..3ce3b44888d --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/AllReduce.java @@ -0,0 +1,133 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT!
+ +package org.tensorflow.op.xla; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.proto.framework.DataType; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.family.TNumber; + +/** + * Wraps the XLA AllReduce operator + * documented at https://www.tensorflow.org/xla/operation_semantics#allreduce. + * + * @param data type for {@code output} output + */ +@OpMetadata( + opType = AllReduce.OP_NAME, + inputsClass = AllReduce.Inputs.class +) +@Operator( + group = "xla" +) +public final class AllReduce extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "XlaAllReduce"; + + private Output output; + + public AllReduce(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new XlaAllReduce operation. + * + * @param scope current scope + * @param input Array or a non-empty tuple of arrays to reduce across replicas. + * @param groupAssignment Groups between which the reductions are performed. + * @param reduceOp Reduction computation. + * @param data type for {@code XlaAllReduce} output and operands + * @return a new instance of AllReduce + */ + @Endpoint( + describeByClass = true + ) + public static AllReduce create(Scope scope, Operand input, + Operand groupAssignment, String reduceOp) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "AllReduce"); + opBuilder.addInput(input.asOutput()); + opBuilder.addInput(groupAssignment.asOutput()); + opBuilder.setAttr("reduce_op", reduceOp); + return new AllReduce<>(opBuilder.build()); + } + + /** + * Gets output. + * + * @return output. + */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } + + @OpInputsMetadata( + outputsClass = AllReduce.class + ) + public static class Inputs extends RawOpInputs> { + /** + * Array or a non-empty tuple of arrays to reduce across replicas. + */ + public final Operand input; + + /** + * Groups between which the reductions are performed. + */ + public final Operand groupAssignment; + + /** + * The T attribute + */ + public final DataType T; + + /** + * Reduction computation. 
+ */ + public final String reduceOp; + + public Inputs(GraphOperation op) { + super(new AllReduce<>(op), op, Arrays.asList("T", "reduce_op")); + int inputIndex = 0; + input = (Operand) op.input(inputIndex++); + groupAssignment = (Operand) op.input(inputIndex++); + T = op.attributes().getAttrType("T"); + reduceOp = op.attributes().getAttrString("reduce_op"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/AssignVariableConcatND.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/AssignVariableConcatND.java new file mode 100644 index 00000000000..4445f0a9c6f --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/AssignVariableConcatND.java @@ -0,0 +1,244 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.xla; + +import java.util.Arrays; +import java.util.List; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.proto.framework.DataType; +import org.tensorflow.types.family.TType; + +/** + * Concats input tensor across all dimensions. + * An op which merges slices the input tensor based on the given num_splits + * attribute, strips paddings optionally, and writes the merged tensor without + * paddings to the resource variable. + *

+ * <p>This op may be generated via the TPU bridge.
+ * <p>For example, with {@code input} tensor:
+ * <pre>
+ * [[0, 1],
+ *  [4, 5]]
+ * [[2, 3],
+ *  [6, 7]]
+ * [[8, 9],
+ *  [12, 13]]
+ * [[10, 11],
+ *  [14, 15]]
+ * </pre>
+ * <p>{@code num_splits}:
+ * <pre>
+ * [2, 2]
+ * </pre>
+ * <p>and {@code paddings}:
+ * <pre>
+ * [1, 1]
+ * </pre>
+ * <p>the expected {@code outputs} is:
+ * <pre>
+ * [[0, 1, 2],
+ *  [4, 5, 6],
+ *  [8, 9, 10]]
+ * </pre>
    + */ +@OpMetadata( + opType = AssignVariableConcatND.OP_NAME, + inputsClass = AssignVariableConcatND.Inputs.class +) +public final class AssignVariableConcatND extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "AssignVariableXlaConcatND"; + + public AssignVariableConcatND(Operation operation) { + super(operation, OP_NAME); + } + + /** + * Factory method to create a class wrapping a new AssignVariableXlaConcatND operation. + * + * @param scope current scope + * @param resource Resource variable for concatenated input tensors across all dimensions. + * } + * in_arg { + * name: "inputs" + * description: <<END + * Input tensor slices in row-major order to merge across all dimensions. All + * inputs must have the same shape. + * } + * out_arg { + * name: "output" + * description: <<END + * Output tensor formed from merging input slices based on num_concats defined. + * @param inputs The inputs value + * @param numConcats Number of ways to merge per dimension. + * @param options carries optional attribute values + * @return a new instance of AssignVariableConcatND + */ + @Endpoint( + describeByClass = true + ) + public static AssignVariableConcatND create(Scope scope, Operand resource, + Iterable> inputs, List numConcats, Options... options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "AssignVariableConcatND"); + opBuilder.addInput(resource.asOutput()); + opBuilder.addInputList(Operands.asOutputs(inputs)); + long[] numConcatsArray = new long[numConcats.size()]; + for (int i = 0 ; i < numConcatsArray.length ; i++) { + numConcatsArray[i] = numConcats.get(i); + } + opBuilder.setAttr("num_concats", numConcatsArray); + if (options != null) { + for (Options opts : options) { + if (opts.paddings != null) { + long[] paddingsArray = new long[opts.paddings.size()]; + for (int i = 0 ; i < paddingsArray.length ; i++) { + paddingsArray[i] = opts.paddings.get(i); + } + opBuilder.setAttr("paddings", paddingsArray); + } + } + } + return new AssignVariableConcatND(opBuilder.build()); + } + + /** + * Sets the paddings option. + * + * @param paddings Optional list of right paddings per dimension to strip from the final merged + * tensor. These paddings must not exceed the dimension size of the merged result + * prior to stripping paddings. + * @return this Options instance. + */ + public static Options paddings(List paddings) { + return new Options().paddings(paddings); + } + + /** + * Sets the paddings option. + * + * @param paddings Optional list of right paddings per dimension to strip from the final merged + * tensor. These paddings must not exceed the dimension size of the merged result + * prior to stripping paddings. + * @return this Options instance. + */ + public static Options paddings(Long... paddings) { + return new Options().paddings(paddings); + } + + /** + * Optional attributes for {@link org.tensorflow.op.xla.AssignVariableConcatND} + */ + public static class Options { + private List paddings; + + private Options() { + } + + /** + * Sets the paddings option. + * + * @param paddings Optional list of right paddings per dimension to strip from the final merged + * tensor. These paddings must not exceed the dimension size of the merged result + * prior to stripping paddings. + * @return this Options instance. + */ + public Options paddings(List paddings) { + this.paddings = paddings; + return this; + } + + /** + * Sets the paddings option. 
+ * + * @param paddings Optional list of right paddings per dimension to strip from the final merged + * tensor. These paddings must not exceed the dimension size of the merged result + * prior to stripping paddings. + * @return this Options instance. + */ + public Options paddings(Long... paddings) { + this.paddings = Arrays.asList(paddings); + return this; + } + } + + @OpInputsMetadata( + outputsClass = AssignVariableConcatND.class + ) + public static class Inputs extends RawOpInputs { + /** + * Resource variable for concatenated input tensors across all dimensions. + * } + * in_arg { + * name: "inputs" + * description: <<END + * Input tensor slices in row-major order to merge across all dimensions. All + * inputs must have the same shape. + * } + * out_arg { + * name: "output" + * description: <<END + * Output tensor formed from merging input slices based on num_concats defined. + */ + public final Operand resource; + + /** + * The inputs input + */ + public final Iterable> inputs; + + /** + * The T attribute + */ + public final DataType T; + + /** + * Number of ways to merge per dimension. + */ + public final long[] numConcats; + + /** + * Optional list of right paddings per dimension to strip from the final merged + * tensor. These paddings must not exceed the dimension size of the merged result + * prior to stripping paddings. + */ + public final long[] paddings; + + public Inputs(GraphOperation op) { + super(new AssignVariableConcatND(op), op, Arrays.asList("T", "num_concats", "paddings")); + int inputIndex = 0; + resource = (Operand) op.input(inputIndex++); + int inputsLength = op.inputListLength("inputs"); + inputs = Arrays.asList((Operand[]) op.inputList(inputIndex, inputsLength)); + inputIndex += inputsLength; + T = op.attributes().getAttrType("T"); + numConcats = op.attributes().getAttrIntList("num_concats"); + paddings = op.attributes().getAttrIntList("paddings"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ConcatND.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ConcatND.java new file mode 100644 index 00000000000..c888ddbe394 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ConcatND.java @@ -0,0 +1,248 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.xla; + +import java.util.Arrays; +import java.util.List; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.proto.framework.DataType; +import org.tensorflow.types.family.TType; + +/** + * Concats input tensor across all dimensions. + * An op which merges slices the input tensor based on the given num_splits + * attribute, strips paddings optionally, and returns the merged tensor without + * paddings. + *

+ * <p>This op may be generated via the TPU bridge.
+ * <p>For example, with {@code input} tensor:
+ * <pre>
+ * [[0, 1],
+ *  [4, 5]]
+ * [[2, 3],
+ *  [6, 7]]
+ * [[8, 9],
+ *  [12, 13]]
+ * [[10, 11],
+ *  [14, 15]]
+ * </pre>
+ * <p>{@code num_splits}:
+ * <pre>
+ * [2, 2]
+ * </pre>
+ * <p>and {@code paddings}:
+ * <pre>
+ * [1, 1]
+ * </pre>
+ * <p>the expected {@code outputs} is:
+ * <pre>
+ * [[0, 1, 2],
+ *  [4, 5, 6],
+ *  [8, 9, 10]]
+ * </pre>
    + * + * @param data type for {@code output} output + */ +@OpMetadata( + opType = ConcatND.OP_NAME, + inputsClass = ConcatND.Inputs.class +) +public final class ConcatND extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "XlaConcatND"; + + private Output output; + + public ConcatND(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new XlaConcatND operation. + * + * @param scope current scope + * @param inputs Input tensor slices in row-major order to merge across all dimensions. All + * inputs must have the same shape. + * } + * out_arg { + * name: "output" + * description: <<END + * Output tensor formed from merging input slices based on num_concats defined. + * @param numConcats Number of ways to merge per dimension. + * @param options carries optional attribute values + * @param data type for {@code XlaConcatND} output and operands + * @return a new instance of ConcatND + */ + @Endpoint( + describeByClass = true + ) + public static ConcatND create(Scope scope, Iterable> inputs, + List numConcats, Options... options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "ConcatND"); + opBuilder.addInputList(Operands.asOutputs(inputs)); + long[] numConcatsArray = new long[numConcats.size()]; + for (int i = 0 ; i < numConcatsArray.length ; i++) { + numConcatsArray[i] = numConcats.get(i); + } + opBuilder.setAttr("num_concats", numConcatsArray); + if (options != null) { + for (Options opts : options) { + if (opts.paddings != null) { + long[] paddingsArray = new long[opts.paddings.size()]; + for (int i = 0 ; i < paddingsArray.length ; i++) { + paddingsArray[i] = opts.paddings.get(i); + } + opBuilder.setAttr("paddings", paddingsArray); + } + } + } + return new ConcatND<>(opBuilder.build()); + } + + /** + * Sets the paddings option. + * + * @param paddings Optional list of right paddings per dimension to strip from the final merged + * tensor. These paddings must not exceed the dimension size of the merged result + * prior to stripping paddings. + * @return this Options instance. + */ + public static Options paddings(List paddings) { + return new Options().paddings(paddings); + } + + /** + * Sets the paddings option. + * + * @param paddings Optional list of right paddings per dimension to strip from the final merged + * tensor. These paddings must not exceed the dimension size of the merged result + * prior to stripping paddings. + * @return this Options instance. + */ + public static Options paddings(Long... paddings) { + return new Options().paddings(paddings); + } + + /** + * Gets output. + * + * @return output. + */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } + + /** + * Optional attributes for {@link org.tensorflow.op.xla.ConcatND} + */ + public static class Options { + private List paddings; + + private Options() { + } + + /** + * Sets the paddings option. + * + * @param paddings Optional list of right paddings per dimension to strip from the final merged + * tensor. These paddings must not exceed the dimension size of the merged result + * prior to stripping paddings. + * @return this Options instance. + */ + public Options paddings(List paddings) { + this.paddings = paddings; + return this; + } + + /** + * Sets the paddings option. 
+ * + * @param paddings Optional list of right paddings per dimension to strip from the final merged + * tensor. These paddings must not exceed the dimension size of the merged result + * prior to stripping paddings. + * @return this Options instance. + */ + public Options paddings(Long... paddings) { + this.paddings = Arrays.asList(paddings); + return this; + } + } + + @OpInputsMetadata( + outputsClass = ConcatND.class + ) + public static class Inputs extends RawOpInputs> { + /** + * Input tensor slices in row-major order to merge across all dimensions. All + * inputs must have the same shape. + * } + * out_arg { + * name: "output" + * description: <<END + * Output tensor formed from merging input slices based on num_concats defined. + */ + public final Iterable> inputs; + + /** + * The T attribute + */ + public final DataType T; + + /** + * Number of ways to merge per dimension. + */ + public final long[] numConcats; + + /** + * Optional list of right paddings per dimension to strip from the final merged + * tensor. These paddings must not exceed the dimension size of the merged result + * prior to stripping paddings. + */ + public final long[] paddings; + + public Inputs(GraphOperation op) { + super(new ConcatND<>(op), op, Arrays.asList("T", "num_concats", "paddings")); + int inputIndex = 0; + int inputsLength = op.inputListLength("inputs"); + inputs = Arrays.asList((Operand[]) op.inputList(inputIndex, inputsLength)); + inputIndex += inputsLength; + T = op.attributes().getAttrType("T"); + numConcats = op.attributes().getAttrIntList("num_concats"); + paddings = op.attributes().getAttrIntList("paddings"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReadVariableSplitND.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReadVariableSplitND.java new file mode 100644 index 00000000000..a4a9c8cfbb5 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReadVariableSplitND.java @@ -0,0 +1,251 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.xla; + +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.proto.framework.DataType; +import org.tensorflow.types.family.TType; + +/** + * Splits resource variable input tensor across all dimensions. 
+ * An op which splits the resource variable input tensor based on the given + * num_splits attribute, pads slices optionally, and returned the slices. Slices + * are returned in row-major order. + *

+ * <p>This op may be generated via the TPU bridge.
+ * <p>For example, with {@code input} tensor:
+ * <pre>
+ * [[0, 1, 2],
+ *  [3, 4, 5],
+ *  [6, 7, 8]]
+ * </pre>
+ * <p>{@code num_splits}:
+ * <pre>
+ * [2, 2]
+ * </pre>
+ * <p>and {@code paddings}:
+ * <pre>
+ * [1, 1]
+ * </pre>
+ * <p>the expected {@code outputs} is:
+ * <pre>
+ * [[0, 1],
+ *  [3, 4]]
+ * [[2, 0],
+ *  [5, 0]]
+ * [[6, 7],
+ *  [0, 0]]
+ * [[8, 0],
+ *  [0, 0]]
+ * </pre>
    + * + * @param data type for {@code outputs} output + */ +@OpMetadata( + opType = ReadVariableSplitND.OP_NAME, + inputsClass = ReadVariableSplitND.Inputs.class +) +public final class ReadVariableSplitND extends RawOp implements Iterable> { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "ReadVariableXlaSplitND"; + + private List> outputs; + + @SuppressWarnings("unchecked") + public ReadVariableSplitND(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + int outputsLength = operation.outputListLength("outputs"); + outputs = Arrays.asList((Output[]) operation.outputList(outputIdx, outputsLength)); + outputIdx += outputsLength; + } + + /** + * Factory method to create a class wrapping a new ReadVariableXlaSplitND operation. + * + * @param scope current scope + * @param resource Resource variable of input tensor to split across all dimensions. + * } + * out_arg { + * name: "outputs" + * description: <<END + * Output slices based on input and num_splits defined, in row-major order. + * @param T The value of the T attribute + * @param N The value of the N attribute + * @param numSplits Number of ways to split per dimension. Shape dimensions must be evenly + * divisible. + * @param options carries optional attribute values + * @param data type for {@code ReadVariableXlaSplitND} output and operands + * @return a new instance of ReadVariableSplitND + */ + @Endpoint( + describeByClass = true + ) + public static ReadVariableSplitND create(Scope scope, + Operand resource, Class T, Long N, List numSplits, + Options... options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "ReadVariableSplitND"); + opBuilder.addInput(resource.asOutput()); + opBuilder.setAttr("T", Operands.toDataType(T)); + opBuilder.setAttr("N", N); + long[] numSplitsArray = new long[numSplits.size()]; + for (int i = 0 ; i < numSplitsArray.length ; i++) { + numSplitsArray[i] = numSplits.get(i); + } + opBuilder.setAttr("num_splits", numSplitsArray); + if (options != null) { + for (Options opts : options) { + if (opts.paddings != null) { + long[] paddingsArray = new long[opts.paddings.size()]; + for (int i = 0 ; i < paddingsArray.length ; i++) { + paddingsArray[i] = opts.paddings.get(i); + } + opBuilder.setAttr("paddings", paddingsArray); + } + } + } + return new ReadVariableSplitND<>(opBuilder.build()); + } + + /** + * Sets the paddings option. + * + * @param paddings Optional list of right paddings per dimension of input tensor to apply before + * splitting. This can be used to make a dimension evenly divisible. + * @return this Options instance. + */ + public static Options paddings(List paddings) { + return new Options().paddings(paddings); + } + + /** + * Sets the paddings option. + * + * @param paddings Optional list of right paddings per dimension of input tensor to apply before + * splitting. This can be used to make a dimension evenly divisible. + * @return this Options instance. + */ + public static Options paddings(Long... paddings) { + return new Options().paddings(paddings); + } + + /** + * Gets outputs. + * + * @return outputs. + */ + public List> outputs() { + return outputs; + } + + @Override + @SuppressWarnings({"rawtypes", "unchecked"}) + public Iterator> iterator() { + return (Iterator) outputs.iterator(); + } + + /** + * Optional attributes for {@link org.tensorflow.op.xla.ReadVariableSplitND} + */ + public static class Options { + private List paddings; + + private Options() { + } + + /** + * Sets the paddings option. 
+ * + * @param paddings Optional list of right paddings per dimension of input tensor to apply before + * splitting. This can be used to make a dimension evenly divisible. + * @return this Options instance. + */ + public Options paddings(List paddings) { + this.paddings = paddings; + return this; + } + + /** + * Sets the paddings option. + * + * @param paddings Optional list of right paddings per dimension of input tensor to apply before + * splitting. This can be used to make a dimension evenly divisible. + * @return this Options instance. + */ + public Options paddings(Long... paddings) { + this.paddings = Arrays.asList(paddings); + return this; + } + } + + @OpInputsMetadata( + outputsClass = ReadVariableSplitND.class + ) + public static class Inputs extends RawOpInputs> { + /** + * Resource variable of input tensor to split across all dimensions. + * } + * out_arg { + * name: "outputs" + * description: <<END + * Output slices based on input and num_splits defined, in row-major order. + */ + public final Operand resource; + + /** + * The T attribute + */ + public final DataType T; + + /** + * Number of ways to split per dimension. Shape dimensions must be evenly + * divisible. + */ + public final long[] numSplits; + + /** + * Optional list of right paddings per dimension of input tensor to apply before + * splitting. This can be used to make a dimension evenly divisible. + */ + public final long[] paddings; + + public Inputs(GraphOperation op) { + super(new ReadVariableSplitND<>(op), op, Arrays.asList("T", "num_splits", "paddings")); + int inputIndex = 0; + resource = (Operand) op.input(inputIndex++); + T = op.attributes().getAttrType("T"); + numSplits = op.attributes().getAttrIntList("num_splits"); + paddings = op.attributes().getAttrIntList("paddings"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReduceScatter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReduceScatter.java new file mode 100644 index 00000000000..84753f2713f --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReduceScatter.java @@ -0,0 +1,141 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.xla; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.proto.framework.DataType; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.family.TNumber; + +/** + * Wraps the XLA ReduceScatter operator + * documented at https://www.tensorflow.org/xla/operation_semantics#reducescatter. + * + * @param data type for {@code output} output + */ +@OpMetadata( + opType = ReduceScatter.OP_NAME, + inputsClass = ReduceScatter.Inputs.class +) +@Operator( + group = "xla" +) +public final class ReduceScatter extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "XlaReduceScatter"; + + private Output output; + + public ReduceScatter(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new XlaReduceScatter operation. + * + * @param scope current scope + * @param input Array or a non-empty tuple of arrays to reduce across replicas. + * @param groupAssignment Groups between which the reductions are performed. + * @param scatterDimension Dimension to scatter. + * @param reduceOp Reduction computation. + * @param data type for {@code XlaReduceScatter} output and operands + * @return a new instance of ReduceScatter + */ + @Endpoint( + describeByClass = true + ) + public static ReduceScatter create(Scope scope, Operand input, + Operand groupAssignment, Operand scatterDimension, String reduceOp) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "ReduceScatter"); + opBuilder.addInput(input.asOutput()); + opBuilder.addInput(groupAssignment.asOutput()); + opBuilder.addInput(scatterDimension.asOutput()); + opBuilder.setAttr("reduce_op", reduceOp); + return new ReduceScatter<>(opBuilder.build()); + } + + /** + * Gets output. + * + * @return output. + */ + public Output output() { + return output; + } + + @Override + public Output asOutput() { + return output; + } + + @OpInputsMetadata( + outputsClass = ReduceScatter.class + ) + public static class Inputs extends RawOpInputs> { + /** + * Array or a non-empty tuple of arrays to reduce across replicas. + */ + public final Operand input; + + /** + * Groups between which the reductions are performed. + */ + public final Operand groupAssignment; + + /** + * Dimension to scatter. + */ + public final Operand scatterDimension; + + /** + * The T attribute + */ + public final DataType T; + + /** + * Reduction computation. 
+ */ + public final String reduceOp; + + public Inputs(GraphOperation op) { + super(new ReduceScatter<>(op), op, Arrays.asList("T", "reduce_op")); + int inputIndex = 0; + input = (Operand) op.input(inputIndex++); + groupAssignment = (Operand) op.input(inputIndex++); + scatterDimension = (Operand) op.input(inputIndex++); + T = op.attributes().getAttrType("T"); + reduceOp = op.attributes().getAttrString("reduce_op"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/RemoveDynamicDimensionSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/RemoveDynamicDimensionSize.java index 56f0bf33738..bc416f83497 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/RemoveDynamicDimensionSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/RemoveDynamicDimensionSize.java @@ -35,11 +35,9 @@ import org.tensorflow.types.family.TType; /** - * Inverse of XlaSetDynamicDimensionSize. Make an xla bounded - *
- *     dynamic dimension into a static dimension. The bound of the size of
- *     dimension `dim_index` becomes the static dimension size.
- * </pre>
    + * Inverse of XlaSetDynamicDimensionSize. + * Make an xla bounded dynamic dimension into a static dimension. The bound of the + * size of dimension {@code dim_index} becomes the static dimension size. * * @param data type for {@code output} output */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/RngBitGenerator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/RngBitGenerator.java new file mode 100644 index 00000000000..bb7b03e8b99 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/RngBitGenerator.java @@ -0,0 +1,157 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.xla; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.proto.framework.DataType; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.family.TNumber; +import org.tensorflow.types.family.TType; + +/** + * Stateless PRNG bit generator. + * Wraps the XLA RngBitGenerator operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#rngbitgenerator. + * + * @param data type for {@code output} output + */ +@OpMetadata( + opType = RngBitGenerator.OP_NAME, + inputsClass = RngBitGenerator.Inputs.class +) +@Operator( + group = "xla" +) +public final class RngBitGenerator extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "XlaRngBitGenerator"; + + private Output outputKey; + + private Output output; + + @SuppressWarnings("unchecked") + public RngBitGenerator(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + outputKey = operation.output(outputIdx++); + output = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new XlaRngBitGenerator operation. + * + * @param scope current scope + * @param algorithm The PRNG algorithm to use, one of + * tf.random.Algorithm.{PHILOX, THREEFRY, AUTO_SELECT}. + * @param initialState Initial state for the PRNG algorithm. For THREEFRY, it should be + * a u64[2] and for PHILOX a u64[3]. + * @param shape The output shape of the generated data. + * @param dtype The type of the tensor. 
+ * @param data type for {@code XlaRngBitGenerator} output and operands + * @return a new instance of RngBitGenerator + */ + @Endpoint( + describeByClass = true + ) + public static RngBitGenerator create(Scope scope, + Operand algorithm, Operand initialState, + Operand shape, Class dtype) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "RngBitGenerator"); + opBuilder.addInput(algorithm.asOutput()); + opBuilder.addInput(initialState.asOutput()); + opBuilder.addInput(shape.asOutput()); + opBuilder.setAttr("dtype", Operands.toDataType(dtype)); + return new RngBitGenerator<>(opBuilder.build()); + } + + /** + * Gets outputKey. + * + * @return outputKey. + */ + public Output outputKey() { + return outputKey; + } + + /** + * Gets output. + * + * @return output. + */ + public Output output() { + return output; + } + + @OpInputsMetadata( + outputsClass = RngBitGenerator.class + ) + public static class Inputs extends RawOpInputs> { + /** + * The PRNG algorithm to use, one of + * tf.random.Algorithm.{PHILOX, THREEFRY, AUTO_SELECT}. + */ + public final Operand algorithm; + + /** + * Initial state for the PRNG algorithm. For THREEFRY, it should be + * a u64[2] and for PHILOX a u64[3]. + */ + public final Operand initialState; + + /** + * The output shape of the generated data. + */ + public final Operand shape; + + /** + * The type of the tensor. + */ + public final DataType dtype; + + /** + * The Tshape attribute + */ + public final DataType Tshape; + + public Inputs(GraphOperation op) { + super(new RngBitGenerator<>(op), op, Arrays.asList("dtype", "Tshape")); + int inputIndex = 0; + algorithm = (Operand) op.input(inputIndex++); + initialState = (Operand) op.input(inputIndex++); + shape = (Operand) op.input(inputIndex++); + dtype = op.attributes().getAttrType("dtype"); + Tshape = op.attributes().getAttrType("Tshape"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Sharding.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Sharding.java index 6fb001ae681..621d49d332a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Sharding.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Sharding.java @@ -18,6 +18,7 @@ package org.tensorflow.op.xla; import java.util.Arrays; +import java.util.List; import org.tensorflow.GraphOperation; import org.tensorflow.Operand; import org.tensorflow.Operation; @@ -34,7 +35,9 @@ import org.tensorflow.types.family.TType; /** - * An op which shards the input based on the given sharding attribute. + * An op which shards the input based on the given sharding attribute. It can + * selectively annotate a subset of tensor dimensions by skipping unspecified_dims, + * and the sharding annotation should be replicated in those dims. * * @param data type for {@code output} output */ @@ -80,6 +83,13 @@ public static Sharding create(Scope scope, Operand input if (opts.sharding != null) { opBuilder.setAttr("sharding", opts.sharding); } + if (opts.unspecifiedDims != null) { + long[] unspecifiedDimsArray = new long[opts.unspecifiedDims.size()]; + for (int i = 0 ; i < unspecifiedDimsArray.length ; i++) { + unspecifiedDimsArray[i] = opts.unspecifiedDims.get(i); + } + opBuilder.setAttr("unspecified_dims", unspecifiedDimsArray); + } } } return new Sharding<>(opBuilder.build()); @@ -95,6 +105,26 @@ public static Options sharding(String sharding) { return new Options().sharding(sharding); } + /** + * Sets the unspecifiedDims option. 
+ * + * @param unspecifiedDims the unspecifiedDims option + * @return this Options instance. + */ + public static Options unspecifiedDims(List unspecifiedDims) { + return new Options().unspecifiedDims(unspecifiedDims); + } + + /** + * Sets the unspecifiedDims option. + * + * @param unspecifiedDims the unspecifiedDims option + * @return this Options instance. + */ + public static Options unspecifiedDims(Long... unspecifiedDims) { + return new Options().unspecifiedDims(unspecifiedDims); + } + /** * Gets output. * @@ -115,6 +145,8 @@ public Output asOutput() { public static class Options { private String sharding; + private List unspecifiedDims; + private Options() { } @@ -128,6 +160,28 @@ public Options sharding(String sharding) { this.sharding = sharding; return this; } + + /** + * Sets the unspecifiedDims option. + * + * @param unspecifiedDims the unspecifiedDims option + * @return this Options instance. + */ + public Options unspecifiedDims(List unspecifiedDims) { + this.unspecifiedDims = unspecifiedDims; + return this; + } + + /** + * Sets the unspecifiedDims option. + * + * @param unspecifiedDims the unspecifiedDims option + * @return this Options instance. + */ + public Options unspecifiedDims(Long... unspecifiedDims) { + this.unspecifiedDims = Arrays.asList(unspecifiedDims); + return this; + } } @OpInputsMetadata( @@ -149,12 +203,18 @@ public static class Inputs extends RawOpInputs> { */ public final String sharding; + /** + * The unspecifiedDims attribute + */ + public final long[] unspecifiedDims; + public Inputs(GraphOperation op) { - super(new Sharding<>(op), op, Arrays.asList("T", "sharding")); + super(new Sharding<>(op), op, Arrays.asList("T", "sharding", "unspecified_dims")); int inputIndex = 0; input = (Operand) op.input(inputIndex++); T = op.attributes().getAttrType("T"); sharding = op.attributes().getAttrString("sharding"); + unspecifiedDims = op.attributes().getAttrIntList("unspecified_dims"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SplitND.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SplitND.java new file mode 100644 index 00000000000..c0a59e2fce4 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SplitND.java @@ -0,0 +1,247 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! 
+ +package org.tensorflow.op.xla; + +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.proto.framework.DataType; +import org.tensorflow.types.family.TType; + +/** + * Splits input tensor across all dimensions. + * An op which slices the input tensor based on the given num_splits attribute, + * pads slices optionally, and returned the slices. Slices are returned in + * row-major order. + *

+ * <p>This op may be generated via the TPU bridge.
+ * <p>For example, with {@code input} tensor:
+ * <pre>
+ * [[0, 1, 2],
+ *  [3, 4, 5],
+ *  [6, 7, 8]]
+ * </pre>
+ * <p>{@code num_splits}:
+ * <pre>
+ * [2, 2]
+ * </pre>
+ * <p>and {@code paddings}:
+ * <pre>
+ * [1, 1]
+ * </pre>
+ * <p>the expected {@code outputs} is:
+ * <pre>
+ * [[0, 1],
+ *  [3, 4]]
+ * [[2, 0],
+ *  [5, 0]]
+ * [[6, 7],
+ *  [0, 0]]
+ * [[8, 0],
+ *  [0, 0]]
+ * </pre>
    + * + * @param data type for {@code outputs} output + */ +@OpMetadata( + opType = SplitND.OP_NAME, + inputsClass = SplitND.Inputs.class +) +public final class SplitND extends RawOp implements Iterable> { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "XlaSplitND"; + + private List> outputs; + + @SuppressWarnings("unchecked") + public SplitND(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + int outputsLength = operation.outputListLength("outputs"); + outputs = Arrays.asList((Output[]) operation.outputList(outputIdx, outputsLength)); + outputIdx += outputsLength; + } + + /** + * Factory method to create a class wrapping a new XlaSplitND operation. + * + * @param scope current scope + * @param input Input tensor to split across all dimensions. + * } + * out_arg { + * name: "outputs" + * description: <<END + * Output slices based on input and num_splits defined, in row-major order. + * @param N The value of the N attribute + * @param numSplits Number of ways to split per dimension. Shape dimensions must be evenly + * divisible. + * @param options carries optional attribute values + * @param data type for {@code XlaSplitND} output and operands + * @return a new instance of SplitND + */ + @Endpoint( + describeByClass = true + ) + public static SplitND create(Scope scope, Operand input, Long N, + List numSplits, Options... options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "SplitND"); + opBuilder.addInput(input.asOutput()); + opBuilder.setAttr("N", N); + long[] numSplitsArray = new long[numSplits.size()]; + for (int i = 0 ; i < numSplitsArray.length ; i++) { + numSplitsArray[i] = numSplits.get(i); + } + opBuilder.setAttr("num_splits", numSplitsArray); + if (options != null) { + for (Options opts : options) { + if (opts.paddings != null) { + long[] paddingsArray = new long[opts.paddings.size()]; + for (int i = 0 ; i < paddingsArray.length ; i++) { + paddingsArray[i] = opts.paddings.get(i); + } + opBuilder.setAttr("paddings", paddingsArray); + } + } + } + return new SplitND<>(opBuilder.build()); + } + + /** + * Sets the paddings option. + * + * @param paddings Optional list of right paddings per dimension of input tensor to apply before + * splitting. This can be used to make a dimension evenly divisible. + * @return this Options instance. + */ + public static Options paddings(List paddings) { + return new Options().paddings(paddings); + } + + /** + * Sets the paddings option. + * + * @param paddings Optional list of right paddings per dimension of input tensor to apply before + * splitting. This can be used to make a dimension evenly divisible. + * @return this Options instance. + */ + public static Options paddings(Long... paddings) { + return new Options().paddings(paddings); + } + + /** + * Gets outputs. + * + * @return outputs. + */ + public List> outputs() { + return outputs; + } + + @Override + @SuppressWarnings({"rawtypes", "unchecked"}) + public Iterator> iterator() { + return (Iterator) outputs.iterator(); + } + + /** + * Optional attributes for {@link org.tensorflow.op.xla.SplitND} + */ + public static class Options { + private List paddings; + + private Options() { + } + + /** + * Sets the paddings option. + * + * @param paddings Optional list of right paddings per dimension of input tensor to apply before + * splitting. This can be used to make a dimension evenly divisible. + * @return this Options instance. 
+ */ + public Options paddings(List paddings) { + this.paddings = paddings; + return this; + } + + /** + * Sets the paddings option. + * + * @param paddings Optional list of right paddings per dimension of input tensor to apply before + * splitting. This can be used to make a dimension evenly divisible. + * @return this Options instance. + */ + public Options paddings(Long... paddings) { + this.paddings = Arrays.asList(paddings); + return this; + } + } + + @OpInputsMetadata( + outputsClass = SplitND.class + ) + public static class Inputs extends RawOpInputs> { + /** + * Input tensor to split across all dimensions. + * } + * out_arg { + * name: "outputs" + * description: <<END + * Output slices based on input and num_splits defined, in row-major order. + */ + public final Operand input; + + /** + * The T attribute + */ + public final DataType T; + + /** + * Number of ways to split per dimension. Shape dimensions must be evenly + * divisible. + */ + public final long[] numSplits; + + /** + * Optional list of right paddings per dimension of input tensor to apply before + * splitting. This can be used to make a dimension evenly divisible. + */ + public final long[] paddings; + + public Inputs(GraphOperation op) { + super(new SplitND<>(op), op, Arrays.asList("T", "num_splits", "paddings")); + int inputIndex = 0; + input = (Operand) op.input(inputIndex++); + T = op.attributes().getAttrType("T"); + numSplits = op.attributes().getAttrIntList("num_splits"); + paddings = op.attributes().getAttrIntList("paddings"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SpmdFullToShardShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SpmdFullToShardShape.java index e8cfd9a709d..55c130ebb4f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SpmdFullToShardShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SpmdFullToShardShape.java @@ -18,6 +18,7 @@ package org.tensorflow.op.xla; import java.util.Arrays; +import java.util.List; import org.tensorflow.GraphOperation; import org.tensorflow.Operand; import org.tensorflow.Operation; @@ -39,6 +40,8 @@ * partitioned) with the same sharding used by manual partitioning, and outputs a * shard-shaped tensor to be consumed by later manually-partitioned ops. If the * shape is not evenly partitionable, the padding region will be masked with 0s. + * The conversion can happen partially in subgroups, by specifying the dim + * attribute, where only that dim will be converted. * * @param data type for {@code output} output */ @@ -69,6 +72,7 @@ public SpmdFullToShardShape(Operation operation) { * @param scope current scope * @param input The input value * @param manualSharding The value of the manualSharding attribute + * @param options carries optional attribute values * @param data type for {@code XlaSpmdFullToShardShape} output and operands * @return a new instance of SpmdFullToShardShape */ @@ -76,13 +80,57 @@ public SpmdFullToShardShape(Operation operation) { describeByClass = true ) public static SpmdFullToShardShape create(Scope scope, Operand input, - String manualSharding) { + String manualSharding, Options... 
options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "SpmdFullToShardShape"); opBuilder.addInput(input.asOutput()); opBuilder.setAttr("manual_sharding", manualSharding); + if (options != null) { + for (Options opts : options) { + if (opts.dim != null) { + opBuilder.setAttr("dim", opts.dim); + } + if (opts.unspecifiedDims != null) { + long[] unspecifiedDimsArray = new long[opts.unspecifiedDims.size()]; + for (int i = 0 ; i < unspecifiedDimsArray.length ; i++) { + unspecifiedDimsArray[i] = opts.unspecifiedDims.get(i); + } + opBuilder.setAttr("unspecified_dims", unspecifiedDimsArray); + } + } + } return new SpmdFullToShardShape<>(opBuilder.build()); } + /** + * Sets the dim option. + * + * @param dim the dim option + * @return this Options instance. + */ + public static Options dim(Long dim) { + return new Options().dim(dim); + } + + /** + * Sets the unspecifiedDims option. + * + * @param unspecifiedDims the unspecifiedDims option + * @return this Options instance. + */ + public static Options unspecifiedDims(List unspecifiedDims) { + return new Options().unspecifiedDims(unspecifiedDims); + } + + /** + * Sets the unspecifiedDims option. + * + * @param unspecifiedDims the unspecifiedDims option + * @return this Options instance. + */ + public static Options unspecifiedDims(Long... unspecifiedDims) { + return new Options().unspecifiedDims(unspecifiedDims); + } + /** * Gets output. * @@ -97,6 +145,51 @@ public Output asOutput() { return output; } + /** + * Optional attributes for {@link org.tensorflow.op.xla.SpmdFullToShardShape} + */ + public static class Options { + private Long dim; + + private List unspecifiedDims; + + private Options() { + } + + /** + * Sets the dim option. + * + * @param dim the dim option + * @return this Options instance. + */ + public Options dim(Long dim) { + this.dim = dim; + return this; + } + + /** + * Sets the unspecifiedDims option. + * + * @param unspecifiedDims the unspecifiedDims option + * @return this Options instance. + */ + public Options unspecifiedDims(List unspecifiedDims) { + this.unspecifiedDims = unspecifiedDims; + return this; + } + + /** + * Sets the unspecifiedDims option. + * + * @param unspecifiedDims the unspecifiedDims option + * @return this Options instance. + */ + public Options unspecifiedDims(Long... 
unspecifiedDims) { + this.unspecifiedDims = Arrays.asList(unspecifiedDims); + return this; + } + } + @OpInputsMetadata( outputsClass = SpmdFullToShardShape.class ) @@ -116,12 +209,24 @@ public static class Inputs extends RawOpInputs(op), op, Arrays.asList("T", "manual_sharding")); + super(new SpmdFullToShardShape<>(op), op, Arrays.asList("T", "manual_sharding", "dim", "unspecified_dims")); int inputIndex = 0; input = (Operand) op.input(inputIndex++); T = op.attributes().getAttrType("T"); manualSharding = op.attributes().getAttrString("manual_sharding"); + dim = op.attributes().getAttrInt("dim"); + unspecifiedDims = op.attributes().getAttrIntList("unspecified_dims"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SpmdShardToFullShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SpmdShardToFullShape.java index 5cba20d5891..38e41732d3f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SpmdShardToFullShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SpmdShardToFullShape.java @@ -18,6 +18,7 @@ package org.tensorflow.op.xla; import java.util.Arrays; +import java.util.List; import org.tensorflow.GraphOperation; import org.tensorflow.Operand; import org.tensorflow.Operation; @@ -38,7 +39,8 @@ * An op used by XLA SPMD partitioner to switch from manual partitioning to * automatic partitioning. It converts the shard-shaped, manually partitioned input * into full-shaped tensor to be partitioned automatically with the same sharding - * used by manual partitioning. + * used by manual partitioning. The conversion can happen partially in subgroups, + * by specifying the dim attribute, where only that dim will be converted. * * @param data type for {@code output} output */ @@ -70,6 +72,7 @@ public SpmdShardToFullShape(Operation operation) { * @param input The input value * @param manualSharding The value of the manualSharding attribute * @param fullShape The value of the fullShape attribute + * @param options carries optional attribute values * @param data type for {@code XlaSpmdShardToFullShape} output and operands * @return a new instance of SpmdShardToFullShape */ @@ -77,14 +80,58 @@ public SpmdShardToFullShape(Operation operation) { describeByClass = true ) public static SpmdShardToFullShape create(Scope scope, Operand input, - String manualSharding, Shape fullShape) { + String manualSharding, Shape fullShape, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "SpmdShardToFullShape"); opBuilder.addInput(input.asOutput()); opBuilder.setAttr("manual_sharding", manualSharding); opBuilder.setAttr("full_shape", fullShape); + if (options != null) { + for (Options opts : options) { + if (opts.dim != null) { + opBuilder.setAttr("dim", opts.dim); + } + if (opts.unspecifiedDims != null) { + long[] unspecifiedDimsArray = new long[opts.unspecifiedDims.size()]; + for (int i = 0 ; i < unspecifiedDimsArray.length ; i++) { + unspecifiedDimsArray[i] = opts.unspecifiedDims.get(i); + } + opBuilder.setAttr("unspecified_dims", unspecifiedDimsArray); + } + } + } return new SpmdShardToFullShape<>(opBuilder.build()); } + /** + * Sets the dim option. + * + * @param dim the dim option + * @return this Options instance. + */ + public static Options dim(Long dim) { + return new Options().dim(dim); + } + + /** + * Sets the unspecifiedDims option. + * + * @param unspecifiedDims the unspecifiedDims option + * @return this Options instance. 
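A rough sketch of how the new dim and unspecifiedDims options plug into the generated SpmdFullToShardShape factory. The tensor, the dimension indices and the sharding string are illustrative assumptions; manualSharding must really be a serialized xla.OpSharding proto, so the placeholder below only demonstrates the wiring and is not expected to produce a usable node.

try (Graph g = new Graph()) {
  Ops tf = Ops.create(g);
  Operand<TFloat32> full = tf.constant(new float[][] {{1f, 2f}, {3f, 4f}});
  String manualSharding = "";  // placeholder for a serialized xla.OpSharding proto
  SpmdFullToShardShape<TFloat32> shard = SpmdFullToShardShape.create(
      tf.scope(), full, manualSharding,
      SpmdFullToShardShape.dim(0L),                             // convert only dim 0
      SpmdFullToShardShape.unspecifiedDims(Arrays.asList(1L))); // leave dim 1 untouched
  // SpmdShardToFullShape.create(...) performs the inverse conversion.
}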
+ */ + public static Options unspecifiedDims(List unspecifiedDims) { + return new Options().unspecifiedDims(unspecifiedDims); + } + + /** + * Sets the unspecifiedDims option. + * + * @param unspecifiedDims the unspecifiedDims option + * @return this Options instance. + */ + public static Options unspecifiedDims(Long... unspecifiedDims) { + return new Options().unspecifiedDims(unspecifiedDims); + } + /** * Gets output. * @@ -99,6 +146,51 @@ public Output asOutput() { return output; } + /** + * Optional attributes for {@link org.tensorflow.op.xla.SpmdShardToFullShape} + */ + public static class Options { + private Long dim; + + private List unspecifiedDims; + + private Options() { + } + + /** + * Sets the dim option. + * + * @param dim the dim option + * @return this Options instance. + */ + public Options dim(Long dim) { + this.dim = dim; + return this; + } + + /** + * Sets the unspecifiedDims option. + * + * @param unspecifiedDims the unspecifiedDims option + * @return this Options instance. + */ + public Options unspecifiedDims(List unspecifiedDims) { + this.unspecifiedDims = unspecifiedDims; + return this; + } + + /** + * Sets the unspecifiedDims option. + * + * @param unspecifiedDims the unspecifiedDims option + * @return this Options instance. + */ + public Options unspecifiedDims(Long... unspecifiedDims) { + this.unspecifiedDims = Arrays.asList(unspecifiedDims); + return this; + } + } + @OpInputsMetadata( outputsClass = SpmdShardToFullShape.class ) @@ -123,13 +215,25 @@ public static class Inputs extends RawOpInputs(op), op, Arrays.asList("T", "manual_sharding", "full_shape")); + super(new SpmdShardToFullShape<>(op), op, Arrays.asList("T", "manual_sharding", "full_shape", "dim", "unspecified_dims")); int inputIndex = 0; input = (Operand) op.input(inputIndex++); T = op.attributes().getAttrType("T"); manualSharding = op.attributes().getAttrString("manual_sharding"); fullShape = op.attributes().getAttrShape("full_shape"); + dim = op.attributes().getAttrInt("dim"); + unspecifiedDims = op.attributes().getAttrIntList("unspecified_dims"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaVariadicReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaVariadicReduce.java index 4ae22b1c842..4255b2658a0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaVariadicReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaVariadicReduce.java @@ -41,8 +41,8 @@ * Wraps the variadic XLA Reduce operator. * Semantics are documented at * https://www.tensorflow.org/performance/xla/operation_semantics#variadic_reduce. - * - * @param data type for {@code output} output + *

    This is an expanded version of XlaVariadicReduce, with support for + * operands of different dtypes, and improved shape inference. */ @OpMetadata( opType = XlaVariadicReduce.OP_NAME, @@ -51,85 +51,83 @@ @Operator( group = "xla" ) -public final class XlaVariadicReduce extends RawOp implements Iterable> { +public final class XlaVariadicReduce extends RawOp implements Iterable> { /** * The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "XlaVariadicReduce"; + public static final String OP_NAME = "XlaVariadicReduceV2"; - private List> output; + private List> outputs; @SuppressWarnings("unchecked") public XlaVariadicReduce(Operation operation) { super(operation, OP_NAME); int outputIdx = 0; - int outputLength = operation.outputListLength("output"); - output = Arrays.asList((Output[]) operation.outputList(outputIdx, outputLength)); - outputIdx += outputLength; + int outputsLength = operation.outputListLength("outputs"); + outputs = Arrays.asList(operation.outputList(outputIdx, outputsLength)); + outputIdx += outputsLength; } /** - * Factory method to create a class wrapping a new XlaVariadicReduce operation. + * Factory method to create a class wrapping a new XlaVariadicReduceV2 operation. * * @param scope current scope - * @param input the input tensor(s) - * @param initValue scalar initial value(s) for the reduction + * @param inputs the input tensor(s) + * @param initValues scalar initial value(s) for the reduction * @param dimensionsToReduce dimension numbers over which to reduce * @param reducer a reducer function to apply - * @param data type for {@code XlaVariadicReduce} output and operands * @return a new instance of XlaVariadicReduce */ @Endpoint( describeByClass = true ) - public static XlaVariadicReduce create(Scope scope, - Iterable> input, Iterable> initValue, List dimensionsToReduce, - ConcreteFunction reducer) { + public static XlaVariadicReduce create(Scope scope, Iterable> inputs, + Iterable> initValues, List dimensionsToReduce, ConcreteFunction reducer) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "XlaVariadicReduce"); - opBuilder.addInputList(Operands.asOutputs(input)); - opBuilder.addInputList(Operands.asOutputs(initValue)); + opBuilder.addInputList(Operands.asOutputs(inputs)); + opBuilder.addInputList(Operands.asOutputs(initValues)); long[] dimensionsToReduceArray = new long[dimensionsToReduce.size()]; for (int i = 0 ; i < dimensionsToReduceArray.length ; i++) { dimensionsToReduceArray[i] = dimensionsToReduce.get(i); } opBuilder.setAttr("dimensions_to_reduce", dimensionsToReduceArray); opBuilder.setAttr("reducer", reducer); - return new XlaVariadicReduce<>(opBuilder.build()); + return new XlaVariadicReduce(opBuilder.build()); } /** - * Gets output. + * Gets outputs. * - * @return output. + * @return outputs. 
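A hedged sketch of how the regenerated, non-generic wrapper might be invoked. The helper name variadicSum, the two inputs of different dtypes, and the externally supplied ConcreteFunction reducer are illustrative assumptions; constructing the pairwise-sum reducer itself is left to the caller.

// Reduces a float tensor and an int tensor over dimension 0 in one op call.
static List<Output<?>> variadicSum(Ops tf, Operand<TFloat32> a, Operand<TInt32> b,
    ConcreteFunction sumReducer) {
  XlaVariadicReduce reduce = XlaVariadicReduce.create(
      tf.scope(),
      Arrays.<Operand<?>>asList(a, b),                            // inputs, dtypes may differ
      Arrays.<Operand<?>>asList(tf.constant(0f), tf.constant(0)), // one scalar init per input
      Arrays.asList(0L),                                          // dimensions_to_reduce
      sumReducer);                                                // pairwise reducer function
  return reduce.outputs();
}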
*/ - public List> output() { - return output; + public List> outputs() { + return outputs; } @Override @SuppressWarnings({"rawtypes", "unchecked"}) - public Iterator> iterator() { - return (Iterator) output.iterator(); + public Iterator> iterator() { + return (Iterator) outputs.iterator(); } @OpInputsMetadata( outputsClass = XlaVariadicReduce.class ) - public static class Inputs extends RawOpInputs> { + public static class Inputs extends RawOpInputs { /** * the input tensor(s) */ - public final Iterable> input; + public final Iterable> inputs; /** * scalar initial value(s) for the reduction */ - public final Iterable> initValue; + public final Iterable> initValues; /** * The T attribute */ - public final DataType T; + public final DataType[] T; /** * dimension numbers over which to reduce @@ -137,15 +135,15 @@ public static class Inputs extends RawOpInputs(op), op, Arrays.asList("T", "dimensions_to_reduce")); + super(new XlaVariadicReduce(op), op, Arrays.asList("T", "dimensions_to_reduce")); int inputIndex = 0; - int inputLength = op.inputListLength("input"); - input = Arrays.asList((Operand[]) op.inputList(inputIndex, inputLength)); - inputIndex += inputLength; - int initValueLength = op.inputListLength("init_value"); - initValue = Arrays.asList((Operand[]) op.inputList(inputIndex, initValueLength)); - inputIndex += initValueLength; - T = op.attributes().getAttrType("T"); + int inputsLength = op.inputListLength("inputs"); + inputs = Arrays.asList((Operand[]) op.inputList(inputIndex, inputsLength)); + inputIndex += inputsLength; + int initValuesLength = op.inputListLength("init_values"); + initValues = Arrays.asList((Operand[]) op.inputList(inputIndex, initValuesLength)); + inputIndex += initValuesLength; + T = op.attributes().getAttrTypeList("T"); dimensionsToReduce = op.attributes().getAttrIntList("dimensions_to_reduce"); } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/AutotuneOptions.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/AutotuneOptions.java new file mode 100644 index 00000000000..610ab74e7f2 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/AutotuneOptions.java @@ -0,0 +1,843 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/framework/dataset_options.proto + +package org.tensorflow.proto.data; + +/** + *

    + * next: 4
    + * 
    + * + * Protobuf type {@code tensorflow.data.AutotuneOptions} + */ +public final class AutotuneOptions extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.data.AutotuneOptions) + AutotuneOptionsOrBuilder { +private static final long serialVersionUID = 0L; + // Use AutotuneOptions.newBuilder() to construct. + private AutotuneOptions(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private AutotuneOptions() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new AutotuneOptions(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AutotuneOptions( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + optionalEnabledCase_ = 1; + optionalEnabled_ = input.readBool(); + break; + } + case 16: { + optionalCpuBudgetCase_ = 2; + optionalCpuBudget_ = input.readInt32(); + break; + } + case 24: { + optionalRamBudgetCase_ = 3; + optionalRamBudget_ = input.readInt64(); + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_AutotuneOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_AutotuneOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.AutotuneOptions.class, org.tensorflow.proto.data.AutotuneOptions.Builder.class); + } + + private int optionalEnabledCase_ = 0; + private java.lang.Object optionalEnabled_; + public enum OptionalEnabledCase + implements com.google.protobuf.Internal.EnumLite { + ENABLED(1), + OPTIONALENABLED_NOT_SET(0); + private final int value; + private OptionalEnabledCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static OptionalEnabledCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalEnabledCase forNumber(int value) { + switch (value) { + case 1: return ENABLED; + case 0: return OPTIONALENABLED_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalEnabledCase + getOptionalEnabledCase() { + return OptionalEnabledCase.forNumber( + optionalEnabledCase_); + } + + private int optionalCpuBudgetCase_ = 0; + private java.lang.Object optionalCpuBudget_; + public enum OptionalCpuBudgetCase + implements com.google.protobuf.Internal.EnumLite { + CPU_BUDGET(2), + OPTIONALCPUBUDGET_NOT_SET(0); + private final int value; + private OptionalCpuBudgetCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OptionalCpuBudgetCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalCpuBudgetCase forNumber(int value) { + switch (value) { + case 2: return CPU_BUDGET; + case 0: return OPTIONALCPUBUDGET_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalCpuBudgetCase + getOptionalCpuBudgetCase() { + return OptionalCpuBudgetCase.forNumber( + optionalCpuBudgetCase_); + } + + private int optionalRamBudgetCase_ = 0; + private java.lang.Object optionalRamBudget_; + public enum OptionalRamBudgetCase + implements com.google.protobuf.Internal.EnumLite { + RAM_BUDGET(3), + OPTIONALRAMBUDGET_NOT_SET(0); + private final int value; + private OptionalRamBudgetCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OptionalRamBudgetCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalRamBudgetCase forNumber(int value) { + switch (value) { + case 3: return RAM_BUDGET; + case 0: return OPTIONALRAMBUDGET_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalRamBudgetCase + getOptionalRamBudgetCase() { + return OptionalRamBudgetCase.forNumber( + optionalRamBudgetCase_); + } + + public static final int ENABLED_FIELD_NUMBER = 1; + /** + * bool enabled = 1; + */ + public boolean getEnabled() { + if (optionalEnabledCase_ == 1) { + return (java.lang.Boolean) optionalEnabled_; + } + return false; + } + + public static final int CPU_BUDGET_FIELD_NUMBER = 2; + /** + * int32 cpu_budget = 2; + */ + public int getCpuBudget() { + if (optionalCpuBudgetCase_ == 2) { + return (java.lang.Integer) optionalCpuBudget_; + } + return 0; + } + + public static final int RAM_BUDGET_FIELD_NUMBER = 3; + /** + * int64 ram_budget = 3; + */ + public long getRamBudget() { + if (optionalRamBudgetCase_ == 3) { + return (java.lang.Long) optionalRamBudget_; + } + return 0L; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (optionalEnabledCase_ == 1) { + output.writeBool( + 1, (boolean)((java.lang.Boolean) optionalEnabled_)); + } + if (optionalCpuBudgetCase_ == 2) { + output.writeInt32( + 2, (int)((java.lang.Integer) 
optionalCpuBudget_)); + } + if (optionalRamBudgetCase_ == 3) { + output.writeInt64( + 3, (long)((java.lang.Long) optionalRamBudget_)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (optionalEnabledCase_ == 1) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize( + 1, (boolean)((java.lang.Boolean) optionalEnabled_)); + } + if (optionalCpuBudgetCase_ == 2) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size( + 2, (int)((java.lang.Integer) optionalCpuBudget_)); + } + if (optionalRamBudgetCase_ == 3) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size( + 3, (long)((java.lang.Long) optionalRamBudget_)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.data.AutotuneOptions)) { + return super.equals(obj); + } + org.tensorflow.proto.data.AutotuneOptions other = (org.tensorflow.proto.data.AutotuneOptions) obj; + + if (!getOptionalEnabledCase().equals(other.getOptionalEnabledCase())) return false; + switch (optionalEnabledCase_) { + case 1: + if (getEnabled() + != other.getEnabled()) return false; + break; + case 0: + default: + } + if (!getOptionalCpuBudgetCase().equals(other.getOptionalCpuBudgetCase())) return false; + switch (optionalCpuBudgetCase_) { + case 2: + if (getCpuBudget() + != other.getCpuBudget()) return false; + break; + case 0: + default: + } + if (!getOptionalRamBudgetCase().equals(other.getOptionalRamBudgetCase())) return false; + switch (optionalRamBudgetCase_) { + case 3: + if (getRamBudget() + != other.getRamBudget()) return false; + break; + case 0: + default: + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + switch (optionalEnabledCase_) { + case 1: + hash = (37 * hash) + ENABLED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getEnabled()); + break; + case 0: + default: + } + switch (optionalCpuBudgetCase_) { + case 2: + hash = (37 * hash) + CPU_BUDGET_FIELD_NUMBER; + hash = (53 * hash) + getCpuBudget(); + break; + case 0: + default: + } + switch (optionalRamBudgetCase_) { + case 3: + hash = (37 * hash) + RAM_BUDGET_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getRamBudget()); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.data.AutotuneOptions parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.AutotuneOptions parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.AutotuneOptions parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
org.tensorflow.proto.data.AutotuneOptions parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.AutotuneOptions parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.AutotuneOptions parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.AutotuneOptions parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.AutotuneOptions parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.AutotuneOptions parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.AutotuneOptions parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.AutotuneOptions parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.AutotuneOptions parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.data.AutotuneOptions prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
    +   * next: 4
    +   * 
    + * + * Protobuf type {@code tensorflow.data.AutotuneOptions} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.data.AutotuneOptions) + org.tensorflow.proto.data.AutotuneOptionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_AutotuneOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_AutotuneOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.AutotuneOptions.class, org.tensorflow.proto.data.AutotuneOptions.Builder.class); + } + + // Construct using org.tensorflow.proto.data.AutotuneOptions.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + optionalEnabledCase_ = 0; + optionalEnabled_ = null; + optionalCpuBudgetCase_ = 0; + optionalCpuBudget_ = null; + optionalRamBudgetCase_ = 0; + optionalRamBudget_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_AutotuneOptions_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.data.AutotuneOptions getDefaultInstanceForType() { + return org.tensorflow.proto.data.AutotuneOptions.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.data.AutotuneOptions build() { + org.tensorflow.proto.data.AutotuneOptions result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.data.AutotuneOptions buildPartial() { + org.tensorflow.proto.data.AutotuneOptions result = new org.tensorflow.proto.data.AutotuneOptions(this); + if (optionalEnabledCase_ == 1) { + result.optionalEnabled_ = optionalEnabled_; + } + if (optionalCpuBudgetCase_ == 2) { + result.optionalCpuBudget_ = optionalCpuBudget_; + } + if (optionalRamBudgetCase_ == 3) { + result.optionalRamBudget_ = optionalRamBudget_; + } + result.optionalEnabledCase_ = optionalEnabledCase_; + result.optionalCpuBudgetCase_ = optionalCpuBudgetCase_; + result.optionalRamBudgetCase_ = optionalRamBudgetCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.data.AutotuneOptions) { + return mergeFrom((org.tensorflow.proto.data.AutotuneOptions)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.data.AutotuneOptions other) { + if (other == org.tensorflow.proto.data.AutotuneOptions.getDefaultInstance()) return this; + switch (other.getOptionalEnabledCase()) { + case ENABLED: { + setEnabled(other.getEnabled()); + break; + } + case OPTIONALENABLED_NOT_SET: { + break; + } + } + switch (other.getOptionalCpuBudgetCase()) { + case CPU_BUDGET: { + setCpuBudget(other.getCpuBudget()); + break; + } + case OPTIONALCPUBUDGET_NOT_SET: { + break; + } + } + switch (other.getOptionalRamBudgetCase()) { + case RAM_BUDGET: { + setRamBudget(other.getRamBudget()); + break; + } + case OPTIONALRAMBUDGET_NOT_SET: { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.data.AutotuneOptions parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.data.AutotuneOptions) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int optionalEnabledCase_ = 0; + private java.lang.Object optionalEnabled_; + public OptionalEnabledCase + getOptionalEnabledCase() { + return OptionalEnabledCase.forNumber( + optionalEnabledCase_); + } + + public Builder clearOptionalEnabled() { + optionalEnabledCase_ = 0; + optionalEnabled_ = null; + onChanged(); + return this; + } + + private int optionalCpuBudgetCase_ = 0; + private java.lang.Object optionalCpuBudget_; + public OptionalCpuBudgetCase + getOptionalCpuBudgetCase() { + return OptionalCpuBudgetCase.forNumber( + optionalCpuBudgetCase_); + } + + public Builder clearOptionalCpuBudget() { + optionalCpuBudgetCase_ = 0; + optionalCpuBudget_ = null; + onChanged(); + return this; + } + + private int optionalRamBudgetCase_ = 0; + private java.lang.Object optionalRamBudget_; + public OptionalRamBudgetCase + getOptionalRamBudgetCase() { + return OptionalRamBudgetCase.forNumber( + optionalRamBudgetCase_); + } + + public Builder clearOptionalRamBudget() { + optionalRamBudgetCase_ = 0; + optionalRamBudget_ = null; + onChanged(); + return this; + } + + + /** + * bool enabled = 1; + */ + public boolean getEnabled() { + if (optionalEnabledCase_ == 1) { + return (java.lang.Boolean) optionalEnabled_; + } + return false; + } + /** + * bool enabled = 1; + */ + public Builder setEnabled(boolean value) { + optionalEnabledCase_ = 1; + optionalEnabled_ = value; + onChanged(); + return this; + } + /** + * bool enabled = 1; + */ + public 
Builder clearEnabled() { + if (optionalEnabledCase_ == 1) { + optionalEnabledCase_ = 0; + optionalEnabled_ = null; + onChanged(); + } + return this; + } + + /** + * int32 cpu_budget = 2; + */ + public int getCpuBudget() { + if (optionalCpuBudgetCase_ == 2) { + return (java.lang.Integer) optionalCpuBudget_; + } + return 0; + } + /** + * int32 cpu_budget = 2; + */ + public Builder setCpuBudget(int value) { + optionalCpuBudgetCase_ = 2; + optionalCpuBudget_ = value; + onChanged(); + return this; + } + /** + * int32 cpu_budget = 2; + */ + public Builder clearCpuBudget() { + if (optionalCpuBudgetCase_ == 2) { + optionalCpuBudgetCase_ = 0; + optionalCpuBudget_ = null; + onChanged(); + } + return this; + } + + /** + * int64 ram_budget = 3; + */ + public long getRamBudget() { + if (optionalRamBudgetCase_ == 3) { + return (java.lang.Long) optionalRamBudget_; + } + return 0L; + } + /** + * int64 ram_budget = 3; + */ + public Builder setRamBudget(long value) { + optionalRamBudgetCase_ = 3; + optionalRamBudget_ = value; + onChanged(); + return this; + } + /** + * int64 ram_budget = 3; + */ + public Builder clearRamBudget() { + if (optionalRamBudgetCase_ == 3) { + optionalRamBudgetCase_ = 0; + optionalRamBudget_ = null; + onChanged(); + } + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.data.AutotuneOptions) + } + + // @@protoc_insertion_point(class_scope:tensorflow.data.AutotuneOptions) + private static final org.tensorflow.proto.data.AutotuneOptions DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.data.AutotuneOptions(); + } + + public static org.tensorflow.proto.data.AutotuneOptions getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AutotuneOptions parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AutotuneOptions(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.data.AutotuneOptions getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/AutotuneOptionsOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/AutotuneOptionsOrBuilder.java new file mode 100644 index 00000000000..fefc95fb410 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/AutotuneOptionsOrBuilder.java @@ -0,0 +1,30 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
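As a small usage sketch of the message generated above: each field sits in a oneof wrapper, so an unset budget is distinguishable from an explicit zero. The byte-based reading of ram_budget is an assumption, and org.tensorflow.proto.data.AutotuneOptions is assumed to be imported.

AutotuneOptions autotune = AutotuneOptions.newBuilder()
    .setEnabled(true)         // sets the optional_enabled oneof
    .setRamBudget(1L << 30)   // sets the optional_ram_budget oneof (bytes, assumed)
    .build();
boolean cpuBudgetSet = autotune.getOptionalCpuBudgetCase()
    != AutotuneOptions.OptionalCpuBudgetCase.OPTIONALCPUBUDGET_NOT_SET;  // false here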
+// source: tensorflow/core/framework/dataset_options.proto + +package org.tensorflow.proto.data; + +public interface AutotuneOptionsOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.data.AutotuneOptions) + com.google.protobuf.MessageOrBuilder { + + /** + * bool enabled = 1; + */ + boolean getEnabled(); + + /** + * int32 cpu_budget = 2; + */ + int getCpuBudget(); + + /** + * int64 ram_budget = 3; + */ + long getRamBudget(); + + public org.tensorflow.proto.data.AutotuneOptions.OptionalEnabledCase getOptionalEnabledCase(); + + public org.tensorflow.proto.data.AutotuneOptions.OptionalCpuBudgetCase getOptionalCpuBudgetCase(); + + public org.tensorflow.proto.data.AutotuneOptions.OptionalRamBudgetCase getOptionalRamBudgetCase(); +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DataService.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DataService.java new file mode 100644 index 00000000000..207a8af5191 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DataService.java @@ -0,0 +1,789 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/protobuf/data_service.proto + +package org.tensorflow.proto.data; + +public final class DataService { + private DataService() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistryLite registry) { + } + + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions( + (com.google.protobuf.ExtensionRegistryLite) registry); + } + public interface ProcessingModeDefOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.data.ProcessingModeDef) + com.google.protobuf.MessageOrBuilder { + + /** + * .tensorflow.data.ProcessingModeDef.ShardingPolicy sharding_policy = 1; + */ + int getShardingPolicyValue(); + /** + * .tensorflow.data.ProcessingModeDef.ShardingPolicy sharding_policy = 1; + */ + org.tensorflow.proto.data.DataService.ProcessingModeDef.ShardingPolicy getShardingPolicy(); + } + /** + * Protobuf type {@code tensorflow.data.ProcessingModeDef} + */ + public static final class ProcessingModeDef extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.data.ProcessingModeDef) + ProcessingModeDefOrBuilder { + private static final long serialVersionUID = 0L; + // Use ProcessingModeDef.newBuilder() to construct. 
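A short sketch of the builder pattern the comment above refers to, selecting one of the sharding policies defined below. The FILE_OR_DATA choice is illustrative only; org.tensorflow.proto.data.DataService is assumed to be imported.

DataService.ProcessingModeDef mode = DataService.ProcessingModeDef.newBuilder()
    .setShardingPolicy(DataService.ProcessingModeDef.ShardingPolicy.FILE_OR_DATA)
    .build();
// mode.getShardingPolicy() now returns ShardingPolicy.FILE_OR_DATA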
+ private ProcessingModeDef(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ProcessingModeDef() { + shardingPolicy_ = 0; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new ProcessingModeDef(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ProcessingModeDef( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + int rawValue = input.readEnum(); + + shardingPolicy_ = rawValue; + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.DataService.internal_static_tensorflow_data_ProcessingModeDef_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.DataService.internal_static_tensorflow_data_ProcessingModeDef_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.DataService.ProcessingModeDef.class, org.tensorflow.proto.data.DataService.ProcessingModeDef.Builder.class); + } + + /** + *
    +     * Specifies how data is sharded among tf.data service workers.
    +     * 
    + * + * Protobuf enum {@code tensorflow.data.ProcessingModeDef.ShardingPolicy} + */ + public enum ShardingPolicy + implements com.google.protobuf.ProtocolMessageEnum { + /** + *
    +       * No sharding will be performed. Each worker produces the entire dataset
    +       * without any sharding. With this mode, the best practice is to shuffle the
    +       * dataset nondeterministically so that workers process the dataset in
    +       * different orders.
    +       * 
    + * + * OFF = 0; + */ + OFF(0), + /** + *
    +       * The input dataset is dynamically split among workers at runtime. Each
    +       * worker gets the next split when it reads data from the dispatcher. There
    +       * is no fixed sharding with this mode.
    +       * 
    + * + * DYNAMIC = 1; + */ + DYNAMIC(1), + /** + *
    +       * The following are static sharding policies. The semantics are similar to
    +       * `tf.data.experimental.AutoShardPolicy`. These policies require:
    +       * * The tf.data service cluster has a fixed size, and you need to specify
    +       *   the workers in DispatcherConfig.
    +       * * Each client only reads from the local tf.data service worker.
    +       * Shards by input files (each worker will get a set of files to process).
+       * When this option is selected, make sure that there are at least as many
    +       * files as workers. If there are fewer input files than workers, a runtime
    +       * error will be raised.
    +       * 
    + * + * FILE = 2; + */ + FILE(2), + /** + *
    +       * Shards by elements produced by the dataset. Each worker will process the
    +       * whole dataset and discard the portion that is not for itself. Note that
+       * for this mode to correctly partition the dataset elements, the dataset
    +       * needs to produce elements in a deterministic order.
    +       * 
    + * + * DATA = 3; + */ + DATA(3), + /** + *
    +       * Attempts FILE-based sharding, falling back to DATA-based sharding on
    +       * failures.
    +       * 
    + * + * FILE_OR_DATA = 4; + */ + FILE_OR_DATA(4), + /** + *
    +       * Looks for the presence of `shard(SHARD_HINT, ...)` which is treated as a
    +       * placeholder to replace with `shard(num_workers, worker_index)`.
    +       * 
    + * + * HINT = 5; + */ + HINT(5), + UNRECOGNIZED(-1), + ; + + /** + *
    +       * No sharding will be performed. Each worker produces the entire dataset
    +       * without any sharding. With this mode, the best practice is to shuffle the
    +       * dataset nondeterministically so that workers process the dataset in
    +       * different orders.
    +       * 
    + * + * OFF = 0; + */ + public static final int OFF_VALUE = 0; + /** + *
    +       * The input dataset is dynamically split among workers at runtime. Each
    +       * worker gets the next split when it reads data from the dispatcher. There
    +       * is no fixed sharding with this mode.
    +       * 
    + * + * DYNAMIC = 1; + */ + public static final int DYNAMIC_VALUE = 1; + /** + *
    +       * The following are static sharding policies. The semantics are similar to
    +       * `tf.data.experimental.AutoShardPolicy`. These policies require:
    +       * * The tf.data service cluster has a fixed size, and you need to specify
    +       *   the workers in DispatcherConfig.
    +       * * Each client only reads from the local tf.data service worker.
    +       * Shards by input files (each worker will get a set of files to process).
+       * When this option is selected, make sure that there are at least as many
    +       * files as workers. If there are fewer input files than workers, a runtime
    +       * error will be raised.
    +       * 
    + * + * FILE = 2; + */ + public static final int FILE_VALUE = 2; + /** + *
    +       * Shards by elements produced by the dataset. Each worker will process the
    +       * whole dataset and discard the portion that is not for itself. Note that
+       * for this mode to correctly partition the dataset elements, the dataset
    +       * needs to produce elements in a deterministic order.
    +       * 
    + * + * DATA = 3; + */ + public static final int DATA_VALUE = 3; + /** + *
    +       * Attempts FILE-based sharding, falling back to DATA-based sharding on
    +       * failures.
    +       * 
    + * + * FILE_OR_DATA = 4; + */ + public static final int FILE_OR_DATA_VALUE = 4; + /** + *
    +       * Looks for the presence of `shard(SHARD_HINT, ...)` which is treated as a
    +       * placeholder to replace with `shard(num_workers, worker_index)`.
    +       * 
    + * + * HINT = 5; + */ + public static final int HINT_VALUE = 5; + + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ShardingPolicy valueOf(int value) { + return forNumber(value); + } + + public static ShardingPolicy forNumber(int value) { + switch (value) { + case 0: return OFF; + case 1: return DYNAMIC; + case 2: return FILE; + case 3: return DATA; + case 4: return FILE_OR_DATA; + case 5: return HINT; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final com.google.protobuf.Internal.EnumLiteMap< + ShardingPolicy> internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public ShardingPolicy findValueByNumber(int number) { + return ShardingPolicy.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.tensorflow.proto.data.DataService.ProcessingModeDef.getDescriptor().getEnumTypes().get(0); + } + + private static final ShardingPolicy[] VALUES = values(); + + public static ShardingPolicy valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private ShardingPolicy(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:tensorflow.data.ProcessingModeDef.ShardingPolicy) + } + + public static final int SHARDING_POLICY_FIELD_NUMBER = 1; + private int shardingPolicy_; + /** + * .tensorflow.data.ProcessingModeDef.ShardingPolicy sharding_policy = 1; + */ + public int getShardingPolicyValue() { + return shardingPolicy_; + } + /** + * .tensorflow.data.ProcessingModeDef.ShardingPolicy sharding_policy = 1; + */ + public org.tensorflow.proto.data.DataService.ProcessingModeDef.ShardingPolicy getShardingPolicy() { + @SuppressWarnings("deprecation") + org.tensorflow.proto.data.DataService.ProcessingModeDef.ShardingPolicy result = org.tensorflow.proto.data.DataService.ProcessingModeDef.ShardingPolicy.valueOf(shardingPolicy_); + return result == null ? 
org.tensorflow.proto.data.DataService.ProcessingModeDef.ShardingPolicy.UNRECOGNIZED : result; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (shardingPolicy_ != org.tensorflow.proto.data.DataService.ProcessingModeDef.ShardingPolicy.OFF.getNumber()) { + output.writeEnum(1, shardingPolicy_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (shardingPolicy_ != org.tensorflow.proto.data.DataService.ProcessingModeDef.ShardingPolicy.OFF.getNumber()) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, shardingPolicy_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.data.DataService.ProcessingModeDef)) { + return super.equals(obj); + } + org.tensorflow.proto.data.DataService.ProcessingModeDef other = (org.tensorflow.proto.data.DataService.ProcessingModeDef) obj; + + if (shardingPolicy_ != other.shardingPolicy_) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SHARDING_POLICY_FIELD_NUMBER; + hash = (53 * hash) + shardingPolicy_; + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.data.DataService.ProcessingModeDef parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.DataService.ProcessingModeDef parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.DataService.ProcessingModeDef parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.DataService.ProcessingModeDef parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.DataService.ProcessingModeDef parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.DataService.ProcessingModeDef parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
org.tensorflow.proto.data.DataService.ProcessingModeDef parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.DataService.ProcessingModeDef parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.DataService.ProcessingModeDef parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.DataService.ProcessingModeDef parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.DataService.ProcessingModeDef parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.DataService.ProcessingModeDef parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.data.DataService.ProcessingModeDef prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code tensorflow.data.ProcessingModeDef} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.data.ProcessingModeDef) + org.tensorflow.proto.data.DataService.ProcessingModeDefOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.DataService.internal_static_tensorflow_data_ProcessingModeDef_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.DataService.internal_static_tensorflow_data_ProcessingModeDef_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.DataService.ProcessingModeDef.class, org.tensorflow.proto.data.DataService.ProcessingModeDef.Builder.class); + } + + // Construct using org.tensorflow.proto.data.DataService.ProcessingModeDef.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + shardingPolicy_ = 0; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.data.DataService.internal_static_tensorflow_data_ProcessingModeDef_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.data.DataService.ProcessingModeDef getDefaultInstanceForType() { + return org.tensorflow.proto.data.DataService.ProcessingModeDef.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.data.DataService.ProcessingModeDef build() { + org.tensorflow.proto.data.DataService.ProcessingModeDef result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.data.DataService.ProcessingModeDef buildPartial() { + org.tensorflow.proto.data.DataService.ProcessingModeDef result = new org.tensorflow.proto.data.DataService.ProcessingModeDef(this); + result.shardingPolicy_ = shardingPolicy_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + 
public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.data.DataService.ProcessingModeDef) { + return mergeFrom((org.tensorflow.proto.data.DataService.ProcessingModeDef)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.data.DataService.ProcessingModeDef other) { + if (other == org.tensorflow.proto.data.DataService.ProcessingModeDef.getDefaultInstance()) return this; + if (other.shardingPolicy_ != 0) { + setShardingPolicyValue(other.getShardingPolicyValue()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.data.DataService.ProcessingModeDef parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.data.DataService.ProcessingModeDef) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int shardingPolicy_ = 0; + /** + * .tensorflow.data.ProcessingModeDef.ShardingPolicy sharding_policy = 1; + */ + public int getShardingPolicyValue() { + return shardingPolicy_; + } + /** + * .tensorflow.data.ProcessingModeDef.ShardingPolicy sharding_policy = 1; + */ + public Builder setShardingPolicyValue(int value) { + shardingPolicy_ = value; + onChanged(); + return this; + } + /** + * .tensorflow.data.ProcessingModeDef.ShardingPolicy sharding_policy = 1; + */ + public org.tensorflow.proto.data.DataService.ProcessingModeDef.ShardingPolicy getShardingPolicy() { + @SuppressWarnings("deprecation") + org.tensorflow.proto.data.DataService.ProcessingModeDef.ShardingPolicy result = org.tensorflow.proto.data.DataService.ProcessingModeDef.ShardingPolicy.valueOf(shardingPolicy_); + return result == null ? 
org.tensorflow.proto.data.DataService.ProcessingModeDef.ShardingPolicy.UNRECOGNIZED : result; + } + /** + * .tensorflow.data.ProcessingModeDef.ShardingPolicy sharding_policy = 1; + */ + public Builder setShardingPolicy(org.tensorflow.proto.data.DataService.ProcessingModeDef.ShardingPolicy value) { + if (value == null) { + throw new NullPointerException(); + } + + shardingPolicy_ = value.getNumber(); + onChanged(); + return this; + } + /** + * .tensorflow.data.ProcessingModeDef.ShardingPolicy sharding_policy = 1; + */ + public Builder clearShardingPolicy() { + + shardingPolicy_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.data.ProcessingModeDef) + } + + // @@protoc_insertion_point(class_scope:tensorflow.data.ProcessingModeDef) + private static final org.tensorflow.proto.data.DataService.ProcessingModeDef DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.data.DataService.ProcessingModeDef(); + } + + public static org.tensorflow.proto.data.DataService.ProcessingModeDef getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ProcessingModeDef parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ProcessingModeDef(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.data.DataService.ProcessingModeDef getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_data_ProcessingModeDef_descriptor; + private static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_data_ProcessingModeDef_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n+tensorflow/core/protobuf/data_service." 
+ + "proto\022\017tensorflow.data\"\267\001\n\021ProcessingMod" + + "eDef\022J\n\017sharding_policy\030\001 \001(\01621.tensorfl" + + "ow.data.ProcessingModeDef.ShardingPolicy" + + "\"V\n\016ShardingPolicy\022\007\n\003OFF\020\000\022\013\n\007DYNAMIC\020\001" + + "\022\010\n\004FILE\020\002\022\010\n\004DATA\020\003\022\020\n\014FILE_OR_DATA\020\004\022\010" + + "\n\004HINT\020\005Br\n\031org.tensorflow.proto.dataZUg" + + "ithub.com/tensorflow/tensorflow/tensorfl" + + "ow/go/core/protobuf/for_core_protos_go_p" + + "rotob\006proto3" + }; + descriptor = com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }); + internal_static_tensorflow_data_ProcessingModeDef_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_tensorflow_data_ProcessingModeDef_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_data_ProcessingModeDef_descriptor, + new java.lang.String[] { "ShardingPolicy", }); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DatasetMetadata.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DatasetMetadata.java new file mode 100644 index 00000000000..8867f9700c1 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DatasetMetadata.java @@ -0,0 +1,543 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/framework/dataset_metadata.proto + +package org.tensorflow.proto.data; + +public final class DatasetMetadata { + private DatasetMetadata() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistryLite registry) { + } + + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions( + (com.google.protobuf.ExtensionRegistryLite) registry); + } + public interface MetadataOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.data.Metadata) + com.google.protobuf.MessageOrBuilder { + + /** + * bytes name = 1; + */ + com.google.protobuf.ByteString getName(); + } + /** + *
    +   * next: 2
    +   * 
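A minimal, illustrative sketch of using the new Metadata message (the wrapper class name and the example dataset name below are hypothetical; the accessors themselves — newBuilder(), setName(ByteString), build(), parseFrom(byte[]) — all appear in this generated file):

    import com.google.protobuf.ByteString;
    import org.tensorflow.proto.data.DatasetMetadata.Metadata;

    public final class MetadataUsageSketch {
      public static void main(String[] args) throws Exception {
        // Build the new Metadata message using only the generated accessors.
        Metadata metadata = Metadata.newBuilder()
            .setName(ByteString.copyFromUtf8("my_dataset"))   // hypothetical name
            .build();

        // Round-trip through the wire format; the single bytes field survives.
        Metadata parsed = Metadata.parseFrom(metadata.toByteArray());
        System.out.println(parsed.getName().toStringUtf8());
      }
    }

Because the field is declared as bytes rather than string, callers go through ByteString instead of java.lang.String.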
    + * + * Protobuf type {@code tensorflow.data.Metadata} + */ + public static final class Metadata extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.data.Metadata) + MetadataOrBuilder { + private static final long serialVersionUID = 0L; + // Use Metadata.newBuilder() to construct. + private Metadata(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private Metadata() { + name_ = com.google.protobuf.ByteString.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new Metadata(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Metadata( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + + name_ = input.readBytes(); + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.DatasetMetadata.internal_static_tensorflow_data_Metadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.DatasetMetadata.internal_static_tensorflow_data_Metadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.DatasetMetadata.Metadata.class, org.tensorflow.proto.data.DatasetMetadata.Metadata.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString name_; + /** + * bytes name = 1; + */ + public com.google.protobuf.ByteString getName() { + return name_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!name_.isEmpty()) { + output.writeBytes(1, name_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!name_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, name_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean 
equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.data.DatasetMetadata.Metadata)) { + return super.equals(obj); + } + org.tensorflow.proto.data.DatasetMetadata.Metadata other = (org.tensorflow.proto.data.DatasetMetadata.Metadata) obj; + + if (!getName() + .equals(other.getName())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.data.DatasetMetadata.Metadata parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.DatasetMetadata.Metadata parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.DatasetMetadata.Metadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.DatasetMetadata.Metadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.DatasetMetadata.Metadata parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.DatasetMetadata.Metadata parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.DatasetMetadata.Metadata parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.DatasetMetadata.Metadata parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.DatasetMetadata.Metadata parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.DatasetMetadata.Metadata parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.DatasetMetadata.Metadata parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + 
return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.DatasetMetadata.Metadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.data.DatasetMetadata.Metadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
    +     * next: 2
    +     * 
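The companion message generated above in DataService.java, ProcessingModeDef, follows the same builder pattern. A hypothetical sketch of selecting a sharding policy for the tf.data service (the wrapper class name is invented; setShardingPolicy, build(), and the DYNAMIC enum value are all visible in the generated DataService.java):

    import org.tensorflow.proto.data.DataService.ProcessingModeDef;
    import org.tensorflow.proto.data.DataService.ProcessingModeDef.ShardingPolicy;

    public final class ShardingPolicySketch {
      public static void main(String[] args) {
        // DYNAMIC is enum value 1 in the ShardingPolicy descriptor above.
        ProcessingModeDef mode = ProcessingModeDef.newBuilder()
            .setShardingPolicy(ShardingPolicy.DYNAMIC)
            .build();
        System.out.println(mode.getShardingPolicy()); // prints DYNAMIC
      }
    }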
    + * + * Protobuf type {@code tensorflow.data.Metadata} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.data.Metadata) + org.tensorflow.proto.data.DatasetMetadata.MetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.DatasetMetadata.internal_static_tensorflow_data_Metadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.DatasetMetadata.internal_static_tensorflow_data_Metadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.DatasetMetadata.Metadata.class, org.tensorflow.proto.data.DatasetMetadata.Metadata.Builder.class); + } + + // Construct using org.tensorflow.proto.data.DatasetMetadata.Metadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + name_ = com.google.protobuf.ByteString.EMPTY; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.data.DatasetMetadata.internal_static_tensorflow_data_Metadata_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.data.DatasetMetadata.Metadata getDefaultInstanceForType() { + return org.tensorflow.proto.data.DatasetMetadata.Metadata.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.data.DatasetMetadata.Metadata build() { + org.tensorflow.proto.data.DatasetMetadata.Metadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.data.DatasetMetadata.Metadata buildPartial() { + org.tensorflow.proto.data.DatasetMetadata.Metadata result = new org.tensorflow.proto.data.DatasetMetadata.Metadata(this); + result.name_ = name_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof 
org.tensorflow.proto.data.DatasetMetadata.Metadata) { + return mergeFrom((org.tensorflow.proto.data.DatasetMetadata.Metadata)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.data.DatasetMetadata.Metadata other) { + if (other == org.tensorflow.proto.data.DatasetMetadata.Metadata.getDefaultInstance()) return this; + if (other.getName() != com.google.protobuf.ByteString.EMPTY) { + setName(other.getName()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.data.DatasetMetadata.Metadata parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.data.DatasetMetadata.Metadata) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private com.google.protobuf.ByteString name_ = com.google.protobuf.ByteString.EMPTY; + /** + * bytes name = 1; + */ + public com.google.protobuf.ByteString getName() { + return name_; + } + /** + * bytes name = 1; + */ + public Builder setName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + + name_ = value; + onChanged(); + return this; + } + /** + * bytes name = 1; + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.data.Metadata) + } + + // @@protoc_insertion_point(class_scope:tensorflow.data.Metadata) + private static final org.tensorflow.proto.data.DatasetMetadata.Metadata DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.data.DatasetMetadata.Metadata(); + } + + public static org.tensorflow.proto.data.DatasetMetadata.Metadata getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Metadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Metadata(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.data.DatasetMetadata.Metadata getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_data_Metadata_descriptor; + private static final + 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_data_Metadata_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n0tensorflow/core/framework/dataset_meta" + + "data.proto\022\017tensorflow.data\"\030\n\010Metadata\022" + + "\014\n\004name\030\001 \001(\014Bt\n\031org.tensorflow.proto.da" + + "taZWgithub.com/tensorflow/tensorflow/ten" + + "sorflow/go/core/framework/dataset_metada" + + "ta_go_protob\006proto3" + }; + descriptor = com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }); + internal_static_tensorflow_data_Metadata_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_tensorflow_data_Metadata_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_data_Metadata_descriptor, + new java.lang.String[] { "Name", }); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DatasetOptionsProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DatasetOptionsProtos.java index 51ede7436dd..78ec0c7a58f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DatasetOptionsProtos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DatasetOptionsProtos.java @@ -14,6 +14,11 @@ public static void registerAllExtensions( registerAllExtensions( (com.google.protobuf.ExtensionRegistryLite) registry); } + static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_data_AutotuneOptions_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_data_AutotuneOptions_fieldAccessorTable; static final com.google.protobuf.Descriptors.Descriptor internal_static_tensorflow_data_DistributeOptions_descriptor; static final @@ -44,82 +49,88 @@ public static void registerAllExtensions( static { java.lang.String[] descriptorData = { "\n/tensorflow/core/framework/dataset_opti" + - "ons.proto\022\017tensorflow.data\"\177\n\021Distribute" + - "Options\022;\n\021auto_shard_policy\030\001 \001(\0162 .ten" + - "sorflow.data.AutoShardPolicy\022\025\n\013num_devi" + - "ces\030\002 \001(\005H\000B\026\n\024optional_num_devices\"\270\006\n\023" + - "OptimizationOptions\022%\n\033apply_default_opt" + - "imizations\030\001 \001(\010H\000\022\022\n\010autotune\030\002 \001(\010H\001\022\032" + - "\n\020autotune_buffers\030\003 \001(\010H\002\022\035\n\023autotune_c" + - "pu_budget\030\004 \001(\005H\003\022\035\n\023autotune_ram_budget" + - "\030\005 \001(\003H\004\022\027\n\rfilter_fusion\030\006 \001(\010H\005\022\036\n\024map" + - "_and_batch_fusion\030\t \001(\010H\006\022\037\n\025map_and_fil" + - "ter_fusion\030\n \001(\010H\007\022\024\n\nmap_fusion\030\013 \001(\010H\010" + - "\022\035\n\023map_parallelization\030\014 \001(\010H\t\022\032\n\020noop_" + - "elimination\030\016 \001(\010H\n\022\030\n\016parallel_batch\030\017 " + - "\001(\010H\013\022#\n\031shuffle_and_repeat_fusion\030\021 \001(\010" + - "H\014B&\n$optional_apply_default_optimizatio" + - "nsB\023\n\021optional_autotuneB\033\n\031optional_auto" + - 
"tune_buffersB\036\n\034optional_autotune_cpu_bu" + - "dgetB\036\n\034optional_autotune_ram_budgetB\030\n\026" + - "optional_filter_fusionB\037\n\035optional_map_a" + - "nd_batch_fusionB \n\036optional_map_and_filt" + - "er_fusionB\025\n\023optional_map_fusionB\036\n\034opti" + - "onal_map_parallelizationB\033\n\031optional_noo" + - "p_eliminationB\031\n\027optional_parallel_batch" + - "B$\n\"optional_shuffle_and_repeat_fusionJ\004" + - "\010\007\020\010J\004\010\010\020\tJ\004\010\r\020\016J\004\010\020\020\021\"\242\001\n\020ThreadingOpti" + - "ons\022\"\n\030max_intra_op_parallelism\030\001 \001(\005H\000\022" + - "!\n\027private_threadpool_size\030\002 \001(\005H\001B#\n!op" + - "tional_max_intra_op_parallelismB\"\n optio" + - "nal_private_threadpool_size\"\212\003\n\007Options\022" + - "\027\n\rdeterministic\030\001 \001(\010H\000\022>\n\022distribute_o" + - "ptions\030\002 \001(\0132\".tensorflow.data.Distribut" + - "eOptions\022B\n\024optimization_options\030\003 \001(\0132$" + - ".tensorflow.data.OptimizationOptions\022\017\n\005" + - "slack\030\004 \001(\010H\001\022<\n\021threading_options\030\005 \001(\013" + - "2!.tensorflow.data.ThreadingOptions\022E\n\025e" + - "xternal_state_policy\030\006 \001(\0162$.tensorflow." + - "data.ExternalStatePolicyH\002B\030\n\026optional_d" + - "eterministicB\020\n\016optional_slackB \n\036option" + - "al_external_state_policy*K\n\017AutoShardPol" + - "icy\022\010\n\004AUTO\020\000\022\010\n\004FILE\020\001\022\010\n\004DATA\020\002\022\010\n\004HIN" + - "T\020\003\022\020\n\003OFF\020\377\377\377\377\377\377\377\377\377\001*J\n\023ExternalStatePo" + - "licy\022\017\n\013POLICY_WARN\020\000\022\021\n\rPOLICY_IGNORE\020\001" + - "\022\017\n\013POLICY_FAIL\020\002B\213\001\n\031org.tensorflow.pro" + - "to.dataB\024DatasetOptionsProtosP\001ZVgithub." 
+ - "com/tensorflow/tensorflow/tensorflow/go/" + - "core/framework/dataset_options_go_protob" + - "\006proto3" + "ons.proto\022\017tensorflow.data\"\222\001\n\017AutotuneO" + + "ptions\022\021\n\007enabled\030\001 \001(\010H\000\022\024\n\ncpu_budget\030" + + "\002 \001(\005H\001\022\024\n\nram_budget\030\003 \001(\003H\002B\022\n\020optiona" + + "l_enabledB\025\n\023optional_cpu_budgetB\025\n\023opti" + + "onal_ram_budget\"\177\n\021DistributeOptions\022;\n\021" + + "auto_shard_policy\030\001 \001(\0162 .tensorflow.dat" + + "a.AutoShardPolicy\022\025\n\013num_devices\030\002 \001(\005H\000" + + "B\026\n\024optional_num_devices\"\360\004\n\023Optimizatio" + + "nOptions\022%\n\033apply_default_optimizations\030" + + "\001 \001(\010H\000\022\027\n\rfilter_fusion\030\006 \001(\010H\001\022\036\n\024map_" + + "and_batch_fusion\030\t \001(\010H\002\022\037\n\025map_and_filt" + + "er_fusion\030\n \001(\010H\003\022\024\n\nmap_fusion\030\013 \001(\010H\004\022" + + "\035\n\023map_parallelization\030\014 \001(\010H\005\022\032\n\020noop_e" + + "limination\030\016 \001(\010H\006\022\030\n\016parallel_batch\030\017 \001" + + "(\010H\007\022#\n\031shuffle_and_repeat_fusion\030\021 \001(\010H" + + "\010B&\n$optional_apply_default_optimization" + + "sB\030\n\026optional_filter_fusionB\037\n\035optional_" + + "map_and_batch_fusionB \n\036optional_map_and" + + "_filter_fusionB\025\n\023optional_map_fusionB\036\n" + + "\034optional_map_parallelizationB\033\n\031optiona" + + "l_noop_eliminationB\031\n\027optional_parallel_" + + "batchB$\n\"optional_shuffle_and_repeat_fus" + + "ionJ\004\010\002\020\003J\004\010\003\020\004J\004\010\004\020\005J\004\010\005\020\006J\004\010\007\020\010J\004\010\010\020\tJ" + + "\004\010\r\020\016J\004\010\020\020\021\"\242\001\n\020ThreadingOptions\022\"\n\030max_" + + "intra_op_parallelism\030\001 \001(\005H\000\022!\n\027private_" + + "threadpool_size\030\002 \001(\005H\001B#\n!optional_max_" + + "intra_op_parallelismB\"\n optional_private" + + "_threadpool_size\"\306\003\n\007Options\022\027\n\rdetermin" + + "istic\030\001 \001(\010H\000\022:\n\020autotune_options\030\007 \001(\0132" + + " .tensorflow.data.AutotuneOptions\022>\n\022dis" + + "tribute_options\030\002 \001(\0132\".tensorflow.data." 
+ + "DistributeOptions\022B\n\024optimization_option" + + "s\030\003 \001(\0132$.tensorflow.data.OptimizationOp" + + "tions\022\017\n\005slack\030\004 \001(\010H\001\022<\n\021threading_opti" + + "ons\030\005 \001(\0132!.tensorflow.data.ThreadingOpt" + + "ions\022E\n\025external_state_policy\030\006 \001(\0162$.te" + + "nsorflow.data.ExternalStatePolicyH\002B\030\n\026o" + + "ptional_deterministicB\020\n\016optional_slackB" + + " \n\036optional_external_state_policy*K\n\017Aut" + + "oShardPolicy\022\010\n\004AUTO\020\000\022\010\n\004FILE\020\001\022\010\n\004DATA" + + "\020\002\022\010\n\004HINT\020\003\022\020\n\003OFF\020\377\377\377\377\377\377\377\377\377\001*J\n\023Extern" + + "alStatePolicy\022\017\n\013POLICY_WARN\020\000\022\021\n\rPOLICY" + + "_IGNORE\020\001\022\017\n\013POLICY_FAIL\020\002B\213\001\n\031org.tenso" + + "rflow.proto.dataB\024DatasetOptionsProtosP\001" + + "ZVgithub.com/tensorflow/tensorflow/tenso" + + "rflow/go/core/framework/dataset_options_" + + "go_protob\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { }); - internal_static_tensorflow_data_DistributeOptions_descriptor = + internal_static_tensorflow_data_AutotuneOptions_descriptor = getDescriptor().getMessageTypes().get(0); + internal_static_tensorflow_data_AutotuneOptions_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_data_AutotuneOptions_descriptor, + new java.lang.String[] { "Enabled", "CpuBudget", "RamBudget", "OptionalEnabled", "OptionalCpuBudget", "OptionalRamBudget", }); + internal_static_tensorflow_data_DistributeOptions_descriptor = + getDescriptor().getMessageTypes().get(1); internal_static_tensorflow_data_DistributeOptions_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_data_DistributeOptions_descriptor, new java.lang.String[] { "AutoShardPolicy", "NumDevices", "OptionalNumDevices", }); internal_static_tensorflow_data_OptimizationOptions_descriptor = - getDescriptor().getMessageTypes().get(1); + getDescriptor().getMessageTypes().get(2); internal_static_tensorflow_data_OptimizationOptions_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_data_OptimizationOptions_descriptor, - new java.lang.String[] { "ApplyDefaultOptimizations", "Autotune", "AutotuneBuffers", "AutotuneCpuBudget", "AutotuneRamBudget", "FilterFusion", "MapAndBatchFusion", "MapAndFilterFusion", "MapFusion", "MapParallelization", "NoopElimination", "ParallelBatch", "ShuffleAndRepeatFusion", "OptionalApplyDefaultOptimizations", "OptionalAutotune", "OptionalAutotuneBuffers", "OptionalAutotuneCpuBudget", "OptionalAutotuneRamBudget", "OptionalFilterFusion", "OptionalMapAndBatchFusion", "OptionalMapAndFilterFusion", "OptionalMapFusion", "OptionalMapParallelization", "OptionalNoopElimination", "OptionalParallelBatch", "OptionalShuffleAndRepeatFusion", }); + new java.lang.String[] { "ApplyDefaultOptimizations", "FilterFusion", "MapAndBatchFusion", "MapAndFilterFusion", "MapFusion", "MapParallelization", "NoopElimination", "ParallelBatch", "ShuffleAndRepeatFusion", "OptionalApplyDefaultOptimizations", "OptionalFilterFusion", "OptionalMapAndBatchFusion", "OptionalMapAndFilterFusion", "OptionalMapFusion", "OptionalMapParallelization", "OptionalNoopElimination", "OptionalParallelBatch", "OptionalShuffleAndRepeatFusion", }); 
internal_static_tensorflow_data_ThreadingOptions_descriptor = - getDescriptor().getMessageTypes().get(2); + getDescriptor().getMessageTypes().get(3); internal_static_tensorflow_data_ThreadingOptions_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_data_ThreadingOptions_descriptor, new java.lang.String[] { "MaxIntraOpParallelism", "PrivateThreadpoolSize", "OptionalMaxIntraOpParallelism", "OptionalPrivateThreadpoolSize", }); internal_static_tensorflow_data_Options_descriptor = - getDescriptor().getMessageTypes().get(3); + getDescriptor().getMessageTypes().get(4); internal_static_tensorflow_data_Options_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_data_Options_descriptor, - new java.lang.String[] { "Deterministic", "DistributeOptions", "OptimizationOptions", "Slack", "ThreadingOptions", "ExternalStatePolicy", "OptionalDeterministic", "OptionalSlack", "OptionalExternalStatePolicy", }); + new java.lang.String[] { "Deterministic", "AutotuneOptions", "DistributeOptions", "OptimizationOptions", "Slack", "ThreadingOptions", "ExternalStatePolicy", "OptionalDeterministic", "OptionalSlack", "OptionalExternalStatePolicy", }); } // @@protoc_insertion_point(outer_class_scope) diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DistributeOptions.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DistributeOptions.java index dba5ebe1b32..53414e39014 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DistributeOptions.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DistributeOptions.java @@ -4,6 +4,10 @@ package org.tensorflow.proto.data; /** + *
    + * next: 3
    + * 
    + * * Protobuf type {@code tensorflow.data.DistributeOptions} */ public final class DistributeOptions extends @@ -337,6 +341,10 @@ protected Builder newBuilderForType( return builder; } /** + *
    +   * next: 3
    +   * 
    + * * Protobuf type {@code tensorflow.data.DistributeOptions} */ public static final class Builder extends diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/OptimizationOptions.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/OptimizationOptions.java index eac7b3d4136..4298ad9684a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/OptimizationOptions.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/OptimizationOptions.java @@ -4,6 +4,10 @@ package org.tensorflow.proto.data; /** + *
    + * next: 18
    + * 
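The autotune, autotune_buffers, autotune_cpu_budget, and autotune_ram_budget oneofs removed below have moved into the standalone AutotuneOptions message (fields enabled, cpu_budget, ram_budget, per the descriptor and field accessor table in DatasetOptionsProtos.java). A sketch of the replacement API, assuming the standard protoc-generated accessor names, since AutotuneOptions.java itself is not shown in this excerpt:

    import org.tensorflow.proto.data.AutotuneOptions;

    public final class AutotuneOptionsSketch {
      public static void main(String[] args) {
        // Assumed accessors follow the usual protoc-java naming for the
        // enabled / cpu_budget / ram_budget fields.
        AutotuneOptions autotune = AutotuneOptions.newBuilder()
            .setEnabled(true)
            .setCpuBudget(4)                        // int32 cpu_budget = 2;
            .setRamBudget(1024L * 1024L * 1024L)    // int64 ram_budget = 3; 1 GiB
            .build();
        System.out.println(autotune.getEnabled());
      }
    }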
    + * * Protobuf type {@code tensorflow.data.OptimizationOptions} */ public final class OptimizationOptions extends @@ -53,26 +57,6 @@ private OptimizationOptions( optionalApplyDefaultOptimizations_ = input.readBool(); break; } - case 16: { - optionalAutotuneCase_ = 2; - optionalAutotune_ = input.readBool(); - break; - } - case 24: { - optionalAutotuneBuffersCase_ = 3; - optionalAutotuneBuffers_ = input.readBool(); - break; - } - case 32: { - optionalAutotuneCpuBudgetCase_ = 4; - optionalAutotuneCpuBudget_ = input.readInt32(); - break; - } - case 40: { - optionalAutotuneRamBudgetCase_ = 5; - optionalAutotuneRamBudget_ = input.readInt64(); - break; - } case 48: { optionalFilterFusionCase_ = 6; optionalFilterFusion_ = input.readBool(); @@ -181,150 +165,6 @@ public int getNumber() { optionalApplyDefaultOptimizationsCase_); } - private int optionalAutotuneCase_ = 0; - private java.lang.Object optionalAutotune_; - public enum OptionalAutotuneCase - implements com.google.protobuf.Internal.EnumLite { - AUTOTUNE(2), - OPTIONALAUTOTUNE_NOT_SET(0); - private final int value; - private OptionalAutotuneCase(int value) { - this.value = value; - } - /** - * @deprecated Use {@link #forNumber(int)} instead. - */ - @java.lang.Deprecated - public static OptionalAutotuneCase valueOf(int value) { - return forNumber(value); - } - - public static OptionalAutotuneCase forNumber(int value) { - switch (value) { - case 2: return AUTOTUNE; - case 0: return OPTIONALAUTOTUNE_NOT_SET; - default: return null; - } - } - public int getNumber() { - return this.value; - } - }; - - public OptionalAutotuneCase - getOptionalAutotuneCase() { - return OptionalAutotuneCase.forNumber( - optionalAutotuneCase_); - } - - private int optionalAutotuneBuffersCase_ = 0; - private java.lang.Object optionalAutotuneBuffers_; - public enum OptionalAutotuneBuffersCase - implements com.google.protobuf.Internal.EnumLite { - AUTOTUNE_BUFFERS(3), - OPTIONALAUTOTUNEBUFFERS_NOT_SET(0); - private final int value; - private OptionalAutotuneBuffersCase(int value) { - this.value = value; - } - /** - * @deprecated Use {@link #forNumber(int)} instead. - */ - @java.lang.Deprecated - public static OptionalAutotuneBuffersCase valueOf(int value) { - return forNumber(value); - } - - public static OptionalAutotuneBuffersCase forNumber(int value) { - switch (value) { - case 3: return AUTOTUNE_BUFFERS; - case 0: return OPTIONALAUTOTUNEBUFFERS_NOT_SET; - default: return null; - } - } - public int getNumber() { - return this.value; - } - }; - - public OptionalAutotuneBuffersCase - getOptionalAutotuneBuffersCase() { - return OptionalAutotuneBuffersCase.forNumber( - optionalAutotuneBuffersCase_); - } - - private int optionalAutotuneCpuBudgetCase_ = 0; - private java.lang.Object optionalAutotuneCpuBudget_; - public enum OptionalAutotuneCpuBudgetCase - implements com.google.protobuf.Internal.EnumLite { - AUTOTUNE_CPU_BUDGET(4), - OPTIONALAUTOTUNECPUBUDGET_NOT_SET(0); - private final int value; - private OptionalAutotuneCpuBudgetCase(int value) { - this.value = value; - } - /** - * @deprecated Use {@link #forNumber(int)} instead. 
- */ - @java.lang.Deprecated - public static OptionalAutotuneCpuBudgetCase valueOf(int value) { - return forNumber(value); - } - - public static OptionalAutotuneCpuBudgetCase forNumber(int value) { - switch (value) { - case 4: return AUTOTUNE_CPU_BUDGET; - case 0: return OPTIONALAUTOTUNECPUBUDGET_NOT_SET; - default: return null; - } - } - public int getNumber() { - return this.value; - } - }; - - public OptionalAutotuneCpuBudgetCase - getOptionalAutotuneCpuBudgetCase() { - return OptionalAutotuneCpuBudgetCase.forNumber( - optionalAutotuneCpuBudgetCase_); - } - - private int optionalAutotuneRamBudgetCase_ = 0; - private java.lang.Object optionalAutotuneRamBudget_; - public enum OptionalAutotuneRamBudgetCase - implements com.google.protobuf.Internal.EnumLite { - AUTOTUNE_RAM_BUDGET(5), - OPTIONALAUTOTUNERAMBUDGET_NOT_SET(0); - private final int value; - private OptionalAutotuneRamBudgetCase(int value) { - this.value = value; - } - /** - * @deprecated Use {@link #forNumber(int)} instead. - */ - @java.lang.Deprecated - public static OptionalAutotuneRamBudgetCase valueOf(int value) { - return forNumber(value); - } - - public static OptionalAutotuneRamBudgetCase forNumber(int value) { - switch (value) { - case 5: return AUTOTUNE_RAM_BUDGET; - case 0: return OPTIONALAUTOTUNERAMBUDGET_NOT_SET; - default: return null; - } - } - public int getNumber() { - return this.value; - } - }; - - public OptionalAutotuneRamBudgetCase - getOptionalAutotuneRamBudgetCase() { - return OptionalAutotuneRamBudgetCase.forNumber( - optionalAutotuneRamBudgetCase_); - } - private int optionalFilterFusionCase_ = 0; private java.lang.Object optionalFilterFusion_; public enum OptionalFilterFusionCase @@ -624,50 +464,6 @@ public boolean getApplyDefaultOptimizations() { return false; } - public static final int AUTOTUNE_FIELD_NUMBER = 2; - /** - * bool autotune = 2; - */ - public boolean getAutotune() { - if (optionalAutotuneCase_ == 2) { - return (java.lang.Boolean) optionalAutotune_; - } - return false; - } - - public static final int AUTOTUNE_BUFFERS_FIELD_NUMBER = 3; - /** - * bool autotune_buffers = 3; - */ - public boolean getAutotuneBuffers() { - if (optionalAutotuneBuffersCase_ == 3) { - return (java.lang.Boolean) optionalAutotuneBuffers_; - } - return false; - } - - public static final int AUTOTUNE_CPU_BUDGET_FIELD_NUMBER = 4; - /** - * int32 autotune_cpu_budget = 4; - */ - public int getAutotuneCpuBudget() { - if (optionalAutotuneCpuBudgetCase_ == 4) { - return (java.lang.Integer) optionalAutotuneCpuBudget_; - } - return 0; - } - - public static final int AUTOTUNE_RAM_BUDGET_FIELD_NUMBER = 5; - /** - * int64 autotune_ram_budget = 5; - */ - public long getAutotuneRamBudget() { - if (optionalAutotuneRamBudgetCase_ == 5) { - return (java.lang.Long) optionalAutotuneRamBudget_; - } - return 0L; - } - public static final int FILTER_FUSION_FIELD_NUMBER = 6; /** * bool filter_fusion = 6; @@ -774,22 +570,6 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) output.writeBool( 1, (boolean)((java.lang.Boolean) optionalApplyDefaultOptimizations_)); } - if (optionalAutotuneCase_ == 2) { - output.writeBool( - 2, (boolean)((java.lang.Boolean) optionalAutotune_)); - } - if (optionalAutotuneBuffersCase_ == 3) { - output.writeBool( - 3, (boolean)((java.lang.Boolean) optionalAutotuneBuffers_)); - } - if (optionalAutotuneCpuBudgetCase_ == 4) { - output.writeInt32( - 4, (int)((java.lang.Integer) optionalAutotuneCpuBudget_)); - } - if (optionalAutotuneRamBudgetCase_ == 5) { - output.writeInt64( - 5, 
(long)((java.lang.Long) optionalAutotuneRamBudget_)); - } if (optionalFilterFusionCase_ == 6) { output.writeBool( 6, (boolean)((java.lang.Boolean) optionalFilterFusion_)); @@ -836,26 +616,6 @@ public int getSerializedSize() { .computeBoolSize( 1, (boolean)((java.lang.Boolean) optionalApplyDefaultOptimizations_)); } - if (optionalAutotuneCase_ == 2) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize( - 2, (boolean)((java.lang.Boolean) optionalAutotune_)); - } - if (optionalAutotuneBuffersCase_ == 3) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize( - 3, (boolean)((java.lang.Boolean) optionalAutotuneBuffers_)); - } - if (optionalAutotuneCpuBudgetCase_ == 4) { - size += com.google.protobuf.CodedOutputStream - .computeInt32Size( - 4, (int)((java.lang.Integer) optionalAutotuneCpuBudget_)); - } - if (optionalAutotuneRamBudgetCase_ == 5) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size( - 5, (long)((java.lang.Long) optionalAutotuneRamBudget_)); - } if (optionalFilterFusionCase_ == 6) { size += com.google.protobuf.CodedOutputStream .computeBoolSize( @@ -920,42 +680,6 @@ public boolean equals(final java.lang.Object obj) { case 0: default: } - if (!getOptionalAutotuneCase().equals(other.getOptionalAutotuneCase())) return false; - switch (optionalAutotuneCase_) { - case 2: - if (getAutotune() - != other.getAutotune()) return false; - break; - case 0: - default: - } - if (!getOptionalAutotuneBuffersCase().equals(other.getOptionalAutotuneBuffersCase())) return false; - switch (optionalAutotuneBuffersCase_) { - case 3: - if (getAutotuneBuffers() - != other.getAutotuneBuffers()) return false; - break; - case 0: - default: - } - if (!getOptionalAutotuneCpuBudgetCase().equals(other.getOptionalAutotuneCpuBudgetCase())) return false; - switch (optionalAutotuneCpuBudgetCase_) { - case 4: - if (getAutotuneCpuBudget() - != other.getAutotuneCpuBudget()) return false; - break; - case 0: - default: - } - if (!getOptionalAutotuneRamBudgetCase().equals(other.getOptionalAutotuneRamBudgetCase())) return false; - switch (optionalAutotuneRamBudgetCase_) { - case 5: - if (getAutotuneRamBudget() - != other.getAutotuneRamBudget()) return false; - break; - case 0: - default: - } if (!getOptionalFilterFusionCase().equals(other.getOptionalFilterFusionCase())) return false; switch (optionalFilterFusionCase_) { case 6: @@ -1048,41 +772,6 @@ public int hashCode() { case 0: default: } - switch (optionalAutotuneCase_) { - case 2: - hash = (37 * hash) + AUTOTUNE_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( - getAutotune()); - break; - case 0: - default: - } - switch (optionalAutotuneBuffersCase_) { - case 3: - hash = (37 * hash) + AUTOTUNE_BUFFERS_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( - getAutotuneBuffers()); - break; - case 0: - default: - } - switch (optionalAutotuneCpuBudgetCase_) { - case 4: - hash = (37 * hash) + AUTOTUNE_CPU_BUDGET_FIELD_NUMBER; - hash = (53 * hash) + getAutotuneCpuBudget(); - break; - case 0: - default: - } - switch (optionalAutotuneRamBudgetCase_) { - case 5: - hash = (37 * hash) + AUTOTUNE_RAM_BUDGET_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getAutotuneRamBudget()); - break; - case 0: - default: - } switch (optionalFilterFusionCase_) { case 6: hash = (37 * hash) + FILTER_FUSION_FIELD_NUMBER; @@ -1251,6 +940,10 @@ protected Builder newBuilderForType( return builder; } /** + *
    +   * next: 18
    +   * 
    + * * Protobuf type {@code tensorflow.data.OptimizationOptions} */ public static final class Builder extends @@ -1290,14 +983,6 @@ public Builder clear() { super.clear(); optionalApplyDefaultOptimizationsCase_ = 0; optionalApplyDefaultOptimizations_ = null; - optionalAutotuneCase_ = 0; - optionalAutotune_ = null; - optionalAutotuneBuffersCase_ = 0; - optionalAutotuneBuffers_ = null; - optionalAutotuneCpuBudgetCase_ = 0; - optionalAutotuneCpuBudget_ = null; - optionalAutotuneRamBudgetCase_ = 0; - optionalAutotuneRamBudget_ = null; optionalFilterFusionCase_ = 0; optionalFilterFusion_ = null; optionalMapAndBatchFusionCase_ = 0; @@ -1343,18 +1028,6 @@ public org.tensorflow.proto.data.OptimizationOptions buildPartial() { if (optionalApplyDefaultOptimizationsCase_ == 1) { result.optionalApplyDefaultOptimizations_ = optionalApplyDefaultOptimizations_; } - if (optionalAutotuneCase_ == 2) { - result.optionalAutotune_ = optionalAutotune_; - } - if (optionalAutotuneBuffersCase_ == 3) { - result.optionalAutotuneBuffers_ = optionalAutotuneBuffers_; - } - if (optionalAutotuneCpuBudgetCase_ == 4) { - result.optionalAutotuneCpuBudget_ = optionalAutotuneCpuBudget_; - } - if (optionalAutotuneRamBudgetCase_ == 5) { - result.optionalAutotuneRamBudget_ = optionalAutotuneRamBudget_; - } if (optionalFilterFusionCase_ == 6) { result.optionalFilterFusion_ = optionalFilterFusion_; } @@ -1380,10 +1053,6 @@ public org.tensorflow.proto.data.OptimizationOptions buildPartial() { result.optionalShuffleAndRepeatFusion_ = optionalShuffleAndRepeatFusion_; } result.optionalApplyDefaultOptimizationsCase_ = optionalApplyDefaultOptimizationsCase_; - result.optionalAutotuneCase_ = optionalAutotuneCase_; - result.optionalAutotuneBuffersCase_ = optionalAutotuneBuffersCase_; - result.optionalAutotuneCpuBudgetCase_ = optionalAutotuneCpuBudgetCase_; - result.optionalAutotuneRamBudgetCase_ = optionalAutotuneRamBudgetCase_; result.optionalFilterFusionCase_ = optionalFilterFusionCase_; result.optionalMapAndBatchFusionCase_ = optionalMapAndBatchFusionCase_; result.optionalMapAndFilterFusionCase_ = optionalMapAndFilterFusionCase_; @@ -1449,42 +1118,6 @@ public Builder mergeFrom(org.tensorflow.proto.data.OptimizationOptions other) { break; } } - switch (other.getOptionalAutotuneCase()) { - case AUTOTUNE: { - setAutotune(other.getAutotune()); - break; - } - case OPTIONALAUTOTUNE_NOT_SET: { - break; - } - } - switch (other.getOptionalAutotuneBuffersCase()) { - case AUTOTUNE_BUFFERS: { - setAutotuneBuffers(other.getAutotuneBuffers()); - break; - } - case OPTIONALAUTOTUNEBUFFERS_NOT_SET: { - break; - } - } - switch (other.getOptionalAutotuneCpuBudgetCase()) { - case AUTOTUNE_CPU_BUDGET: { - setAutotuneCpuBudget(other.getAutotuneCpuBudget()); - break; - } - case OPTIONALAUTOTUNECPUBUDGET_NOT_SET: { - break; - } - } - switch (other.getOptionalAutotuneRamBudgetCase()) { - case AUTOTUNE_RAM_BUDGET: { - setAutotuneRamBudget(other.getAutotuneRamBudget()); - break; - } - case OPTIONALAUTOTUNERAMBUDGET_NOT_SET: { - break; - } - } switch (other.getOptionalFilterFusionCase()) { case FILTER_FUSION: { setFilterFusion(other.getFilterFusion()); @@ -1600,66 +1233,6 @@ public Builder clearOptionalApplyDefaultOptimizations() { return this; } - private int optionalAutotuneCase_ = 0; - private java.lang.Object optionalAutotune_; - public OptionalAutotuneCase - getOptionalAutotuneCase() { - return OptionalAutotuneCase.forNumber( - optionalAutotuneCase_); - } - - public Builder clearOptionalAutotune() { - optionalAutotuneCase_ = 0; - optionalAutotune_ = null; 
- onChanged(); - return this; - } - - private int optionalAutotuneBuffersCase_ = 0; - private java.lang.Object optionalAutotuneBuffers_; - public OptionalAutotuneBuffersCase - getOptionalAutotuneBuffersCase() { - return OptionalAutotuneBuffersCase.forNumber( - optionalAutotuneBuffersCase_); - } - - public Builder clearOptionalAutotuneBuffers() { - optionalAutotuneBuffersCase_ = 0; - optionalAutotuneBuffers_ = null; - onChanged(); - return this; - } - - private int optionalAutotuneCpuBudgetCase_ = 0; - private java.lang.Object optionalAutotuneCpuBudget_; - public OptionalAutotuneCpuBudgetCase - getOptionalAutotuneCpuBudgetCase() { - return OptionalAutotuneCpuBudgetCase.forNumber( - optionalAutotuneCpuBudgetCase_); - } - - public Builder clearOptionalAutotuneCpuBudget() { - optionalAutotuneCpuBudgetCase_ = 0; - optionalAutotuneCpuBudget_ = null; - onChanged(); - return this; - } - - private int optionalAutotuneRamBudgetCase_ = 0; - private java.lang.Object optionalAutotuneRamBudget_; - public OptionalAutotuneRamBudgetCase - getOptionalAutotuneRamBudgetCase() { - return OptionalAutotuneRamBudgetCase.forNumber( - optionalAutotuneRamBudgetCase_); - } - - public Builder clearOptionalAutotuneRamBudget() { - optionalAutotuneRamBudgetCase_ = 0; - optionalAutotuneRamBudget_ = null; - onChanged(); - return this; - } - private int optionalFilterFusionCase_ = 0; private java.lang.Object optionalFilterFusion_; public OptionalFilterFusionCase @@ -1811,126 +1384,6 @@ public Builder clearApplyDefaultOptimizations() { return this; } - /** - * bool autotune = 2; - */ - public boolean getAutotune() { - if (optionalAutotuneCase_ == 2) { - return (java.lang.Boolean) optionalAutotune_; - } - return false; - } - /** - * bool autotune = 2; - */ - public Builder setAutotune(boolean value) { - optionalAutotuneCase_ = 2; - optionalAutotune_ = value; - onChanged(); - return this; - } - /** - * bool autotune = 2; - */ - public Builder clearAutotune() { - if (optionalAutotuneCase_ == 2) { - optionalAutotuneCase_ = 0; - optionalAutotune_ = null; - onChanged(); - } - return this; - } - - /** - * bool autotune_buffers = 3; - */ - public boolean getAutotuneBuffers() { - if (optionalAutotuneBuffersCase_ == 3) { - return (java.lang.Boolean) optionalAutotuneBuffers_; - } - return false; - } - /** - * bool autotune_buffers = 3; - */ - public Builder setAutotuneBuffers(boolean value) { - optionalAutotuneBuffersCase_ = 3; - optionalAutotuneBuffers_ = value; - onChanged(); - return this; - } - /** - * bool autotune_buffers = 3; - */ - public Builder clearAutotuneBuffers() { - if (optionalAutotuneBuffersCase_ == 3) { - optionalAutotuneBuffersCase_ = 0; - optionalAutotuneBuffers_ = null; - onChanged(); - } - return this; - } - - /** - * int32 autotune_cpu_budget = 4; - */ - public int getAutotuneCpuBudget() { - if (optionalAutotuneCpuBudgetCase_ == 4) { - return (java.lang.Integer) optionalAutotuneCpuBudget_; - } - return 0; - } - /** - * int32 autotune_cpu_budget = 4; - */ - public Builder setAutotuneCpuBudget(int value) { - optionalAutotuneCpuBudgetCase_ = 4; - optionalAutotuneCpuBudget_ = value; - onChanged(); - return this; - } - /** - * int32 autotune_cpu_budget = 4; - */ - public Builder clearAutotuneCpuBudget() { - if (optionalAutotuneCpuBudgetCase_ == 4) { - optionalAutotuneCpuBudgetCase_ = 0; - optionalAutotuneCpuBudget_ = null; - onChanged(); - } - return this; - } - - /** - * int64 autotune_ram_budget = 5; - */ - public long getAutotuneRamBudget() { - if (optionalAutotuneRamBudgetCase_ == 5) { - return (java.lang.Long) 
optionalAutotuneRamBudget_; - } - return 0L; - } - /** - * int64 autotune_ram_budget = 5; - */ - public Builder setAutotuneRamBudget(long value) { - optionalAutotuneRamBudgetCase_ = 5; - optionalAutotuneRamBudget_ = value; - onChanged(); - return this; - } - /** - * int64 autotune_ram_budget = 5; - */ - public Builder clearAutotuneRamBudget() { - if (optionalAutotuneRamBudgetCase_ == 5) { - optionalAutotuneRamBudgetCase_ = 0; - optionalAutotuneRamBudget_ = null; - onChanged(); - } - return this; - } - /** * bool filter_fusion = 6; */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/OptimizationOptionsOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/OptimizationOptionsOrBuilder.java index ac861c4710b..8a79fae02c9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/OptimizationOptionsOrBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/OptimizationOptionsOrBuilder.java @@ -12,26 +12,6 @@ public interface OptimizationOptionsOrBuilder extends */ boolean getApplyDefaultOptimizations(); - /** - * bool autotune = 2; - */ - boolean getAutotune(); - - /** - * bool autotune_buffers = 3; - */ - boolean getAutotuneBuffers(); - - /** - * int32 autotune_cpu_budget = 4; - */ - int getAutotuneCpuBudget(); - - /** - * int64 autotune_ram_budget = 5; - */ - long getAutotuneRamBudget(); - /** * bool filter_fusion = 6; */ @@ -74,14 +54,6 @@ public interface OptimizationOptionsOrBuilder extends public org.tensorflow.proto.data.OptimizationOptions.OptionalApplyDefaultOptimizationsCase getOptionalApplyDefaultOptimizationsCase(); - public org.tensorflow.proto.data.OptimizationOptions.OptionalAutotuneCase getOptionalAutotuneCase(); - - public org.tensorflow.proto.data.OptimizationOptions.OptionalAutotuneBuffersCase getOptionalAutotuneBuffersCase(); - - public org.tensorflow.proto.data.OptimizationOptions.OptionalAutotuneCpuBudgetCase getOptionalAutotuneCpuBudgetCase(); - - public org.tensorflow.proto.data.OptimizationOptions.OptionalAutotuneRamBudgetCase getOptionalAutotuneRamBudgetCase(); - public org.tensorflow.proto.data.OptimizationOptions.OptionalFilterFusionCase getOptionalFilterFusionCase(); public org.tensorflow.proto.data.OptimizationOptions.OptionalMapAndBatchFusionCase getOptionalMapAndBatchFusionCase(); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/Options.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/Options.java index b0b8481e67b..7ce0ed3aa29 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/Options.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/Options.java @@ -7,6 +7,7 @@ *
      * Message stored with Dataset objects to control how datasets are processed and
      * optimized.
    + * next: 8
      * 
    * * Protobuf type {@code tensorflow.data.Options} @@ -108,6 +109,19 @@ private Options( optionalExternalStatePolicy_ = rawValue; break; } + case 58: { + org.tensorflow.proto.data.AutotuneOptions.Builder subBuilder = null; + if (autotuneOptions_ != null) { + subBuilder = autotuneOptions_.toBuilder(); + } + autotuneOptions_ = input.readMessage(org.tensorflow.proto.data.AutotuneOptions.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(autotuneOptions_); + autotuneOptions_ = subBuilder.buildPartial(); + } + + break; + } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { @@ -259,6 +273,39 @@ public boolean getDeterministic() { return false; } + public static final int AUTOTUNE_OPTIONS_FIELD_NUMBER = 7; + private org.tensorflow.proto.data.AutotuneOptions autotuneOptions_; + /** + *
+   * The autotune options associated with the dataset.
    +   * 
    + * + * .tensorflow.data.AutotuneOptions autotune_options = 7; + */ + public boolean hasAutotuneOptions() { + return autotuneOptions_ != null; + } + /** + *
+   * The autotune options associated with the dataset.
    +   * 
    + * + * .tensorflow.data.AutotuneOptions autotune_options = 7; + */ + public org.tensorflow.proto.data.AutotuneOptions getAutotuneOptions() { + return autotuneOptions_ == null ? org.tensorflow.proto.data.AutotuneOptions.getDefaultInstance() : autotuneOptions_; + } + /** + *
+   * The autotune options associated with the dataset.
    +   * 
    + * + * .tensorflow.data.AutotuneOptions autotune_options = 7; + */ + public org.tensorflow.proto.data.AutotuneOptionsOrBuilder getAutotuneOptionsOrBuilder() { + return getAutotuneOptions(); + } + public static final int DISTRIBUTE_OPTIONS_FIELD_NUMBER = 2; private org.tensorflow.proto.data.DistributeOptions distributeOptions_; /** @@ -426,6 +473,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (optionalExternalStatePolicyCase_ == 6) { output.writeEnum(6, ((java.lang.Integer) optionalExternalStatePolicy_)); } + if (autotuneOptions_ != null) { + output.writeMessage(7, getAutotuneOptions()); + } unknownFields.writeTo(output); } @@ -461,6 +511,10 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeEnumSize(6, ((java.lang.Integer) optionalExternalStatePolicy_)); } + if (autotuneOptions_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, getAutotuneOptions()); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -476,6 +530,11 @@ public boolean equals(final java.lang.Object obj) { } org.tensorflow.proto.data.Options other = (org.tensorflow.proto.data.Options) obj; + if (hasAutotuneOptions() != other.hasAutotuneOptions()) return false; + if (hasAutotuneOptions()) { + if (!getAutotuneOptions() + .equals(other.getAutotuneOptions())) return false; + } if (hasDistributeOptions() != other.hasDistributeOptions()) return false; if (hasDistributeOptions()) { if (!getDistributeOptions() @@ -529,6 +588,10 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); + if (hasAutotuneOptions()) { + hash = (37 * hash) + AUTOTUNE_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getAutotuneOptions().hashCode(); + } if (hasDistributeOptions()) { hash = (37 * hash) + DISTRIBUTE_OPTIONS_FIELD_NUMBER; hash = (53 * hash) + getDistributeOptions().hashCode(); @@ -666,6 +729,7 @@ protected Builder newBuilderForType( *
        * Message stored with Dataset objects to control how datasets are processed and
        * optimized.
    +   * next: 8
        * 
    * * Protobuf type {@code tensorflow.data.Options} @@ -705,6 +769,12 @@ private void maybeForceBuilderInitialization() { @java.lang.Override public Builder clear() { super.clear(); + if (autotuneOptionsBuilder_ == null) { + autotuneOptions_ = null; + } else { + autotuneOptions_ = null; + autotuneOptionsBuilder_ = null; + } if (distributeOptionsBuilder_ == null) { distributeOptions_ = null; } else { @@ -758,6 +828,11 @@ public org.tensorflow.proto.data.Options buildPartial() { if (optionalDeterministicCase_ == 1) { result.optionalDeterministic_ = optionalDeterministic_; } + if (autotuneOptionsBuilder_ == null) { + result.autotuneOptions_ = autotuneOptions_; + } else { + result.autotuneOptions_ = autotuneOptionsBuilder_.build(); + } if (distributeOptionsBuilder_ == null) { result.distributeOptions_ = distributeOptions_; } else { @@ -830,6 +905,9 @@ public Builder mergeFrom(com.google.protobuf.Message other) { public Builder mergeFrom(org.tensorflow.proto.data.Options other) { if (other == org.tensorflow.proto.data.Options.getDefaultInstance()) return this; + if (other.hasAutotuneOptions()) { + mergeAutotuneOptions(other.getAutotuneOptions()); + } if (other.hasDistributeOptions()) { mergeDistributeOptions(other.getDistributeOptions()); } @@ -970,6 +1048,159 @@ public Builder clearDeterministic() { return this; } + private org.tensorflow.proto.data.AutotuneOptions autotuneOptions_; + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.data.AutotuneOptions, org.tensorflow.proto.data.AutotuneOptions.Builder, org.tensorflow.proto.data.AutotuneOptionsOrBuilder> autotuneOptionsBuilder_; + /** + *
+     * The autotune options associated with the dataset.
    +     * 
    + * + * .tensorflow.data.AutotuneOptions autotune_options = 7; + */ + public boolean hasAutotuneOptions() { + return autotuneOptionsBuilder_ != null || autotuneOptions_ != null; + } + /** + *
+     * The autotune options associated with the dataset.
    +     * 
    + * + * .tensorflow.data.AutotuneOptions autotune_options = 7; + */ + public org.tensorflow.proto.data.AutotuneOptions getAutotuneOptions() { + if (autotuneOptionsBuilder_ == null) { + return autotuneOptions_ == null ? org.tensorflow.proto.data.AutotuneOptions.getDefaultInstance() : autotuneOptions_; + } else { + return autotuneOptionsBuilder_.getMessage(); + } + } + /** + *
+     * The autotune options associated with the dataset.
    +     * 
    + * + * .tensorflow.data.AutotuneOptions autotune_options = 7; + */ + public Builder setAutotuneOptions(org.tensorflow.proto.data.AutotuneOptions value) { + if (autotuneOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + autotuneOptions_ = value; + onChanged(); + } else { + autotuneOptionsBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * The autotune options associated with the dataset.
    +     * 
    + * + * .tensorflow.data.AutotuneOptions autotune_options = 7; + */ + public Builder setAutotuneOptions( + org.tensorflow.proto.data.AutotuneOptions.Builder builderForValue) { + if (autotuneOptionsBuilder_ == null) { + autotuneOptions_ = builderForValue.build(); + onChanged(); + } else { + autotuneOptionsBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * The autotune options associated with the dataset.
    +     * 
    + * + * .tensorflow.data.AutotuneOptions autotune_options = 7; + */ + public Builder mergeAutotuneOptions(org.tensorflow.proto.data.AutotuneOptions value) { + if (autotuneOptionsBuilder_ == null) { + if (autotuneOptions_ != null) { + autotuneOptions_ = + org.tensorflow.proto.data.AutotuneOptions.newBuilder(autotuneOptions_).mergeFrom(value).buildPartial(); + } else { + autotuneOptions_ = value; + } + onChanged(); + } else { + autotuneOptionsBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * The autotune options associated with the dataset.
    +     * 
    + * + * .tensorflow.data.AutotuneOptions autotune_options = 7; + */ + public Builder clearAutotuneOptions() { + if (autotuneOptionsBuilder_ == null) { + autotuneOptions_ = null; + onChanged(); + } else { + autotuneOptions_ = null; + autotuneOptionsBuilder_ = null; + } + + return this; + } + /** + *
+     * The autotune options associated with the dataset.
    +     * 
    + * + * .tensorflow.data.AutotuneOptions autotune_options = 7; + */ + public org.tensorflow.proto.data.AutotuneOptions.Builder getAutotuneOptionsBuilder() { + + onChanged(); + return getAutotuneOptionsFieldBuilder().getBuilder(); + } + /** + *
+     * The autotune options associated with the dataset.
    +     * 
    + * + * .tensorflow.data.AutotuneOptions autotune_options = 7; + */ + public org.tensorflow.proto.data.AutotuneOptionsOrBuilder getAutotuneOptionsOrBuilder() { + if (autotuneOptionsBuilder_ != null) { + return autotuneOptionsBuilder_.getMessageOrBuilder(); + } else { + return autotuneOptions_ == null ? + org.tensorflow.proto.data.AutotuneOptions.getDefaultInstance() : autotuneOptions_; + } + } + /** + *
+     * The autotune options associated with the dataset.
    +     * 
    + * + * .tensorflow.data.AutotuneOptions autotune_options = 7; + */ + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.data.AutotuneOptions, org.tensorflow.proto.data.AutotuneOptions.Builder, org.tensorflow.proto.data.AutotuneOptionsOrBuilder> + getAutotuneOptionsFieldBuilder() { + if (autotuneOptionsBuilder_ == null) { + autotuneOptionsBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.data.AutotuneOptions, org.tensorflow.proto.data.AutotuneOptions.Builder, org.tensorflow.proto.data.AutotuneOptionsOrBuilder>( + getAutotuneOptions(), + getParentForChildren(), + isClean()); + autotuneOptions_ = null; + } + return autotuneOptionsBuilder_; + } + private org.tensorflow.proto.data.DistributeOptions distributeOptions_; private com.google.protobuf.SingleFieldBuilderV3< org.tensorflow.proto.data.DistributeOptions, org.tensorflow.proto.data.DistributeOptions.Builder, org.tensorflow.proto.data.DistributeOptionsOrBuilder> distributeOptionsBuilder_; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/OptionsOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/OptionsOrBuilder.java index b4f2077d1ca..570e75577cf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/OptionsOrBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/OptionsOrBuilder.java @@ -12,6 +12,31 @@ public interface OptionsOrBuilder extends */ boolean getDeterministic(); + /** + *
+   * The autotune options associated with the dataset.
    +   * 
    + * + * .tensorflow.data.AutotuneOptions autotune_options = 7; + */ + boolean hasAutotuneOptions(); + /** + *
+   * The autotune options associated with the dataset.
    +   * 
    + * + * .tensorflow.data.AutotuneOptions autotune_options = 7; + */ + org.tensorflow.proto.data.AutotuneOptions getAutotuneOptions(); + /** + *
+   * The autotune options associated with the dataset.
    +   * 
    + * + * .tensorflow.data.AutotuneOptions autotune_options = 7; + */ + org.tensorflow.proto.data.AutotuneOptionsOrBuilder getAutotuneOptionsOrBuilder(); + /** *
        * The distribution strategy options associated with the dataset.
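The hunks above remove the per-field autotune knobs from OptimizationOptions and add a message-typed autotune_options field (field 7) to tensorflow.data.Options. A minimal Java sketch of the generated accessors shown here; AutotuneOptions' own fields are not part of this hunk, so only the standard generated newBuilder()/getDefaultInstance() calls are assumed, and the class name below is illustrative:

    import org.tensorflow.proto.data.AutotuneOptions;
    import org.tensorflow.proto.data.Options;

    public final class AutotuneOptionsExample {
      public static void main(String[] args) {
        // Attach an (empty) AutotuneOptions sub-message; its individual fields are not
        // shown in this patch, so none are set here.
        Options options = Options.newBuilder()
            .setAutotuneOptions(AutotuneOptions.newBuilder().build())
            .build();

        // hasAutotuneOptions()/getAutotuneOptions() are the accessors generated above.
        if (options.hasAutotuneOptions()) {
          System.out.println(options.getAutotuneOptions());
        }

        // On the builder, mergeAutotuneOptions(...) merges into an existing sub-message
        // rather than replacing it, matching the generated merge logic above.
        Options merged = options.toBuilder()
            .mergeAutotuneOptions(AutotuneOptions.getDefaultInstance())
            .build();
        System.out.println(merged.hasAutotuneOptions()); // true
      }
    }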
    diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/ThreadingOptions.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/ThreadingOptions.java
    index eebc5aaf459..4af22789cb7 100644
    --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/ThreadingOptions.java
    +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/ThreadingOptions.java
    @@ -4,6 +4,10 @@
     package org.tensorflow.proto.data;
     
     /**
    + * 
    + * next: 3
    + * 
    + * * Protobuf type {@code tensorflow.data.ThreadingOptions} */ public final class ThreadingOptions extends @@ -381,6 +385,10 @@ protected Builder newBuilderForType( return builder; } /** + *
    +   * next: 3
    +   * 
    + * * Protobuf type {@code tensorflow.data.ThreadingOptions} */ public static final class Builder extends diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/ServiceConfig.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/ServiceConfig.java index 3b49f356e2a..06ab63a0693 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/ServiceConfig.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/ServiceConfig.java @@ -78,7 +78,64 @@ public interface DispatcherConfigOrBuilder extends /** *
    -     * How often the dispatcher should scan through to delete old and unused jobs.
    +     * (Optional.) If the job uses auto-sharding, it needs to specify a fixed list
    +     * of worker addresses that will register with the dispatcher. The worker
    +     * addresses should be in the format "host" or "host:port", where "port" is an
    +     * integer, named port, or %port% to match any port.
    +     * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be
    +     * allowed in the "COLOCATED" mode.
    +     * 
    + * + * repeated string worker_addresses = 7; + */ + java.util.List + getWorkerAddressesList(); + /** + *
    +     * (Optional.) If the job uses auto-sharding, it needs to specify a fixed list
    +     * of worker addresses that will register with the dispatcher. The worker
    +     * addresses should be in the format "host" or "host:port", where "port" is an
    +     * integer, named port, or %port% to match any port.
    +     * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be
    +     * allowed in the "COLOCATED" mode.
    +     * 
    + * + * repeated string worker_addresses = 7; + */ + int getWorkerAddressesCount(); + /** + *
    +     * (Optional.) If the job uses auto-sharding, it needs to specify a fixed list
    +     * of worker addresses that will register with the dispatcher. The worker
    +     * addresses should be in the format "host" or "host:port", where "port" is an
    +     * integer, named port, or %port% to match any port.
    +     * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be
    +     * allowed in the "COLOCATED" mode.
    +     * 
    + * + * repeated string worker_addresses = 7; + */ + java.lang.String getWorkerAddresses(int index); + /** + *
    +     * (Optional.) If the job uses auto-sharding, it needs to specify a fixed list
    +     * of worker addresses that will register with the dispatcher. The worker
    +     * addresses should be in the format "host" or "host:port", where "port" is an
    +     * integer, named port, or %port% to match any port.
    +     * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be
    +     * allowed in the "COLOCATED" mode.
    +     * 
    + * + * repeated string worker_addresses = 7; + */ + com.google.protobuf.ByteString + getWorkerAddressesBytes(int index); + + /** + *
    +     * How often the dispatcher should scan through to delete old and unused
    +     * jobs. A value of 0 indicates that the decision should be left up to the
    +     * runtime.
          * 
    * * int64 job_gc_check_interval_ms = 5; @@ -89,16 +146,29 @@ public interface DispatcherConfigOrBuilder extends *
          * How long a job needs to be unused before it becomes a candidate for garbage
          * collection. A value of -1 indicates that jobs should never be garbage
    -     * collected.
    +     * collected. A value of 0 indicates that the decision should be left up to
    +     * the runtime.
          * 
    * * int64 job_gc_timeout_ms = 6; */ long getJobGcTimeoutMs(); + + /** + *
    +     * How long to wait before garbage-collecting a client that hasn't
    +     * heartbeated to the dispatcher. A value of 0 indicates that the timeout
    +     * should be left to the runtime.
    +     * 
    + * + * int64 client_timeout_ms = 8; + */ + long getClientTimeoutMs(); } /** *
        * Configuration for a tf.data service DispatchServer.
    +   * Next id: 9
        * 
    * * Protobuf type {@code tensorflow.data.experimental.DispatcherConfig} @@ -115,6 +185,7 @@ private DispatcherConfig(com.google.protobuf.GeneratedMessageV3.Builder build private DispatcherConfig() { protocol_ = ""; workDir_ = ""; + workerAddresses_ = com.google.protobuf.LazyStringArrayList.EMPTY; } @java.lang.Override @@ -137,6 +208,7 @@ private DispatcherConfig( if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } + int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -179,6 +251,20 @@ private DispatcherConfig( jobGcTimeoutMs_ = input.readInt64(); break; } + case 58: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + workerAddresses_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + workerAddresses_.add(s); + break; + } + case 64: { + + clientTimeoutMs_ = input.readInt64(); + break; + } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { @@ -194,6 +280,9 @@ private DispatcherConfig( throw new com.google.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + workerAddresses_ = workerAddresses_.getUnmodifiableView(); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } @@ -325,11 +414,78 @@ public boolean getFaultTolerantMode() { return faultTolerantMode_; } + public static final int WORKER_ADDRESSES_FIELD_NUMBER = 7; + private com.google.protobuf.LazyStringList workerAddresses_; + /** + *
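The new case labels in the parsing constructor above come straight from the protobuf wire format: tag = (field_number << 3) | wire_type, so the length-delimited worker_addresses strings (field 7) parse under tag 58 and the varint client_timeout_ms (field 8) under tag 64. A small illustrative sketch of that arithmetic; the class and method names below are made up and are not part of the generated sources:

    public final class WireTagCheck {
      // tag = (field_number << 3) | wire_type, per the protobuf wire format.
      static int tag(int fieldNumber, int wireType) {
        return (fieldNumber << 3) | wireType;
      }

      public static void main(String[] args) {
        System.out.println(tag(7, 2));  // 58 -> repeated string worker_addresses
        System.out.println(tag(8, 0));  // 64 -> int64 client_timeout_ms
        System.out.println(tag(10, 2)); // 82 -> repeated string worker_tags (WorkerConfig, below)
      }
    }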
    +     * (Optional.) If the job uses auto-sharding, it needs to specify a fixed list
    +     * of worker addresses that will register with the dispatcher. The worker
    +     * addresses should be in the format "host" or "host:port", where "port" is an
    +     * integer, named port, or %port% to match any port.
    +     * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be
    +     * allowed in the "COLOCATED" mode.
    +     * 
    + * + * repeated string worker_addresses = 7; + */ + public com.google.protobuf.ProtocolStringList + getWorkerAddressesList() { + return workerAddresses_; + } + /** + *
    +     * (Optional.) If the job uses auto-sharding, it needs to specify a fixed list
    +     * of worker addresses that will register with the dispatcher. The worker
    +     * addresses should be in the format "host" or "host:port", where "port" is an
    +     * integer, named port, or %port% to match any port.
    +     * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be
    +     * allowed in the "COLOCATED" mode.
    +     * 
    + * + * repeated string worker_addresses = 7; + */ + public int getWorkerAddressesCount() { + return workerAddresses_.size(); + } + /** + *
    +     * (Optional.) If the job uses auto-sharding, it needs to specify a fixed list
    +     * of worker addresses that will register with the dispatcher. The worker
    +     * addresses should be in the format "host" or "host:port", where "port" is an
    +     * integer, named port, or %port% to match any port.
    +     * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be
    +     * allowed in the "COLOCATED" mode.
    +     * 
    + * + * repeated string worker_addresses = 7; + */ + public java.lang.String getWorkerAddresses(int index) { + return workerAddresses_.get(index); + } + /** + *
    +     * (Optional.) If the job uses auto-sharding, it needs to specify a fixed list
    +     * of worker addresses that will register with the dispatcher. The worker
    +     * addresses should be in the format "host" or "host:port", where "port" is an
    +     * integer, named port, or %port% to match any port.
    +     * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be
    +     * allowed in the "COLOCATED" mode.
    +     * 
    + * + * repeated string worker_addresses = 7; + */ + public com.google.protobuf.ByteString + getWorkerAddressesBytes(int index) { + return workerAddresses_.getByteString(index); + } + public static final int JOB_GC_CHECK_INTERVAL_MS_FIELD_NUMBER = 5; private long jobGcCheckIntervalMs_; /** *
    -     * How often the dispatcher should scan through to delete old and unused jobs.
    +     * How often the dispatcher should scan through to delete old and unused
    +     * jobs. A value of 0 indicates that the decision should be left up to the
    +     * runtime.
          * 
    * * int64 job_gc_check_interval_ms = 5; @@ -344,7 +500,8 @@ public long getJobGcCheckIntervalMs() { *
          * How long a job needs to be unused before it becomes a candidate for garbage
          * collection. A value of -1 indicates that jobs should never be garbage
    -     * collected.
    +     * collected. A value of 0 indicates that the decision should be left up to
    +     * the runtime.
          * 
    * * int64 job_gc_timeout_ms = 6; @@ -353,6 +510,21 @@ public long getJobGcTimeoutMs() { return jobGcTimeoutMs_; } + public static final int CLIENT_TIMEOUT_MS_FIELD_NUMBER = 8; + private long clientTimeoutMs_; + /** + *
    +     * How long to wait before garbage-collecting a client that hasn't
    +     * heartbeated to the dispatcher. A value of 0 indicates that the timeout
    +     * should be left to the runtime.
    +     * 
    + * + * int64 client_timeout_ms = 8; + */ + public long getClientTimeoutMs() { + return clientTimeoutMs_; + } + private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -385,6 +557,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (jobGcTimeoutMs_ != 0L) { output.writeInt64(6, jobGcTimeoutMs_); } + for (int i = 0; i < workerAddresses_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 7, workerAddresses_.getRaw(i)); + } + if (clientTimeoutMs_ != 0L) { + output.writeInt64(8, clientTimeoutMs_); + } unknownFields.writeTo(output); } @@ -416,6 +594,18 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeInt64Size(6, jobGcTimeoutMs_); } + { + int dataSize = 0; + for (int i = 0; i < workerAddresses_.size(); i++) { + dataSize += computeStringSizeNoTag(workerAddresses_.getRaw(i)); + } + size += dataSize; + size += 1 * getWorkerAddressesList().size(); + } + if (clientTimeoutMs_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(8, clientTimeoutMs_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -439,10 +629,14 @@ public boolean equals(final java.lang.Object obj) { .equals(other.getWorkDir())) return false; if (getFaultTolerantMode() != other.getFaultTolerantMode()) return false; + if (!getWorkerAddressesList() + .equals(other.getWorkerAddressesList())) return false; if (getJobGcCheckIntervalMs() != other.getJobGcCheckIntervalMs()) return false; if (getJobGcTimeoutMs() != other.getJobGcTimeoutMs()) return false; + if (getClientTimeoutMs() + != other.getClientTimeoutMs()) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -464,12 +658,19 @@ public int hashCode() { hash = (37 * hash) + FAULT_TOLERANT_MODE_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( getFaultTolerantMode()); + if (getWorkerAddressesCount() > 0) { + hash = (37 * hash) + WORKER_ADDRESSES_FIELD_NUMBER; + hash = (53 * hash) + getWorkerAddressesList().hashCode(); + } hash = (37 * hash) + JOB_GC_CHECK_INTERVAL_MS_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashLong( getJobGcCheckIntervalMs()); hash = (37 * hash) + JOB_GC_TIMEOUT_MS_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashLong( getJobGcTimeoutMs()); + hash = (37 * hash) + CLIENT_TIMEOUT_MS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getClientTimeoutMs()); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -568,6 +769,7 @@ protected Builder newBuilderForType( /** *
          * Configuration for a tf.data service DispatchServer.
    +     * Next id: 9
          * 
    * * Protobuf type {@code tensorflow.data.experimental.DispatcherConfig} @@ -615,10 +817,14 @@ public Builder clear() { faultTolerantMode_ = false; + workerAddresses_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); jobGcCheckIntervalMs_ = 0L; jobGcTimeoutMs_ = 0L; + clientTimeoutMs_ = 0L; + return this; } @@ -645,12 +851,19 @@ public org.tensorflow.proto.data.experimental.ServiceConfig.DispatcherConfig bui @java.lang.Override public org.tensorflow.proto.data.experimental.ServiceConfig.DispatcherConfig buildPartial() { org.tensorflow.proto.data.experimental.ServiceConfig.DispatcherConfig result = new org.tensorflow.proto.data.experimental.ServiceConfig.DispatcherConfig(this); + int from_bitField0_ = bitField0_; result.port_ = port_; result.protocol_ = protocol_; result.workDir_ = workDir_; result.faultTolerantMode_ = faultTolerantMode_; + if (((bitField0_ & 0x00000001) != 0)) { + workerAddresses_ = workerAddresses_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.workerAddresses_ = workerAddresses_; result.jobGcCheckIntervalMs_ = jobGcCheckIntervalMs_; result.jobGcTimeoutMs_ = jobGcTimeoutMs_; + result.clientTimeoutMs_ = clientTimeoutMs_; onBuilt(); return result; } @@ -713,12 +926,25 @@ public Builder mergeFrom(org.tensorflow.proto.data.experimental.ServiceConfig.Di if (other.getFaultTolerantMode() != false) { setFaultTolerantMode(other.getFaultTolerantMode()); } + if (!other.workerAddresses_.isEmpty()) { + if (workerAddresses_.isEmpty()) { + workerAddresses_ = other.workerAddresses_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureWorkerAddressesIsMutable(); + workerAddresses_.addAll(other.workerAddresses_); + } + onChanged(); + } if (other.getJobGcCheckIntervalMs() != 0L) { setJobGcCheckIntervalMs(other.getJobGcCheckIntervalMs()); } if (other.getJobGcTimeoutMs() != 0L) { setJobGcTimeoutMs(other.getJobGcTimeoutMs()); } + if (other.getClientTimeoutMs() != 0L) { + setClientTimeoutMs(other.getClientTimeoutMs()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -747,6 +973,7 @@ public Builder mergeFrom( } return this; } + private int bitField0_; private long port_ ; /** @@ -1013,10 +1240,187 @@ public Builder clearFaultTolerantMode() { return this; } + private com.google.protobuf.LazyStringList workerAddresses_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureWorkerAddressesIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + workerAddresses_ = new com.google.protobuf.LazyStringArrayList(workerAddresses_); + bitField0_ |= 0x00000001; + } + } + /** + *
    +       * (Optional.) If the job uses auto-sharding, it needs to specify a fixed list
    +       * of worker addresses that will register with the dispatcher. The worker
    +       * addresses should be in the format "host" or "host:port", where "port" is an
    +       * integer, named port, or %port% to match any port.
    +       * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be
    +       * allowed in the "COLOCATED" mode.
    +       * 
    + * + * repeated string worker_addresses = 7; + */ + public com.google.protobuf.ProtocolStringList + getWorkerAddressesList() { + return workerAddresses_.getUnmodifiableView(); + } + /** + *
    +       * (Optional.) If the job uses auto-sharding, it needs to specify a fixed list
    +       * of worker addresses that will register with the dispatcher. The worker
    +       * addresses should be in the format "host" or "host:port", where "port" is an
    +       * integer, named port, or %port% to match any port.
    +       * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be
    +       * allowed in the "COLOCATED" mode.
    +       * 
    + * + * repeated string worker_addresses = 7; + */ + public int getWorkerAddressesCount() { + return workerAddresses_.size(); + } + /** + *
    +       * (Optional.) If the job uses auto-sharding, it needs to specify a fixed list
    +       * of worker addresses that will register with the dispatcher. The worker
    +       * addresses should be in the format "host" or "host:port", where "port" is an
    +       * integer, named port, or %port% to match any port.
    +       * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be
    +       * allowed in the "COLOCATED" mode.
    +       * 
    + * + * repeated string worker_addresses = 7; + */ + public java.lang.String getWorkerAddresses(int index) { + return workerAddresses_.get(index); + } + /** + *
    +       * (Optional.) If the job uses auto-sharding, it needs to specify a fixed list
    +       * of worker addresses that will register with the dispatcher. The worker
    +       * addresses should be in the format "host" or "host:port", where "port" is an
    +       * integer, named port, or %port% to match any port.
    +       * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be
    +       * allowed in the "COLOCATED" mode.
    +       * 
    + * + * repeated string worker_addresses = 7; + */ + public com.google.protobuf.ByteString + getWorkerAddressesBytes(int index) { + return workerAddresses_.getByteString(index); + } + /** + *
    +       * (Optional.) If the job uses auto-sharding, it needs to specify a fixed list
    +       * of worker addresses that will register with the dispatcher. The worker
    +       * addresses should be in the format "host" or "host:port", where "port" is an
    +       * integer, named port, or %port% to match any port.
    +       * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be
    +       * allowed in the "COLOCATED" mode.
    +       * 
    + * + * repeated string worker_addresses = 7; + */ + public Builder setWorkerAddresses( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureWorkerAddressesIsMutable(); + workerAddresses_.set(index, value); + onChanged(); + return this; + } + /** + *
    +       * (Optional.) If the job uses auto-sharding, it needs to specify a fixed list
    +       * of worker addresses that will register with the dispatcher. The worker
    +       * addresses should be in the format "host" or "host:port", where "port" is an
    +       * integer, named port, or %port% to match any port.
    +       * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be
    +       * allowed in the "COLOCATED" mode.
    +       * 
    + * + * repeated string worker_addresses = 7; + */ + public Builder addWorkerAddresses( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureWorkerAddressesIsMutable(); + workerAddresses_.add(value); + onChanged(); + return this; + } + /** + *
    +       * (Optional.) If the job uses auto-sharding, it needs to specify a fixed list
    +       * of worker addresses that will register with the dispatcher. The worker
    +       * addresses should be in the format "host" or "host:port", where "port" is an
    +       * integer, named port, or %port% to match any port.
    +       * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be
    +       * allowed in the "COLOCATED" mode.
    +       * 
    + * + * repeated string worker_addresses = 7; + */ + public Builder addAllWorkerAddresses( + java.lang.Iterable values) { + ensureWorkerAddressesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, workerAddresses_); + onChanged(); + return this; + } + /** + *
    +       * (Optional.) If the job uses auto-sharding, it needs to specify a fixed list
    +       * of worker addresses that will register with the dispatcher. The worker
    +       * addresses should be in the format "host" or "host:port", where "port" is an
    +       * integer, named port, or %port% to match any port.
    +       * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be
    +       * allowed in the "COLOCATED" mode.
    +       * 
    + * + * repeated string worker_addresses = 7; + */ + public Builder clearWorkerAddresses() { + workerAddresses_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + *
    +       * (Optional.) If the job uses auto-sharding, it needs to specify a fixed list
    +       * of worker addresses that will register with the dispatcher. The worker
    +       * addresses should be in the format "host" or "host:port", where "port" is an
    +       * integer, named port, or %port% to match any port.
    +       * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be
    +       * allowed in the "COLOCATED" mode.
    +       * 
    + * + * repeated string worker_addresses = 7; + */ + public Builder addWorkerAddressesBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureWorkerAddressesIsMutable(); + workerAddresses_.add(value); + onChanged(); + return this; + } + private long jobGcCheckIntervalMs_ ; /** *
    -       * How often the dispatcher should scan through to delete old and unused jobs.
    +       * How often the dispatcher should scan through to delete old and unused
    +       * jobs. A value of 0 indicates that the decision should be left up to the
    +       * runtime.
            * 
    * * int64 job_gc_check_interval_ms = 5; @@ -1026,7 +1430,9 @@ public long getJobGcCheckIntervalMs() { } /** *
    -       * How often the dispatcher should scan through to delete old and unused jobs.
    +       * How often the dispatcher should scan through to delete old and unused
    +       * jobs. A value of 0 indicates that the decision should be left up to the
    +       * runtime.
            * 
    * * int64 job_gc_check_interval_ms = 5; @@ -1039,7 +1445,9 @@ public Builder setJobGcCheckIntervalMs(long value) { } /** *
    -       * How often the dispatcher should scan through to delete old and unused jobs.
    +       * How often the dispatcher should scan through to delete old and unused
    +       * jobs. A value of 0 indicates that the decision should be left up to the
    +       * runtime.
            * 
    * * int64 job_gc_check_interval_ms = 5; @@ -1056,7 +1464,8 @@ public Builder clearJobGcCheckIntervalMs() { *
            * How long a job needs to be unused before it becomes a candidate for garbage
            * collection. A value of -1 indicates that jobs should never be garbage
    -       * collected.
    +       * collected. A value of 0 indicates that the decision should be left up to
    +       * the runtime.
            * 
    * * int64 job_gc_timeout_ms = 6; @@ -1068,7 +1477,8 @@ public long getJobGcTimeoutMs() { *
            * How long a job needs to be unused before it becomes a candidate for garbage
            * collection. A value of -1 indicates that jobs should never be garbage
    -       * collected.
    +       * collected. A value of 0 indicates that the decision should be left up to
    +       * the runtime.
            * 
    * * int64 job_gc_timeout_ms = 6; @@ -1083,7 +1493,8 @@ public Builder setJobGcTimeoutMs(long value) { *
            * How long a job needs to be unused before it becomes a candidate for garbage
            * collection. A value of -1 indicates that jobs should never be garbage
    -       * collected.
    +       * collected. A value of 0 indicates that the decision should be left up to
    +       * the runtime.
            * 
    * * int64 job_gc_timeout_ms = 6; @@ -1094,6 +1505,50 @@ public Builder clearJobGcTimeoutMs() { onChanged(); return this; } + + private long clientTimeoutMs_ ; + /** + *
    +       * How long to wait before garbage-collecting a client that hasn't
    +       * heartbeated to the dispatcher. A value of 0 indicates that the timeout
    +       * should be left to the runtime.
    +       * 
    + * + * int64 client_timeout_ms = 8; + */ + public long getClientTimeoutMs() { + return clientTimeoutMs_; + } + /** + *
    +       * How long to wait before garbage-collecting a client that hasn't
    +       * heartbeated to the dispatcher. A value of 0 indicates that the timeout
    +       * should be left to the runtime.
    +       * 
    + * + * int64 client_timeout_ms = 8; + */ + public Builder setClientTimeoutMs(long value) { + + clientTimeoutMs_ = value; + onChanged(); + return this; + } + /** + *
    +       * How long to wait before garbage-collecting a client that hasn't
    +       * heartbeated to the dispatcher. A value of 0 indicates that the timeout
    +       * should be left to the runtime.
    +       * 
    + * + * int64 client_timeout_ms = 8; + */ + public Builder clearClientTimeoutMs() { + + clientTimeoutMs_ = 0L; + onChanged(); + return this; + } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { @@ -1221,7 +1676,55 @@ public interface WorkerConfigOrBuilder extends /** *
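Putting the DispatcherConfig additions together, the repeated worker_addresses field and the client_timeout_ms setter can be exercised through the generated builder as below. The host names, ports, and the 60-second timeout are illustrative values only, and the wrapping class is hypothetical:

    import java.util.Arrays;
    import org.tensorflow.proto.data.experimental.ServiceConfig.DispatcherConfig;

    public final class DispatcherConfigExample {
      public static void main(String[] args) {
        DispatcherConfig config = DispatcherConfig.newBuilder()
            .setFaultTolerantMode(true)
            // Fixed worker list for auto-sharding; "host", "host:port", or a "%port%"
            // wildcard are accepted per the field comment. Addresses here are made up.
            .addWorkerAddresses("worker-0.example.com:5050")
            .addAllWorkerAddresses(
                Arrays.asList("worker-1.example.com:%port%", "worker-2.example.com"))
            // 0 leaves the job GC decisions to the runtime, as documented above.
            .setJobGcCheckIntervalMs(0L)
            .setJobGcTimeoutMs(0L)
            .setClientTimeoutMs(60_000L)
            .build();

        System.out.println(config.getWorkerAddressesCount()); // 3
        System.out.println(config.getWorkerAddresses(0));     // worker-0.example.com:5050
        System.out.println(config.getClientTimeoutMs());      // 60000
      }
    }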
    -     * How often the worker should heartbeat to the master.
    +     * Tags attached to the worker. This allows reading from selected workers.
    +     * For example, by applying a "COLOCATED" tag, tf.data service is able to read
    +     * from the local tf.data worker if one exists, then from off-TF-host workers,
    +     * to avoid cross-TF-host reads.
    +     * 
    + * + * repeated string worker_tags = 10; + */ + java.util.List + getWorkerTagsList(); + /** + *
    +     * Tags attached to the worker. This allows reading from selected workers.
    +     * For example, by applying a "COLOCATED" tag, tf.data service is able to read
    +     * from the local tf.data worker if one exists, then from off-TF-host workers,
    +     * to avoid cross-TF-host reads.
    +     * 
    + * + * repeated string worker_tags = 10; + */ + int getWorkerTagsCount(); + /** + *
    +     * Tags attached to the worker. This allows reading from selected workers.
    +     * For example, by applying a "COLOCATED" tag, tf.data service is able to read
    +     * from the local tf.data worker if one exists, then from off-TF-host workers,
    +     * to avoid cross-TF-host reads.
    +     * 
    + * + * repeated string worker_tags = 10; + */ + java.lang.String getWorkerTags(int index); + /** + *
    +     * Tags attached to the worker. This allows reading from selected workers.
    +     * For example, by applying a "COLOCATED" tag, tf.data service is able to read
    +     * from the local tf.data worker if one exists, then from off-TF-host workers,
    +     * to avoid cross-TF-host reads.
    +     * 
    + * + * repeated string worker_tags = 10; + */ + com.google.protobuf.ByteString + getWorkerTagsBytes(int index); + + /** + *
    +     * How often the worker should heartbeat to the master. A value of 0 indicates
    +     * that the decision should be left up to the runtime.
          * 
    * * int64 heartbeat_interval_ms = 5; @@ -1231,7 +1734,8 @@ public interface WorkerConfigOrBuilder extends /** *
          * How long to retry requests to the dispatcher before giving up and reporting
    -     * an error.
    +     * an error. A value of 0 indicates that the decision should be left up to the
    +     * runtime.
          * 
    * * int64 dispatcher_timeout_ms = 6; @@ -1292,6 +1796,7 @@ public interface WorkerConfigOrBuilder extends /** *
        * Configuration for a tf.data service WorkerServer.
    +   * Next id: 11
        * 
    * * Protobuf type {@code tensorflow.data.experimental.WorkerConfig} @@ -1309,6 +1814,7 @@ private WorkerConfig() { protocol_ = ""; dispatcherAddress_ = ""; workerAddress_ = ""; + workerTags_ = com.google.protobuf.LazyStringArrayList.EMPTY; dataTransferProtocol_ = ""; dataTransferAddress_ = ""; } @@ -1333,6 +1839,7 @@ private WorkerConfig( if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } + int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -1393,6 +1900,15 @@ private WorkerConfig( shutdownQuietPeriodMs_ = input.readInt64(); break; } + case 82: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + workerTags_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + workerTags_.add(s); + break; + } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { @@ -1408,6 +1924,9 @@ private WorkerConfig( throw new com.google.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + workerTags_ = workerTags_.getUnmodifiableView(); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } @@ -1569,11 +2088,69 @@ public java.lang.String getWorkerAddress() { } } + public static final int WORKER_TAGS_FIELD_NUMBER = 10; + private com.google.protobuf.LazyStringList workerTags_; + /** + *
    +     * Tags attached to the worker. This allows reading from selected workers.
    +     * For example, by applying a "COLOCATED" tag, tf.data service is able to read
    +     * from the local tf.data worker if one exists, then from off-TF-host workers,
    +     * to avoid cross-TF-host reads.
    +     * 
    + * + * repeated string worker_tags = 10; + */ + public com.google.protobuf.ProtocolStringList + getWorkerTagsList() { + return workerTags_; + } + /** + *
    +     * Tags attached to the worker. This allows reading from selected workers.
    +     * For example, by applying a "COLOCATED" tag, tf.data service is able to read
    +     * from the local tf.data worker if one exists, then from off-TF-host workers,
    +     * to avoid cross-TF-host reads.
    +     * 
    + * + * repeated string worker_tags = 10; + */ + public int getWorkerTagsCount() { + return workerTags_.size(); + } + /** + *
    +     * Tags attached to the worker. This allows reading from selected workers.
    +     * For example, by applying a "COLOCATED" tag, tf.data service is able to read
    +     * from the local tf.data worker if one exists, then from off-TF-host workers,
    +     * to avoid cross-TF-host reads.
    +     * 
    + * + * repeated string worker_tags = 10; + */ + public java.lang.String getWorkerTags(int index) { + return workerTags_.get(index); + } + /** + *
    +     * Tags attached to the worker. This allows reading from selected workers.
    +     * For example, by applying a "COLOCATED" tag, tf.data service is able to read
    +     * from the local tf.data worker if one exists, then from off-TF-host workers,
    +     * to avoid cross-TF-host reads.
    +     * 
    + * + * repeated string worker_tags = 10; + */ + public com.google.protobuf.ByteString + getWorkerTagsBytes(int index) { + return workerTags_.getByteString(index); + } + public static final int HEARTBEAT_INTERVAL_MS_FIELD_NUMBER = 5; private long heartbeatIntervalMs_; /** *
    -     * How often the worker should heartbeat to the master.
    +     * How often the worker should heartbeat to the master. A value of 0 indicates
    +     * that the decision should be left up to the runtime.
          * 
    * * int64 heartbeat_interval_ms = 5; @@ -1587,7 +2164,8 @@ public long getHeartbeatIntervalMs() { /** *
          * How long to retry requests to the dispatcher before giving up and reporting
    -     * an error.
    +     * an error. A value of 0 indicates that the decision should be left up to the
    +     * runtime.
          * 
    * * int64 dispatcher_timeout_ms = 6; @@ -1740,6 +2318,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (shutdownQuietPeriodMs_ != 0L) { output.writeInt64(9, shutdownQuietPeriodMs_); } + for (int i = 0; i < workerTags_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 10, workerTags_.getRaw(i)); + } unknownFields.writeTo(output); } @@ -1780,6 +2361,14 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeInt64Size(9, shutdownQuietPeriodMs_); } + { + int dataSize = 0; + for (int i = 0; i < workerTags_.size(); i++) { + dataSize += computeStringSizeNoTag(workerTags_.getRaw(i)); + } + size += dataSize; + size += 1 * getWorkerTagsList().size(); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -1803,6 +2392,8 @@ public boolean equals(final java.lang.Object obj) { .equals(other.getDispatcherAddress())) return false; if (!getWorkerAddress() .equals(other.getWorkerAddress())) return false; + if (!getWorkerTagsList() + .equals(other.getWorkerTagsList())) return false; if (getHeartbeatIntervalMs() != other.getHeartbeatIntervalMs()) return false; if (getDispatcherTimeoutMs() @@ -1833,6 +2424,10 @@ public int hashCode() { hash = (53 * hash) + getDispatcherAddress().hashCode(); hash = (37 * hash) + WORKER_ADDRESS_FIELD_NUMBER; hash = (53 * hash) + getWorkerAddress().hashCode(); + if (getWorkerTagsCount() > 0) { + hash = (37 * hash) + WORKER_TAGS_FIELD_NUMBER; + hash = (53 * hash) + getWorkerTagsList().hashCode(); + } hash = (37 * hash) + HEARTBEAT_INTERVAL_MS_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashLong( getHeartbeatIntervalMs()); @@ -1944,6 +2539,7 @@ protected Builder newBuilderForType( /** *
          * Configuration for a tf.data service WorkerServer.
    +     * Next id: 11
          * 
    * * Protobuf type {@code tensorflow.data.experimental.WorkerConfig} @@ -1991,6 +2587,8 @@ public Builder clear() { workerAddress_ = ""; + workerTags_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); heartbeatIntervalMs_ = 0L; dispatcherTimeoutMs_ = 0L; @@ -2027,10 +2625,16 @@ public org.tensorflow.proto.data.experimental.ServiceConfig.WorkerConfig build() @java.lang.Override public org.tensorflow.proto.data.experimental.ServiceConfig.WorkerConfig buildPartial() { org.tensorflow.proto.data.experimental.ServiceConfig.WorkerConfig result = new org.tensorflow.proto.data.experimental.ServiceConfig.WorkerConfig(this); + int from_bitField0_ = bitField0_; result.port_ = port_; result.protocol_ = protocol_; result.dispatcherAddress_ = dispatcherAddress_; result.workerAddress_ = workerAddress_; + if (((bitField0_ & 0x00000001) != 0)) { + workerTags_ = workerTags_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.workerTags_ = workerTags_; result.heartbeatIntervalMs_ = heartbeatIntervalMs_; result.dispatcherTimeoutMs_ = dispatcherTimeoutMs_; result.dataTransferProtocol_ = dataTransferProtocol_; @@ -2099,6 +2703,16 @@ public Builder mergeFrom(org.tensorflow.proto.data.experimental.ServiceConfig.Wo workerAddress_ = other.workerAddress_; onChanged(); } + if (!other.workerTags_.isEmpty()) { + if (workerTags_.isEmpty()) { + workerTags_ = other.workerTags_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureWorkerTagsIsMutable(); + workerTags_.addAll(other.workerTags_); + } + onChanged(); + } if (other.getHeartbeatIntervalMs() != 0L) { setHeartbeatIntervalMs(other.getHeartbeatIntervalMs()); } @@ -2144,6 +2758,7 @@ public Builder mergeFrom( } return this; } + private int bitField0_; private long port_ ; /** @@ -2463,10 +3078,168 @@ public Builder setWorkerAddressBytes( return this; } + private com.google.protobuf.LazyStringList workerTags_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureWorkerTagsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + workerTags_ = new com.google.protobuf.LazyStringArrayList(workerTags_); + bitField0_ |= 0x00000001; + } + } + /** + *
    +       * Tags attached to the worker. This allows reading from selected workers.
    +       * For example, by applying a "COLOCATED" tag, tf.data service is able to read
    +       * from the local tf.data worker if one exists, then from off-TF-host workers,
    +       * to avoid cross-TF-host reads.
    +       * 
    + * + * repeated string worker_tags = 10; + */ + public com.google.protobuf.ProtocolStringList + getWorkerTagsList() { + return workerTags_.getUnmodifiableView(); + } + /** + *
    +       * Tags attached to the worker. This allows reading from selected workers.
    +       * For example, by applying a "COLOCATED" tag, tf.data service is able to read
    +       * from the local tf.data worker if one exists, then from off-TF-host workers,
    +       * to avoid cross-TF-host reads.
    +       * 
    + * + * repeated string worker_tags = 10; + */ + public int getWorkerTagsCount() { + return workerTags_.size(); + } + /** + *
    +       * Tags attached to the worker. This allows reading from selected workers.
    +       * For example, by applying a "COLOCATED" tag, tf.data service is able to read
    +       * from the local tf.data worker if one exists, then from off-TF-host workers,
    +       * to avoid cross-TF-host reads.
    +       * 
    + * + * repeated string worker_tags = 10; + */ + public java.lang.String getWorkerTags(int index) { + return workerTags_.get(index); + } + /** + *
    +       * Tags attached to the worker. This allows reading from selected workers.
    +       * For example, by applying a "COLOCATED" tag, tf.data service is able to read
    +       * from the local tf.data worker if one exists, then from off-TF-host workers,
    +       * to avoid cross-TF-host reads.
    +       * 
    + * + * repeated string worker_tags = 10; + */ + public com.google.protobuf.ByteString + getWorkerTagsBytes(int index) { + return workerTags_.getByteString(index); + } + /** + *
    +       * Tags attached to the worker. This allows reading from selected workers.
    +       * For example, by applying a "COLOCATED" tag, tf.data service is able to read
    +       * from the local tf.data worker if one exists, then from off-TF-host workers,
    +       * to avoid cross-TF-host reads.
    +       * 
    + * + * repeated string worker_tags = 10; + */ + public Builder setWorkerTags( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureWorkerTagsIsMutable(); + workerTags_.set(index, value); + onChanged(); + return this; + } + /** + *
    +       * Tags attached to the worker. This allows reading from selected workers.
    +       * For example, by applying a "COLOCATED" tag, tf.data service is able to read
    +       * from the local tf.data worker if one exists, then from off-TF-host workers,
    +       * to avoid cross-TF-host reads.
    +       * 
    + * + * repeated string worker_tags = 10; + */ + public Builder addWorkerTags( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureWorkerTagsIsMutable(); + workerTags_.add(value); + onChanged(); + return this; + } + /** + *
    +       * Tags attached to the worker. This allows reading from selected workers.
    +       * For example, by applying a "COLOCATED" tag, tf.data service is able to read
    +       * from the local tf.data worker if one exists, then from off-TF-host workers,
    +       * to avoid cross-TF-host reads.
    +       * 
    + * + * repeated string worker_tags = 10; + */ + public Builder addAllWorkerTags( + java.lang.Iterable values) { + ensureWorkerTagsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, workerTags_); + onChanged(); + return this; + } + /** + *
    +       * Tags attached to the worker. This allows reading from selected workers.
    +       * For example, by applying a "COLOCATED" tag, tf.data service is able to read
    +       * from the local tf.data worker if one exists, then from off-TF-host workers,
    +       * to avoid cross-TF-host reads.
    +       * 
    + * + * repeated string worker_tags = 10; + */ + public Builder clearWorkerTags() { + workerTags_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + *
    +       * Tags attached to the worker. This allows reading from selected workers.
    +       * For example, by applying a "COLOCATED" tag, tf.data service is able to read
    +       * from the local tf.data worker if one exists, then from off-TF-host workers,
    +       * to avoid cross-TF-host reads.
    +       * 
    + * + * repeated string worker_tags = 10; + */ + public Builder addWorkerTagsBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureWorkerTagsIsMutable(); + workerTags_.add(value); + onChanged(); + return this; + } + private long heartbeatIntervalMs_ ; /** *
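The worker_tags accessors added above follow the same repeated-string pattern. A minimal sketch of tagging a worker, assuming the standard generated setter for the pre-existing dispatcher_address field; the address is a made-up example and the "COLOCATED" tag value comes from the field comment:

    import org.tensorflow.proto.data.experimental.ServiceConfig.WorkerConfig;

    public final class WorkerConfigExample {
      public static void main(String[] args) {
        WorkerConfig config = WorkerConfig.newBuilder()
            .setDispatcherAddress("dispatcher.example.com:5050") // hypothetical address
            .addWorkerTags("COLOCATED")                          // lets clients prefer local workers
            // 0 defers the heartbeat and dispatcher-timeout choices to the runtime,
            // as the updated comments above describe.
            .setHeartbeatIntervalMs(0L)
            .setDispatcherTimeoutMs(0L)
            .build();

        System.out.println(config.getWorkerTagsList()); // [COLOCATED]
      }
    }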
    -       * How often the worker should heartbeat to the master.
    +       * How often the worker should heartbeat to the master. A value of 0 indicates
    +       * that the decision should be left up to the runtime.
            * 
    * * int64 heartbeat_interval_ms = 5; @@ -2476,7 +3249,8 @@ public long getHeartbeatIntervalMs() { } /** *
    -       * How often the worker should heartbeat to the master.
    +       * How often the worker should heartbeat to the master. A value of 0 indicates
    +       * that the decision should be left up to the runtime.
            * 
    * * int64 heartbeat_interval_ms = 5; @@ -2489,7 +3263,8 @@ public Builder setHeartbeatIntervalMs(long value) { } /** *
    -       * How often the worker should heartbeat to the master.
    +       * How often the worker should heartbeat to the master. A value of 0 indicates
    +       * that the decision should be left up to the runtime.
            * 
    * * int64 heartbeat_interval_ms = 5; @@ -2505,7 +3280,8 @@ public Builder clearHeartbeatIntervalMs() { /** *
            * How long to retry requests to the dispatcher before giving up and reporting
    -       * an error.
    +       * an error. A value of 0 indicates that the decision should be left up to the
    +       * runtime.
            * 
    * * int64 dispatcher_timeout_ms = 6; @@ -2516,7 +3292,8 @@ public long getDispatcherTimeoutMs() { /** *
            * How long to retry requests to the dispatcher before giving up and reporting
    -       * an error.
    +       * an error. A value of 0 indicates that the decision should be left up to the
    +       * runtime.
            * 
    * * int64 dispatcher_timeout_ms = 6; @@ -2530,7 +3307,8 @@ public Builder setDispatcherTimeoutMs(long value) { /** *
            * How long to retry requests to the dispatcher before giving up and reporting
    -       * an error.
    +       * an error. A value of 0 indicates that the decision should be left up to the
    +       * runtime.
            * 
    * * int64 dispatcher_timeout_ms = 6; @@ -2846,21 +3624,23 @@ public org.tensorflow.proto.data.experimental.ServiceConfig.WorkerConfig getDefa static { java.lang.String[] descriptorData = { "\n-tensorflow/core/protobuf/service_confi" + - "g.proto\022\034tensorflow.data.experimental\"\236\001" + + "g.proto\022\034tensorflow.data.experimental\"\323\001" + "\n\020DispatcherConfig\022\014\n\004port\030\001 \001(\003\022\020\n\010prot" + "ocol\030\002 \001(\t\022\020\n\010work_dir\030\003 \001(\t\022\033\n\023fault_to" + - "lerant_mode\030\004 \001(\010\022 \n\030job_gc_check_interv" + - "al_ms\030\005 \001(\003\022\031\n\021job_gc_timeout_ms\030\006 \001(\003\"\201" + - "\002\n\014WorkerConfig\022\014\n\004port\030\001 \001(\003\022\020\n\010protoco" + - "l\030\002 \001(\t\022\032\n\022dispatcher_address\030\003 \001(\t\022\026\n\016w" + - "orker_address\030\004 \001(\t\022\035\n\025heartbeat_interva" + - "l_ms\030\005 \001(\003\022\035\n\025dispatcher_timeout_ms\030\006 \001(" + - "\003\022\036\n\026data_transfer_protocol\030\007 \001(\t\022\035\n\025dat" + - "a_transfer_address\030\010 \001(\t\022 \n\030shutdown_qui" + - "et_period_ms\030\t \001(\003B\177\n&org.tensorflow.pro" + - "to.data.experimentalZUgithub.com/tensorf" + - "low/tensorflow/tensorflow/go/core/protob" + - "uf/for_core_protos_go_protob\006proto3" + "lerant_mode\030\004 \001(\010\022\030\n\020worker_addresses\030\007 " + + "\003(\t\022 \n\030job_gc_check_interval_ms\030\005 \001(\003\022\031\n" + + "\021job_gc_timeout_ms\030\006 \001(\003\022\031\n\021client_timeo" + + "ut_ms\030\010 \001(\003\"\226\002\n\014WorkerConfig\022\014\n\004port\030\001 \001" + + "(\003\022\020\n\010protocol\030\002 \001(\t\022\032\n\022dispatcher_addre" + + "ss\030\003 \001(\t\022\026\n\016worker_address\030\004 \001(\t\022\023\n\013work" + + "er_tags\030\n \003(\t\022\035\n\025heartbeat_interval_ms\030\005" + + " \001(\003\022\035\n\025dispatcher_timeout_ms\030\006 \001(\003\022\036\n\026d" + + "ata_transfer_protocol\030\007 \001(\t\022\035\n\025data_tran" + + "sfer_address\030\010 \001(\t\022 \n\030shutdown_quiet_per" + + "iod_ms\030\t \001(\003B\177\n&org.tensorflow.proto.dat" + + "a.experimentalZUgithub.com/tensorflow/te" + + "nsorflow/tensorflow/go/core/protobuf/for" + + "_core_protos_go_protob\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, @@ -2871,13 +3651,13 @@ public org.tensorflow.proto.data.experimental.ServiceConfig.WorkerConfig getDefa internal_static_tensorflow_data_experimental_DispatcherConfig_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_data_experimental_DispatcherConfig_descriptor, - new java.lang.String[] { "Port", "Protocol", "WorkDir", "FaultTolerantMode", "JobGcCheckIntervalMs", "JobGcTimeoutMs", }); + new java.lang.String[] { "Port", "Protocol", "WorkDir", "FaultTolerantMode", "WorkerAddresses", "JobGcCheckIntervalMs", "JobGcTimeoutMs", "ClientTimeoutMs", }); internal_static_tensorflow_data_experimental_WorkerConfig_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_tensorflow_data_experimental_WorkerConfig_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_data_experimental_WorkerConfig_descriptor, - new java.lang.String[] { "Port", "Protocol", "DispatcherAddress", "WorkerAddress", "HeartbeatIntervalMs", "DispatcherTimeoutMs", "DataTransferProtocol", "DataTransferAddress", "ShutdownQuietPeriodMs", }); + 
new java.lang.String[] { "Port", "Protocol", "DispatcherAddress", "WorkerAddress", "WorkerTags", "HeartbeatIntervalMs", "DispatcherTimeoutMs", "DataTransferProtocol", "DataTransferAddress", "ShutdownQuietPeriodMs", }); } // @@protoc_insertion_point(outer_class_scope) diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProto.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProto.java index 21b0beecfb2..9648902d4e4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProto.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProto.java @@ -43,6 +43,7 @@ private ModelProto( if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } + int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -54,29 +55,29 @@ private ModelProto( done = true; break; case 10: { - org.tensorflow.proto.data.model.ModelProto.Node.Builder subBuilder = null; - if (output_ != null) { - subBuilder = output_.toBuilder(); + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + nodes_ = com.google.protobuf.MapField.newMapField( + NodesDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000001; } - output_ = input.readMessage(org.tensorflow.proto.data.model.ModelProto.Node.parser(), extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(output_); - output_ = subBuilder.buildPartial(); - } - + com.google.protobuf.MapEntry + nodes__ = input.readMessage( + NodesDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); + nodes_.getMutableMap().put( + nodes__.getKey(), nodes__.getValue()); break; } case 16: { - idCounter_ = input.readInt64(); + output_ = input.readInt64(); break; } case 24: { - collectResourceUsage_ = input.readBool(); + idCounter_ = input.readInt64(); break; } - case 34: { + case 42: { org.tensorflow.proto.data.model.ModelProto.OptimizationParams.Builder subBuilder = null; if (optimizationParams_ != null) { subBuilder = optimizationParams_.toBuilder(); @@ -113,6 +114,18 @@ private ModelProto( return org.tensorflow.proto.data.model.ModelProtos.internal_static_tensorflow_data_model_ModelProto_descriptor; } + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 1: + return internalGetNodes(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { @@ -285,47 +298,28 @@ org.tensorflow.proto.data.model.ModelProto.Node.ParameterOrBuilder getParameters /** *
    -     * Inputs of this node.
    -     * 
    - * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; - */ - java.util.List - getInputsList(); - /** - *
    -     * Inputs of this node.
    +     * IDs of inputs of this node.
          * 
    * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + * repeated int64 inputs = 14; */ - org.tensorflow.proto.data.model.ModelProto.Node getInputs(int index); + java.util.List getInputsList(); /** *
    -     * Inputs of this node.
    +     * IDs of inputs of this node.
          * 
    * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + * repeated int64 inputs = 14; */ int getInputsCount(); /** *
    -     * Inputs of this node.
    -     * 
    - * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; - */ - java.util.List - getInputsOrBuilderList(); - /** - *
    -     * Inputs of this node.
    +     * IDs of inputs of this node.
          * 
    * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + * repeated int64 inputs = 14; */ - org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder getInputsOrBuilder( - int index); + long getInputs(int index); /** *
    @@ -383,7 +377,7 @@ private Node(com.google.protobuf.GeneratedMessageV3.Builder builder) {
         private Node() {
           name_ = "";
           parameters_ = java.util.Collections.emptyList();
    -      inputs_ = java.util.Collections.emptyList();
    +      inputs_ = emptyLongList();
           nodeClass_ = 0;
         }
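
The hunks in this file change ModelProto.Node.inputs from repeated Node messages to repeated int64 node IDs, so Node.Builder.addInputs now takes a plain long rather than a nested Node. A minimal sketch of the new builder usage; the ID values and the node name below are made up for illustration:

    import org.tensorflow.proto.data.model.ModelProto;

    public class NodeInputsExample {
      public static void main(String[] args) {
        // After this patch, inputs are node IDs (int64), not embedded Node messages.
        ModelProto.Node node = ModelProto.Node.newBuilder()
            .setId(7L)
            .setName("MapDataset")   // hypothetical name
            .addInputs(3L)           // IDs of upstream nodes
            .addInputs(5L)
            .build();
        System.out.println(node.getInputsList());  // prints [3, 5]
      }
    }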
     
    @@ -488,13 +482,25 @@ private Node(
                   inputProcessingTimeCount_ = input.readInt64();
                   break;
                 }
    -            case 114: {
    +            case 112: {
                   if (!((mutable_bitField0_ & 0x00000002) != 0)) {
    -                inputs_ = new java.util.ArrayList();
    +                inputs_ = newLongList();
                     mutable_bitField0_ |= 0x00000002;
                   }
    -              inputs_.add(
    -                  input.readMessage(org.tensorflow.proto.data.model.ModelProto.Node.parser(), extensionRegistry));
    +              inputs_.addLong(input.readInt64());
    +              break;
    +            }
    +            case 114: {
    +              int length = input.readRawVarint32();
    +              int limit = input.pushLimit(length);
    +              if (!((mutable_bitField0_ & 0x00000002) != 0) && input.getBytesUntilLimit() > 0) {
    +                inputs_ = newLongList();
    +                mutable_bitField0_ |= 0x00000002;
    +              }
    +              while (input.getBytesUntilLimit() > 0) {
    +                inputs_.addLong(input.readInt64());
    +              }
    +              input.popLimit(limit);
                   break;
                 }
                 case 120: {
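
The 112, 114, and 120 case labels above are protobuf wire-format tags, computed as (field_number << 3) | wire_type. Field 14 (inputs) gives 112 with varint wire type 0 (one unpacked int64) and 114 with length-delimited wire type 2 (a packed run of varints); field 15 (node_class) gives 120. A small sketch of that arithmetic, using a hypothetical helper named tag:

    public class WireTagExample {
      // (field_number << 3) | wire_type, as defined by the protobuf wire format.
      static int tag(int fieldNumber, int wireType) {
        return (fieldNumber << 3) | wireType;
      }

      public static void main(String[] args) {
        System.out.println(tag(14, 0));  // 112 -> single "inputs" varint
        System.out.println(tag(14, 2));  // 114 -> packed "inputs" list
        System.out.println(tag(15, 0));  // 120 -> "node_class" enum
      }
    }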
    @@ -532,7 +538,7 @@ private Node(
               parameters_ = java.util.Collections.unmodifiableList(parameters_);
             }
             if (((mutable_bitField0_ & 0x00000002) != 0)) {
    -          inputs_ = java.util.Collections.unmodifiableList(inputs_);
    +          inputs_.makeImmutable(); // C
             }
             this.unknownFields = unknownFields.build();
             makeExtensionsImmutable();
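
Because a Node no longer embeds its input nodes, traversing the model now means resolving input IDs against the top-level nodes map and starting from the new int64 output field, both of which are introduced further down in this file's diff. A hedged sketch, assuming a populated ModelProto named model with a valid output ID:

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.HashSet;
    import java.util.Set;
    import org.tensorflow.proto.data.model.ModelProto;

    public class ModelWalkExample {
      // Prints every node reachable from the model's output, following input IDs.
      static void walk(ModelProto model) {
        Deque<Long> pending = new ArrayDeque<>();
        Set<Long> visited = new HashSet<>();
        pending.push(model.getOutput());                // "output" is now a node ID
        while (!pending.isEmpty()) {
          long id = pending.pop();
          if (!visited.add(id)) {
            continue;                                   // node already printed
          }
          ModelProto.Node node = model.getNodesOrThrow(id);
          System.out.println(node.getId() + ": " + node.getName());
          for (long inputId : node.getInputsList()) {   // inputs are IDs as well
            pending.push(inputId);
          }
        }
      }
    }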
    @@ -1812,59 +1818,39 @@ public long getInputProcessingTimeCount() {
         }
     
         public static final int INPUTS_FIELD_NUMBER = 14;
    -    private java.util.List inputs_;
    -    /**
    -     * 
    -     * Inputs of this node.
    -     * 
    - * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; - */ - public java.util.List getInputsList() { - return inputs_; - } + private com.google.protobuf.Internal.LongList inputs_; /** *
    -     * Inputs of this node.
    +     * IDs of inputs of this node.
          * 
    * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + * repeated int64 inputs = 14; */ - public java.util.List - getInputsOrBuilderList() { + public java.util.List + getInputsList() { return inputs_; } /** *
    -     * Inputs of this node.
    +     * IDs of inputs of this node.
          * 
    * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + * repeated int64 inputs = 14; */ public int getInputsCount() { return inputs_.size(); } /** *
    -     * Inputs of this node.
    -     * 
    - * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; - */ - public org.tensorflow.proto.data.model.ModelProto.Node getInputs(int index) { - return inputs_.get(index); - } - /** - *
    -     * Inputs of this node.
    +     * IDs of inputs of this node.
          * 
    * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + * repeated int64 inputs = 14; */ - public org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder getInputsOrBuilder( - int index) { - return inputs_.get(index); + public long getInputs(int index) { + return inputs_.getLong(index); } + private int inputsMemoizedSerializedSize = -1; public static final int NODE_CLASS_FIELD_NUMBER = 15; private int nodeClass_; @@ -1933,6 +1919,7 @@ public final boolean isInitialized() { @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getSerializedSize(); if (id_ != 0L) { output.writeInt64(1, id_); } @@ -1972,8 +1959,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (inputProcessingTimeCount_ != 0L) { output.writeInt64(13, inputProcessingTimeCount_); } + if (getInputsList().size() > 0) { + output.writeUInt32NoTag(114); + output.writeUInt32NoTag(inputsMemoizedSerializedSize); + } for (int i = 0; i < inputs_.size(); i++) { - output.writeMessage(14, inputs_.get(i)); + output.writeInt64NoTag(inputs_.getLong(i)); } if (nodeClass_ != org.tensorflow.proto.data.model.NodeClass.UNKNOWN.getNumber()) { output.writeEnum(15, nodeClass_); @@ -2044,9 +2035,19 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeInt64Size(13, inputProcessingTimeCount_); } - for (int i = 0; i < inputs_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(14, inputs_.get(i)); + { + int dataSize = 0; + for (int i = 0; i < inputs_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeInt64SizeNoTag(inputs_.getLong(i)); + } + size += dataSize; + if (!getInputsList().isEmpty()) { + size += 1; + size += com.google.protobuf.CodedOutputStream + .computeInt32SizeNoTag(dataSize); + } + inputsMemoizedSerializedSize = dataSize; } if (nodeClass_ != org.tensorflow.proto.data.model.NodeClass.UNKNOWN.getNumber()) { size += com.google.protobuf.CodedOutputStream @@ -2306,7 +2307,6 @@ private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getParametersFieldBuilder(); - getInputsFieldBuilder(); } } @java.lang.Override @@ -2342,12 +2342,8 @@ public Builder clear() { inputProcessingTimeCount_ = 0L; - if (inputsBuilder_ == null) { - inputs_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - } else { - inputsBuilder_.clear(); - } + inputs_ = emptyLongList(); + bitField0_ = (bitField0_ & ~0x00000002); nodeClass_ = 0; ratio_ = 0D; @@ -2402,15 +2398,11 @@ public org.tensorflow.proto.data.model.ModelProto.Node buildPartial() { } result.inputProcessingTimeSum_ = inputProcessingTimeSum_; result.inputProcessingTimeCount_ = inputProcessingTimeCount_; - if (inputsBuilder_ == null) { - if (((bitField0_ & 0x00000002) != 0)) { - inputs_ = java.util.Collections.unmodifiableList(inputs_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.inputs_ = inputs_; - } else { - result.inputs_ = inputsBuilder_.build(); + if (((bitField0_ & 0x00000002) != 0)) { + inputs_.makeImmutable(); + bitField0_ = (bitField0_ & ~0x00000002); } + result.inputs_ = inputs_; result.nodeClass_ = nodeClass_; result.ratio_ = ratio_; result.memoryRatio_ = memoryRatio_; @@ -2525,31 +2517,15 @@ public Builder mergeFrom(org.tensorflow.proto.data.model.ModelProto.Node other) if (other.getInputProcessingTimeCount() != 0L) { setInputProcessingTimeCount(other.getInputProcessingTimeCount()); } - if (inputsBuilder_ 
== null) { - if (!other.inputs_.isEmpty()) { - if (inputs_.isEmpty()) { - inputs_ = other.inputs_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureInputsIsMutable(); - inputs_.addAll(other.inputs_); - } - onChanged(); - } - } else { - if (!other.inputs_.isEmpty()) { - if (inputsBuilder_.isEmpty()) { - inputsBuilder_.dispose(); - inputsBuilder_ = null; - inputs_ = other.inputs_; - bitField0_ = (bitField0_ & ~0x00000002); - inputsBuilder_ = - com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? - getInputsFieldBuilder() : null; - } else { - inputsBuilder_.addAllMessages(other.inputs_); - } + if (!other.inputs_.isEmpty()) { + if (inputs_.isEmpty()) { + inputs_ = other.inputs_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureInputsIsMutable(); + inputs_.addAll(other.inputs_); } + onChanged(); } if (other.nodeClass_ != 0) { setNodeClassValue(other.getNodeClassValue()); @@ -3400,317 +3376,100 @@ public Builder clearInputProcessingTimeCount() { return this; } - private java.util.List inputs_ = - java.util.Collections.emptyList(); + private com.google.protobuf.Internal.LongList inputs_ = emptyLongList(); private void ensureInputsIsMutable() { if (!((bitField0_ & 0x00000002) != 0)) { - inputs_ = new java.util.ArrayList(inputs_); + inputs_ = mutableCopy(inputs_); bitField0_ |= 0x00000002; } } - - private com.google.protobuf.RepeatedFieldBuilderV3< - org.tensorflow.proto.data.model.ModelProto.Node, org.tensorflow.proto.data.model.ModelProto.Node.Builder, org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder> inputsBuilder_; - /** *
    -       * Inputs of this node.
    +       * IDs of inputs of this node.
            * 
    * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + * repeated int64 inputs = 14; */ - public java.util.List getInputsList() { - if (inputsBuilder_ == null) { - return java.util.Collections.unmodifiableList(inputs_); - } else { - return inputsBuilder_.getMessageList(); - } + public java.util.List + getInputsList() { + return ((bitField0_ & 0x00000002) != 0) ? + java.util.Collections.unmodifiableList(inputs_) : inputs_; } /** *
    -       * Inputs of this node.
    +       * IDs of inputs of this node.
            * 
    * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + * repeated int64 inputs = 14; */ public int getInputsCount() { - if (inputsBuilder_ == null) { - return inputs_.size(); - } else { - return inputsBuilder_.getCount(); - } - } - /** - *
    -       * Inputs of this node.
    -       * 
    - * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; - */ - public org.tensorflow.proto.data.model.ModelProto.Node getInputs(int index) { - if (inputsBuilder_ == null) { - return inputs_.get(index); - } else { - return inputsBuilder_.getMessage(index); - } + return inputs_.size(); } /** *
    -       * Inputs of this node.
    +       * IDs of inputs of this node.
            * 
    * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + * repeated int64 inputs = 14; */ - public Builder setInputs( - int index, org.tensorflow.proto.data.model.ModelProto.Node value) { - if (inputsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureInputsIsMutable(); - inputs_.set(index, value); - onChanged(); - } else { - inputsBuilder_.setMessage(index, value); - } - return this; + public long getInputs(int index) { + return inputs_.getLong(index); } /** *
    -       * Inputs of this node.
    +       * IDs of inputs of this node.
            * 
    * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + * repeated int64 inputs = 14; */ public Builder setInputs( - int index, org.tensorflow.proto.data.model.ModelProto.Node.Builder builderForValue) { - if (inputsBuilder_ == null) { - ensureInputsIsMutable(); - inputs_.set(index, builderForValue.build()); - onChanged(); - } else { - inputsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - *
    -       * Inputs of this node.
    -       * 
    - * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; - */ - public Builder addInputs(org.tensorflow.proto.data.model.ModelProto.Node value) { - if (inputsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureInputsIsMutable(); - inputs_.add(value); - onChanged(); - } else { - inputsBuilder_.addMessage(value); - } - return this; - } - /** - *
    -       * Inputs of this node.
    -       * 
    - * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; - */ - public Builder addInputs( - int index, org.tensorflow.proto.data.model.ModelProto.Node value) { - if (inputsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureInputsIsMutable(); - inputs_.add(index, value); - onChanged(); - } else { - inputsBuilder_.addMessage(index, value); - } - return this; - } - /** - *
    -       * Inputs of this node.
    -       * 
    - * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; - */ - public Builder addInputs( - org.tensorflow.proto.data.model.ModelProto.Node.Builder builderForValue) { - if (inputsBuilder_ == null) { - ensureInputsIsMutable(); - inputs_.add(builderForValue.build()); - onChanged(); - } else { - inputsBuilder_.addMessage(builderForValue.build()); - } + int index, long value) { + ensureInputsIsMutable(); + inputs_.setLong(index, value); + onChanged(); return this; } /** *
    -       * Inputs of this node.
    +       * IDs of inputs of this node.
            * 
    * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + * repeated int64 inputs = 14; */ - public Builder addInputs( - int index, org.tensorflow.proto.data.model.ModelProto.Node.Builder builderForValue) { - if (inputsBuilder_ == null) { - ensureInputsIsMutable(); - inputs_.add(index, builderForValue.build()); - onChanged(); - } else { - inputsBuilder_.addMessage(index, builderForValue.build()); - } + public Builder addInputs(long value) { + ensureInputsIsMutable(); + inputs_.addLong(value); + onChanged(); return this; } /** *
    -       * Inputs of this node.
    +       * IDs of inputs of this node.
            * 
    * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + * repeated int64 inputs = 14; */ public Builder addAllInputs( - java.lang.Iterable values) { - if (inputsBuilder_ == null) { - ensureInputsIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, inputs_); - onChanged(); - } else { - inputsBuilder_.addAllMessages(values); - } + java.lang.Iterable values) { + ensureInputsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, inputs_); + onChanged(); return this; } /** *
    -       * Inputs of this node.
    +       * IDs of inputs of this node.
            * 
    * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; + * repeated int64 inputs = 14; */ public Builder clearInputs() { - if (inputsBuilder_ == null) { - inputs_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - } else { - inputsBuilder_.clear(); - } - return this; - } - /** - *
    -       * Inputs of this node.
    -       * 
    - * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; - */ - public Builder removeInputs(int index) { - if (inputsBuilder_ == null) { - ensureInputsIsMutable(); - inputs_.remove(index); - onChanged(); - } else { - inputsBuilder_.remove(index); - } + inputs_ = emptyLongList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); return this; } - /** - *
    -       * Inputs of this node.
    -       * 
    - * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; - */ - public org.tensorflow.proto.data.model.ModelProto.Node.Builder getInputsBuilder( - int index) { - return getInputsFieldBuilder().getBuilder(index); - } - /** - *
    -       * Inputs of this node.
    -       * 
    - * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; - */ - public org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder getInputsOrBuilder( - int index) { - if (inputsBuilder_ == null) { - return inputs_.get(index); } else { - return inputsBuilder_.getMessageOrBuilder(index); - } - } - /** - *
    -       * Inputs of this node.
    -       * 
    - * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; - */ - public java.util.List - getInputsOrBuilderList() { - if (inputsBuilder_ != null) { - return inputsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(inputs_); - } - } - /** - *
    -       * Inputs of this node.
    -       * 
    - * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; - */ - public org.tensorflow.proto.data.model.ModelProto.Node.Builder addInputsBuilder() { - return getInputsFieldBuilder().addBuilder( - org.tensorflow.proto.data.model.ModelProto.Node.getDefaultInstance()); - } - /** - *
    -       * Inputs of this node.
    -       * 
    - * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; - */ - public org.tensorflow.proto.data.model.ModelProto.Node.Builder addInputsBuilder( - int index) { - return getInputsFieldBuilder().addBuilder( - index, org.tensorflow.proto.data.model.ModelProto.Node.getDefaultInstance()); - } - /** - *
    -       * Inputs of this node.
    -       * 
    - * - * repeated .tensorflow.data.model.ModelProto.Node inputs = 14; - */ - public java.util.List - getInputsBuilderList() { - return getInputsFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilderV3< - org.tensorflow.proto.data.model.ModelProto.Node, org.tensorflow.proto.data.model.ModelProto.Node.Builder, org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder> - getInputsFieldBuilder() { - if (inputsBuilder_ == null) { - inputsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< - org.tensorflow.proto.data.model.ModelProto.Node, org.tensorflow.proto.data.model.ModelProto.Node.Builder, org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder>( - inputs_, - ((bitField0_ & 0x00000002) != 0), - getParentForChildren(), - isClean()); - inputs_ = null; - } - return inputsBuilder_; - } private int nodeClass_ = 0; /** @@ -4722,82 +4481,140 @@ public org.tensorflow.proto.data.model.ModelProto.OptimizationParams getDefaultI } - public static final int OUTPUT_FIELD_NUMBER = 1; - private org.tensorflow.proto.data.model.ModelProto.Node output_; + public static final int NODES_FIELD_NUMBER = 1; + private static final class NodesDefaultEntryHolder { + static final com.google.protobuf.MapEntry< + java.lang.Long, org.tensorflow.proto.data.model.ModelProto.Node> defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + org.tensorflow.proto.data.model.ModelProtos.internal_static_tensorflow_data_model_ModelProto_NodesEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.INT64, + 0L, + com.google.protobuf.WireFormat.FieldType.MESSAGE, + org.tensorflow.proto.data.model.ModelProto.Node.getDefaultInstance()); + } + private com.google.protobuf.MapField< + java.lang.Long, org.tensorflow.proto.data.model.ModelProto.Node> nodes_; + private com.google.protobuf.MapField + internalGetNodes() { + if (nodes_ == null) { + return com.google.protobuf.MapField.emptyMapField( + NodesDefaultEntryHolder.defaultEntry); + } + return nodes_; + } + + public int getNodesCount() { + return internalGetNodes().getMap().size(); + } + /** + *
    +   * Map of node IDs to nodes of this model.
    +   * 
    + * + * map<int64, .tensorflow.data.model.ModelProto.Node> nodes = 1; + */ + + public boolean containsNodes( + long key) { + + return internalGetNodes().getMap().containsKey(key); + } + /** + * Use {@link #getNodesMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getNodes() { + return getNodesMap(); + } /** *
    -   * Output node of this model.
    +   * Map of node IDs to nodes of this model.
        * 
    * - * .tensorflow.data.model.ModelProto.Node output = 1; + * map<int64, .tensorflow.data.model.ModelProto.Node> nodes = 1; */ - public boolean hasOutput() { - return output_ != null; + + public java.util.Map getNodesMap() { + return internalGetNodes().getMap(); } /** *
    -   * Output node of this model.
    +   * Map of node IDs to nodes of this model.
        * 
    * - * .tensorflow.data.model.ModelProto.Node output = 1; + * map<int64, .tensorflow.data.model.ModelProto.Node> nodes = 1; */ - public org.tensorflow.proto.data.model.ModelProto.Node getOutput() { - return output_ == null ? org.tensorflow.proto.data.model.ModelProto.Node.getDefaultInstance() : output_; + + public org.tensorflow.proto.data.model.ModelProto.Node getNodesOrDefault( + long key, + org.tensorflow.proto.data.model.ModelProto.Node defaultValue) { + + java.util.Map map = + internalGetNodes().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; } /** *
    -   * Output node of this model.
    +   * Map of node IDs to nodes of this model.
        * 
    * - * .tensorflow.data.model.ModelProto.Node output = 1; + * map<int64, .tensorflow.data.model.ModelProto.Node> nodes = 1; */ - public org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder getOutputOrBuilder() { - return getOutput(); + + public org.tensorflow.proto.data.model.ModelProto.Node getNodesOrThrow( + long key) { + + java.util.Map map = + internalGetNodes().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); } - public static final int ID_COUNTER_FIELD_NUMBER = 2; - private long idCounter_; + public static final int OUTPUT_FIELD_NUMBER = 2; + private long output_; /** *
    -   * Counter for node IDs of this model.
    +   * ID of the output node of this model.
        * 
    * - * int64 id_counter = 2; + * int64 output = 2; */ - public long getIdCounter() { - return idCounter_; + public long getOutput() { + return output_; } - public static final int COLLECT_RESOURCE_USAGE_FIELD_NUMBER = 3; - private boolean collectResourceUsage_; + public static final int ID_COUNTER_FIELD_NUMBER = 3; + private long idCounter_; /** *
    -   * Indicates whether the modeling framework should collect resource usage,
    -   * e.g. CPU, memory.
    +   * Counter for node IDs of this model.
        * 
    * - * bool collect_resource_usage = 3; + * int64 id_counter = 3; */ - public boolean getCollectResourceUsage() { - return collectResourceUsage_; + public long getIdCounter() { + return idCounter_; } - public static final int OPTIMIZATION_PARAMS_FIELD_NUMBER = 4; + public static final int OPTIMIZATION_PARAMS_FIELD_NUMBER = 5; private org.tensorflow.proto.data.model.ModelProto.OptimizationParams optimizationParams_; /** - * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 5; */ public boolean hasOptimizationParams() { return optimizationParams_ != null; } /** - * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 5; */ public org.tensorflow.proto.data.model.ModelProto.OptimizationParams getOptimizationParams() { return optimizationParams_ == null ? org.tensorflow.proto.data.model.ModelProto.OptimizationParams.getDefaultInstance() : optimizationParams_; } /** - * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 5; */ public org.tensorflow.proto.data.model.ModelProto.OptimizationParamsOrBuilder getOptimizationParamsOrBuilder() { return getOptimizationParams(); @@ -4817,17 +4634,20 @@ public final boolean isInitialized() { @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (output_ != null) { - output.writeMessage(1, getOutput()); + com.google.protobuf.GeneratedMessageV3 + .serializeLongMapTo( + output, + internalGetNodes(), + NodesDefaultEntryHolder.defaultEntry, + 1); + if (output_ != 0L) { + output.writeInt64(2, output_); } if (idCounter_ != 0L) { - output.writeInt64(2, idCounter_); - } - if (collectResourceUsage_ != false) { - output.writeBool(3, collectResourceUsage_); + output.writeInt64(3, idCounter_); } if (optimizationParams_ != null) { - output.writeMessage(4, getOptimizationParams()); + output.writeMessage(5, getOptimizationParams()); } unknownFields.writeTo(output); } @@ -4838,21 +4658,27 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (output_ != null) { + for (java.util.Map.Entry entry + : internalGetNodes().getMap().entrySet()) { + com.google.protobuf.MapEntry + nodes__ = NodesDefaultEntryHolder.defaultEntry.newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, getOutput()); + .computeMessageSize(1, nodes__); } - if (idCounter_ != 0L) { + if (output_ != 0L) { size += com.google.protobuf.CodedOutputStream - .computeInt64Size(2, idCounter_); + .computeInt64Size(2, output_); } - if (collectResourceUsage_ != false) { + if (idCounter_ != 0L) { size += com.google.protobuf.CodedOutputStream - .computeBoolSize(3, collectResourceUsage_); + .computeInt64Size(3, idCounter_); } if (optimizationParams_ != null) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, getOptimizationParams()); + .computeMessageSize(5, getOptimizationParams()); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -4869,15 +4695,12 @@ public boolean equals(final java.lang.Object obj) { } org.tensorflow.proto.data.model.ModelProto other = (org.tensorflow.proto.data.model.ModelProto) obj; - if (hasOutput() != other.hasOutput()) return false; - if (hasOutput()) 
{ - if (!getOutput() - .equals(other.getOutput())) return false; - } + if (!internalGetNodes().equals( + other.internalGetNodes())) return false; + if (getOutput() + != other.getOutput()) return false; if (getIdCounter() != other.getIdCounter()) return false; - if (getCollectResourceUsage() - != other.getCollectResourceUsage()) return false; if (hasOptimizationParams() != other.hasOptimizationParams()) return false; if (hasOptimizationParams()) { if (!getOptimizationParams() @@ -4894,16 +4717,16 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); - if (hasOutput()) { - hash = (37 * hash) + OUTPUT_FIELD_NUMBER; - hash = (53 * hash) + getOutput().hashCode(); + if (!internalGetNodes().getMap().isEmpty()) { + hash = (37 * hash) + NODES_FIELD_NUMBER; + hash = (53 * hash) + internalGetNodes().hashCode(); } + hash = (37 * hash) + OUTPUT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getOutput()); hash = (37 * hash) + ID_COUNTER_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashLong( getIdCounter()); - hash = (37 * hash) + COLLECT_RESOURCE_USAGE_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( - getCollectResourceUsage()); if (hasOptimizationParams()) { hash = (37 * hash) + OPTIMIZATION_PARAMS_FIELD_NUMBER; hash = (53 * hash) + getOptimizationParams().hashCode(); @@ -5020,6 +4843,28 @@ public static final class Builder extends return org.tensorflow.proto.data.model.ModelProtos.internal_static_tensorflow_data_model_ModelProto_descriptor; } + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 1: + return internalGetNodes(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField( + int number) { + switch (number) { + case 1: + return internalGetMutableNodes(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { @@ -5046,15 +4891,10 @@ private void maybeForceBuilderInitialization() { @java.lang.Override public Builder clear() { super.clear(); - if (outputBuilder_ == null) { - output_ = null; - } else { - output_ = null; - outputBuilder_ = null; - } - idCounter_ = 0L; + internalGetMutableNodes().clear(); + output_ = 0L; - collectResourceUsage_ = false; + idCounter_ = 0L; if (optimizationParamsBuilder_ == null) { optimizationParams_ = null; @@ -5088,13 +4928,11 @@ public org.tensorflow.proto.data.model.ModelProto build() { @java.lang.Override public org.tensorflow.proto.data.model.ModelProto buildPartial() { org.tensorflow.proto.data.model.ModelProto result = new org.tensorflow.proto.data.model.ModelProto(this); - if (outputBuilder_ == null) { - result.output_ = output_; - } else { - result.output_ = outputBuilder_.build(); - } + int from_bitField0_ = bitField0_; + result.nodes_ = internalGetNodes(); + result.nodes_.makeImmutable(); + result.output_ = output_; result.idCounter_ = idCounter_; - result.collectResourceUsage_ = collectResourceUsage_; if (optimizationParamsBuilder_ == null) { result.optimizationParams_ = optimizationParams_; } else { @@ -5148,15 +4986,14 @@ public Builder mergeFrom(com.google.protobuf.Message other) { public Builder mergeFrom(org.tensorflow.proto.data.model.ModelProto other) { if 
(other == org.tensorflow.proto.data.model.ModelProto.getDefaultInstance()) return this; - if (other.hasOutput()) { - mergeOutput(other.getOutput()); + internalGetMutableNodes().mergeFrom( + other.internalGetNodes()); + if (other.getOutput() != 0L) { + setOutput(other.getOutput()); } if (other.getIdCounter() != 0L) { setIdCounter(other.getIdCounter()); } - if (other.getCollectResourceUsage() != false) { - setCollectResourceUsage(other.getCollectResourceUsage()); - } if (other.hasOptimizationParams()) { mergeOptimizationParams(other.getOptimizationParams()); } @@ -5188,235 +5025,231 @@ public Builder mergeFrom( } return this; } + private int bitField0_; - private org.tensorflow.proto.data.model.ModelProto.Node output_; - private com.google.protobuf.SingleFieldBuilderV3< - org.tensorflow.proto.data.model.ModelProto.Node, org.tensorflow.proto.data.model.ModelProto.Node.Builder, org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder> outputBuilder_; + private com.google.protobuf.MapField< + java.lang.Long, org.tensorflow.proto.data.model.ModelProto.Node> nodes_; + private com.google.protobuf.MapField + internalGetNodes() { + if (nodes_ == null) { + return com.google.protobuf.MapField.emptyMapField( + NodesDefaultEntryHolder.defaultEntry); + } + return nodes_; + } + private com.google.protobuf.MapField + internalGetMutableNodes() { + onChanged();; + if (nodes_ == null) { + nodes_ = com.google.protobuf.MapField.newMapField( + NodesDefaultEntryHolder.defaultEntry); + } + if (!nodes_.isMutable()) { + nodes_ = nodes_.copy(); + } + return nodes_; + } + + public int getNodesCount() { + return internalGetNodes().getMap().size(); + } /** *
    -     * Output node of this model.
    +     * Map of node IDs to nodes of this model.
          * 
    * - * .tensorflow.data.model.ModelProto.Node output = 1; + * map<int64, .tensorflow.data.model.ModelProto.Node> nodes = 1; */ - public boolean hasOutput() { - return outputBuilder_ != null || output_ != null; + + public boolean containsNodes( + long key) { + + return internalGetNodes().getMap().containsKey(key); } /** - *
    -     * Output node of this model.
    -     * 
    - * - * .tensorflow.data.model.ModelProto.Node output = 1; + * Use {@link #getNodesMap()} instead. */ - public org.tensorflow.proto.data.model.ModelProto.Node getOutput() { - if (outputBuilder_ == null) { - return output_ == null ? org.tensorflow.proto.data.model.ModelProto.Node.getDefaultInstance() : output_; - } else { - return outputBuilder_.getMessage(); - } + @java.lang.Deprecated + public java.util.Map getNodes() { + return getNodesMap(); } /** *
    -     * Output node of this model.
    +     * Map of node IDs to nodes of this model.
          * 
    * - * .tensorflow.data.model.ModelProto.Node output = 1; + * map<int64, .tensorflow.data.model.ModelProto.Node> nodes = 1; */ - public Builder setOutput(org.tensorflow.proto.data.model.ModelProto.Node value) { - if (outputBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - output_ = value; - onChanged(); - } else { - outputBuilder_.setMessage(value); - } - return this; + public java.util.Map getNodesMap() { + return internalGetNodes().getMap(); } /** *
    -     * Output node of this model.
    +     * Map of node IDs to nodes of this model.
          * 
    * - * .tensorflow.data.model.ModelProto.Node output = 1; + * map<int64, .tensorflow.data.model.ModelProto.Node> nodes = 1; */ - public Builder setOutput( - org.tensorflow.proto.data.model.ModelProto.Node.Builder builderForValue) { - if (outputBuilder_ == null) { - output_ = builderForValue.build(); - onChanged(); - } else { - outputBuilder_.setMessage(builderForValue.build()); - } - return this; + public org.tensorflow.proto.data.model.ModelProto.Node getNodesOrDefault( + long key, + org.tensorflow.proto.data.model.ModelProto.Node defaultValue) { + + java.util.Map map = + internalGetNodes().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; } /** *
    -     * Output node of this model.
    +     * Map of node IDs to nodes of this model.
          * 
    * - * .tensorflow.data.model.ModelProto.Node output = 1; + * map<int64, .tensorflow.data.model.ModelProto.Node> nodes = 1; */ - public Builder mergeOutput(org.tensorflow.proto.data.model.ModelProto.Node value) { - if (outputBuilder_ == null) { - if (output_ != null) { - output_ = - org.tensorflow.proto.data.model.ModelProto.Node.newBuilder(output_).mergeFrom(value).buildPartial(); - } else { - output_ = value; - } - onChanged(); - } else { - outputBuilder_.mergeFrom(value); + + public org.tensorflow.proto.data.model.ModelProto.Node getNodesOrThrow( + long key) { + + java.util.Map map = + internalGetNodes().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); } + return map.get(key); + } + public Builder clearNodes() { + internalGetMutableNodes().getMutableMap() + .clear(); return this; } /** *
    -     * Output node of this model.
    +     * Map of node IDs to nodes of this model.
          * 
    * - * .tensorflow.data.model.ModelProto.Node output = 1; + * map<int64, .tensorflow.data.model.ModelProto.Node> nodes = 1; */ - public Builder clearOutput() { - if (outputBuilder_ == null) { - output_ = null; - onChanged(); - } else { - output_ = null; - outputBuilder_ = null; - } + public Builder removeNodes( + long key) { + + internalGetMutableNodes().getMutableMap() + .remove(key); return this; } /** - *
    -     * Output node of this model.
    -     * 
    - * - * .tensorflow.data.model.ModelProto.Node output = 1; + * Use alternate mutation accessors instead. */ - public org.tensorflow.proto.data.model.ModelProto.Node.Builder getOutputBuilder() { - - onChanged(); - return getOutputFieldBuilder().getBuilder(); + @java.lang.Deprecated + public java.util.Map + getMutableNodes() { + return internalGetMutableNodes().getMutableMap(); } /** *
    -     * Output node of this model.
    +     * Map of node IDs to nodes of this model.
          * 
    * - * .tensorflow.data.model.ModelProto.Node output = 1; + * map<int64, .tensorflow.data.model.ModelProto.Node> nodes = 1; */ - public org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder getOutputOrBuilder() { - if (outputBuilder_ != null) { - return outputBuilder_.getMessageOrBuilder(); - } else { - return output_ == null ? - org.tensorflow.proto.data.model.ModelProto.Node.getDefaultInstance() : output_; - } + public Builder putNodes( + long key, + org.tensorflow.proto.data.model.ModelProto.Node value) { + + if (value == null) { throw new java.lang.NullPointerException(); } + internalGetMutableNodes().getMutableMap() + .put(key, value); + return this; } /** *
    -     * Output node of this model.
    +     * Map of node IDs to nodes of this model.
          * 
    * - * .tensorflow.data.model.ModelProto.Node output = 1; + * map<int64, .tensorflow.data.model.ModelProto.Node> nodes = 1; */ - private com.google.protobuf.SingleFieldBuilderV3< - org.tensorflow.proto.data.model.ModelProto.Node, org.tensorflow.proto.data.model.ModelProto.Node.Builder, org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder> - getOutputFieldBuilder() { - if (outputBuilder_ == null) { - outputBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< - org.tensorflow.proto.data.model.ModelProto.Node, org.tensorflow.proto.data.model.ModelProto.Node.Builder, org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder>( - getOutput(), - getParentForChildren(), - isClean()); - output_ = null; - } - return outputBuilder_; + + public Builder putAllNodes( + java.util.Map values) { + internalGetMutableNodes().getMutableMap() + .putAll(values); + return this; } - private long idCounter_ ; + private long output_ ; /** *
    -     * Counter for node IDs of this model.
    +     * ID of the output node of this model.
          * 
    * - * int64 id_counter = 2; + * int64 output = 2; */ - public long getIdCounter() { - return idCounter_; + public long getOutput() { + return output_; } /** *
    -     * Counter for node IDs of this model.
    +     * ID of the output node of this model.
          * 
    * - * int64 id_counter = 2; + * int64 output = 2; */ - public Builder setIdCounter(long value) { + public Builder setOutput(long value) { - idCounter_ = value; + output_ = value; onChanged(); return this; } /** *
    -     * Counter for node IDs of this model.
    +     * ID of the output node of this model.
          * 
    * - * int64 id_counter = 2; + * int64 output = 2; */ - public Builder clearIdCounter() { + public Builder clearOutput() { - idCounter_ = 0L; + output_ = 0L; onChanged(); return this; } - private boolean collectResourceUsage_ ; + private long idCounter_ ; /** *
    -     * Indicates whether the modeling framework should collect resource usage,
    -     * e.g. CPU, memory.
    +     * Counter for node IDs of this model.
          * 
    * - * bool collect_resource_usage = 3; + * int64 id_counter = 3; */ - public boolean getCollectResourceUsage() { - return collectResourceUsage_; + public long getIdCounter() { + return idCounter_; } /** *
    -     * Indicates whether the modeling framework should collect resource usage,
    -     * e.g. CPU, memory.
    +     * Counter for node IDs of this model.
          * 
    * - * bool collect_resource_usage = 3; + * int64 id_counter = 3; */ - public Builder setCollectResourceUsage(boolean value) { + public Builder setIdCounter(long value) { - collectResourceUsage_ = value; + idCounter_ = value; onChanged(); return this; } /** *
    -     * Indicates whether the modeling framework should collect resource usage,
    -     * e.g. CPU, memory.
    +     * Counter for node IDs of this model.
          * 
    * - * bool collect_resource_usage = 3; + * int64 id_counter = 3; */ - public Builder clearCollectResourceUsage() { + public Builder clearIdCounter() { - collectResourceUsage_ = false; + idCounter_ = 0L; onChanged(); return this; } @@ -5425,13 +5258,13 @@ public Builder clearCollectResourceUsage() { private com.google.protobuf.SingleFieldBuilderV3< org.tensorflow.proto.data.model.ModelProto.OptimizationParams, org.tensorflow.proto.data.model.ModelProto.OptimizationParams.Builder, org.tensorflow.proto.data.model.ModelProto.OptimizationParamsOrBuilder> optimizationParamsBuilder_; /** - * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 5; */ public boolean hasOptimizationParams() { return optimizationParamsBuilder_ != null || optimizationParams_ != null; } /** - * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 5; */ public org.tensorflow.proto.data.model.ModelProto.OptimizationParams getOptimizationParams() { if (optimizationParamsBuilder_ == null) { @@ -5441,7 +5274,7 @@ public org.tensorflow.proto.data.model.ModelProto.OptimizationParams getOptimiza } } /** - * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 5; */ public Builder setOptimizationParams(org.tensorflow.proto.data.model.ModelProto.OptimizationParams value) { if (optimizationParamsBuilder_ == null) { @@ -5457,7 +5290,7 @@ public Builder setOptimizationParams(org.tensorflow.proto.data.model.ModelProto. return this; } /** - * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 5; */ public Builder setOptimizationParams( org.tensorflow.proto.data.model.ModelProto.OptimizationParams.Builder builderForValue) { @@ -5471,7 +5304,7 @@ public Builder setOptimizationParams( return this; } /** - * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 5; */ public Builder mergeOptimizationParams(org.tensorflow.proto.data.model.ModelProto.OptimizationParams value) { if (optimizationParamsBuilder_ == null) { @@ -5489,7 +5322,7 @@ public Builder mergeOptimizationParams(org.tensorflow.proto.data.model.ModelProt return this; } /** - * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 5; */ public Builder clearOptimizationParams() { if (optimizationParamsBuilder_ == null) { @@ -5503,7 +5336,7 @@ public Builder clearOptimizationParams() { return this; } /** - * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 5; */ public org.tensorflow.proto.data.model.ModelProto.OptimizationParams.Builder getOptimizationParamsBuilder() { @@ -5511,7 +5344,7 @@ public org.tensorflow.proto.data.model.ModelProto.OptimizationParams.Builder get return getOptimizationParamsFieldBuilder().getBuilder(); } /** - * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 5; */ public 
org.tensorflow.proto.data.model.ModelProto.OptimizationParamsOrBuilder getOptimizationParamsOrBuilder() { if (optimizationParamsBuilder_ != null) { @@ -5522,7 +5355,7 @@ public org.tensorflow.proto.data.model.ModelProto.OptimizationParamsOrBuilder ge } } /** - * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 5; */ private com.google.protobuf.SingleFieldBuilderV3< org.tensorflow.proto.data.model.ModelProto.OptimizationParams, org.tensorflow.proto.data.model.ModelProto.OptimizationParams.Builder, org.tensorflow.proto.data.model.ModelProto.OptimizationParamsOrBuilder> diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProtoOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProtoOrBuilder.java index 956471c72a0..609e1a73f0d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProtoOrBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProtoOrBuilder.java @@ -9,58 +9,86 @@ public interface ModelProtoOrBuilder extends /** *
    -   * Output node of this model.
    +   * Map of node IDs to nodes of this model.
        * 
    * - * .tensorflow.data.model.ModelProto.Node output = 1; + * map<int64, .tensorflow.data.model.ModelProto.Node> nodes = 1; */ - boolean hasOutput(); + int getNodesCount(); /** *
    -   * Output node of this model.
    +   * Map of node IDs to nodes of this model.
        * 
    * - * .tensorflow.data.model.ModelProto.Node output = 1; + * map<int64, .tensorflow.data.model.ModelProto.Node> nodes = 1; */ - org.tensorflow.proto.data.model.ModelProto.Node getOutput(); + boolean containsNodes( + long key); + /** + * Use {@link #getNodesMap()} instead. + */ + @java.lang.Deprecated + java.util.Map + getNodes(); /** *
    -   * Output node of this model.
    +   * Map of node IDs to nodes of this model.
        * 
    * - * .tensorflow.data.model.ModelProto.Node output = 1; + * map<int64, .tensorflow.data.model.ModelProto.Node> nodes = 1; + */ + java.util.Map + getNodesMap(); + /** + *
    +   * Map of node IDs to nodes of this model.
    +   * 
    + * + * map<int64, .tensorflow.data.model.ModelProto.Node> nodes = 1; */ - org.tensorflow.proto.data.model.ModelProto.NodeOrBuilder getOutputOrBuilder(); + org.tensorflow.proto.data.model.ModelProto.Node getNodesOrDefault( + long key, + org.tensorflow.proto.data.model.ModelProto.Node defaultValue); /** *
    -   * Counter for node IDs of this model.
    +   * Map of node IDs to nodes of this model.
        * 
    * - * int64 id_counter = 2; + * map<int64, .tensorflow.data.model.ModelProto.Node> nodes = 1; */ - long getIdCounter(); + + org.tensorflow.proto.data.model.ModelProto.Node getNodesOrThrow( + long key); /** *
    -   * Indicates whether the modeling framework should collect resource usage,
    -   * e.g. CPU, memory.
    +   * ID of the output node of this model.
        * 
    * - * bool collect_resource_usage = 3; + * int64 output = 2; */ - boolean getCollectResourceUsage(); + long getOutput(); + + /** + *
    +   * Counter for node IDs of this model.
    +   * 
    + * + * int64 id_counter = 3; + */ + long getIdCounter(); /** - * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 5; */ boolean hasOptimizationParams(); /** - * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 5; */ org.tensorflow.proto.data.model.ModelProto.OptimizationParams getOptimizationParams(); /** - * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 4; + * .tensorflow.data.model.ModelProto.OptimizationParams optimization_params = 5; */ org.tensorflow.proto.data.model.ModelProto.OptimizationParamsOrBuilder getOptimizationParamsOrBuilder(); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProtos.java index b1bb7add402..aa3343dfac6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProtos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProtos.java @@ -29,6 +29,11 @@ public static void registerAllExtensions( static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_tensorflow_data_model_ModelProto_Node_Parameter_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_data_model_ModelProto_NodesEntry_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_data_model_ModelProto_NodesEntry_fieldAccessorTable; static final com.google.protobuf.Descriptors.Descriptor internal_static_tensorflow_data_model_ModelProto_OptimizationParams_descriptor; static final @@ -44,40 +49,41 @@ public static void registerAllExtensions( static { java.lang.String[] descriptorData = { "\n%tensorflow/core/framework/model.proto\022" + - "\025tensorflow.data.model\"\313\007\n\nModelProto\0226\n" + - "\006output\030\001 \001(\0132&.tensorflow.data.model.Mo" + - "delProto.Node\022\022\n\nid_counter\030\002 \001(\003\022\036\n\026col" + - "lect_resource_usage\030\003 \001(\010\022Q\n\023optimizatio" + - "n_params\030\004 \001(\01324.tensorflow.data.model.M" + - "odelProto.OptimizationParams\032\347\004\n\004Node\022\n\n" + - "\002id\030\001 \001(\003\022\014\n\004name\030\002 \001(\t\022\020\n\010autotune\030\003 \001(" + - "\010\022\026\n\016buffered_bytes\030\004 \001(\003\022\031\n\021buffered_el" + - "ements\030\005 \001(\003\022\026\n\016bytes_consumed\030\006 \001(\003\022\026\n\016" + - "bytes_produced\030\007 \001(\003\022\024\n\014num_elements\030\010 \001" + - "(\003\022\027\n\017processing_time\030\t \001(\003\022\026\n\016record_me" + - "trics\030\n \001(\010\022D\n\nparameters\030\013 \003(\01320.tensor" + - "flow.data.model.ModelProto.Node.Paramete" + - "r\022!\n\031input_processing_time_sum\030\014 \001(\001\022#\n\033" + - "input_processing_time_count\030\r \001(\003\0226\n\006inp" + - "uts\030\016 \003(\0132&.tensorflow.data.model.ModelP" + - "roto.Node\0224\n\nnode_class\030\017 \001(\0162 .tensorfl" + - "ow.data.model.NodeClass\022\r\n\005ratio\030\020 \001(\001\022\024" + - "\n\014memory_ratio\030\021 \001(\001\032h\n\tParameter\022\014\n\004nam" + - "e\030\001 \001(\t\022\r\n\005value\030\002 \001(\001\022\023\n\013state_value\030\003 " + - 
"\001(\001\022\013\n\003min\030\004 \001(\001\022\013\n\003max\030\005 \001(\001\022\017\n\007tunable" + - "\030\006 \001(\010\032\223\001\n\022OptimizationParams\022;\n\talgorit" + - "hm\030\001 \001(\0162(.tensorflow.data.model.Autotun" + - "eAlgorithm\022\022\n\ncpu_budget\030\002 \001(\003\022\022\n\nram_bu" + - "dget\030\003 \001(\003\022\030\n\020model_input_time\030\004 \001(\001*\203\001\n" + - "\tNodeClass\022\013\n\007UNKNOWN\020\000\022\023\n\017INTERLEAVE_MA" + - "NY\020\001\022\031\n\025ASYNC_INTERLEAVE_MANY\020\002\022\017\n\013KNOWN" + - "_RATIO\020\003\022\025\n\021ASYNC_KNOWN_RATIO\020\004\022\021\n\rUNKNO" + - "WN_RATIO\020\005*9\n\021AutotuneAlgorithm\022\016\n\nHILL_" + - "CLIMB\020\000\022\024\n\020GRADIENT_DESCENT\020\001B\201\001\n\037org.te" + - "nsorflow.proto.data.modelB\013ModelProtosP\001" + - "ZLgithub.com/tensorflow/tensorflow/tenso" + - "rflow/go/core/framework/model_go_proto\370\001" + - "\001b\006proto3" + "\025tensorflow.data.model\"\364\007\n\nModelProto\022;\n" + + "\005nodes\030\001 \003(\0132,.tensorflow.data.model.Mod" + + "elProto.NodesEntry\022\016\n\006output\030\002 \001(\003\022\022\n\nid" + + "_counter\030\003 \001(\003\022Q\n\023optimization_params\030\005 " + + "\001(\01324.tensorflow.data.model.ModelProto.O" + + "ptimizationParams\032\277\004\n\004Node\022\n\n\002id\030\001 \001(\003\022\014" + + "\n\004name\030\002 \001(\t\022\020\n\010autotune\030\003 \001(\010\022\026\n\016buffer" + + "ed_bytes\030\004 \001(\003\022\031\n\021buffered_elements\030\005 \001(" + + "\003\022\026\n\016bytes_consumed\030\006 \001(\003\022\026\n\016bytes_produ" + + "ced\030\007 \001(\003\022\024\n\014num_elements\030\010 \001(\003\022\027\n\017proce" + + "ssing_time\030\t \001(\003\022\026\n\016record_metrics\030\n \001(\010" + + "\022D\n\nparameters\030\013 \003(\01320.tensorflow.data.m" + + "odel.ModelProto.Node.Parameter\022!\n\031input_" + + "processing_time_sum\030\014 \001(\001\022#\n\033input_proce" + + "ssing_time_count\030\r \001(\003\022\016\n\006inputs\030\016 \003(\003\0224" + + "\n\nnode_class\030\017 \001(\0162 .tensorflow.data.mod" + + "el.NodeClass\022\r\n\005ratio\030\020 \001(\001\022\024\n\014memory_ra" + + "tio\030\021 \001(\001\032h\n\tParameter\022\014\n\004name\030\001 \001(\t\022\r\n\005" + + "value\030\002 \001(\001\022\023\n\013state_value\030\003 \001(\001\022\013\n\003min\030" + + "\004 \001(\001\022\013\n\003max\030\005 \001(\001\022\017\n\007tunable\030\006 \001(\010\032T\n\nN" + + "odesEntry\022\013\n\003key\030\001 \001(\003\0225\n\005value\030\002 \001(\0132&." 
+ + "tensorflow.data.model.ModelProto.Node:\0028" + + "\001\032\223\001\n\022OptimizationParams\022;\n\talgorithm\030\001 " + + "\001(\0162(.tensorflow.data.model.AutotuneAlgo" + + "rithm\022\022\n\ncpu_budget\030\002 \001(\003\022\022\n\nram_budget\030" + + "\003 \001(\003\022\030\n\020model_input_time\030\004 \001(\001J\004\010\004\020\005*\203\001" + + "\n\tNodeClass\022\013\n\007UNKNOWN\020\000\022\023\n\017INTERLEAVE_M" + + "ANY\020\001\022\031\n\025ASYNC_INTERLEAVE_MANY\020\002\022\017\n\013KNOW" + + "N_RATIO\020\003\022\025\n\021ASYNC_KNOWN_RATIO\020\004\022\021\n\rUNKN" + + "OWN_RATIO\020\005*9\n\021AutotuneAlgorithm\022\016\n\nHILL" + + "_CLIMB\020\000\022\024\n\020GRADIENT_DESCENT\020\001B\201\001\n\037org.t" + + "ensorflow.proto.data.modelB\013ModelProtosP" + + "\001ZLgithub.com/tensorflow/tensorflow/tens" + + "orflow/go/core/framework/model_go_proto\370" + + "\001\001b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, @@ -88,7 +94,7 @@ public static void registerAllExtensions( internal_static_tensorflow_data_model_ModelProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_data_model_ModelProto_descriptor, - new java.lang.String[] { "Output", "IdCounter", "CollectResourceUsage", "OptimizationParams", }); + new java.lang.String[] { "Nodes", "Output", "IdCounter", "OptimizationParams", }); internal_static_tensorflow_data_model_ModelProto_Node_descriptor = internal_static_tensorflow_data_model_ModelProto_descriptor.getNestedTypes().get(0); internal_static_tensorflow_data_model_ModelProto_Node_fieldAccessorTable = new @@ -101,8 +107,14 @@ public static void registerAllExtensions( com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_data_model_ModelProto_Node_Parameter_descriptor, new java.lang.String[] { "Name", "Value", "StateValue", "Min", "Max", "Tunable", }); - internal_static_tensorflow_data_model_ModelProto_OptimizationParams_descriptor = + internal_static_tensorflow_data_model_ModelProto_NodesEntry_descriptor = internal_static_tensorflow_data_model_ModelProto_descriptor.getNestedTypes().get(1); + internal_static_tensorflow_data_model_ModelProto_NodesEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_data_model_ModelProto_NodesEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_tensorflow_data_model_ModelProto_OptimizationParams_descriptor = + internal_static_tensorflow_data_model_ModelProto_descriptor.getNestedTypes().get(2); internal_static_tensorflow_data_model_ModelProto_OptimizationParams_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_data_model_ModelProto_OptimizationParams_descriptor, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ApiDef.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ApiDef.java index 99916fcf1f2..57844b384b0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ApiDef.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ApiDef.java @@ -12,7 +12,7 @@ * The per-client-language ApiDefs will inherit values from the * common ApiDefs which it can either replace or modify. 
* We separate the API definition from the OpDef so we can evolve the - * API while remaining backwards compatible when interpretting old + * API while remaining backwards compatible when interpreting old * graphs. Overrides go in an "api_def.pbtxt" file with a text-format * ApiDefs message. * WARNING: Be *very* careful changing the API for any existing op -- @@ -4133,7 +4133,7 @@ protected Builder newBuilderForType( * The per-client-language ApiDefs will inherit values from the * common ApiDefs which it can either replace or modify. * We separate the API definition from the OpDef so we can evolve the - * API while remaining backwards compatible when interpretting old + * API while remaining backwards compatible when interpreting old * graphs. Overrides go in an "api_def.pbtxt" file with a text-format * ApiDefs message. * WARNING: Be *very* careful changing the API for any existing op -- diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ConfigProto.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ConfigProto.java index 989d2bbeeb3..31be1799f01 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ConfigProto.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ConfigProto.java @@ -533,15 +533,14 @@ public interface ExperimentalOrBuilder extends /** *
    -     * Whether the remote devices in the cluster should be fetched during setup
    -     * of multi-client cluster. If enabled, the workers will run an extra device
    -     * information exchange step during startup and the workers' EagerContexts
    -     * will become aware of remote devices in the cluster as well.
    +     * Whether functional control flow op lowering should be disabled. This is
    +     * useful when executing within a portable runtime where control flow op
    +     * kernels may not be loaded due to selective registration.
          * 
    * - * bool fetch_remote_devices_in_multi_client = 20; + * bool disable_functional_ops_lowering = 21; */ - boolean getFetchRemoteDevicesInMultiClient(); + boolean getDisableFunctionalOpsLowering(); } /** *
    @@ -700,9 +699,9 @@ private Experimental(
                   coordinationService_ = s;
                   break;
                 }
    -            case 160: {
    +            case 168: {
     
    -              fetchRemoteDevicesInMultiClient_ = input.readBool();
    +              disableFunctionalOpsLowering_ = input.readBool();
                   break;
                 }
                 default: {
    @@ -1344,20 +1343,19 @@ public java.lang.String getCoordinationService() {
           }
         }
     
    -    public static final int FETCH_REMOTE_DEVICES_IN_MULTI_CLIENT_FIELD_NUMBER = 20;
    -    private boolean fetchRemoteDevicesInMultiClient_;
    +    public static final int DISABLE_FUNCTIONAL_OPS_LOWERING_FIELD_NUMBER = 21;
    +    private boolean disableFunctionalOpsLowering_;
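Since fetch_remote_devices_in_multi_client (field 20) disappears and disable_functional_ops_lowering arrives as field 21 (the updated descriptor below also reserves field 20), callers move from the old accessor pair to the new one. A minimal usage sketch, assuming only the regenerated builder methods shown in this diff plus the standard setExperimental overloads; the wrapper class and method names are hypothetical:

import org.tensorflow.proto.framework.ConfigProto;

final class PortableRuntimeConfigSketch {
  // Hypothetical helper: build a ConfigProto that opts out of functional
  // control-flow op lowering via the new experimental flag.
  static ConfigProto config() {
    return ConfigProto.newBuilder()
        .setExperimental(
            ConfigProto.Experimental.newBuilder()
                .setDisableFunctionalOpsLowering(true))
        .build();
  }
}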
         /**
          * 
    -     * Whether the remote devices in the cluster should be fetched during setup
    -     * of multi-client cluster. If enabled, the workers will run an extra device
    -     * information exchange step during startup and the workers' EagerContexts
    -     * will become aware of remote devices in the cluster as well.
    +     * Whether functional control flow op lowering should be disabled. This is
    +     * useful when executing within a portable runtime where control flow op
    +     * kernels may not be loaded due to selective registration.
          * 
    * - * bool fetch_remote_devices_in_multi_client = 20; + * bool disable_functional_ops_lowering = 21; */ - public boolean getFetchRemoteDevicesInMultiClient() { - return fetchRemoteDevicesInMultiClient_; + public boolean getDisableFunctionalOpsLowering() { + return disableFunctionalOpsLowering_; } private byte memoizedIsInitialized = -1; @@ -1428,8 +1426,8 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (!getCoordinationServiceBytes().isEmpty()) { com.google.protobuf.GeneratedMessageV3.writeString(output, 19, coordinationService_); } - if (fetchRemoteDevicesInMultiClient_ != false) { - output.writeBool(20, fetchRemoteDevicesInMultiClient_); + if (disableFunctionalOpsLowering_ != false) { + output.writeBool(21, disableFunctionalOpsLowering_); } unknownFields.writeTo(output); } @@ -1509,9 +1507,9 @@ public int getSerializedSize() { if (!getCoordinationServiceBytes().isEmpty()) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(19, coordinationService_); } - if (fetchRemoteDevicesInMultiClient_ != false) { + if (disableFunctionalOpsLowering_ != false) { size += com.google.protobuf.CodedOutputStream - .computeBoolSize(20, fetchRemoteDevicesInMultiClient_); + .computeBoolSize(21, disableFunctionalOpsLowering_); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -1566,8 +1564,8 @@ public boolean equals(final java.lang.Object obj) { != other.getUseTfrt()) return false; if (!getCoordinationService() .equals(other.getCoordinationService())) return false; - if (getFetchRemoteDevicesInMultiClient() - != other.getFetchRemoteDevicesInMultiClient()) return false; + if (getDisableFunctionalOpsLowering() + != other.getDisableFunctionalOpsLowering()) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -1629,9 +1627,9 @@ public int hashCode() { getUseTfrt()); hash = (37 * hash) + COORDINATION_SERVICE_FIELD_NUMBER; hash = (53 * hash) + getCoordinationService().hashCode(); - hash = (37 * hash) + FETCH_REMOTE_DEVICES_IN_MULTI_CLIENT_FIELD_NUMBER; + hash = (37 * hash) + DISABLE_FUNCTIONAL_OPS_LOWERING_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( - getFetchRemoteDevicesInMultiClient()); + getDisableFunctionalOpsLowering()); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -1811,7 +1809,7 @@ public Builder clear() { coordinationService_ = ""; - fetchRemoteDevicesInMultiClient_ = false; + disableFunctionalOpsLowering_ = false; return this; } @@ -1861,7 +1859,7 @@ public org.tensorflow.proto.framework.ConfigProto.Experimental buildPartial() { result.xlaFusionAutotunerThresh_ = xlaFusionAutotunerThresh_; result.useTfrt_ = useTfrt_; result.coordinationService_ = coordinationService_; - result.fetchRemoteDevicesInMultiClient_ = fetchRemoteDevicesInMultiClient_; + result.disableFunctionalOpsLowering_ = disableFunctionalOpsLowering_; onBuilt(); return result; } @@ -1967,8 +1965,8 @@ public Builder mergeFrom(org.tensorflow.proto.framework.ConfigProto.Experimental coordinationService_ = other.coordinationService_; onChanged(); } - if (other.getFetchRemoteDevicesInMultiClient() != false) { - setFetchRemoteDevicesInMultiClient(other.getFetchRemoteDevicesInMultiClient()); + if (other.getDisableFunctionalOpsLowering() != false) { + setDisableFunctionalOpsLowering(other.getDisableFunctionalOpsLowering()); } this.mergeUnknownFields(other.unknownFields); onChanged(); @@ -3175,49 +3173,46 @@ public Builder setCoordinationServiceBytes( return this; } - private 
boolean fetchRemoteDevicesInMultiClient_ ; + private boolean disableFunctionalOpsLowering_ ; /** *
    -       * Whether the remote devices in the cluster should be fetched during setup
    -       * of multi-client cluster. If enabled, the workers will run an extra device
    -       * information exchange step during startup and the workers' EagerContexts
    -       * will become aware of remote devices in the cluster as well.
    +       * Whether functional control flow op lowering should be disabled. This is
    +       * useful when executing within a portable runtime where control flow op
    +       * kernels may not be loaded due to selective registration.
            * 
    * - * bool fetch_remote_devices_in_multi_client = 20; + * bool disable_functional_ops_lowering = 21; */ - public boolean getFetchRemoteDevicesInMultiClient() { - return fetchRemoteDevicesInMultiClient_; + public boolean getDisableFunctionalOpsLowering() { + return disableFunctionalOpsLowering_; } /** *
    -       * Whether the remote devices in the cluster should be fetched during setup
    -       * of multi-client cluster. If enabled, the workers will run an extra device
    -       * information exchange step during startup and the workers' EagerContexts
    -       * will become aware of remote devices in the cluster as well.
    +       * Whether functional control flow op lowering should be disabled. This is
    +       * useful when executing within a portable runtime where control flow op
    +       * kernels may not be loaded due to selective registration.
            * 
    * - * bool fetch_remote_devices_in_multi_client = 20; + * bool disable_functional_ops_lowering = 21; */ - public Builder setFetchRemoteDevicesInMultiClient(boolean value) { + public Builder setDisableFunctionalOpsLowering(boolean value) { - fetchRemoteDevicesInMultiClient_ = value; + disableFunctionalOpsLowering_ = value; onChanged(); return this; } /** *
    -       * Whether the remote devices in the cluster should be fetched during setup
    -       * of multi-client cluster. If enabled, the workers will run an extra device
    -       * information exchange step during startup and the workers' EagerContexts
    -       * will become aware of remote devices in the cluster as well.
    +       * Whether functional control flow op lowering should be disabled. This is
    +       * useful when executing within a portable runtime where control flow op
    +       * kernels may not be loaded due to selective registration.
            * 
    * - * bool fetch_remote_devices_in_multi_client = 20; + * bool disable_functional_ops_lowering = 21; */ - public Builder clearFetchRemoteDevicesInMultiClient() { + public Builder clearDisableFunctionalOpsLowering() { - fetchRemoteDevicesInMultiClient_ = false; + disableFunctionalOpsLowering_ = false; onChanged(); return this; } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ConfigProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ConfigProtos.java index c0bcb9ba644..9ddfa2e80ff 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ConfigProtos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ConfigProtos.java @@ -150,119 +150,120 @@ public static void registerAllExtensions( "\'\n\037internal_fragmentation_fraction\030\n \001(\001" + "\022\035\n\025use_cuda_malloc_async\030\013 \001(\010\032;\n\016Virtu" + "alDevices\022\027\n\017memory_limit_mb\030\001 \003(\002\022\020\n\010pr" + - "iority\030\002 \003(\005\"\205\003\n\020OptimizerOptions\022+\n#do_" + + "iority\030\002 \003(\005\"\235\003\n\020OptimizerOptions\022+\n#do_" + "common_subexpression_elimination\030\001 \001(\010\022\033" + "\n\023do_constant_folding\030\002 \001(\010\022$\n\034max_folde" + "d_constant_in_bytes\030\006 \001(\003\022\034\n\024do_function" + "_inlining\030\004 \001(\010\0225\n\topt_level\030\003 \001(\0162\".ten" + "sorflow.OptimizerOptions.Level\022E\n\020global" + "_jit_level\030\005 \001(\0162+.tensorflow.OptimizerO" + - "ptions.GlobalJitLevel\" \n\005Level\022\006\n\002L1\020\000\022\017" + - "\n\002L0\020\377\377\377\377\377\377\377\377\377\001\"C\n\016GlobalJitLevel\022\013\n\007DEF" + - "AULT\020\000\022\020\n\003OFF\020\377\377\377\377\377\377\377\377\377\001\022\010\n\004ON_1\020\001\022\010\n\004ON" + - "_2\020\002\"\356\002\n\014GraphOptions\022\036\n\026enable_recv_sch" + - "eduling\030\002 \001(\010\0227\n\021optimizer_options\030\003 \001(\013" + - "2\034.tensorflow.OptimizerOptions\022\030\n\020build_" + - "cost_model\030\004 \001(\003\022\036\n\026build_cost_model_aft" + - "er\030\t \001(\003\022\024\n\014infer_shapes\030\005 \001(\010\022\032\n\022place_" + - "pruned_graph\030\006 \001(\010\022 \n\030enable_bfloat16_se" + - "ndrecv\030\007 \001(\010\022\025\n\rtimeline_step\030\010 \001(\005\0223\n\017r" + - "ewrite_options\030\n \001(\0132\032.tensorflow.Rewrit" + - "erConfigJ\004\010\001\020\002R%skip_common_subexpressio" + - "n_elimination\"A\n\025ThreadPoolOptionProto\022\023" + - "\n\013num_threads\030\001 \001(\005\022\023\n\013global_name\030\002 \001(\t" + - "\"\325\001\n\nRPCOptions\022$\n\034use_rpc_for_inprocess" + - "_master\030\001 \001(\010\022\035\n\025compression_algorithm\030\002" + - " \001(\t\022\031\n\021compression_level\030\003 \001(\005\022\032\n\022cache" + - "_rpc_response\030\004 \001(\010\022*\n\"disable_session_c" + - "onnection_sharing\030\005 \001(\010\022\037\n\027num_channels_" + - "per_target\030\006 \001(\005\"0\n\017SessionMetadata\022\014\n\004n" + - "ame\030\001 \001(\t\022\017\n\007version\030\002 \001(\003\"\330\r\n\013ConfigPro" + - "to\022>\n\014device_count\030\001 \003(\0132(.tensorflow.Co" + - "nfigProto.DeviceCountEntry\022$\n\034intra_op_p" + - "arallelism_threads\030\002 \001(\005\022$\n\034inter_op_par" + - "allelism_threads\030\005 \001(\005\022\037\n\027use_per_sessio" + - "n_threads\030\t \001(\010\022G\n\034session_inter_op_thre" + - "ad_pool\030\014 
\003(\0132!.tensorflow.ThreadPoolOpt" + - "ionProto\022\030\n\020placement_period\030\003 \001(\005\022\026\n\016de" + - "vice_filters\030\004 \003(\t\022+\n\013gpu_options\030\006 \001(\0132" + - "\026.tensorflow.GPUOptions\022\034\n\024allow_soft_pl" + - "acement\030\007 \001(\010\022\034\n\024log_device_placement\030\010 " + - "\001(\010\022/\n\rgraph_options\030\n \001(\0132\030.tensorflow." + - "GraphOptions\022\037\n\027operation_timeout_in_ms\030" + - "\013 \001(\003\022+\n\013rpc_options\030\r \001(\0132\026.tensorflow." + - "RPCOptions\022+\n\013cluster_def\030\016 \001(\0132\026.tensor" + - "flow.ClusterDef\022\035\n\025isolate_session_state" + - "\030\017 \001(\010\022(\n share_cluster_devices_in_sessi" + - "on\030\021 \001(\010\022:\n\014experimental\030\020 \001(\0132$.tensorf" + - "low.ConfigProto.Experimental\0322\n\020DeviceCo" + - "untEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\005:\0028\001" + - "\032\322\007\n\014Experimental\022\037\n\027collective_group_le" + - "ader\030\001 \001(\t\022\025\n\rexecutor_type\030\003 \001(\t\022\032\n\022rec" + - "v_buf_max_chunk\030\004 \001(\005\022\031\n\021use_numa_affini" + - "ty\030\005 \001(\010\0225\n-collective_deterministic_seq" + - "uential_execution\030\006 \001(\010\022\027\n\017collective_nc" + - "cl\030\007 \001(\010\0226\n.share_session_state_in_clust" + - "erspec_propagation\030\010 \001(\010\022\037\n\027disable_thre" + - "ad_spinning\030\t \001(\010\022(\n share_cluster_devic" + - "es_in_session\030\n \001(\010\0225\n\020session_metadata\030" + - "\013 \001(\0132\033.tensorflow.SessionMetadata\022!\n\031op" + - "timize_for_static_graph\030\014 \001(\010\022\032\n\022enable_" + - "mlir_bridge\030\r \001(\010\022S\n\023mlir_bridge_rollout" + - "\030\021 \001(\01626.tensorflow.ConfigProto.Experime" + - "ntal.MlirBridgeRollout\022&\n\036enable_mlir_gr" + - "aph_optimization\030\020 \001(\010\022\'\n\037disable_output" + - "_partition_graphs\030\016 \001(\010\022#\n\033xla_fusion_au" + - "totuner_thresh\030\017 \001(\003\022\020\n\010use_tfrt\030\022 \001(\010\022\034" + - "\n\024coordination_service\030\023 \001(\t\022,\n$fetch_re" + - "mote_devices_in_multi_client\030\024 \001(\010\"\332\001\n\021M" + - "lirBridgeRollout\022#\n\037MLIR_BRIDGE_ROLLOUT_" + - "UNSPECIFIED\020\000\022\037\n\033MLIR_BRIDGE_ROLLOUT_ENA" + - "BLED\020\001\022 \n\034MLIR_BRIDGE_ROLLOUT_DISABLED\020\002" + - "\022)\n%MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLE" + - "D\020\003\0222\n.MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FAL" + - "LBACK_ENABLED\020\004J\004\010\002\020\003\"\341\004\n\nRunOptions\0226\n\013" + - "trace_level\030\001 \001(\0162!.tensorflow.RunOption" + - "s.TraceLevel\022\025\n\rtimeout_in_ms\030\002 \001(\003\022\034\n\024i" + - "nter_op_thread_pool\030\003 \001(\005\022\037\n\027output_part" + - "ition_graphs\030\005 \001(\010\022/\n\rdebug_options\030\006 \001(" + - "\0132\030.tensorflow.DebugOptions\022*\n\"report_te" + - "nsor_allocations_upon_oom\030\007 \001(\010\0229\n\014exper" + - "imental\030\010 \001(\0132#.tensorflow.RunOptions.Ex" + - "perimental\032\322\001\n\014Experimental\022\034\n\024collectiv" + - "e_graph_key\030\001 \001(\003\022\034\n\024use_run_handler_poo" + - "l\030\002 \001(\010\022[\n\030run_handler_pool_options\030\003 \001(" + - "\01329.tensorflow.RunOptions.Experimental.R" + - "unHandlerPoolOptions\032)\n\025RunHandlerPoolOp" + - "tions\022\020\n\010priority\030\001 \001(\003\"R\n\nTraceLevel\022\014\n" + - "\010NO_TRACE\020\000\022\022\n\016SOFTWARE_TRACE\020\001\022\022\n\016HARDW" + - 
"ARE_TRACE\020\002\022\016\n\nFULL_TRACE\020\003J\004\010\004\020\005\"\207\003\n\013Ru" + - "nMetadata\022)\n\nstep_stats\030\001 \001(\0132\025.tensorfl" + - "ow.StepStats\022,\n\ncost_graph\030\002 \001(\0132\030.tenso" + - "rflow.CostGraphDef\022.\n\020partition_graphs\030\003" + - " \003(\0132\024.tensorflow.GraphDef\022?\n\017function_g" + - "raphs\030\004 \003(\0132&.tensorflow.RunMetadata.Fun" + - "ctionGraphs\032\255\001\n\016FunctionGraphs\022.\n\020partit" + - "ion_graphs\030\001 \003(\0132\024.tensorflow.GraphDef\0224" + - "\n\026pre_optimization_graph\030\002 \001(\0132\024.tensorf" + - "low.GraphDef\0225\n\027post_optimization_graph\030" + - "\003 \001(\0132\024.tensorflow.GraphDef\":\n\020TensorCon" + - "nection\022\023\n\013from_tensor\030\001 \001(\t\022\021\n\tto_tenso" + - "r\030\002 \001(\t\"\260\003\n\017CallableOptions\022\014\n\004feed\030\001 \003(" + - "\t\022\r\n\005fetch\030\002 \003(\t\022\016\n\006target\030\003 \003(\t\022+\n\013run_" + - "options\030\004 \001(\0132\026.tensorflow.RunOptions\0227\n" + - "\021tensor_connection\030\005 \003(\0132\034.tensorflow.Te" + - "nsorConnection\022B\n\014feed_devices\030\006 \003(\0132,.t" + - "ensorflow.CallableOptions.FeedDevicesEnt" + - "ry\022D\n\rfetch_devices\030\007 \003(\0132-.tensorflow.C" + - "allableOptions.FetchDevicesEntry\022\027\n\017fetc" + - "h_skip_sync\030\010 \001(\010\0322\n\020FeedDevicesEntry\022\013\n" + - "\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\0323\n\021FetchDe" + - "vicesEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\002" + - "8\001B\212\001\n\036org.tensorflow.proto.frameworkB\014C" + - "onfigProtosP\001ZUgithub.com/tensorflow/ten" + - "sorflow/tensorflow/go/core/protobuf/for_" + - "core_protos_go_proto\370\001\001b\006proto3" + "ptions.GlobalJitLevel\022\026\n\016cpu_global_jit\030" + + "\007 \001(\010\" \n\005Level\022\006\n\002L1\020\000\022\017\n\002L0\020\377\377\377\377\377\377\377\377\377\001\"" + + "C\n\016GlobalJitLevel\022\013\n\007DEFAULT\020\000\022\020\n\003OFF\020\377\377" + + "\377\377\377\377\377\377\377\001\022\010\n\004ON_1\020\001\022\010\n\004ON_2\020\002\"\356\002\n\014GraphOp" + + "tions\022\036\n\026enable_recv_scheduling\030\002 \001(\010\0227\n" + + "\021optimizer_options\030\003 \001(\0132\034.tensorflow.Op" + + "timizerOptions\022\030\n\020build_cost_model\030\004 \001(\003" + + "\022\036\n\026build_cost_model_after\030\t \001(\003\022\024\n\014infe" + + "r_shapes\030\005 \001(\010\022\032\n\022place_pruned_graph\030\006 \001" + + "(\010\022 \n\030enable_bfloat16_sendrecv\030\007 \001(\010\022\025\n\r" + + "timeline_step\030\010 \001(\005\0223\n\017rewrite_options\030\n" + + " \001(\0132\032.tensorflow.RewriterConfigJ\004\010\001\020\002R%" + + "skip_common_subexpression_elimination\"A\n" + + "\025ThreadPoolOptionProto\022\023\n\013num_threads\030\001 " + + "\001(\005\022\023\n\013global_name\030\002 \001(\t\"\325\001\n\nRPCOptions\022" + + "$\n\034use_rpc_for_inprocess_master\030\001 \001(\010\022\035\n" + + "\025compression_algorithm\030\002 \001(\t\022\031\n\021compress" + + "ion_level\030\003 \001(\005\022\032\n\022cache_rpc_response\030\004 " + + "\001(\010\022*\n\"disable_session_connection_sharin" + + "g\030\005 \001(\010\022\037\n\027num_channels_per_target\030\006 \001(\005" + + "\"0\n\017SessionMetadata\022\014\n\004name\030\001 \001(\t\022\017\n\007ver" + + "sion\030\002 \001(\003\"\331\r\n\013ConfigProto\022>\n\014device_cou" + + "nt\030\001 \003(\0132(.tensorflow.ConfigProto.Device" + + 
"CountEntry\022$\n\034intra_op_parallelism_threa" + + "ds\030\002 \001(\005\022$\n\034inter_op_parallelism_threads" + + "\030\005 \001(\005\022\037\n\027use_per_session_threads\030\t \001(\010\022" + + "G\n\034session_inter_op_thread_pool\030\014 \003(\0132!." + + "tensorflow.ThreadPoolOptionProto\022\030\n\020plac" + + "ement_period\030\003 \001(\005\022\026\n\016device_filters\030\004 \003" + + "(\t\022+\n\013gpu_options\030\006 \001(\0132\026.tensorflow.GPU" + + "Options\022\034\n\024allow_soft_placement\030\007 \001(\010\022\034\n" + + "\024log_device_placement\030\010 \001(\010\022/\n\rgraph_opt" + + "ions\030\n \001(\0132\030.tensorflow.GraphOptions\022\037\n\027" + + "operation_timeout_in_ms\030\013 \001(\003\022+\n\013rpc_opt" + + "ions\030\r \001(\0132\026.tensorflow.RPCOptions\022+\n\013cl" + + "uster_def\030\016 \001(\0132\026.tensorflow.ClusterDef\022" + + "\035\n\025isolate_session_state\030\017 \001(\010\022(\n share_" + + "cluster_devices_in_session\030\021 \001(\010\022:\n\014expe" + + "rimental\030\020 \001(\0132$.tensorflow.ConfigProto." + + "Experimental\0322\n\020DeviceCountEntry\022\013\n\003key\030" + + "\001 \001(\t\022\r\n\005value\030\002 \001(\005:\0028\001\032\323\007\n\014Experimenta" + + "l\022\037\n\027collective_group_leader\030\001 \001(\t\022\025\n\rex" + + "ecutor_type\030\003 \001(\t\022\032\n\022recv_buf_max_chunk\030" + + "\004 \001(\005\022\031\n\021use_numa_affinity\030\005 \001(\010\0225\n-coll" + + "ective_deterministic_sequential_executio" + + "n\030\006 \001(\010\022\027\n\017collective_nccl\030\007 \001(\010\0226\n.shar" + + "e_session_state_in_clusterspec_propagati" + + "on\030\010 \001(\010\022\037\n\027disable_thread_spinning\030\t \001(" + + "\010\022(\n share_cluster_devices_in_session\030\n " + + "\001(\010\0225\n\020session_metadata\030\013 \001(\0132\033.tensorfl" + + "ow.SessionMetadata\022!\n\031optimize_for_stati" + + "c_graph\030\014 \001(\010\022\032\n\022enable_mlir_bridge\030\r \001(" + + "\010\022S\n\023mlir_bridge_rollout\030\021 \001(\01626.tensorf" + + "low.ConfigProto.Experimental.MlirBridgeR" + + "ollout\022&\n\036enable_mlir_graph_optimization" + + "\030\020 \001(\010\022\'\n\037disable_output_partition_graph" + + "s\030\016 \001(\010\022#\n\033xla_fusion_autotuner_thresh\030\017" + + " \001(\003\022\020\n\010use_tfrt\030\022 \001(\010\022\034\n\024coordination_s" + + "ervice\030\023 \001(\t\022\'\n\037disable_functional_ops_l" + + "owering\030\025 \001(\010\"\332\001\n\021MlirBridgeRollout\022#\n\037M" + + "LIR_BRIDGE_ROLLOUT_UNSPECIFIED\020\000\022\037\n\033MLIR" + + "_BRIDGE_ROLLOUT_ENABLED\020\001\022 \n\034MLIR_BRIDGE" + + "_ROLLOUT_DISABLED\020\002\022)\n%MLIR_BRIDGE_ROLLO" + + "UT_SAFE_MODE_ENABLED\020\003\0222\n.MLIR_BRIDGE_RO" + + "LLOUT_SAFE_MODE_FALLBACK_ENABLED\020\004J\004\010\002\020\003" + + "J\004\010\024\020\025\"\341\004\n\nRunOptions\0226\n\013trace_level\030\001 \001" + + "(\0162!.tensorflow.RunOptions.TraceLevel\022\025\n" + + "\rtimeout_in_ms\030\002 \001(\003\022\034\n\024inter_op_thread_" + + "pool\030\003 \001(\005\022\037\n\027output_partition_graphs\030\005 " + + "\001(\010\022/\n\rdebug_options\030\006 \001(\0132\030.tensorflow." 
+ + "DebugOptions\022*\n\"report_tensor_allocation" + + "s_upon_oom\030\007 \001(\010\0229\n\014experimental\030\010 \001(\0132#" + + ".tensorflow.RunOptions.Experimental\032\322\001\n\014" + + "Experimental\022\034\n\024collective_graph_key\030\001 \001" + + "(\003\022\034\n\024use_run_handler_pool\030\002 \001(\010\022[\n\030run_" + + "handler_pool_options\030\003 \001(\01329.tensorflow." + + "RunOptions.Experimental.RunHandlerPoolOp" + + "tions\032)\n\025RunHandlerPoolOptions\022\020\n\010priori" + + "ty\030\001 \001(\003\"R\n\nTraceLevel\022\014\n\010NO_TRACE\020\000\022\022\n\016" + + "SOFTWARE_TRACE\020\001\022\022\n\016HARDWARE_TRACE\020\002\022\016\n\n" + + "FULL_TRACE\020\003J\004\010\004\020\005\"\207\003\n\013RunMetadata\022)\n\nst" + + "ep_stats\030\001 \001(\0132\025.tensorflow.StepStats\022,\n" + + "\ncost_graph\030\002 \001(\0132\030.tensorflow.CostGraph" + + "Def\022.\n\020partition_graphs\030\003 \003(\0132\024.tensorfl" + + "ow.GraphDef\022?\n\017function_graphs\030\004 \003(\0132&.t" + + "ensorflow.RunMetadata.FunctionGraphs\032\255\001\n" + + "\016FunctionGraphs\022.\n\020partition_graphs\030\001 \003(" + + "\0132\024.tensorflow.GraphDef\0224\n\026pre_optimizat" + + "ion_graph\030\002 \001(\0132\024.tensorflow.GraphDef\0225\n" + + "\027post_optimization_graph\030\003 \001(\0132\024.tensorf" + + "low.GraphDef\":\n\020TensorConnection\022\023\n\013from" + + "_tensor\030\001 \001(\t\022\021\n\tto_tensor\030\002 \001(\t\"\260\003\n\017Cal" + + "lableOptions\022\014\n\004feed\030\001 \003(\t\022\r\n\005fetch\030\002 \003(" + + "\t\022\016\n\006target\030\003 \003(\t\022+\n\013run_options\030\004 \001(\0132\026" + + ".tensorflow.RunOptions\0227\n\021tensor_connect" + + "ion\030\005 \003(\0132\034.tensorflow.TensorConnection\022" + + "B\n\014feed_devices\030\006 \003(\0132,.tensorflow.Calla" + + "bleOptions.FeedDevicesEntry\022D\n\rfetch_dev" + + "ices\030\007 \003(\0132-.tensorflow.CallableOptions." 
+ + "FetchDevicesEntry\022\027\n\017fetch_skip_sync\030\010 \001" + + "(\010\0322\n\020FeedDevicesEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005v" + + "alue\030\002 \001(\t:\0028\001\0323\n\021FetchDevicesEntry\022\013\n\003k" + + "ey\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001B\212\001\n\036org.tens" + + "orflow.proto.frameworkB\014ConfigProtosP\001ZU" + + "github.com/tensorflow/tensorflow/tensorf" + + "low/go/core/protobuf/for_core_protos_go_" + + "proto\370\001\001b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, @@ -297,7 +298,7 @@ public static void registerAllExtensions( internal_static_tensorflow_OptimizerOptions_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_OptimizerOptions_descriptor, - new java.lang.String[] { "DoCommonSubexpressionElimination", "DoConstantFolding", "MaxFoldedConstantInBytes", "DoFunctionInlining", "OptLevel", "GlobalJitLevel", }); + new java.lang.String[] { "DoCommonSubexpressionElimination", "DoConstantFolding", "MaxFoldedConstantInBytes", "DoFunctionInlining", "OptLevel", "GlobalJitLevel", "CpuGlobalJit", }); internal_static_tensorflow_GraphOptions_descriptor = getDescriptor().getMessageTypes().get(2); internal_static_tensorflow_GraphOptions_fieldAccessorTable = new @@ -339,7 +340,7 @@ public static void registerAllExtensions( internal_static_tensorflow_ConfigProto_Experimental_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_ConfigProto_Experimental_descriptor, - new java.lang.String[] { "CollectiveGroupLeader", "ExecutorType", "RecvBufMaxChunk", "UseNumaAffinity", "CollectiveDeterministicSequentialExecution", "CollectiveNccl", "ShareSessionStateInClusterspecPropagation", "DisableThreadSpinning", "ShareClusterDevicesInSession", "SessionMetadata", "OptimizeForStaticGraph", "EnableMlirBridge", "MlirBridgeRollout", "EnableMlirGraphOptimization", "DisableOutputPartitionGraphs", "XlaFusionAutotunerThresh", "UseTfrt", "CoordinationService", "FetchRemoteDevicesInMultiClient", }); + new java.lang.String[] { "CollectiveGroupLeader", "ExecutorType", "RecvBufMaxChunk", "UseNumaAffinity", "CollectiveDeterministicSequentialExecution", "CollectiveNccl", "ShareSessionStateInClusterspecPropagation", "DisableThreadSpinning", "ShareClusterDevicesInSession", "SessionMetadata", "OptimizeForStaticGraph", "EnableMlirBridge", "MlirBridgeRollout", "EnableMlirGraphOptimization", "DisableOutputPartitionGraphs", "XlaFusionAutotunerThresh", "UseTfrt", "CoordinationService", "DisableFunctionalOpsLowering", }); internal_static_tensorflow_RunOptions_descriptor = getDescriptor().getMessageTypes().get(7); internal_static_tensorflow_RunOptions_fieldAccessorTable = new diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/DeviceAttributes.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/DeviceAttributes.java index 3da166dfbfd..88a956395be 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/DeviceAttributes.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/DeviceAttributes.java @@ -92,6 +92,11 @@ private DeviceAttributes( physicalDeviceDesc_ = s; break; } + case 64: { + + xlaGlobalId_ = input.readInt64(); + break; + } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { @@ -313,6 +318,21 @@ 
public java.lang.String getPhysicalDeviceDesc() { } } + public static final int XLA_GLOBAL_ID_FIELD_NUMBER = 8; + private long xlaGlobalId_; + /** + *
    +   * A physical device ID for use in XLA DeviceAssignments, unique across
    +   * clients in a multi-client setup. Set to -1 if unavailable, non-negative
    +   * otherwise.
    +   * 
    + * + * int64 xla_global_id = 8; + */ + public long getXlaGlobalId() { + return xlaGlobalId_; + } + private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -345,6 +365,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (!getPhysicalDeviceDescBytes().isEmpty()) { com.google.protobuf.GeneratedMessageV3.writeString(output, 7, physicalDeviceDesc_); } + if (xlaGlobalId_ != 0L) { + output.writeInt64(8, xlaGlobalId_); + } unknownFields.writeTo(output); } @@ -375,6 +398,10 @@ public int getSerializedSize() { if (!getPhysicalDeviceDescBytes().isEmpty()) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(7, physicalDeviceDesc_); } + if (xlaGlobalId_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(8, xlaGlobalId_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -405,6 +432,8 @@ public boolean equals(final java.lang.Object obj) { != other.getIncarnation()) return false; if (!getPhysicalDeviceDesc() .equals(other.getPhysicalDeviceDesc())) return false; + if (getXlaGlobalId() + != other.getXlaGlobalId()) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -432,6 +461,9 @@ public int hashCode() { getIncarnation()); hash = (37 * hash) + PHYSICAL_DEVICE_DESC_FIELD_NUMBER; hash = (53 * hash) + getPhysicalDeviceDesc().hashCode(); + hash = (37 * hash) + XLA_GLOBAL_ID_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getXlaGlobalId()); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -581,6 +613,8 @@ public Builder clear() { physicalDeviceDesc_ = ""; + xlaGlobalId_ = 0L; + return this; } @@ -617,6 +651,7 @@ public org.tensorflow.proto.framework.DeviceAttributes buildPartial() { } result.incarnation_ = incarnation_; result.physicalDeviceDesc_ = physicalDeviceDesc_; + result.xlaGlobalId_ = xlaGlobalId_; onBuilt(); return result; } @@ -686,6 +721,9 @@ public Builder mergeFrom(org.tensorflow.proto.framework.DeviceAttributes other) physicalDeviceDesc_ = other.physicalDeviceDesc_; onChanged(); } + if (other.getXlaGlobalId() != 0L) { + setXlaGlobalId(other.getXlaGlobalId()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -1222,6 +1260,50 @@ public Builder setPhysicalDeviceDescBytes( onChanged(); return this; } + + private long xlaGlobalId_ ; + /** + *
    +     * A physical device ID for use in XLA DeviceAssignments, unique across
    +     * clients in a multi-client setup. Set to -1 if unavailable, non-negative
    +     * otherwise.
    +     * 
    + * + * int64 xla_global_id = 8; + */ + public long getXlaGlobalId() { + return xlaGlobalId_; + } + /** + *
    +     * A physical device ID for use in XLA DeviceAssignments, unique across
    +     * clients in a multi-client setup. Set to -1 if unavailable, non-negative
    +     * otherwise.
    +     * 
    + * + * int64 xla_global_id = 8; + */ + public Builder setXlaGlobalId(long value) { + + xlaGlobalId_ = value; + onChanged(); + return this; + } + /** + *
    +     * A physical device ID for use in XLA DeviceAssignments, unique across
    +     * clients in a multi-client setup. Set to -1 if unavailable, non-negative
    +     * otherwise.
    +     * 
    + * + * int64 xla_global_id = 8; + */ + public Builder clearXlaGlobalId() { + + xlaGlobalId_ = 0L; + onChanged(); + return this; + } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/DeviceAttributesOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/DeviceAttributesOrBuilder.java index bb8c806b325..565e6c205ad 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/DeviceAttributesOrBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/DeviceAttributesOrBuilder.java @@ -107,4 +107,15 @@ public interface DeviceAttributesOrBuilder extends */ com.google.protobuf.ByteString getPhysicalDeviceDescBytes(); + + /** + *
    +   * A physical device ID for use in XLA DeviceAssignments, unique across
    +   * clients in a multi-client setup. Set to -1 if unavailable, non-negative
    +   * otherwise.
    +   * 
    + * + * int64 xla_global_id = 8; + */ + long getXlaGlobalId(); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/DeviceAttributesProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/DeviceAttributesProtos.java index f9e2dac8a67..a9f2039c3a7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/DeviceAttributesProtos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/DeviceAttributesProtos.java @@ -50,15 +50,16 @@ public static void registerAllExtensions( "(\0132\034.tensorflow.InterconnectLink\"Z\n\016Devi" + "ceLocality\022\016\n\006bus_id\030\001 \001(\005\022\021\n\tnuma_node\030" + "\002 \001(\005\022%\n\005links\030\003 \001(\0132\026.tensorflow.LocalL" + - "inks\"\254\001\n\020DeviceAttributes\022\014\n\004name\030\001 \001(\t\022" + + "inks\"\303\001\n\020DeviceAttributes\022\014\n\004name\030\001 \001(\t\022" + "\023\n\013device_type\030\002 \001(\t\022\024\n\014memory_limit\030\004 \001" + "(\003\022,\n\010locality\030\005 \001(\0132\032.tensorflow.Device" + "Locality\022\023\n\013incarnation\030\006 \001(\006\022\034\n\024physica" + - "l_device_desc\030\007 \001(\tB\227\001\n\036org.tensorflow.p" + - "roto.frameworkB\026DeviceAttributesProtosP\001" + - "ZXgithub.com/tensorflow/tensorflow/tenso" + - "rflow/go/core/framework/device_attribute" + - "s_go_proto\370\001\001b\006proto3" + "l_device_desc\030\007 \001(\t\022\025\n\rxla_global_id\030\010 \001" + + "(\003B\227\001\n\036org.tensorflow.proto.frameworkB\026D" + + "eviceAttributesProtosP\001ZXgithub.com/tens" + + "orflow/tensorflow/tensorflow/go/core/fra" + + "mework/device_attributes_go_proto\370\001\001b\006pr" + + "oto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, @@ -87,7 +88,7 @@ public static void registerAllExtensions( internal_static_tensorflow_DeviceAttributes_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_DeviceAttributes_descriptor, - new java.lang.String[] { "Name", "DeviceType", "MemoryLimit", "Locality", "Incarnation", "PhysicalDeviceDesc", }); + new java.lang.String[] { "Name", "DeviceType", "MemoryLimit", "Locality", "Incarnation", "PhysicalDeviceDesc", "XlaGlobalId", }); } // @@protoc_insertion_point(outer_class_scope) diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FullTypeDef.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FullTypeDef.java index f075850d17f..9524d94aa07 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FullTypeDef.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FullTypeDef.java @@ -79,6 +79,11 @@ private FullTypeDef( attr_ = s; break; } + case 32: { + attrCase_ = 4; + attr_ = input.readInt64(); + break; + } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { @@ -119,6 +124,7 @@ private FullTypeDef( public enum AttrCase implements com.google.protobuf.Internal.EnumLite { S(3), + I(4), ATTR_NOT_SET(0); private final int value; private AttrCase(int value) { @@ -135,6 +141,7 @@ public static AttrCase valueOf(int value) { public static AttrCase forNumber(int value) { switch (value) { case 3: return S; + case 4: return I; case 0: return ATTR_NOT_SET; default: return null; } @@ -257,6 +264,21 @@ public 
java.lang.String getS() { } } + public static final int I_FIELD_NUMBER = 4; + /** + *
    +   * TODO(mdan): list/tensor, map? Need to reconcile with TFT_RECORD, etc.
    +   * 
    + * + * int64 i = 4; + */ + public long getI() { + if (attrCase_ == 4) { + return (java.lang.Long) attr_; + } + return 0L; + } + private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -280,6 +302,10 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (attrCase_ == 3) { com.google.protobuf.GeneratedMessageV3.writeString(output, 3, attr_); } + if (attrCase_ == 4) { + output.writeInt64( + 4, (long)((java.lang.Long) attr_)); + } unknownFields.writeTo(output); } @@ -300,6 +326,11 @@ public int getSerializedSize() { if (attrCase_ == 3) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, attr_); } + if (attrCase_ == 4) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size( + 4, (long)((java.lang.Long) attr_)); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -324,6 +355,10 @@ public boolean equals(final java.lang.Object obj) { if (!getS() .equals(other.getS())) return false; break; + case 4: + if (getI() + != other.getI()) return false; + break; case 0: default: } @@ -349,6 +384,11 @@ public int hashCode() { hash = (37 * hash) + S_FIELD_NUMBER; hash = (53 * hash) + getS().hashCode(); break; + case 4: + hash = (37 * hash) + I_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getI()); + break; case 0: default: } @@ -543,6 +583,9 @@ public org.tensorflow.proto.framework.FullTypeDef buildPartial() { if (attrCase_ == 3) { result.attr_ = attr_; } + if (attrCase_ == 4) { + result.attr_ = attr_; + } result.attrCase_ = attrCase_; onBuilt(); return result; @@ -628,6 +671,10 @@ public Builder mergeFrom(org.tensorflow.proto.framework.FullTypeDef other) { onChanged(); break; } + case I: { + setI(other.getI()); + break; + } case ATTR_NOT_SET: { break; } @@ -1071,6 +1118,48 @@ public Builder setSBytes( onChanged(); return this; } + + /** + *
    +     * TODO(mdan): list/tensor, map? Need to reconcile with TFT_RECORD, etc.
    +     * 
    + * + * int64 i = 4; + */ + public long getI() { + if (attrCase_ == 4) { + return (java.lang.Long) attr_; + } + return 0L; + } + /** + *
    +     * TODO(mdan): list/tensor, map? Need to reconcile with TFT_RECORD, etc.
    +     * 
    + * + * int64 i = 4; + */ + public Builder setI(long value) { + attrCase_ = 4; + attr_ = value; + onChanged(); + return this; + } + /** + *
    +     * TODO(mdan): list/tensor, map? Need to reconcile with TFT_RECORD, etc.
    +     * 
    + * + * int64 i = 4; + */ + public Builder clearI() { + if (attrCase_ == 4) { + attrCase_ = 0; + attr_ = null; + onChanged(); + } + return this; + } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FullTypeDefOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FullTypeDefOrBuilder.java index 3d0e8acddc2..01719dca7ea 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FullTypeDefOrBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FullTypeDefOrBuilder.java @@ -62,5 +62,14 @@ org.tensorflow.proto.framework.FullTypeDefOrBuilder getArgsOrBuilder( com.google.protobuf.ByteString getSBytes(); + /** + *
    +   * TODO(mdan): list/tensor, map? Need to reconcile with TFT_RECORD, etc.
    +   * 
    + * + * int64 i = 4; + */ + long getI(); + public org.tensorflow.proto.framework.FullTypeDef.AttrCase getAttrCase(); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FullTypeId.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FullTypeId.java index 5dec66a39a0..dc9ed1c5a70 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FullTypeId.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FullTypeId.java @@ -60,15 +60,35 @@ public enum FullTypeId * TFT_PRODUCT = 3; */ TFT_PRODUCT(3), + /** + *
    +   * Represents a named field, with the name stored in the attribute.
    +   * Parametrization:
    +   *   TFT_NAMED[<type>]{<name>}
    +   *   * <type> is the type of the field
+   *   * <name> is the field name, as string (though can theoretically be an int
    +   *     as well)
    +   * Example:
    +   *   TFT_RECORD[
    +   *     TFT_NAMED[TFT_TENSOR[TFT_INT32]]{'foo'},
    +   *     TFT_NAMED[TFT_TENSOR[TFT_FLOAT32]]{'bar'},
    +   *   ]
    +   *     is a structure with two fields, an int tensor "foo" and a float tensor
    +   *     "bar".
    +   * 
    + * + * TFT_NAMED = 4; + */ + TFT_NAMED(4), /** *
        * Callable types describe functions and ops.
        * Parametrization:
        *   TFT_CALLABLE[<arg type>, <return type>]
    -   *   * <arg_type> is the type of the arguments; TFT_PRODUCT represents
    +   *   * <arg type> is the type of the arguments; TFT_PRODUCT represents
        *   multiple
        *     arguments.
    -   *   * <return_type> is the return type; TFT_PRODUCT represents multiple
    +   *   * <return type> is the return type; TFT_PRODUCT represents multiple
        *     return values (that means that callables returning multiple things
        *     don't necessarily return a single tuple).
        * Example:
    @@ -88,9 +108,9 @@ public enum FullTypeId
        * The usual Tensor. This is a parametric type.
        * Parametrization:
        *   TFT_TENSOR[<element type>, <shape type>]
    -   *   * <element_type> is currently limited to one of the element types
    +   *   * <element type> is currently limited to one of the element types
        *     defined below.
    -   *   * <shape_type> is not yet defined, and may only be TFT_UNKNOWN for now.
    +   *   * <shape type> is not yet defined, and may only be TFT_UNKNOWN for now.
        * A TFT_SHAPE type will be defined in the future.
        * Example:
        *   TFT_TENSOR[TFT_INT32, TFT_UNKNOWN]
    @@ -113,7 +133,7 @@ public enum FullTypeId
        * The element type may be generic or even TFT_ANY for a heterogenous list.
        * Parametrization:
        *   TFT_ARRAY[<element type>]
    -   *   * <element_type> may be any concrete type.
    +   *   * <element type> may be any concrete type.
        * Examples:
        *   TFT_ARRAY[TFT_TENSOR[TFT_INT32]] is a TensorArray holding int32 Tensors
        *     of any shape.
    @@ -134,7 +154,7 @@ public enum FullTypeId
        * specified type, or nothing at all.
        * Parametrization:
        *   TFT_OPTIONAL[<element type>]
    -   *   * <element_type> may be any concrete type.
    +   *   * <element type> may be any concrete type.
        * Examples:
        *   TFT_OPTIONAL[TFT_TENSOR[TFT_INT32]] is an Optional holding an int32
        *     Tensor of any shape.
    @@ -143,6 +163,21 @@ public enum FullTypeId
        * TFT_OPTIONAL = 1002;
        */
       TFT_OPTIONAL(1002),
    +  /**
    +   * 
    +   * Literal types describe compile-time constant values.
    +   * Literal types may also participate in dependent types.
    +   * Parametrization:
    +   *   TFT_LITERAL[<value type>]{<value>}
+   *   *  <value type> may be any concrete type that can hold <value>
    +   *   * <value> is the type's attribute, and holds the actual literal value
    +   * Examples:
    +   *   TFT_LITERAL[TFT_INT32]{1} is the compile-time constant 1.
    +   * 
    + * + * TFT_LITERAL = 1003; + */ + TFT_LITERAL(1003), /** *
        * Datasets created by tf.data ops and APIs. Datasets have generator/iterable
    @@ -153,14 +188,13 @@ public enum FullTypeId
        * A datasets can produce logical structures (e.g. multiple elements). This
        * is expressed using TFT_PRODUCT.
        * Parametrization: TFT_ARRAY[<element type>].
    -   * <element_type> may be a concrete type or a type symbol. It represents the
    -   *   data type of the elements produced by the dataset.
    +   *   * <element type> may be a concrete type or a type symbol. It represents
    +   *     the data type of the elements produced by the dataset.
        * Examples:
        *   TFT_DATSET[TFT_TENSOR[TFT_INT32]] is a Dataset producing single int32
        *     Tensors of unknown shape.
        *   TFT_DATSET[TFT_PRODUCT[TFT_TENSOR[TFT_INT32], TFT_TENSOR[TFT_FLOAT32]] is
    -   *   a
    -   *     Dataset producing pairs of Tensors, one integer and one float.
    +   *     a Dataset producing pairs of Tensors, one integer and one float.
        * Note: The high ID number is to prepare for the eventuality that Datasets
        * will be supported by user types in the future.
        * 
    @@ -168,6 +202,21 @@ public enum FullTypeId * TFT_DATASET = 10102; */ TFT_DATASET(10102), + /** + *
    +   * A mutex lock tensor, produced by tf.raw_ops.MutexLock.
    +   * Unlike strict execution models, where ownership of a lock is denoted by
    +   * "running after the lock has been acquired", in non-strict mode, lock
    +   * ownership is in the true sense: "the op argument representing the lock is
    +   * available".
    +   * Mutex locks are the dynamic counterpart of control dependencies.
    +   * TODO(mdan): Properly document this thing.
    +   * Parametrization: TFT_MUTEX_LOCK[].
    +   * 
    + * + * TFT_MUTEX_LOCK = 10202; + */ + TFT_MUTEX_LOCK(10202), /** *
        * The bool element type.
    @@ -305,15 +354,35 @@ public enum FullTypeId
        * TFT_PRODUCT = 3;
        */
       public static final int TFT_PRODUCT_VALUE = 3;
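The _VALUE constants that follow mirror the enum constants above one-to-one, so the TFT_NAMED parametrization documented earlier can already be expressed with the regenerated FullTypeDef API. A hedged sketch of TFT_NAMED[TFT_TENSOR[TFT_INT32]]{'foo'}, using only standard generated accessors; the helper name is illustrative:

import org.tensorflow.proto.framework.FullTypeDef;
import org.tensorflow.proto.framework.FullTypeId;

final class NamedFieldSketch {
  // Hypothetical helper: an int32 tensor field named "foo".
  static FullTypeDef namedInt32Tensor() {
    return FullTypeDef.newBuilder()
        .setTypeId(FullTypeId.TFT_NAMED)
        .addArgs(
            FullTypeDef.newBuilder()
                .setTypeId(FullTypeId.TFT_TENSOR)
                .addArgs(FullTypeDef.newBuilder().setTypeId(FullTypeId.TFT_INT32)))
        .setS("foo")  // the field name lives in the string attribute
        .build();
  }
}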
    +  /**
    +   * 
    +   * Represents a named field, with the name stored in the attribute.
    +   * Parametrization:
    +   *   TFT_NAMED[<type>]{<name>}
    +   *   * <type> is the type of the field
+   *   * <name> is the field name, as string (though can theoretically be an int
    +   *     as well)
    +   * Example:
    +   *   TFT_RECORD[
    +   *     TFT_NAMED[TFT_TENSOR[TFT_INT32]]{'foo'},
    +   *     TFT_NAMED[TFT_TENSOR[TFT_FLOAT32]]{'bar'},
    +   *   ]
    +   *     is a structure with two fields, an int tensor "foo" and a float tensor
    +   *     "bar".
    +   * 
    + * + * TFT_NAMED = 4; + */ + public static final int TFT_NAMED_VALUE = 4; /** *
        * Callable types describe functions and ops.
        * Parametrization:
        *   TFT_CALLABLE[<arg type>, <return type>]
    -   *   * <arg_type> is the type of the arguments; TFT_PRODUCT represents
    +   *   * <arg type> is the type of the arguments; TFT_PRODUCT represents
        *   multiple
        *     arguments.
    -   *   * <return_type> is the return type; TFT_PRODUCT represents multiple
    +   *   * <return type> is the return type; TFT_PRODUCT represents multiple
        *     return values (that means that callables returning multiple things
        *     don't necessarily return a single tuple).
        * Example:
    @@ -333,9 +402,9 @@ public enum FullTypeId
        * The usual Tensor. This is a parametric type.
        * Parametrization:
        *   TFT_TENSOR[<element type>, <shape type>]
    -   *   * <element_type> is currently limited to one of the element types
    +   *   * <element type> is currently limited to one of the element types
        *     defined below.
    -   *   * <shape_type> is not yet defined, and may only be TFT_UNKNOWN for now.
    +   *   * <shape type> is not yet defined, and may only be TFT_UNKNOWN for now.
        * A TFT_SHAPE type will be defined in the future.
        * Example:
        *   TFT_TENSOR[TFT_INT32, TFT_UNKNOWN]
    @@ -358,7 +427,7 @@ public enum FullTypeId
        * The element type may be generic or even TFT_ANY for a heterogenous list.
        * Parametrization:
        *   TFT_ARRAY[<element type>]
    -   *   * <element_type> may be any concrete type.
    +   *   * <element type> may be any concrete type.
        * Examples:
        *   TFT_ARRAY[TFT_TENSOR[TFT_INT32]] is a TensorArray holding int32 Tensors
        *     of any shape.
    @@ -379,7 +448,7 @@ public enum FullTypeId
        * specified type, or nothing at all.
        * Parametrization:
        *   TFT_OPTIONAL[<element type>]
    -   *   * <element_type> may be any concrete type.
    +   *   * <element type> may be any concrete type.
        * Examples:
        *   TFT_OPTIONAL[TFT_TENSOR[TFT_INT32]] is an Optional holding an int32
        *     Tensor of any shape.
    @@ -388,6 +457,21 @@ public enum FullTypeId
        * TFT_OPTIONAL = 1002;
        */
       public static final int TFT_OPTIONAL_VALUE = 1002;
    +  /**
    +   * 
    +   * Literal types describe compile-time constant values.
    +   * Literal types may also participate in dependent types.
    +   * Parametrization:
    +   *   TFT_LITERAL[<value type>]{<value>}
+   *   *  <value type> may be any concrete type that can hold <value>
    +   *   * <value> is the type's attribute, and holds the actual literal value
    +   * Examples:
    +   *   TFT_LITERAL[TFT_INT32]{1} is the compile-time constant 1.
    +   * 
    + * + * TFT_LITERAL = 1003; + */ + public static final int TFT_LITERAL_VALUE = 1003; /** *
        * Datasets created by tf.data ops and APIs. Datasets have generator/iterable
    @@ -398,14 +482,13 @@ public enum FullTypeId
        * A datasets can produce logical structures (e.g. multiple elements). This
        * is expressed using TFT_PRODUCT.
        * Parametrization: TFT_ARRAY[<element type>].
    -   * <element_type> may be a concrete type or a type symbol. It represents the
    -   *   data type of the elements produced by the dataset.
    +   *   * <element type> may be a concrete type or a type symbol. It represents
    +   *     the data type of the elements produced by the dataset.
        * Examples:
        *   TFT_DATSET[TFT_TENSOR[TFT_INT32]] is a Dataset producing single int32
        *     Tensors of unknown shape.
        *   TFT_DATSET[TFT_PRODUCT[TFT_TENSOR[TFT_INT32], TFT_TENSOR[TFT_FLOAT32]] is
    -   *   a
    -   *     Dataset producing pairs of Tensors, one integer and one float.
    +   *     a Dataset producing pairs of Tensors, one integer and one float.
        * Note: The high ID number is to prepare for the eventuality that Datasets
        * will be supported by user types in the future.
        * 
    @@ -413,6 +496,21 @@ public enum FullTypeId * TFT_DATASET = 10102; */ public static final int TFT_DATASET_VALUE = 10102; + /** + *
    +   * A mutex lock tensor, produced by tf.raw_ops.MutexLock.
    +   * Unlike strict execution models, where ownership of a lock is denoted by
    +   * "running after the lock has been acquired", in non-strict mode, lock
    +   * ownership is in the true sense: "the op argument representing the lock is
    +   * available".
    +   * Mutex locks are the dynamic counterpart of control dependencies.
    +   * TODO(mdan): Properly document this thing.
    +   * Parametrization: TFT_MUTEX_LOCK[].
    +   * 
    + * + * TFT_MUTEX_LOCK = 10202; + */ + public static final int TFT_MUTEX_LOCK_VALUE = 10202; /** *
        * The bool element type.
    @@ -523,11 +621,14 @@ public static FullTypeId forNumber(int value) {
           case 1: return TFT_VAR;
           case 2: return TFT_ANY;
           case 3: return TFT_PRODUCT;
    +      case 4: return TFT_NAMED;
           case 100: return TFT_CALLABLE;
           case 1000: return TFT_TENSOR;
           case 1001: return TFT_ARRAY;
           case 1002: return TFT_OPTIONAL;
    +      case 1003: return TFT_LITERAL;
           case 10102: return TFT_DATASET;
    +      case 10202: return TFT_MUTEX_LOCK;
           case 200: return TFT_BOOL;
           case 201: return TFT_UINT8;
           case 202: return TFT_UINT16;
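With TFT_NAMED(4), TFT_LITERAL(1003) and TFT_MUTEX_LOCK(10202) wired into forNumber above, the new ids resolve exactly like the pre-existing constants. A small sketch of that behaviour; the class and method names are illustrative only:

import org.tensorflow.proto.framework.FullTypeId;

final class FullTypeIdLookupSketch {
  static void demo() {
    // The new constants round-trip between wire number and enum value.
    assert FullTypeId.forNumber(4) == FullTypeId.TFT_NAMED;
    assert FullTypeId.forNumber(1003) == FullTypeId.TFT_LITERAL;
    assert FullTypeId.forNumber(10202) == FullTypeId.TFT_MUTEX_LOCK;
    // Numbers outside the known set still yield null, as before.
    assert FullTypeId.forNumber(999_999) == null;
  }
}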
    diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FullTypeProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FullTypeProtos.java
    index 99f7eecbb7a..bff92b6a7bc 100644
    --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FullTypeProtos.java
    +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FullTypeProtos.java
    @@ -29,24 +29,26 @@ public static void registerAllExtensions(
       static {
         java.lang.String[] descriptorData = {
           "\n)tensorflow/core/framework/full_type.pr" +
    -      "oto\022\ntensorflow\"r\n\013FullTypeDef\022\'\n\007type_i" +
    +      "oto\022\ntensorflow\"\177\n\013FullTypeDef\022\'\n\007type_i" +
           "d\030\001 \001(\0162\026.tensorflow.FullTypeId\022%\n\004args\030" +
           "\002 \003(\0132\027.tensorflow.FullTypeDef\022\013\n\001s\030\003 \001(" +
    -      "\tH\000B\006\n\004attr*\254\003\n\nFullTypeId\022\r\n\tTFT_UNSET\020" +
    -      "\000\022\013\n\007TFT_VAR\020\001\022\013\n\007TFT_ANY\020\002\022\017\n\013TFT_PRODU" +
    -      "CT\020\003\022\020\n\014TFT_CALLABLE\020d\022\017\n\nTFT_TENSOR\020\350\007\022" +
    -      "\016\n\tTFT_ARRAY\020\351\007\022\021\n\014TFT_OPTIONAL\020\352\007\022\020\n\013TF" +
    -      "T_DATASET\020\366N\022\r\n\010TFT_BOOL\020\310\001\022\016\n\tTFT_UINT8" +
    -      "\020\311\001\022\017\n\nTFT_UINT16\020\312\001\022\017\n\nTFT_UINT32\020\313\001\022\017\n" +
    -      "\nTFT_UINT64\020\314\001\022\r\n\010TFT_INT8\020\315\001\022\016\n\tTFT_INT" +
    -      "16\020\316\001\022\016\n\tTFT_INT32\020\317\001\022\016\n\tTFT_INT64\020\320\001\022\r\n" +
    -      "\010TFT_HALF\020\321\001\022\016\n\tTFT_FLOAT\020\322\001\022\017\n\nTFT_DOUB" +
    -      "LE\020\323\001\022\021\n\014TFT_BFLOAT16\020\327\001\022\022\n\rTFT_COMPLEX6" +
    -      "4\020\324\001\022\023\n\016TFT_COMPLEX128\020\325\001\022\017\n\nTFT_STRING\020" +
    -      "\326\001B\203\001\n\036org.tensorflow.proto.frameworkB\016F" +
    -      "ullTypeProtosP\001ZLgithub.com/tensorflow/t" +
    -      "ensorflow/tensorflow/go/core/framework/t" +
    -      "ypes_go_proto\370\001\001b\006proto3"
    +      "\tH\000\022\013\n\001i\030\004 \001(\003H\000B\006\n\004attr*\342\003\n\nFullTypeId\022" +
    +      "\r\n\tTFT_UNSET\020\000\022\013\n\007TFT_VAR\020\001\022\013\n\007TFT_ANY\020\002" +
    +      "\022\017\n\013TFT_PRODUCT\020\003\022\r\n\tTFT_NAMED\020\004\022\020\n\014TFT_" +
    +      "CALLABLE\020d\022\017\n\nTFT_TENSOR\020\350\007\022\016\n\tTFT_ARRAY" +
    +      "\020\351\007\022\021\n\014TFT_OPTIONAL\020\352\007\022\020\n\013TFT_LITERAL\020\353\007" +
    +      "\022\020\n\013TFT_DATASET\020\366N\022\023\n\016TFT_MUTEX_LOCK\020\332O\022" +
    +      "\r\n\010TFT_BOOL\020\310\001\022\016\n\tTFT_UINT8\020\311\001\022\017\n\nTFT_UI" +
    +      "NT16\020\312\001\022\017\n\nTFT_UINT32\020\313\001\022\017\n\nTFT_UINT64\020\314" +
    +      "\001\022\r\n\010TFT_INT8\020\315\001\022\016\n\tTFT_INT16\020\316\001\022\016\n\tTFT_" +
    +      "INT32\020\317\001\022\016\n\tTFT_INT64\020\320\001\022\r\n\010TFT_HALF\020\321\001\022" +
    +      "\016\n\tTFT_FLOAT\020\322\001\022\017\n\nTFT_DOUBLE\020\323\001\022\021\n\014TFT_" +
    +      "BFLOAT16\020\327\001\022\022\n\rTFT_COMPLEX64\020\324\001\022\023\n\016TFT_C" +
    +      "OMPLEX128\020\325\001\022\017\n\nTFT_STRING\020\326\001B\207\001\n\036org.te" +
    +      "nsorflow.proto.frameworkB\016FullTypeProtos" +
    +      "P\001ZPgithub.com/tensorflow/tensorflow/ten" +
    +      "sorflow/go/core/framework/full_type_go_p" +
    +      "roto\370\001\001b\006proto3"
         };
         descriptor = com.google.protobuf.Descriptors.FileDescriptor
           .internalBuildGeneratedFileFrom(descriptorData,
    @@ -57,7 +59,7 @@ public static void registerAllExtensions(
         internal_static_tensorflow_FullTypeDef_fieldAccessorTable = new
           com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
             internal_static_tensorflow_FullTypeDef_descriptor,
    -        new java.lang.String[] { "TypeId", "Args", "S", "Attr", });
    +        new java.lang.String[] { "TypeId", "Args", "S", "I", "Attr", });
       }
     
       // @@protoc_insertion_point(outer_class_scope)
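For reference, the regenerated FullTypeProtos descriptor above adds an int64 "i" attribute to FullTypeDef (field 4, in the same "attr" oneof as "s") and new FullTypeId values such as TFT_NAMED, TFT_LITERAL and TFT_MUTEX_LOCK. A minimal sketch of how the new field surfaces through the generated Java accessors, assuming the usual protoc naming (FullTypeId generated into org.tensorflow.proto.framework, and the "i" attribute mapped to setI/getI); illustrative only, not part of the patch:

    import org.tensorflow.proto.framework.FullTypeDef;
    import org.tensorflow.proto.framework.FullTypeId;

    final class FullTypeDefSketch {
      public static void main(String[] args) {
        // Hypothetical example: describe a literal int64 value using the new
        // TFT_LITERAL type id and the new int64 "i" attribute (field 4).
        FullTypeDef literal = FullTypeDef.newBuilder()
            .setTypeId(FullTypeId.TFT_LITERAL)
            .setI(3L)
            .build();
        System.out.println(literal.getTypeId() + " i=" + literal.getI());
      }
    }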
    diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/NodeDef.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/NodeDef.java
    index 9bfd5c5c1f3..83b66c9617f 100644
    --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/NodeDef.java
    +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/NodeDef.java
    @@ -106,6 +106,19 @@ private NodeDef(
     
                 break;
               }
    +          case 58: {
    +            org.tensorflow.proto.framework.FullTypeDef.Builder subBuilder = null;
    +            if (experimentalType_ != null) {
    +              subBuilder = experimentalType_.toBuilder();
    +            }
    +            experimentalType_ = input.readMessage(org.tensorflow.proto.framework.FullTypeDef.parser(), extensionRegistry);
    +            if (subBuilder != null) {
    +              subBuilder.mergeFrom(experimentalType_);
    +              experimentalType_ = subBuilder.buildPartial();
    +            }
    +
    +            break;
    +          }
               default: {
                 if (!parseUnknownField(
                     input, unknownFields, extensionRegistry, tag)) {
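The new case 58 branch above is the parser arm for the experimental_type field: protobuf encodes a tag as (field_number << 3) | wire_type, and field 7 with the length-delimited wire type 2 yields 58. A small illustrative sketch of that arithmetic, not part of the generated code:

    final class TagArithmetic {
      public static void main(String[] args) {
        int fieldNumber = 7;  // experimental_type
        int wireType = 2;     // length-delimited (embedded message)
        int tag = (fieldNumber << 3) | wireType;
        System.out.println(tag);  // prints 58, matching the parser's case label
      }
    }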
    @@ -1667,6 +1680,48 @@ public org.tensorflow.proto.framework.NodeDef.ExperimentalDebugInfoOrBuilder get
         return getExperimentalDebugInfo();
       }
     
    +  public static final int EXPERIMENTAL_TYPE_FIELD_NUMBER = 7;
    +  private org.tensorflow.proto.framework.FullTypeDef experimentalType_;
    +  /**
+   * <pre>
    +   * The complete type of this node. Experimental and subject to change.
    +   * Currently, the field only contains the return types of the node. That will
    +   * extend in the future to contain the entire signature of the node, as a
    +   * function type.
+   * </pre>
+   *
+   * <code>.tensorflow.FullTypeDef experimental_type = 7;</code>
+   */
+  public boolean hasExperimentalType() {
+    return experimentalType_ != null;
+  }
+  /**
+   * <pre>
    +   * The complete type of this node. Experimental and subject to change.
    +   * Currently, the field only contains the return types of the node. That will
    +   * extend in the future to contain the entire signature of the node, as a
    +   * function type.
+   * </pre>
+   *
+   * <code>.tensorflow.FullTypeDef experimental_type = 7;</code>
+   */
+  public org.tensorflow.proto.framework.FullTypeDef getExperimentalType() {
+    return experimentalType_ == null ? org.tensorflow.proto.framework.FullTypeDef.getDefaultInstance() : experimentalType_;
+  }
+  /**
+   * <pre>
    +   * The complete type of this node. Experimental and subject to change.
    +   * Currently, the field only contains the return types of the node. That will
    +   * extend in the future to contain the entire signature of the node, as a
    +   * function type.
+   * </pre>
    + * + * .tensorflow.FullTypeDef experimental_type = 7; + */ + public org.tensorflow.proto.framework.FullTypeDefOrBuilder getExperimentalTypeOrBuilder() { + return getExperimentalType(); + } + private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -1702,6 +1757,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (experimentalDebugInfo_ != null) { output.writeMessage(6, getExperimentalDebugInfo()); } + if (experimentalType_ != null) { + output.writeMessage(7, getExperimentalType()); + } unknownFields.writeTo(output); } @@ -1742,6 +1800,10 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeMessageSize(6, getExperimentalDebugInfo()); } + if (experimentalType_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, getExperimentalType()); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -1772,6 +1834,11 @@ public boolean equals(final java.lang.Object obj) { if (!getExperimentalDebugInfo() .equals(other.getExperimentalDebugInfo())) return false; } + if (hasExperimentalType() != other.hasExperimentalType()) return false; + if (hasExperimentalType()) { + if (!getExperimentalType() + .equals(other.getExperimentalType())) return false; + } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -1801,6 +1868,10 @@ public int hashCode() { hash = (37 * hash) + EXPERIMENTAL_DEBUG_INFO_FIELD_NUMBER; hash = (53 * hash) + getExperimentalDebugInfo().hashCode(); } + if (hasExperimentalType()) { + hash = (37 * hash) + EXPERIMENTAL_TYPE_FIELD_NUMBER; + hash = (53 * hash) + getExperimentalType().hashCode(); + } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -1971,6 +2042,12 @@ public Builder clear() { experimentalDebugInfo_ = null; experimentalDebugInfoBuilder_ = null; } + if (experimentalTypeBuilder_ == null) { + experimentalType_ = null; + } else { + experimentalType_ = null; + experimentalTypeBuilder_ = null; + } return this; } @@ -2013,6 +2090,11 @@ public org.tensorflow.proto.framework.NodeDef buildPartial() { } else { result.experimentalDebugInfo_ = experimentalDebugInfoBuilder_.build(); } + if (experimentalTypeBuilder_ == null) { + result.experimentalType_ = experimentalType_; + } else { + result.experimentalType_ = experimentalTypeBuilder_.build(); + } onBuilt(); return result; } @@ -2088,6 +2170,9 @@ public Builder mergeFrom(org.tensorflow.proto.framework.NodeDef other) { if (other.hasExperimentalDebugInfo()) { mergeExperimentalDebugInfo(other.getExperimentalDebugInfo()); } + if (other.hasExperimentalType()) { + mergeExperimentalType(other.getExperimentalType()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -3021,6 +3106,186 @@ public org.tensorflow.proto.framework.NodeDef.ExperimentalDebugInfoOrBuilder get } return experimentalDebugInfoBuilder_; } + + private org.tensorflow.proto.framework.FullTypeDef experimentalType_; + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.FullTypeDef, org.tensorflow.proto.framework.FullTypeDef.Builder, org.tensorflow.proto.framework.FullTypeDefOrBuilder> experimentalTypeBuilder_; + /** + *
    +     * The complete type of this node. Experimental and subject to change.
    +     * Currently, the field only contains the return types of the node. That will
    +     * extend in the future to contain the entire signature of the node, as a
    +     * function type.
+     * </pre>
+     *
+     * <code>.tensorflow.FullTypeDef experimental_type = 7;</code>
+     */
+    public boolean hasExperimentalType() {
+      return experimentalTypeBuilder_ != null || experimentalType_ != null;
+    }
+    /**
+     * <pre>
    +     * The complete type of this node. Experimental and subject to change.
    +     * Currently, the field only contains the return types of the node. That will
    +     * extend in the future to contain the entire signature of the node, as a
    +     * function type.
+     * </pre>
+     *
+     * <code>.tensorflow.FullTypeDef experimental_type = 7;</code>
+     */
+    public org.tensorflow.proto.framework.FullTypeDef getExperimentalType() {
+      if (experimentalTypeBuilder_ == null) {
+        return experimentalType_ == null ? org.tensorflow.proto.framework.FullTypeDef.getDefaultInstance() : experimentalType_;
+      } else {
+        return experimentalTypeBuilder_.getMessage();
+      }
+    }
+    /**
+     * <pre>
    +     * The complete type of this node. Experimental and subject to change.
    +     * Currently, the field only contains the return types of the node. That will
    +     * extend in the future to contain the entire signature of the node, as a
    +     * function type.
+     * </pre>
+     *
+     * <code>.tensorflow.FullTypeDef experimental_type = 7;</code>
+     */
+    public Builder setExperimentalType(org.tensorflow.proto.framework.FullTypeDef value) {
+      if (experimentalTypeBuilder_ == null) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        experimentalType_ = value;
+        onChanged();
+      } else {
+        experimentalTypeBuilder_.setMessage(value);
+      }
+
+      return this;
+    }
+    /**
+     * <pre>
    +     * The complete type of this node. Experimental and subject to change.
    +     * Currently, the field only contains the return types of the node. That will
    +     * extend in the future to contain the entire signature of the node, as a
    +     * function type.
+     * </pre>
+     *
+     * <code>.tensorflow.FullTypeDef experimental_type = 7;</code>
+     */
+    public Builder setExperimentalType(
+        org.tensorflow.proto.framework.FullTypeDef.Builder builderForValue) {
+      if (experimentalTypeBuilder_ == null) {
+        experimentalType_ = builderForValue.build();
+        onChanged();
+      } else {
+        experimentalTypeBuilder_.setMessage(builderForValue.build());
+      }
+
+      return this;
+    }
+    /**
+     * <pre>
    +     * The complete type of this node. Experimental and subject to change.
    +     * Currently, the field only contains the return types of the node. That will
    +     * extend in the future to contain the entire signature of the node, as a
    +     * function type.
+     * </pre>
+     *
+     * <code>.tensorflow.FullTypeDef experimental_type = 7;</code>
+     */
+    public Builder mergeExperimentalType(org.tensorflow.proto.framework.FullTypeDef value) {
+      if (experimentalTypeBuilder_ == null) {
+        if (experimentalType_ != null) {
+          experimentalType_ =
+            org.tensorflow.proto.framework.FullTypeDef.newBuilder(experimentalType_).mergeFrom(value).buildPartial();
+        } else {
+          experimentalType_ = value;
+        }
+        onChanged();
+      } else {
+        experimentalTypeBuilder_.mergeFrom(value);
+      }
+
+      return this;
+    }
+    /**
+     * <pre>
    +     * The complete type of this node. Experimental and subject to change.
    +     * Currently, the field only contains the return types of the node. That will
    +     * extend in the future to contain the entire signature of the node, as a
    +     * function type.
+     * </pre>
+     *
+     * <code>.tensorflow.FullTypeDef experimental_type = 7;</code>
+     */
+    public Builder clearExperimentalType() {
+      if (experimentalTypeBuilder_ == null) {
+        experimentalType_ = null;
+        onChanged();
+      } else {
+        experimentalType_ = null;
+        experimentalTypeBuilder_ = null;
+      }
+
+      return this;
+    }
+    /**
+     * <pre>
    +     * The complete type of this node. Experimental and subject to change.
    +     * Currently, the field only contains the return types of the node. That will
    +     * extend in the future to contain the entire signature of the node, as a
    +     * function type.
+     * </pre>
+     *
+     * <code>.tensorflow.FullTypeDef experimental_type = 7;</code>
+     */
+    public org.tensorflow.proto.framework.FullTypeDef.Builder getExperimentalTypeBuilder() {
+
+      onChanged();
+      return getExperimentalTypeFieldBuilder().getBuilder();
+    }
+    /**
+     * <pre>
    +     * The complete type of this node. Experimental and subject to change.
    +     * Currently, the field only contains the return types of the node. That will
    +     * extend in the future to contain the entire signature of the node, as a
    +     * function type.
+     * </pre>
+     *
+     * <code>.tensorflow.FullTypeDef experimental_type = 7;</code>
+     */
+    public org.tensorflow.proto.framework.FullTypeDefOrBuilder getExperimentalTypeOrBuilder() {
+      if (experimentalTypeBuilder_ != null) {
+        return experimentalTypeBuilder_.getMessageOrBuilder();
+      } else {
+        return experimentalType_ == null ?
+            org.tensorflow.proto.framework.FullTypeDef.getDefaultInstance() : experimentalType_;
+      }
+    }
+    /**
+     * <pre>
    +     * The complete type of this node. Experimental and subject to change.
    +     * Currently, the field only contains the return types of the node. That will
    +     * extend in the future to contain the entire signature of the node, as a
    +     * function type.
+     * </pre>
    + * + * .tensorflow.FullTypeDef experimental_type = 7; + */ + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.FullTypeDef, org.tensorflow.proto.framework.FullTypeDef.Builder, org.tensorflow.proto.framework.FullTypeDefOrBuilder> + getExperimentalTypeFieldBuilder() { + if (experimentalTypeBuilder_ == null) { + experimentalTypeBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.FullTypeDef, org.tensorflow.proto.framework.FullTypeDef.Builder, org.tensorflow.proto.framework.FullTypeDefOrBuilder>( + getExperimentalType(), + getParentForChildren(), + isClean()); + experimentalType_ = null; + } + return experimentalTypeBuilder_; + } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/NodeDefOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/NodeDefOrBuilder.java index 43971913d97..8e1869cd8f7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/NodeDefOrBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/NodeDefOrBuilder.java @@ -281,4 +281,38 @@ org.tensorflow.proto.framework.AttrValue getAttrOrThrow( * .tensorflow.NodeDef.ExperimentalDebugInfo experimental_debug_info = 6; */ org.tensorflow.proto.framework.NodeDef.ExperimentalDebugInfoOrBuilder getExperimentalDebugInfoOrBuilder(); + + /** + *
    +   * The complete type of this node. Experimental and subject to change.
    +   * Currently, the field only contains the return types of the node. That will
    +   * extend in the future to contain the entire signature of the node, as a
    +   * function type.
+   * </pre>
+   *
+   * <code>.tensorflow.FullTypeDef experimental_type = 7;</code>
+   */
+  boolean hasExperimentalType();
+  /**
+   * <pre>
    +   * The complete type of this node. Experimental and subject to change.
    +   * Currently, the field only contains the return types of the node. That will
    +   * extend in the future to contain the entire signature of the node, as a
    +   * function type.
+   * </pre>
+   *
+   * <code>.tensorflow.FullTypeDef experimental_type = 7;</code>
+   */
+  org.tensorflow.proto.framework.FullTypeDef getExperimentalType();
+  /**
+   * <pre>
    +   * The complete type of this node. Experimental and subject to change.
    +   * Currently, the field only contains the return types of the node. That will
    +   * extend in the future to contain the entire signature of the node, as a
    +   * function type.
+   * </pre>
    + * + * .tensorflow.FullTypeDef experimental_type = 7; + */ + org.tensorflow.proto.framework.FullTypeDefOrBuilder getExperimentalTypeOrBuilder(); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/NodeProto.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/NodeProto.java index ef4be3729ea..13a26ac8aa0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/NodeProto.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/NodeProto.java @@ -40,31 +40,34 @@ public static void registerAllExtensions( java.lang.String[] descriptorData = { "\n(tensorflow/core/framework/node_def.pro" + "to\022\ntensorflow\032*tensorflow/core/framewor" + - "k/attr_value.proto\"\322\002\n\007NodeDef\022\014\n\004name\030\001" + - " \001(\t\022\n\n\002op\030\002 \001(\t\022\r\n\005input\030\003 \003(\t\022\016\n\006devic" + - "e\030\004 \001(\t\022+\n\004attr\030\005 \003(\0132\035.tensorflow.NodeD" + - "ef.AttrEntry\022J\n\027experimental_debug_info\030" + - "\006 \001(\0132).tensorflow.NodeDef.ExperimentalD" + - "ebugInfo\032B\n\tAttrEntry\022\013\n\003key\030\001 \001(\t\022$\n\005va" + - "lue\030\002 \001(\0132\025.tensorflow.AttrValue:\0028\001\032Q\n\025" + - "ExperimentalDebugInfo\022\033\n\023original_node_n" + - "ames\030\001 \003(\t\022\033\n\023original_func_names\030\002 \003(\tB" + - "\201\001\n\036org.tensorflow.proto.frameworkB\tNode" + - "ProtoP\001ZOgithub.com/tensorflow/tensorflo" + - "w/tensorflow/go/core/framework/node_def_" + - "go_proto\370\001\001b\006proto3" + "k/attr_value.proto\032)tensorflow/core/fram" + + "ework/full_type.proto\"\206\003\n\007NodeDef\022\014\n\004nam" + + "e\030\001 \001(\t\022\n\n\002op\030\002 \001(\t\022\r\n\005input\030\003 \003(\t\022\016\n\006de" + + "vice\030\004 \001(\t\022+\n\004attr\030\005 \003(\0132\035.tensorflow.No" + + "deDef.AttrEntry\022J\n\027experimental_debug_in" + + "fo\030\006 \001(\0132).tensorflow.NodeDef.Experiment" + + "alDebugInfo\0222\n\021experimental_type\030\007 \001(\0132\027" + + ".tensorflow.FullTypeDef\032B\n\tAttrEntry\022\013\n\003" + + "key\030\001 \001(\t\022$\n\005value\030\002 \001(\0132\025.tensorflow.At" + + "trValue:\0028\001\032Q\n\025ExperimentalDebugInfo\022\033\n\023" + + "original_node_names\030\001 \003(\t\022\033\n\023original_fu" + + "nc_names\030\002 \003(\tB\201\001\n\036org.tensorflow.proto." 
+ + "frameworkB\tNodeProtoP\001ZOgithub.com/tenso" + + "rflow/tensorflow/tensorflow/go/core/fram" + + "ework/node_def_go_proto\370\001\001b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { org.tensorflow.proto.framework.AttrValueProtos.getDescriptor(), + org.tensorflow.proto.framework.FullTypeProtos.getDescriptor(), }); internal_static_tensorflow_NodeDef_descriptor = getDescriptor().getMessageTypes().get(0); internal_static_tensorflow_NodeDef_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_NodeDef_descriptor, - new java.lang.String[] { "Name", "Op", "Input", "Device", "Attr", "ExperimentalDebugInfo", }); + new java.lang.String[] { "Name", "Op", "Input", "Device", "Attr", "ExperimentalDebugInfo", "ExperimentalType", }); internal_static_tensorflow_NodeDef_AttrEntry_descriptor = internal_static_tensorflow_NodeDef_descriptor.getNestedTypes().get(0); internal_static_tensorflow_NodeDef_AttrEntry_fieldAccessorTable = new @@ -78,6 +81,7 @@ public static void registerAllExtensions( internal_static_tensorflow_NodeDef_ExperimentalDebugInfo_descriptor, new java.lang.String[] { "OriginalNodeNames", "OriginalFuncNames", }); org.tensorflow.proto.framework.AttrValueProtos.getDescriptor(); + org.tensorflow.proto.framework.FullTypeProtos.getDescriptor(); } // @@protoc_insertion_point(outer_class_scope) diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OptimizerOptions.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OptimizerOptions.java index 3888c9c9ee6..8868c9bdd42 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OptimizerOptions.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OptimizerOptions.java @@ -86,6 +86,11 @@ private OptimizerOptions( maxFoldedConstantInBytes_ = input.readInt64(); break; } + case 56: { + + cpuGlobalJit_ = input.readBool(); + break; + } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { @@ -489,6 +494,21 @@ public org.tensorflow.proto.framework.OptimizerOptions.GlobalJitLevel getGlobalJ return result == null ? org.tensorflow.proto.framework.OptimizerOptions.GlobalJitLevel.UNRECOGNIZED : result; } + public static final int CPU_GLOBAL_JIT_FIELD_NUMBER = 7; + private boolean cpuGlobalJit_; + /** + *
    +   * CPU code will be autoclustered only if global_jit_level >= ON_1 and either:
    +   *  - this flag is true, or
    +   *  - TF_XLA_FLAGS contains --tf_xla_cpu_global_jit=true.
+   * </pre>
    + * + * bool cpu_global_jit = 7; + */ + public boolean getCpuGlobalJit() { + return cpuGlobalJit_; + } + private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -521,6 +541,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (maxFoldedConstantInBytes_ != 0L) { output.writeInt64(6, maxFoldedConstantInBytes_); } + if (cpuGlobalJit_ != false) { + output.writeBool(7, cpuGlobalJit_); + } unknownFields.writeTo(output); } @@ -554,6 +577,10 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeInt64Size(6, maxFoldedConstantInBytes_); } + if (cpuGlobalJit_ != false) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(7, cpuGlobalJit_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -579,6 +606,8 @@ public boolean equals(final java.lang.Object obj) { != other.getDoFunctionInlining()) return false; if (optLevel_ != other.optLevel_) return false; if (globalJitLevel_ != other.globalJitLevel_) return false; + if (getCpuGlobalJit() + != other.getCpuGlobalJit()) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -606,6 +635,9 @@ public int hashCode() { hash = (53 * hash) + optLevel_; hash = (37 * hash) + GLOBAL_JIT_LEVEL_FIELD_NUMBER; hash = (53 * hash) + globalJitLevel_; + hash = (37 * hash) + CPU_GLOBAL_JIT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getCpuGlobalJit()); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -755,6 +787,8 @@ public Builder clear() { globalJitLevel_ = 0; + cpuGlobalJit_ = false; + return this; } @@ -787,6 +821,7 @@ public org.tensorflow.proto.framework.OptimizerOptions buildPartial() { result.doFunctionInlining_ = doFunctionInlining_; result.optLevel_ = optLevel_; result.globalJitLevel_ = globalJitLevel_; + result.cpuGlobalJit_ = cpuGlobalJit_; onBuilt(); return result; } @@ -853,6 +888,9 @@ public Builder mergeFrom(org.tensorflow.proto.framework.OptimizerOptions other) if (other.globalJitLevel_ != 0) { setGlobalJitLevelValue(other.getGlobalJitLevelValue()); } + if (other.getCpuGlobalJit() != false) { + setCpuGlobalJit(other.getCpuGlobalJit()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -1175,6 +1213,50 @@ public Builder clearGlobalJitLevel() { onChanged(); return this; } + + private boolean cpuGlobalJit_ ; + /** + *
    +     * CPU code will be autoclustered only if global_jit_level >= ON_1 and either:
    +     *  - this flag is true, or
    +     *  - TF_XLA_FLAGS contains --tf_xla_cpu_global_jit=true.
+     * </pre>
+     *
+     * <code>bool cpu_global_jit = 7;</code>
+     */
+    public boolean getCpuGlobalJit() {
+      return cpuGlobalJit_;
+    }
+    /**
+     * <pre>
    +     * CPU code will be autoclustered only if global_jit_level >= ON_1 and either:
    +     *  - this flag is true, or
    +     *  - TF_XLA_FLAGS contains --tf_xla_cpu_global_jit=true.
+     * </pre>
+     *
+     * <code>bool cpu_global_jit = 7;</code>
+     */
+    public Builder setCpuGlobalJit(boolean value) {
+
+      cpuGlobalJit_ = value;
+      onChanged();
+      return this;
+    }
+    /**
+     * <pre>
    +     * CPU code will be autoclustered only if global_jit_level >= ON_1 and either:
    +     *  - this flag is true, or
    +     *  - TF_XLA_FLAGS contains --tf_xla_cpu_global_jit=true.
+     * </pre>
    + * + * bool cpu_global_jit = 7; + */ + public Builder clearCpuGlobalJit() { + + cpuGlobalJit_ = false; + onChanged(); + return this; + } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OptimizerOptionsOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OptimizerOptionsOrBuilder.java index 0b2916cedad..69397ec8c5d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OptimizerOptionsOrBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/OptimizerOptionsOrBuilder.java @@ -79,4 +79,15 @@ public interface OptimizerOptionsOrBuilder extends * .tensorflow.OptimizerOptions.GlobalJitLevel global_jit_level = 5; */ org.tensorflow.proto.framework.OptimizerOptions.GlobalJitLevel getGlobalJitLevel(); + + /** + *
    +   * CPU code will be autoclustered only if global_jit_level >= ON_1 and either:
    +   *  - this flag is true, or
    +   *  - TF_XLA_FLAGS contains --tf_xla_cpu_global_jit=true.
+   * </pre>
    + * + * bool cpu_global_jit = 7; + */ + boolean getCpuGlobalJit(); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObject.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObject.java index 4f585ec9839..4a4390f1e6a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObject.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObject.java @@ -18,6 +18,7 @@ private SavedObject(com.google.protobuf.GeneratedMessageV3.Builder builder) { private SavedObject() { children_ = java.util.Collections.emptyList(); slotVariables_ = java.util.Collections.emptyList(); + registeredName_ = ""; } @java.lang.Override @@ -194,6 +195,25 @@ private SavedObject( kindCase_ = 12; break; } + case 106: { + java.lang.String s = input.readStringRequireUtf8(); + + registeredName_ = s; + break; + } + case 114: { + com.google.protobuf.Any.Builder subBuilder = null; + if (serializedUserProto_ != null) { + subBuilder = serializedUserProto_.toBuilder(); + } + serializedUserProto_ = input.readMessage(com.google.protobuf.Any.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(serializedUserProto_); + serializedUserProto_ = subBuilder.buildPartial(); + } + + break; + } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { @@ -713,6 +733,95 @@ public org.tensorflow.proto.framework.SaveableObject getSaveableObjectsOrThrow( return map.get(key); } + public static final int REGISTERED_NAME_FIELD_NUMBER = 13; + private volatile java.lang.Object registeredName_; + /** + *
    +   * The fields below are filled when the user serializes a registered Trackable
    +   * class. Registered classes may save additional metadata and supersede the
    +   * default loading process where nodes are recreated from the proto.
    +   * The name of the registered class of the form "{package}.{class_name}".
    +   * This field is used to search for the registered class at loading time.
+   * </pre>
+   *
+   * <code>string registered_name = 13;</code>
+   */
+  public java.lang.String getRegisteredName() {
+    java.lang.Object ref = registeredName_;
+    if (ref instanceof java.lang.String) {
+      return (java.lang.String) ref;
+    } else {
+      com.google.protobuf.ByteString bs =
+          (com.google.protobuf.ByteString) ref;
+      java.lang.String s = bs.toStringUtf8();
+      registeredName_ = s;
+      return s;
+    }
+  }
+  /**
+   * <pre>
    +   * The fields below are filled when the user serializes a registered Trackable
    +   * class. Registered classes may save additional metadata and supersede the
    +   * default loading process where nodes are recreated from the proto.
    +   * The name of the registered class of the form "{package}.{class_name}".
    +   * This field is used to search for the registered class at loading time.
+   * </pre>
+   *
+   * <code>string registered_name = 13;</code>
+   */
+  public com.google.protobuf.ByteString
+      getRegisteredNameBytes() {
+    java.lang.Object ref = registeredName_;
+    if (ref instanceof java.lang.String) {
+      com.google.protobuf.ByteString b =
+          com.google.protobuf.ByteString.copyFromUtf8(
+              (java.lang.String) ref);
+      registeredName_ = b;
+      return b;
+    } else {
+      return (com.google.protobuf.ByteString) ref;
+    }
+  }
+
+  public static final int SERIALIZED_USER_PROTO_FIELD_NUMBER = 14;
+  private com.google.protobuf.Any serializedUserProto_;
+  /**
+   * <pre>
    +   * The user-generated proto storing metadata for this object, to be passed to
    +   * the registered classes's _deserialize_from_proto method when this object is
    +   * loaded from the SavedModel.
+   * </pre>
+   *
+   * <code>.google.protobuf.Any serialized_user_proto = 14;</code>
+   */
+  public boolean hasSerializedUserProto() {
+    return serializedUserProto_ != null;
+  }
+  /**
+   * <pre>
    +   * The user-generated proto storing metadata for this object, to be passed to
    +   * the registered classes's _deserialize_from_proto method when this object is
    +   * loaded from the SavedModel.
+   * </pre>
+   *
+   * <code>.google.protobuf.Any serialized_user_proto = 14;</code>
+   */
+  public com.google.protobuf.Any getSerializedUserProto() {
+    return serializedUserProto_ == null ? com.google.protobuf.Any.getDefaultInstance() : serializedUserProto_;
+  }
+  /**
+   * <pre>
    +   * The user-generated proto storing metadata for this object, to be passed to
    +   * the registered classes's _deserialize_from_proto method when this object is
    +   * loaded from the SavedModel.
+   * </pre>
    + * + * .google.protobuf.Any serialized_user_proto = 14; + */ + public com.google.protobuf.AnyOrBuilder getSerializedUserProtoOrBuilder() { + return getSerializedUserProto(); + } + private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -763,6 +872,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (kindCase_ == 12) { output.writeMessage(12, (org.tensorflow.proto.framework.CapturedTensor) kind_); } + if (!getRegisteredNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 13, registeredName_); + } + if (serializedUserProto_ != null) { + output.writeMessage(14, getSerializedUserProto()); + } unknownFields.writeTo(output); } @@ -822,6 +937,13 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeMessageSize(12, (org.tensorflow.proto.framework.CapturedTensor) kind_); } + if (!getRegisteredNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(13, registeredName_); + } + if (serializedUserProto_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(14, getSerializedUserProto()); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -843,6 +965,13 @@ public boolean equals(final java.lang.Object obj) { .equals(other.getSlotVariablesList())) return false; if (!internalGetSaveableObjects().equals( other.internalGetSaveableObjects())) return false; + if (!getRegisteredName() + .equals(other.getRegisteredName())) return false; + if (hasSerializedUserProto() != other.hasSerializedUserProto()) return false; + if (hasSerializedUserProto()) { + if (!getSerializedUserProto() + .equals(other.getSerializedUserProto())) return false; + } if (!getKindCase().equals(other.getKindCase())) return false; switch (kindCase_) { case 4: @@ -903,6 +1032,12 @@ public int hashCode() { hash = (37 * hash) + SAVEABLE_OBJECTS_FIELD_NUMBER; hash = (53 * hash) + internalGetSaveableObjects().hashCode(); } + hash = (37 * hash) + REGISTERED_NAME_FIELD_NUMBER; + hash = (53 * hash) + getRegisteredName().hashCode(); + if (hasSerializedUserProto()) { + hash = (37 * hash) + SERIALIZED_USER_PROTO_FIELD_NUMBER; + hash = (53 * hash) + getSerializedUserProto().hashCode(); + } switch (kindCase_) { case 4: hash = (37 * hash) + USER_OBJECT_FIELD_NUMBER; @@ -1109,6 +1244,14 @@ public Builder clear() { slotVariablesBuilder_.clear(); } internalGetMutableSaveableObjects().clear(); + registeredName_ = ""; + + if (serializedUserProtoBuilder_ == null) { + serializedUserProto_ = null; + } else { + serializedUserProto_ = null; + serializedUserProtoBuilder_ = null; + } kindCase_ = 0; kind_ = null; return this; @@ -1214,6 +1357,12 @@ public org.tensorflow.proto.framework.SavedObject buildPartial() { } result.saveableObjects_ = internalGetSaveableObjects(); result.saveableObjects_.makeImmutable(); + result.registeredName_ = registeredName_; + if (serializedUserProtoBuilder_ == null) { + result.serializedUserProto_ = serializedUserProto_; + } else { + result.serializedUserProto_ = serializedUserProtoBuilder_.build(); + } result.kindCase_ = kindCase_; onBuilt(); return result; @@ -1317,6 +1466,13 @@ public Builder mergeFrom(org.tensorflow.proto.framework.SavedObject other) { } internalGetMutableSaveableObjects().mergeFrom( other.internalGetSaveableObjects()); + if (!other.getRegisteredName().isEmpty()) { + registeredName_ = other.registeredName_; + onChanged(); + } + if (other.hasSerializedUserProto()) { + 
mergeSerializedUserProto(other.getSerializedUserProto()); + } switch (other.getKindCase()) { case USER_OBJECT: { mergeUserObject(other.getUserObject()); @@ -3323,6 +3479,286 @@ public Builder putAllSaveableObjects( .putAll(values); return this; } + + private java.lang.Object registeredName_ = ""; + /** + *
    +     * The fields below are filled when the user serializes a registered Trackable
    +     * class. Registered classes may save additional metadata and supersede the
    +     * default loading process where nodes are recreated from the proto.
    +     * The name of the registered class of the form "{package}.{class_name}".
    +     * This field is used to search for the registered class at loading time.
+     * </pre>
+     *
+     * <code>string registered_name = 13;</code>
+     */
+    public java.lang.String getRegisteredName() {
+      java.lang.Object ref = registeredName_;
+      if (!(ref instanceof java.lang.String)) {
+        com.google.protobuf.ByteString bs =
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        registeredName_ = s;
+        return s;
+      } else {
+        return (java.lang.String) ref;
+      }
+    }
+    /**
+     * <pre>
    +     * The fields below are filled when the user serializes a registered Trackable
    +     * class. Registered classes may save additional metadata and supersede the
    +     * default loading process where nodes are recreated from the proto.
    +     * The name of the registered class of the form "{package}.{class_name}".
    +     * This field is used to search for the registered class at loading time.
+     * </pre>
+     *
+     * <code>string registered_name = 13;</code>
+     */
+    public com.google.protobuf.ByteString
+        getRegisteredNameBytes() {
+      java.lang.Object ref = registeredName_;
+      if (ref instanceof String) {
+        com.google.protobuf.ByteString b =
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        registeredName_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+    /**
+     * <pre>
    +     * The fields below are filled when the user serializes a registered Trackable
    +     * class. Registered classes may save additional metadata and supersede the
    +     * default loading process where nodes are recreated from the proto.
    +     * The name of the registered class of the form "{package}.{class_name}".
    +     * This field is used to search for the registered class at loading time.
+     * </pre>
+     *
+     * <code>string registered_name = 13;</code>
+     */
+    public Builder setRegisteredName(
+        java.lang.String value) {
+      if (value == null) {
+        throw new NullPointerException();
+      }
+
+      registeredName_ = value;
+      onChanged();
+      return this;
+    }
+    /**
+     * <pre>
    +     * The fields below are filled when the user serializes a registered Trackable
    +     * class. Registered classes may save additional metadata and supersede the
    +     * default loading process where nodes are recreated from the proto.
    +     * The name of the registered class of the form "{package}.{class_name}".
    +     * This field is used to search for the registered class at loading time.
+     * </pre>
+     *
+     * <code>string registered_name = 13;</code>
+     */
+    public Builder clearRegisteredName() {
+
+      registeredName_ = getDefaultInstance().getRegisteredName();
+      onChanged();
+      return this;
+    }
+    /**
+     * <pre>
    +     * The fields below are filled when the user serializes a registered Trackable
    +     * class. Registered classes may save additional metadata and supersede the
    +     * default loading process where nodes are recreated from the proto.
    +     * The name of the registered class of the form "{package}.{class_name}".
    +     * This field is used to search for the registered class at loading time.
+     * </pre>
+     *
+     * <code>string registered_name = 13;</code>
+     */
+    public Builder setRegisteredNameBytes(
+        com.google.protobuf.ByteString value) {
+      if (value == null) {
+        throw new NullPointerException();
+      }
+      checkByteStringIsUtf8(value);
+
+      registeredName_ = value;
+      onChanged();
+      return this;
+    }
+
+    private com.google.protobuf.Any serializedUserProto_;
+    private com.google.protobuf.SingleFieldBuilderV3<
+        com.google.protobuf.Any, com.google.protobuf.Any.Builder, com.google.protobuf.AnyOrBuilder> serializedUserProtoBuilder_;
+    /**
+     * <pre>
    +     * The user-generated proto storing metadata for this object, to be passed to
    +     * the registered classes's _deserialize_from_proto method when this object is
    +     * loaded from the SavedModel.
+     * </pre>
+     *
+     * <code>.google.protobuf.Any serialized_user_proto = 14;</code>
+     */
+    public boolean hasSerializedUserProto() {
+      return serializedUserProtoBuilder_ != null || serializedUserProto_ != null;
+    }
+    /**
+     * <pre>
    +     * The user-generated proto storing metadata for this object, to be passed to
    +     * the registered classes's _deserialize_from_proto method when this object is
    +     * loaded from the SavedModel.
+     * </pre>
+     *
+     * <code>.google.protobuf.Any serialized_user_proto = 14;</code>
+     */
+    public com.google.protobuf.Any getSerializedUserProto() {
+      if (serializedUserProtoBuilder_ == null) {
+        return serializedUserProto_ == null ? com.google.protobuf.Any.getDefaultInstance() : serializedUserProto_;
+      } else {
+        return serializedUserProtoBuilder_.getMessage();
+      }
+    }
+    /**
+     * <pre>
    +     * The user-generated proto storing metadata for this object, to be passed to
    +     * the registered classes's _deserialize_from_proto method when this object is
    +     * loaded from the SavedModel.
+     * </pre>
+     *
+     * <code>.google.protobuf.Any serialized_user_proto = 14;</code>
+     */
+    public Builder setSerializedUserProto(com.google.protobuf.Any value) {
+      if (serializedUserProtoBuilder_ == null) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        serializedUserProto_ = value;
+        onChanged();
+      } else {
+        serializedUserProtoBuilder_.setMessage(value);
+      }
+
+      return this;
+    }
+    /**
+     * <pre>
    +     * The user-generated proto storing metadata for this object, to be passed to
    +     * the registered classes's _deserialize_from_proto method when this object is
    +     * loaded from the SavedModel.
+     * </pre>
+     *
+     * <code>.google.protobuf.Any serialized_user_proto = 14;</code>
+     */
+    public Builder setSerializedUserProto(
+        com.google.protobuf.Any.Builder builderForValue) {
+      if (serializedUserProtoBuilder_ == null) {
+        serializedUserProto_ = builderForValue.build();
+        onChanged();
+      } else {
+        serializedUserProtoBuilder_.setMessage(builderForValue.build());
+      }
+
+      return this;
+    }
+    /**
+     * <pre>
    +     * The user-generated proto storing metadata for this object, to be passed to
    +     * the registered classes's _deserialize_from_proto method when this object is
    +     * loaded from the SavedModel.
+     * </pre>
+     *
+     * <code>.google.protobuf.Any serialized_user_proto = 14;</code>
+     */
+    public Builder mergeSerializedUserProto(com.google.protobuf.Any value) {
+      if (serializedUserProtoBuilder_ == null) {
+        if (serializedUserProto_ != null) {
+          serializedUserProto_ =
+            com.google.protobuf.Any.newBuilder(serializedUserProto_).mergeFrom(value).buildPartial();
+        } else {
+          serializedUserProto_ = value;
+        }
+        onChanged();
+      } else {
+        serializedUserProtoBuilder_.mergeFrom(value);
+      }
+
+      return this;
+    }
+    /**
+     * <pre>
    +     * The user-generated proto storing metadata for this object, to be passed to
    +     * the registered classes's _deserialize_from_proto method when this object is
    +     * loaded from the SavedModel.
+     * </pre>
+     *
+     * <code>.google.protobuf.Any serialized_user_proto = 14;</code>
+     */
+    public Builder clearSerializedUserProto() {
+      if (serializedUserProtoBuilder_ == null) {
+        serializedUserProto_ = null;
+        onChanged();
+      } else {
+        serializedUserProto_ = null;
+        serializedUserProtoBuilder_ = null;
+      }
+
+      return this;
+    }
+    /**
+     * <pre>
    +     * The user-generated proto storing metadata for this object, to be passed to
    +     * the registered classes's _deserialize_from_proto method when this object is
    +     * loaded from the SavedModel.
+     * </pre>
+     *
+     * <code>.google.protobuf.Any serialized_user_proto = 14;</code>
+     */
+    public com.google.protobuf.Any.Builder getSerializedUserProtoBuilder() {
+
+      onChanged();
+      return getSerializedUserProtoFieldBuilder().getBuilder();
+    }
+    /**
+     * <pre>
    +     * The user-generated proto storing metadata for this object, to be passed to
    +     * the registered classes's _deserialize_from_proto method when this object is
    +     * loaded from the SavedModel.
+     * </pre>
+     *
+     * <code>.google.protobuf.Any serialized_user_proto = 14;</code>
+     */
+    public com.google.protobuf.AnyOrBuilder getSerializedUserProtoOrBuilder() {
+      if (serializedUserProtoBuilder_ != null) {
+        return serializedUserProtoBuilder_.getMessageOrBuilder();
+      } else {
+        return serializedUserProto_ == null ?
+            com.google.protobuf.Any.getDefaultInstance() : serializedUserProto_;
+      }
+    }
+    /**
+     * <pre>
    +     * The user-generated proto storing metadata for this object, to be passed to
    +     * the registered classes's _deserialize_from_proto method when this object is
    +     * loaded from the SavedModel.
+     * </pre>
    + * + * .google.protobuf.Any serialized_user_proto = 14; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Any, com.google.protobuf.Any.Builder, com.google.protobuf.AnyOrBuilder> + getSerializedUserProtoFieldBuilder() { + if (serializedUserProtoBuilder_ == null) { + serializedUserProtoBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Any, com.google.protobuf.Any.Builder, com.google.protobuf.AnyOrBuilder>( + getSerializedUserProto(), + getParentForChildren(), + isClean()); + serializedUserProto_ = null; + } + return serializedUserProtoBuilder_; + } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObjectGraphProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObjectGraphProtos.java index d9cbd0a8f02..9d9180a990b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObjectGraphProtos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObjectGraphProtos.java @@ -99,82 +99,85 @@ public static void registerAllExtensions( static { java.lang.String[] descriptorData = { "\n1tensorflow/core/protobuf/saved_object_" + - "graph.proto\022\ntensorflow\032,tensorflow/core" + - "/framework/tensor_shape.proto\032%tensorflo" + - "w/core/framework/types.proto\032(tensorflow" + - "/core/framework/variable.proto\032(tensorfl" + - "ow/core/framework/versions.proto\032%tensor" + - "flow/core/protobuf/struct.proto\0325tensorf" + - "low/core/protobuf/trackable_object_graph" + - ".proto\"\350\001\n\020SavedObjectGraph\022&\n\005nodes\030\001 \003" + - "(\0132\027.tensorflow.SavedObject\022O\n\022concrete_" + - "functions\030\002 \003(\01323.tensorflow.SavedObject" + - "Graph.ConcreteFunctionsEntry\032[\n\026Concrete" + - "FunctionsEntry\022\013\n\003key\030\001 \001(\t\0220\n\005value\030\002 \001" + - "(\0132!.tensorflow.SavedConcreteFunction:\0028" + - "\001\"\220\006\n\013SavedObject\022R\n\010children\030\001 \003(\0132@.te" + - "nsorflow.TrackableObjectGraph.TrackableO" + - "bject.ObjectReference\022^\n\016slot_variables\030" + - "\003 \003(\0132F.tensorflow.TrackableObjectGraph." 
+ - "TrackableObject.SlotVariableReference\0222\n" + - "\013user_object\030\004 \001(\0132\033.tensorflow.SavedUse" + - "rObjectH\000\022\'\n\005asset\030\005 \001(\0132\026.tensorflow.Sa" + - "vedAssetH\000\022-\n\010function\030\006 \001(\0132\031.tensorflo" + - "w.SavedFunctionH\000\022-\n\010variable\030\007 \001(\0132\031.te" + - "nsorflow.SavedVariableH\000\022G\n\026bare_concret" + - "e_function\030\010 \001(\0132%.tensorflow.SavedBareC" + - "oncreteFunctionH\000\022-\n\010constant\030\t \001(\0132\031.te" + - "nsorflow.SavedConstantH\000\022-\n\010resource\030\n \001" + - "(\0132\031.tensorflow.SavedResourceH\000\0225\n\017captu" + - "red_tensor\030\014 \001(\0132\032.tensorflow.CapturedTe" + - "nsorH\000\022F\n\020saveable_objects\030\013 \003(\0132,.tenso" + - "rflow.SavedObject.SaveableObjectsEntry\032R" + - "\n\024SaveableObjectsEntry\022\013\n\003key\030\001 \001(\t\022)\n\005v" + - "alue\030\002 \001(\0132\032.tensorflow.SaveableObject:\002" + - "8\001B\006\n\004kindJ\004\010\002\020\003R\nattributes\"d\n\017SavedUse" + - "rObject\022\022\n\nidentifier\030\001 \001(\t\022\'\n\007version\030\002" + - " \001(\0132\026.tensorflow.VersionDef\022\024\n\010metadata" + - "\030\003 \001(\tB\002\030\001\"*\n\nSavedAsset\022\034\n\024asset_file_d" + - "ef_index\030\001 \001(\005\"\\\n\rSavedFunction\022\032\n\022concr" + - "ete_functions\030\001 \003(\t\022/\n\rfunction_spec\030\002 \001" + - "(\0132\030.tensorflow.FunctionSpec\"9\n\016Captured" + - "Tensor\022\014\n\004name\030\001 \001(\t\022\031\n\021concrete_functio" + - "n\030\002 \001(\t\"\250\001\n\025SavedConcreteFunction\022\024\n\014bou" + - "nd_inputs\030\002 \003(\005\022B\n\035canonicalized_input_s" + - "ignature\030\003 \001(\0132\033.tensorflow.StructuredVa" + - "lue\0225\n\020output_signature\030\004 \001(\0132\033.tensorfl" + - "ow.StructuredValue\"\255\001\n\031SavedBareConcrete" + - "Function\022\036\n\026concrete_function_name\030\001 \001(\t" + - "\022\031\n\021argument_keywords\030\002 \003(\t\022$\n\034allowed_p" + - "ositional_arguments\030\003 \001(\003\022/\n\rfunction_sp" + - "ec\030\004 \001(\0132\030.tensorflow.FunctionSpec\"\"\n\rSa" + - "vedConstant\022\021\n\toperation\030\001 \001(\t\"\327\002\n\rSaved" + - "Variable\022#\n\005dtype\030\001 \001(\0162\024.tensorflow.Dat" + - "aType\022+\n\005shape\030\002 \001(\0132\034.tensorflow.Tensor" + - "ShapeProto\022\021\n\ttrainable\030\003 \001(\010\022<\n\017synchro" + - "nization\030\004 \001(\0162#.tensorflow.VariableSync" + - "hronization\0224\n\013aggregation\030\005 \001(\0162\037.tenso" + - "rflow.VariableAggregation\022\014\n\004name\030\006 \001(\t\022" + - "\016\n\006device\030\007 \001(\t\022O\n,experimental_distribu" + - "ted_variable_components\030\010 \003(\0132\031.tensorfl" + - "ow.SavedVariable\"\373\001\n\014FunctionSpec\0220\n\013ful" + - "largspec\030\001 \001(\0132\033.tensorflow.StructuredVa" + - "lue\022\021\n\tis_method\030\002 \001(\010\0224\n\017input_signatur" + - "e\030\005 \001(\0132\033.tensorflow.StructuredValue\0228\n\013" + - "jit_compile\030\006 \001(\0162#.tensorflow.FunctionS" + - "pec.JitCompile\"*\n\nJitCompile\022\013\n\007DEFAULT\020" + - "\000\022\006\n\002ON\020\001\022\007\n\003OFF\020\002J\004\010\003\020\004J\004\010\004\020\005\"\037\n\rSavedR" + - "esource\022\016\n\006device\030\001 \001(\t\"A\n\016SaveableObjec" + - "t\022\025\n\rsave_function\030\002 \001(\005\022\030\n\020restore_func" + - "tion\030\003 \001(\005B\224\001\n\036org.tensorflow.proto.fram" + - "eworkB\026SavedObjectGraphProtosP\001ZUgithub." 
+ - "com/tensorflow/tensorflow/tensorflow/go/" + - "core/protobuf/for_core_protos_go_proto\370\001" + - "\001b\006proto3" + "graph.proto\022\ntensorflow\032\031google/protobuf" + + "/any.proto\032,tensorflow/core/framework/te" + + "nsor_shape.proto\032%tensorflow/core/framew" + + "ork/types.proto\032(tensorflow/core/framewo" + + "rk/variable.proto\032(tensorflow/core/frame" + + "work/versions.proto\032%tensorflow/core/pro" + + "tobuf/struct.proto\0325tensorflow/core/prot" + + "obuf/trackable_object_graph.proto\"\350\001\n\020Sa" + + "vedObjectGraph\022&\n\005nodes\030\001 \003(\0132\027.tensorfl" + + "ow.SavedObject\022O\n\022concrete_functions\030\002 \003" + + "(\01323.tensorflow.SavedObjectGraph.Concret" + + "eFunctionsEntry\032[\n\026ConcreteFunctionsEntr" + + "y\022\013\n\003key\030\001 \001(\t\0220\n\005value\030\002 \001(\0132!.tensorfl" + + "ow.SavedConcreteFunction:\0028\001\"\336\006\n\013SavedOb" + + "ject\022R\n\010children\030\001 \003(\0132@.tensorflow.Trac" + + "kableObjectGraph.TrackableObject.ObjectR" + + "eference\022^\n\016slot_variables\030\003 \003(\0132F.tenso" + + "rflow.TrackableObjectGraph.TrackableObje" + + "ct.SlotVariableReference\0222\n\013user_object\030" + + "\004 \001(\0132\033.tensorflow.SavedUserObjectH\000\022\'\n\005" + + "asset\030\005 \001(\0132\026.tensorflow.SavedAssetH\000\022-\n" + + "\010function\030\006 \001(\0132\031.tensorflow.SavedFuncti" + + "onH\000\022-\n\010variable\030\007 \001(\0132\031.tensorflow.Save" + + "dVariableH\000\022G\n\026bare_concrete_function\030\010 " + + "\001(\0132%.tensorflow.SavedBareConcreteFuncti" + + "onH\000\022-\n\010constant\030\t \001(\0132\031.tensorflow.Save" + + "dConstantH\000\022-\n\010resource\030\n \001(\0132\031.tensorfl" + + "ow.SavedResourceH\000\0225\n\017captured_tensor\030\014 " + + "\001(\0132\032.tensorflow.CapturedTensorH\000\022F\n\020sav" + + "eable_objects\030\013 \003(\0132,.tensorflow.SavedOb" + + "ject.SaveableObjectsEntry\022\027\n\017registered_" + + "name\030\r \001(\t\0223\n\025serialized_user_proto\030\016 \001(" + + "\0132\024.google.protobuf.Any\032R\n\024SaveableObjec" + + "tsEntry\022\013\n\003key\030\001 \001(\t\022)\n\005value\030\002 \001(\0132\032.te" + + "nsorflow.SaveableObject:\0028\001B\006\n\004kindJ\004\010\002\020" + + "\003R\nattributes\"d\n\017SavedUserObject\022\022\n\niden" + + "tifier\030\001 \001(\t\022\'\n\007version\030\002 \001(\0132\026.tensorfl" + + "ow.VersionDef\022\024\n\010metadata\030\003 \001(\tB\002\030\001\"*\n\nS" + + "avedAsset\022\034\n\024asset_file_def_index\030\001 \001(\005\"" + + "\\\n\rSavedFunction\022\032\n\022concrete_functions\030\001" + + " \003(\t\022/\n\rfunction_spec\030\002 \001(\0132\030.tensorflow" + + ".FunctionSpec\"9\n\016CapturedTensor\022\014\n\004name\030" + + "\001 \001(\t\022\031\n\021concrete_function\030\002 \001(\t\"\250\001\n\025Sav" + + "edConcreteFunction\022\024\n\014bound_inputs\030\002 \003(\005" + + "\022B\n\035canonicalized_input_signature\030\003 \001(\0132" + + "\033.tensorflow.StructuredValue\0225\n\020output_s" + + "ignature\030\004 \001(\0132\033.tensorflow.StructuredVa" + + "lue\"\255\001\n\031SavedBareConcreteFunction\022\036\n\026con" + + "crete_function_name\030\001 \001(\t\022\031\n\021argument_ke" + + "ywords\030\002 \003(\t\022$\n\034allowed_positional_argum" + + "ents\030\003 \001(\003\022/\n\rfunction_spec\030\004 \001(\0132\030.tens" + + "orflow.FunctionSpec\"\"\n\rSavedConstant\022\021\n\t" + + "operation\030\001 \001(\t\"\327\002\n\rSavedVariable\022#\n\005dty" + + "pe\030\001 
\001(\0162\024.tensorflow.DataType\022+\n\005shape\030" + + "\002 \001(\0132\034.tensorflow.TensorShapeProto\022\021\n\tt" + + "rainable\030\003 \001(\010\022<\n\017synchronization\030\004 \001(\0162" + + "#.tensorflow.VariableSynchronization\0224\n\013" + + "aggregation\030\005 \001(\0162\037.tensorflow.VariableA" + + "ggregation\022\014\n\004name\030\006 \001(\t\022\016\n\006device\030\007 \001(\t" + + "\022O\n,experimental_distributed_variable_co" + + "mponents\030\010 \003(\0132\031.tensorflow.SavedVariabl" + + "e\"\373\001\n\014FunctionSpec\0220\n\013fullargspec\030\001 \001(\0132" + + "\033.tensorflow.StructuredValue\022\021\n\tis_metho" + + "d\030\002 \001(\010\0224\n\017input_signature\030\005 \001(\0132\033.tenso" + + "rflow.StructuredValue\0228\n\013jit_compile\030\006 \001" + + "(\0162#.tensorflow.FunctionSpec.JitCompile\"" + + "*\n\nJitCompile\022\013\n\007DEFAULT\020\000\022\006\n\002ON\020\001\022\007\n\003OF" + + "F\020\002J\004\010\003\020\004J\004\010\004\020\005\"\037\n\rSavedResource\022\016\n\006devi" + + "ce\030\001 \001(\t\"A\n\016SaveableObject\022\025\n\rsave_funct" + + "ion\030\002 \001(\005\022\030\n\020restore_function\030\003 \001(\005B\224\001\n\036" + + "org.tensorflow.proto.frameworkB\026SavedObj" + + "ectGraphProtosP\001ZUgithub.com/tensorflow/" + + "tensorflow/tensorflow/go/core/protobuf/f" + + "or_core_protos_go_proto\370\001\001b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.protobuf.AnyProto.getDescriptor(), org.tensorflow.proto.framework.TensorShapeProtos.getDescriptor(), org.tensorflow.proto.framework.TypesProtos.getDescriptor(), org.tensorflow.proto.framework.VariableProtos.getDescriptor(), @@ -199,7 +202,7 @@ public static void registerAllExtensions( internal_static_tensorflow_SavedObject_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_SavedObject_descriptor, - new java.lang.String[] { "Children", "SlotVariables", "UserObject", "Asset", "Function", "Variable", "BareConcreteFunction", "Constant", "Resource", "CapturedTensor", "SaveableObjects", "Kind", }); + new java.lang.String[] { "Children", "SlotVariables", "UserObject", "Asset", "Function", "Variable", "BareConcreteFunction", "Constant", "Resource", "CapturedTensor", "SaveableObjects", "RegisteredName", "SerializedUserProto", "Kind", }); internal_static_tensorflow_SavedObject_SaveableObjectsEntry_descriptor = internal_static_tensorflow_SavedObject_descriptor.getNestedTypes().get(0); internal_static_tensorflow_SavedObject_SaveableObjectsEntry_fieldAccessorTable = new @@ -272,6 +275,7 @@ public static void registerAllExtensions( com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_SaveableObject_descriptor, new java.lang.String[] { "SaveFunction", "RestoreFunction", }); + com.google.protobuf.AnyProto.getDescriptor(); org.tensorflow.proto.framework.TensorShapeProtos.getDescriptor(); org.tensorflow.proto.framework.TypesProtos.getDescriptor(); org.tensorflow.proto.framework.VariableProtos.getDescriptor(); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObjectOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObjectOrBuilder.java index 7a7ebf1b810..be025d44436 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObjectOrBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObjectOrBuilder.java @@ -258,5 +258,62 @@ org.tensorflow.proto.framework.SaveableObject getSaveableObjectsOrDefault( org.tensorflow.proto.framework.SaveableObject getSaveableObjectsOrThrow( java.lang.String key); + /** + *
    +   * The fields below are filled when the user serializes a registered Trackable
    +   * class. Registered classes may save additional metadata and supersede the
    +   * default loading process where nodes are recreated from the proto.
    +   * The name of the registered class of the form "{package}.{class_name}".
    +   * This field is used to search for the registered class at loading time.
+   * </pre>
+   *
+   * <code>string registered_name = 13;</code>
+   */
+  java.lang.String getRegisteredName();
+  /**
+   * <pre>
    +   * The fields below are filled when the user serializes a registered Trackable
    +   * class. Registered classes may save additional metadata and supersede the
    +   * default loading process where nodes are recreated from the proto.
    +   * The name of the registered class of the form "{package}.{class_name}".
    +   * This field is used to search for the registered class at loading time.
+   * </pre>
+   *
+   * <code>string registered_name = 13;</code>
+   */
+  com.google.protobuf.ByteString
+      getRegisteredNameBytes();
+
+  /**
+   * <pre>
    +   * The user-generated proto storing metadata for this object, to be passed to
    +   * the registered classes's _deserialize_from_proto method when this object is
    +   * loaded from the SavedModel.
+   * </pre>
+   *
+   * <code>.google.protobuf.Any serialized_user_proto = 14;</code>
+   */
+  boolean hasSerializedUserProto();
+  /**
+   * <pre>
    +   * The user-generated proto storing metadata for this object, to be passed to
    +   * the registered classes's _deserialize_from_proto method when this object is
    +   * loaded from the SavedModel.
+   * </pre>
+   *
+   * <code>.google.protobuf.Any serialized_user_proto = 14;</code>
+   */
+  com.google.protobuf.Any getSerializedUserProto();
+  /**
+   * <pre>
    +   * The user-generated proto storing metadata for this object, to be passed to
    +   * the registered class's _deserialize_from_proto method when this object is
    +   * loaded from the SavedModel.
    +   * 
    + * + * .google.protobuf.Any serialized_user_proto = 14; + */ + com.google.protobuf.AnyOrBuilder getSerializedUserProtoOrBuilder(); + public org.tensorflow.proto.framework.SavedObject.KindCase getKindCase(); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/Status.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/Status.java new file mode 100644 index 00000000000..cbc8a5b5e44 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/Status.java @@ -0,0 +1,478 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/protobuf/status.proto + +package org.tensorflow.proto.framework; + +public final class Status { + private Status() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistryLite registry) { + } + + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions( + (com.google.protobuf.ExtensionRegistryLite) registry); + } + public interface DerivedStatusOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.DerivedStatus) + com.google.protobuf.MessageOrBuilder { + } + /** + *
    +   * If included as a payload, this message flags the Status as a "derived"
    +   * Status. Used by StatusGroup to ignore certain Statuses when reporting
    +   * errors to end users.
    +   * 
    + * + * Protobuf type {@code tensorflow.DerivedStatus} + */ + public static final class DerivedStatus extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.DerivedStatus) + DerivedStatusOrBuilder { + private static final long serialVersionUID = 0L; + // Use DerivedStatus.newBuilder() to construct. + private DerivedStatus(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private DerivedStatus() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new DerivedStatus(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private DerivedStatus( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.framework.Status.internal_static_tensorflow_DerivedStatus_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.framework.Status.internal_static_tensorflow_DerivedStatus_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.framework.Status.DerivedStatus.class, org.tensorflow.proto.framework.Status.DerivedStatus.Builder.class); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.framework.Status.DerivedStatus)) { + return super.equals(obj); + } + org.tensorflow.proto.framework.Status.DerivedStatus other = (org.tensorflow.proto.framework.Status.DerivedStatus) obj; + + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if 
(memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.framework.Status.DerivedStatus parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.Status.DerivedStatus parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.Status.DerivedStatus parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.Status.DerivedStatus parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.Status.DerivedStatus parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.Status.DerivedStatus parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.Status.DerivedStatus parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.Status.DerivedStatus parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.framework.Status.DerivedStatus parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.Status.DerivedStatus parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.framework.Status.DerivedStatus parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.Status.DerivedStatus parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return 
DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.framework.Status.DerivedStatus prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
    +     * If included as a payload, this message flags the Status as a "derived"
    +     * Status. Used by StatusGroup to ignore certain Statuses when reporting
    +     * errors to end users.
    +     * 
    + * + * Protobuf type {@code tensorflow.DerivedStatus} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.DerivedStatus) + org.tensorflow.proto.framework.Status.DerivedStatusOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.framework.Status.internal_static_tensorflow_DerivedStatus_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.framework.Status.internal_static_tensorflow_DerivedStatus_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.framework.Status.DerivedStatus.class, org.tensorflow.proto.framework.Status.DerivedStatus.Builder.class); + } + + // Construct using org.tensorflow.proto.framework.Status.DerivedStatus.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.framework.Status.internal_static_tensorflow_DerivedStatus_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.framework.Status.DerivedStatus getDefaultInstanceForType() { + return org.tensorflow.proto.framework.Status.DerivedStatus.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.framework.Status.DerivedStatus build() { + org.tensorflow.proto.framework.Status.DerivedStatus result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.framework.Status.DerivedStatus buildPartial() { + org.tensorflow.proto.framework.Status.DerivedStatus result = new org.tensorflow.proto.framework.Status.DerivedStatus(this); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.framework.Status.DerivedStatus) { + return 
mergeFrom((org.tensorflow.proto.framework.Status.DerivedStatus)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.framework.Status.DerivedStatus other) { + if (other == org.tensorflow.proto.framework.Status.DerivedStatus.getDefaultInstance()) return this; + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.framework.Status.DerivedStatus parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.framework.Status.DerivedStatus) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.DerivedStatus) + } + + // @@protoc_insertion_point(class_scope:tensorflow.DerivedStatus) + private static final org.tensorflow.proto.framework.Status.DerivedStatus DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.framework.Status.DerivedStatus(); + } + + public static org.tensorflow.proto.framework.Status.DerivedStatus getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DerivedStatus parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DerivedStatus(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.framework.Status.DerivedStatus getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_DerivedStatus_descriptor; + private static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_DerivedStatus_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n%tensorflow/core/protobuf/status.proto\022" + + "\ntensorflow\"\017\n\rDerivedStatusB \n\036org.tens" + + "orflow.proto.frameworkb\006proto3" + }; + descriptor = com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }); + 
internal_static_tensorflow_DerivedStatus_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_tensorflow_DerivedStatus_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_DerivedStatus_descriptor, + new java.lang.String[] { }); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/StructProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/StructProtos.java index 7f3f81e4d34..eb4febd9720 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/StructProtos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/StructProtos.java @@ -117,22 +117,23 @@ public static void registerAllExtensions( "hapeProto\022#\n\005dtype\030\003 \001(\0162\024.tensorflow.Da" + "taType\022(\n\007minimum\030\004 \001(\0132\027.tensorflow.Ten" + "sorProto\022(\n\007maximum\030\005 \001(\0132\027.tensorflow.T" + - "ensorProto\"\333\003\n\rTypeSpecProto\022@\n\017type_spe" + + "ensorProto\"\370\003\n\rTypeSpecProto\022@\n\017type_spe" + "c_class\030\001 \001(\0162\'.tensorflow.TypeSpecProto" + ".TypeSpecClass\022/\n\ntype_state\030\002 \001(\0132\033.ten" + "sorflow.StructuredValue\022\034\n\024type_spec_cla" + - "ss_name\030\003 \001(\t\"\270\002\n\rTypeSpecClass\022\013\n\007UNKNO" + - "WN\020\000\022\026\n\022SPARSE_TENSOR_SPEC\020\001\022\027\n\023INDEXED_" + - "SLICES_SPEC\020\002\022\026\n\022RAGGED_TENSOR_SPEC\020\003\022\025\n" + - "\021TENSOR_ARRAY_SPEC\020\004\022\025\n\021DATA_DATASET_SPE" + - "C\020\005\022\026\n\022DATA_ITERATOR_SPEC\020\006\022\021\n\rOPTIONAL_" + - "SPEC\020\007\022\024\n\020PER_REPLICA_SPEC\020\010\022\021\n\rVARIABLE" + - "_SPEC\020\t\022\026\n\022ROW_PARTITION_SPEC\020\n\022\030\n\024REGIS" + - "TERED_TYPE_SPEC\020\014\022\027\n\023EXTENSION_TYPE_SPEC" + - "\020\r\"\004\010\013\020\013B\207\001\n\036org.tensorflow.proto.framew" + - "orkB\014StructProtosP\001ZUgithub.com/tensorfl" + - "ow/tensorflow/tensorflow/go/core/protobu" + - "f/for_core_protos_go_protob\006proto3" + "ss_name\030\003 \001(\t\022\033\n\023num_flat_components\030\004 \001" + + "(\005\"\270\002\n\rTypeSpecClass\022\013\n\007UNKNOWN\020\000\022\026\n\022SPA" + + "RSE_TENSOR_SPEC\020\001\022\027\n\023INDEXED_SLICES_SPEC" + + "\020\002\022\026\n\022RAGGED_TENSOR_SPEC\020\003\022\025\n\021TENSOR_ARR" + + "AY_SPEC\020\004\022\025\n\021DATA_DATASET_SPEC\020\005\022\026\n\022DATA" + + "_ITERATOR_SPEC\020\006\022\021\n\rOPTIONAL_SPEC\020\007\022\024\n\020P" + + "ER_REPLICA_SPEC\020\010\022\021\n\rVARIABLE_SPEC\020\t\022\026\n\022" + + "ROW_PARTITION_SPEC\020\n\022\030\n\024REGISTERED_TYPE_" + + "SPEC\020\014\022\027\n\023EXTENSION_TYPE_SPEC\020\r\"\004\010\013\020\013B\207\001" + + "\n\036org.tensorflow.proto.frameworkB\014Struct" + + "ProtosP\001ZUgithub.com/tensorflow/tensorfl" + + "ow/tensorflow/go/core/protobuf/for_core_" + + "protos_go_protob\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, @@ -206,7 +207,7 @@ public static void registerAllExtensions( internal_static_tensorflow_TypeSpecProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_TypeSpecProto_descriptor, - new java.lang.String[] { "TypeSpecClass", "TypeState", "TypeSpecClassName", }); + new java.lang.String[] { "TypeSpecClass", 
"TypeState", "TypeSpecClassName", "NumFlatComponents", }); org.tensorflow.proto.framework.TensorProtos.getDescriptor(); org.tensorflow.proto.framework.TensorShapeProtos.getDescriptor(); org.tensorflow.proto.framework.TypesProtos.getDescriptor(); diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TypeSpecProto.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TypeSpecProto.java index 5701328f21f..6958037d582 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TypeSpecProto.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TypeSpecProto.java @@ -79,6 +79,11 @@ private TypeSpecProto( typeSpecClassName_ = s; break; } + case 32: { + + numFlatComponents_ = input.readInt32(); + break; + } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { @@ -510,6 +515,19 @@ public java.lang.String getTypeSpecClassName() { } } + public static final int NUM_FLAT_COMPONENTS_FIELD_NUMBER = 4; + private int numFlatComponents_; + /** + *
    +   * The number of flat tensor components required by this TypeSpec.
    +   * 
    + * + * int32 num_flat_components = 4; + */ + public int getNumFlatComponents() { + return numFlatComponents_; + } + private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -533,6 +551,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (!getTypeSpecClassNameBytes().isEmpty()) { com.google.protobuf.GeneratedMessageV3.writeString(output, 3, typeSpecClassName_); } + if (numFlatComponents_ != 0) { + output.writeInt32(4, numFlatComponents_); + } unknownFields.writeTo(output); } @@ -553,6 +574,10 @@ public int getSerializedSize() { if (!getTypeSpecClassNameBytes().isEmpty()) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, typeSpecClassName_); } + if (numFlatComponents_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(4, numFlatComponents_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -576,6 +601,8 @@ public boolean equals(final java.lang.Object obj) { } if (!getTypeSpecClassName() .equals(other.getTypeSpecClassName())) return false; + if (getNumFlatComponents() + != other.getNumFlatComponents()) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -595,6 +622,8 @@ public int hashCode() { } hash = (37 * hash) + TYPE_SPEC_CLASS_NAME_FIELD_NUMBER; hash = (53 * hash) + getTypeSpecClassName().hashCode(); + hash = (37 * hash) + NUM_FLAT_COMPONENTS_FIELD_NUMBER; + hash = (53 * hash) + getNumFlatComponents(); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -742,6 +771,8 @@ public Builder clear() { } typeSpecClassName_ = ""; + numFlatComponents_ = 0; + return this; } @@ -775,6 +806,7 @@ public org.tensorflow.proto.framework.TypeSpecProto buildPartial() { result.typeState_ = typeStateBuilder_.build(); } result.typeSpecClassName_ = typeSpecClassName_; + result.numFlatComponents_ = numFlatComponents_; onBuilt(); return result; } @@ -833,6 +865,9 @@ public Builder mergeFrom(org.tensorflow.proto.framework.TypeSpecProto other) { typeSpecClassName_ = other.typeSpecClassName_; onChanged(); } + if (other.getNumFlatComponents() != 0) { + setNumFlatComponents(other.getNumFlatComponents()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -1183,6 +1218,44 @@ public Builder setTypeSpecClassNameBytes( onChanged(); return this; } + + private int numFlatComponents_ ; + /** + *
    +     * The number of flat tensor components required by this TypeSpec.
    +     * 
    + * + * int32 num_flat_components = 4; + */ + public int getNumFlatComponents() { + return numFlatComponents_; + } + /** + *
    +     * The number of flat tensor components required by this TypeSpec.
    +     * 
    + * + * int32 num_flat_components = 4; + */ + public Builder setNumFlatComponents(int value) { + + numFlatComponents_ = value; + onChanged(); + return this; + } + /** + *
    +     * The number of flat tensor components required by this TypeSpec.
    +     * 
    + * + * int32 num_flat_components = 4; + */ + public Builder clearNumFlatComponents() { + + numFlatComponents_ = 0; + onChanged(); + return this; + } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TypeSpecProtoOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TypeSpecProtoOrBuilder.java index 758545623cd..eae32d163d9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TypeSpecProtoOrBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TypeSpecProtoOrBuilder.java @@ -72,4 +72,13 @@ public interface TypeSpecProtoOrBuilder extends */ com.google.protobuf.ByteString getTypeSpecClassNameBytes(); + + /** + *
    +   * The number of flat tensor components required by this TypeSpec.
    +   * 
    + * + * int32 num_flat_components = 4; + */ + int getNumFlatComponents(); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/profiler/ProfileOptions.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/profiler/ProfileOptions.java index ac974d89c17..f66bb60e091 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/profiler/ProfileOptions.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/profiler/ProfileOptions.java @@ -159,6 +159,10 @@ public enum DeviceType * TPU = 3; */ TPU(3), + /** + * PLUGGABLE_DEVICE = 4; + */ + PLUGGABLE_DEVICE(4), UNRECOGNIZED(-1), ; @@ -178,6 +182,10 @@ public enum DeviceType * TPU = 3; */ public static final int TPU_VALUE = 3; + /** + * PLUGGABLE_DEVICE = 4; + */ + public static final int PLUGGABLE_DEVICE_VALUE = 4; public final int getNumber() { @@ -202,6 +210,7 @@ public static DeviceType forNumber(int value) { case 1: return CPU; case 2: return GPU; case 3: return TPU; + case 4: return PLUGGABLE_DEVICE; default: return null; } } @@ -278,6 +287,8 @@ public int getVersion() { * DeviceType::CPU: only CPU will be profiled. * DeviceType::GPU: only CPU/GPU will be profiled. * DeviceType::TPU: only CPU/TPU will be profiled. + * DeviceType::PLUGGABLE_DEVICE: only CPU/pluggable devices with profilers + * will be profiled. *
    * * .tensorflow.ProfileOptions.DeviceType device_type = 6; @@ -292,6 +303,8 @@ public int getDeviceTypeValue() { * DeviceType::CPU: only CPU will be profiled. * DeviceType::GPU: only CPU/GPU will be profiled. * DeviceType::TPU: only CPU/TPU will be profiled. + * DeviceType::PLUGGABLE_DEVICE: only CPU/pluggable devices with profilers + * will be profiled. *
    * * .tensorflow.ProfileOptions.DeviceType device_type = 6; @@ -963,6 +976,8 @@ public Builder clearVersion() { * DeviceType::CPU: only CPU will be profiled. * DeviceType::GPU: only CPU/GPU will be profiled. * DeviceType::TPU: only CPU/TPU will be profiled. + * DeviceType::PLUGGABLE_DEVICE: only CPU/pluggable devices with profilers + * will be profiled. *
    * * .tensorflow.ProfileOptions.DeviceType device_type = 6; @@ -977,6 +992,8 @@ public int getDeviceTypeValue() { * DeviceType::CPU: only CPU will be profiled. * DeviceType::GPU: only CPU/GPU will be profiled. * DeviceType::TPU: only CPU/TPU will be profiled. + * DeviceType::PLUGGABLE_DEVICE: only CPU/pluggable devices with profilers + * will be profiled. *
    * * .tensorflow.ProfileOptions.DeviceType device_type = 6; @@ -993,6 +1010,8 @@ public Builder setDeviceTypeValue(int value) { * DeviceType::CPU: only CPU will be profiled. * DeviceType::GPU: only CPU/GPU will be profiled. * DeviceType::TPU: only CPU/TPU will be profiled. + * DeviceType::PLUGGABLE_DEVICE: only CPU/pluggable devices with profilers + * will be profiled. *
    * * .tensorflow.ProfileOptions.DeviceType device_type = 6; @@ -1009,6 +1028,8 @@ public org.tensorflow.proto.profiler.ProfileOptions.DeviceType getDeviceType() { * DeviceType::CPU: only CPU will be profiled. * DeviceType::GPU: only CPU/GPU will be profiled. * DeviceType::TPU: only CPU/TPU will be profiled. + * DeviceType::PLUGGABLE_DEVICE: only CPU/pluggable devices with profilers + * will be profiled. *
    * * .tensorflow.ProfileOptions.DeviceType device_type = 6; @@ -1029,6 +1050,8 @@ public Builder setDeviceType(org.tensorflow.proto.profiler.ProfileOptions.Device * DeviceType::CPU: only CPU will be profiled. * DeviceType::GPU: only CPU/GPU will be profiled. * DeviceType::TPU: only CPU/TPU will be profiled. + * DeviceType::PLUGGABLE_DEVICE: only CPU/pluggable devices with profilers + * will be profiled. *
    * * .tensorflow.ProfileOptions.DeviceType device_type = 6; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/profiler/ProfileOptionsOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/profiler/ProfileOptionsOrBuilder.java index ca6fa82f415..c402a805678 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/profiler/ProfileOptionsOrBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/profiler/ProfileOptionsOrBuilder.java @@ -25,6 +25,8 @@ public interface ProfileOptionsOrBuilder extends * DeviceType::CPU: only CPU will be profiled. * DeviceType::GPU: only CPU/GPU will be profiled. * DeviceType::TPU: only CPU/TPU will be profiled. + * DeviceType::PLUGGABLE_DEVICE: only CPU/pluggable devices with profilers + * will be profiled. * * * .tensorflow.ProfileOptions.DeviceType device_type = 6; @@ -37,6 +39,8 @@ public interface ProfileOptionsOrBuilder extends * DeviceType::CPU: only CPU will be profiled. * DeviceType::GPU: only CPU/GPU will be profiled. * DeviceType::TPU: only CPU/TPU will be profiled. + * DeviceType::PLUGGABLE_DEVICE: only CPU/pluggable devices with profilers + * will be profiled. * * * .tensorflow.ProfileOptions.DeviceType device_type = 6; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/profiler/ProfilerOptionsProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/profiler/ProfilerOptionsProtos.java index de30001ade0..ad5a639d22a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/profiler/ProfilerOptionsProtos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/profiler/ProfilerOptionsProtos.java @@ -34,7 +34,7 @@ public static void registerAllExtensions( static { java.lang.String[] descriptorData = { "\n/tensorflow/core/profiler/profiler_opti" + - "ons.proto\022\ntensorflow\"\355\002\n\016ProfileOptions" + + "ons.proto\022\ntensorflow\"\203\003\n\016ProfileOptions" + "\022\017\n\007version\030\005 \001(\r\022:\n\013device_type\030\006 \001(\0162%" + ".tensorflow.ProfileOptions.DeviceType\022\033\n" + "\023include_dataset_ops\030\001 \001(\010\022\031\n\021host_trace" + @@ -42,15 +42,16 @@ public static void registerAllExtensions( "(\r\022\033\n\023python_tracer_level\030\004 \001(\r\022\030\n\020enabl" + "e_hlo_proto\030\007 \001(\010\022\032\n\022start_timestamp_ns\030" + "\010 \001(\004\022\023\n\013duration_ms\030\t \001(\004\022\027\n\017repository" + - "_path\030\n \001(\t\"8\n\nDeviceType\022\017\n\013UNSPECIFIED" + - "\020\000\022\007\n\003CPU\020\001\022\007\n\003GPU\020\002\022\007\n\003TPU\020\003\"\320\001\n#Remote" + - "ProfilerSessionManagerOptions\0224\n\020profile" + - "r_options\030\001 \001(\0132\032.tensorflow.ProfileOpti" + - "ons\022\031\n\021service_addresses\030\002 \003(\t\022%\n\035sessio" + - "n_creation_timestamp_ns\030\003 \001(\004\022\037\n\027max_ses" + - "sion_duration_ms\030\004 \001(\004\022\020\n\010delay_ms\030\005 \001(\004" + - "B8\n\035org.tensorflow.proto.profilerB\025Profi" + - "lerOptionsProtosP\001b\006proto3" + "_path\030\n \001(\t\"N\n\nDeviceType\022\017\n\013UNSPECIFIED" + + "\020\000\022\007\n\003CPU\020\001\022\007\n\003GPU\020\002\022\007\n\003TPU\020\003\022\024\n\020PLUGGAB" + + "LE_DEVICE\020\004\"\320\001\n#RemoteProfilerSessionMan" + + "agerOptions\0224\n\020profiler_options\030\001 \001(\0132\032." 
+ + "tensorflow.ProfileOptions\022\031\n\021service_add" + + "resses\030\002 \003(\t\022%\n\035session_creation_timesta" + + "mp_ns\030\003 \001(\004\022\037\n\027max_session_duration_ms\030\004" + + " \001(\004\022\020\n\010delay_ms\030\005 \001(\004B8\n\035org.tensorflow" + + ".proto.profilerB\025ProfilerOptionsProtosP\001" + + "b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/resources/ops.pb b/tensorflow-core/tensorflow-core-api/src/gen/resources/ops.pb index 523f9d0a0c7d89f3f38e9c98e61be63d62aac4a2..1b3d72757c53a4dca894cc28cad84bcedb7fe121 100644 GIT binary patch delta 18234 zcmeHv33yf2wSUjK)4BU5cfKbBAslXzA%qJF4h&LA7z7l;5K+UB9CA;RD|gBrfCzYj zsyL%XU24=Syei75h}+=_($)@G2lVyXKC9LCwZ7*BRconQ{jYt_9g}b!I=ueBuYR8p z_St*wwb$Bvt+jt^?QA*G^0OCP_TM(cv)M$P4dF<*ClpD>XUp-fW_e{0x|b3+Z2zTu z1f*6H!?-=h0XivmU8Q@V_L`+|kIp>!>?zG%iWF1gkz}l0k-FrtKd4BFh?KzQu(BrD zBelmAIidLRTfA#b0w>P)hlPH!Dm7-h+958|V+a4; zhZAX};_FQL(!;}%1P+#hk;p3Snv4=0jq;L1ii9)fL?eadu-vJ{M)~p>Omi>E$wVY1 zCj#wqFxW%E|?T+^i>1zcYr2I5jQA_l?*Alwy%JV?y%9IB_KAS7OWrhLl*Ra*D~MhG3AS$u8_QRD3ft zeI%LaKR-6&r0V>sCiUkG1Gv%B&5#A4xP5xG;1qy*ZR?jNO>xe{7OdvO^45G z&nVEDR%E@AaLrDxnf*QA$<42CEL5iDj66Et;q#NBmAKeQhqOlSiRTs_#m1~?ef2Lv zH!+DRo_xdmDAyECpuFXv#P$Un8VhuGC8VhA_apacJ@UsglJP_=5Y?8*yb7|8gH_@B ziKbH3XrbNA(g-DyG_!rm+I9uk&&K&v8C^?p^e=2?)~pd?Ml2ajkZ3G`D$Gn8W#Y^b z^-UVBZ9b;&yge}RZfg9Et^tjA6wPFyy-S@Xg*zp!6*h4Jk*u27m(IXyG_6d~@n|rR zh!Z)1B3Oxjs#X&Q3MnJ4NLre~o?7%KXq97u1kRJUGEGG2iX4e9+(;xlG!cP~ZoBC7 z5t4DKA({CasZ$9nG49x`s%a>zIeXTTJZ8@`6I!kStP%BMTTk7yz-FUUFjt$gC*5S}PHkMIr%ZZ>Z zJqHe|$B!xe%2wN!Sxn}89b-Wq%p{!n^3|oxE+6Y_AtV0={ zYm1P0f?n(5c5(e^bke&#+)W(!T{^yudy>=bU5{3=If8#I8Sf;-pl&jBElxWrX^&tC6~+)G5s9JaaT9xI zERu}2%BrhEBc6b_4)E1RE9c^MW_bP(KWY`_2d0g_We6GEo}Ja#p8>BmJ-su{MO7gS z&H{$K!?ul^G7L69#Fs(CL4LGj+Pq{UiT@8t>-nMX-60^1`Rw+H@I;TMN zF*&>nf3gyyePD*aTvyp|Kf#JUeAALw_%EIW86HDM56JMqTzTQr!+e?5TcUB}RCxl_ zR%LZf(M<)`gZzs5z8qC{l~I?Gj&CxD2E7ck)@l&bl9ND-I#WyD%E=)({y2Z}X)o!C zBPYxSN(?V2?4nmqs&WgXYo6fic>ZGpec(ylS)(4%sx}&n@G884xr~LewMYR|LND?SVbye&(nMb z?7V@L)BAtOH;m%mhqvG5uPI`ke+E78@jogssdVabUT4t%+C<8r>5q8Amw(9rWW0AI z8H|x*E8Zj9m0(caBdzVh2wr4b+q(kMxKoF5c_m-|4b@aXYed-YpNtJ;>>ptG7cUd`}4(ilZ?B%!U;VF;49OirUtBU)Q-_gS! 
zy)EmORF}EsIZ==Q8|ZVQaigE#ZGr{sjE9Oy??viZZ(N%tVq|`I(tLQ~65}Yi%WJgJ zA8auC3ICB1c3)|%gg5`)V23-dG0uY9t~8bu=kFD|>q_HUd`2@mBn@?M63;DeqY>s+ zy1h_+jd1~guO6b;7;hR<)Ng*kvrbho;#%YXW(cYaruOTM^_7+@OvE}1RXR_O0_Wfc zfn79L!`Xtd99{@oR#cuc>^ML_yU%!IcKs@o8zYuE^yT}FXYS>17pUz*(-yAyNED+O1BLv55ca%8tP(lcuzsxk=W!X|f+~=^Eqp=4*Tj&@H z`xVn!Aa$6g(AJj?GFN2Fy+beWHhtWGUqIWrZWBEFbAuT^e#P(zy(nq=<0k%lW?1ki z(`!ZO?V@!jO)q7opHty^ADgc6q&D)TVon@G_L+D)Q*Tu{C*sH8p(+q>WoALid~OY7 z9jd>aY&Ra}iV#4dVDyCJeW#eCL-E&~U3?N}Q5GgOT+pSk2t#7mj_e{BV3)@z>_5p@ z)0b^R-8}C1)Vy3ckE=S>iP~=b+ze#7@J+gYg|MPf5$fJQ`k~<+j#}G=l|%JA1mcAU zuNS=cU3!(E*Z)EwqoCmyp-h+;VcRJgf_uIr)WY5P5iWd0*r>nU)UU_$y}~eCgJ159 z$^PuH@L^#Vys%9WVEYroPcn+rS5NstOl`ok*Y$8J$r$ZriNVtv8*e3j$r@y{kSR?2^Gm%DJ#E%8H$c*QmbCJWZc5 zY-|Tp?tcthIC`|o+{Kw*F%UAaWxEWbZyAObIUGNv^+U`v3J=GLw+(?UuSsV5nndnj z;QKyCiWs3QYtM_cuVUti1wd=d-58n>^~csAekk``&FO^yYNGqP$<3EoUe^`D$CXUcL_XGW%;zo-rpk7X9ioEbs1@EtPB}ydB3FOh=I5l$T7UDC_(M& zI>l%CNfAa4P+_cPY2mWq`YKBmO^vg>RjPl#3_}`ug-}61IbpD$+Kp^C?($@M9-L~S z#te5kYxeLg%UT`Z>jJsWQU$fkg$u#jY#FQiHyC@a#Z_d@Z`;*Tbmh60lLgxh)bqZ5 zf#oBfyBXTrEQ8_XM2CmD&mqvZTo}gY*k42dKc`2UEqe!G_w_Ezcpbd8!s6zy8pLKF zOb54GrWLbFHQMk@XuM1f|%<4luNw`Fgvt41%sg|9k zW8)lywPpS+YB^bCh4<4RcUv6Uy_O?Mf03{})@y#(Kx_+QN{0eN?$uYr{xYi`LK7S% zYJ>!BiyVz0I!YE5nc-8q%2Beqkm=W*CEVp(_T5%O_uW=%8m)gyAf?d28>7V|k$rUB z)1{lUq_lAEba?wmr9*3R1@K@O(DX zJa~V_sbIMCX|69A4sZCat_B2A8MmhZqtnPY@_j1kIj&;Qvzs*Rxi7S=A(;%1%dIhk zG1s!Vs}DHtgN=j4ss&k$c~yaM@&x5wIKLmjtSzJfB=$**yX2?>e-LzT7pC|yV@CXe z)q!{bvlqVl2UPUAouSWLzasjK7_qjvG>D48upEu52{}aG6{0f3GF=RT4#YwF(B(M| zqs%PQtZ{=48Ez(>t?Dce+%T7GLILqp0LiZu0Lki%aR%I$X&`JS6VM+)&J(1Rhae+l zn#G~}qQ_c}GYyz)YDVFzNe;rYc-ikvsb?3aJpQIIat4?kUn{9#G6QKDhFmL6_(E{_ z%VWbUi-N-{8q63_L25*v$Ehu6upbz>tzz1YF@&$R7KDzqKIs+&vFS~JE;ahNleBk) zRXVjdR8Rlo^vFo-zO3EV+;nub_4O+LJ{>HYZS9o2m82wnt-<7g(q4!t-_665v#kfb z^-`Z+1!LHSmqzDUj|$u|n7qikzKEfIoIbwDI<=2oM3A}yUKl43TGecAZQ?I00r}so zF1TTb^?CkoLh&C%|CQJMXUOYP%ZnR_>pC&2sG$GY7^#U>lnI zpj~2W51@BUUt%a;V(K_*zryxi9UXqX?OcKT9klk?%3#dhwkmjLr_BLtdu>kQP6uv_tz2!Ou{&(LdHUWC+YEu;vETNhj(Q)nz0A>>Puh;^=z~w&!aCaV ztSw~c@3g_sU$sT~T~>O|YqlXA_bF_8&Q?jCuiNSahCk_uWikBWgrOQb=h*K>R0{ts zuU2MgpalY*rCI|F-Dw;Ni|5);816EV(#0@zIo{E5%;_bSgc!8~m~s!ieyLCebdG%? zOzSo^!u}h1gX3}?4#*K*QV&&g?Fnd=9WHQQF{&yf3OPA z7;$0*@HCL;@CfS!m?9YWgLkQ2DiZ%*p{KK!+E*2xT>9WL`wy$RpTg>O_SM{r@a{T$ z6>NIj;c+*ssd1JQMP!EMw0+(waL-o3RW@pg989u|BE}GyeZhhR?1HJoKkO=Xm3vPo zhB))RPVKMCVRd6>pf7}mC*6bL*7f#pm(M}w*{HbGmW(3&qOc?!BXH!H*{zL&V>g#p znhsQ)&%VRZ6($!eQ%TPT)BfW2n6df=^XlkmLEdc_A^rhzq}tM~w)wL8OU*yAx|&SN zFO#Fp)%Uci47W)=kt9Y%tk$PT^WH35ix2|>515(9LLp2jr*p(@i4I?R%UBOBMrcj*-HP|SHlUZ44tQdd zeJhXOHvP9-g6`kCJ+N`J{VXfvBct+26b@~+533`X4%W5`YDfj~e3+NP3JO;9fKpf1 z%AqJ$cwuo6h6^ZSIQF4?5Fu%w6_?f{HZh&B zz25ew+h>zl_GZlZ^n`>Szuew%kAAlSbF6niVvwMDgkwt)*YzfSZG^+$=KwU0ay-i; z8f~w2yjbM)oG^i-)=@z}uXWTFUYh|g1=n#0;Lte7{}cys_vqm8IL9FRlRC$QLN4%h zH(2%&VqM&o?C1z8+R@Hjqe`Kv*5QV!)9s~j+jvK3ktO~o)t&BmI?JGT_n8OXPOc2z zILpySUc#h}Hi?$5&CC%lejB<2-k9Kcx!^K{JP(GSPjuM1kKp$c9sf~$?U(83kx7oa ze#@?*RcAZiFyoc%vzd-dimdEzx^9+ZWOlrY38+Z=n@%Hr@*Kw=k^79UTH*LXF_7tQ zTG!@?WyM1=i+wS-#jH4}S#j=ol)u&l(P~!({CKeId)!WXakcBEv7qz2+%R^&%Lo_D zcOBrLGC_Bfs~T#WT+<-bw7Ue)qcKy57jc)%Ux%Wp<0ID#Y6=S%he$ zXFuRRej)#|4#wy_8_)8(iGbj9M<=9|&kpC1l%C*g*pBv6C~hB%+x?tu)A>{e&V0V_OppGObZ zc#Y%f+r6IdR{gI$h>ED`hr2%YY%ZeRUi!_yc*N}HS6MqaQGC}}@;#mi7ohEqtZ9lu z$0wdkhxBr}77Vd$q?F}JeA!MPf8t5e;U-bErFI%g2^IumjfR#Ub)K6kl$hEOi(&Z? 
zmQP`UX(T*G2}KiFs)~+G^+q!Ls)i9UHFuoBB4Uc_=`i5HLXX+~%Q~F(YSJZ+o-MIP z8Ht5aT16G}PAVle@vVAC1O>EuIA6IALY!`BN+){f5VJO6Yb2VvEqM{k!TdEFiFJY@ zEap;EqrOR4a_H2}3xx3xc`g2t3_5iUe!o*cPm#kl(giU&ss;$sh37S}5BIs zM`HE!B*`B^7mdCiFVQlW0Ebtsf_*(=c?x~L#t3G-u!yEWigu0(U9EP^5cM`?1glk` zRQ{Fn)%m;^VwK8i^#$_$QsRUon;caueffgS$@G-6{6c9p_K{Yeut-i=+qO1T@*fBw zmy2$?NE9D1*xoP`3+};8DJXoaf((ZDs`MqWwt|#?nF7?*cH{4a0wTEWt)zr$AVb!B zIp4QBvhsayPJZcM8VKu;aIqk-Sox>rYuA6Co5tYMQP^Kq|EcPA-l-}*8J5?s)$snS z&=c#1GX@K{W)9$s`x=iH;B!pt|Byn@%vFck!E_C4dQYsGf=?L`%*|GM{!Nj&Y`tU# z75o3az%x_U`B*72X;x*mvyz&t#LJsfKQs|LwrOTQeUbeKdb}3q^1hUbMvv#LC63J9 zGYddFJu|XcbnXWKck++J3jJe&)N(#@Va`Vyn3K#i`|w#vG^WyvKsUKr-(URbDzPqv z8R`Q1e>-<+&cq|CyG(s`cZtx+K$_^UoXUZfQ-f9ccziJ`aT+6VVzSQK@l}2Am(oOI zo+buTgpg4Ly5I^|OMm|9b*sed4E%5PFeN4qfyv$C>D+LLc8kMF{SDkac@@5X2*b4N z#A>LFi!}mC*WKR|6DK43baqVKlIh?01^p+&b#XDCdd*Cn>^z1$8v}`Rv?}#T%&8Ew z9PEr>>3dhmxuh8_;(|r<&p})v-NxK8f+Dr}4uZv^1YsA%7W|QruOZXLw{F4gIp;N9 zSSvNpUNm#&tVI{qN)6{XH?=OFdEU(F%^TPOOQuL^x&Wm$jv=GsT$wJc!LodY)u1<4 z(VN2AebOv!&ymGETpaCl5TA14vo0xl=IKl8m#fA5Y`n$>#?Y*N{BmD;nR7x0WLq|Z ztxek-NCR=V3__Kw*jFQWud!MEj4=(-y0@~L*fNYV(_71H@F#kr3Ip)eADnu4((kG? zX+FdU&xKsWr`8N!%+`gMwYVnHmB@Q~sz}UNe+Ad)#24xXxZz?s?DC6^jp0rlnj5*@ zxXmz-Ch>54h)X@(g`~V;I9BT_Yxz4V*Snd6CbO|RRrwFlsQ0XlKGUFSPOB5x);P6V zYs&N0>C96taCXiG5BXiE!7@b@;0Kd*;h`qQ=)}UL*Ht{sLX{%Uf^A08UWH|fM5*&id&DYF@JzK@;DQS-S@)1o)9JikO zE&wyGav0z9utO$G@#IP@BviM)hDB!G9o`ZVI9?TqsxNo)W@yDO@mlrG2n(1{$SO-2 zs_Y_|Ig>-!y)uoy`Vr&A6O9G4EaZ!$OTx0u54Vc#we=$lR-n>%=C4U@sl=r7q{3yP zrTafwR^_ReMSE`$m-3@;H4}oyRn1RhT|?djjIUkQfcLDsgnX)a&B8$Xr}}v`Gv&`sp7>8HLBLW+yl6u7Tk-{S7mni3W^^3 zlr5wRU%utlEwJ0n*Qr}jk)gc01=6<|rSSFJU-|s)A5*uG`uo={u)1hGJLFA5%2cBe z^3O`L*ICS(gJqjI(}YD$I<$(=&?~sC0~mBzuuYuL@7KY?TM@c>c$?_uQkbbz*wb8= zUQ^LGO#h(6i{d3GD#zHC-IbJ4`BPhH}*szuk*beJ;Jl4F5T{*y>8*QCCaS0BzJ zPnn1J>8TKmL(}~{zCC#5Ch@Wv^%~<&#(kxw{PWCz+*dH_uyC7rp04qqWp%2$45+U= z%y9e`G4zGX;cI4te6v^&H-K2iKf=TPApUy_tC+r`dels9KlS9D+jJ`Mi`Davp1_G) zFksw#t9ZrCzlDx+D-?B&|MyE&bwtJ6#XC*>LuPvL97H zgzzaZzFm%JcjqvgzjQu2&VGM`8n*kG$5x9HF>9|~h?mr}EVe9VNU3Kco{UB#F?<(+ zh4uI=1z04GzX~Kv*r(rad{Yk_cZhBS&+gas&K=_Xe;V%2NL!;8Ow0!ot!hS`+-@eW z0T%C+GN>&ayGQi6>M^Y_nOJfI<4$;Qhgf695BRzxQ?0qJ``!u&Z5Oxm?-Xi;$&%%@ zqj+INZDgd1n&>xq`j?2LWzBAbM9mDOHRlg>bYNCIoX}Kek^!T%d9^0q8|?J5XC_=3nh>*4tRHmSPPp5iG%q1 zW(aQ=%iyMq#Rz==-^5Y>EW!|dnb+4X`K;4b-xD9RnYI}TX$eHZ@swdStayVo!nd3T b>#1KrouoStipD;P&oi~e=Z_x|r*Z!el5(w0 delta 8590 zcmb7J3wRS%_W$n9WO6fUGm~Z#`l6kdXA3R12+EsMup&}G_#+@o8`_}_v`tCUTHaI_ zSXmIT*khpx_+SO4ML{o0Ek%o>z`6*cuhhMk%Zg^^d0l>WiIS?!^v(DB7iB1E%52r|%FCSPU!wF=df)nxG0UIQy}Qu8 zd#TPRFsr6~O&92~-D;nx6EQxOd5ipM8B3Ht?PxPo*g%o?7vNCo^15BKOH@sqTfftk zfP|@v#=oBe>GjqwdtWMjaLV4McTM-8S7>V_%15-9h|XEKPYpr@%>UTf4+h1dT*EW` z8F(fReFu&4sF?f6C|KdT*^&w4yQ0Nxa6A2LSM(y%MvJJVpp6LC)8~`Wn`o8V4N-a=kGZyUGo{|!^!0)b9M-{ky{s+{8O34r# zD?}IE8b}}I^|%V@vNy2I*mJO?33q{)4&V>aN?6>4D)X%S0T+eG%Tz0XoE%Dfj>ngU4I&eDrPY zp^i(TgV>IzX40XD@F0ZVh03?^8FU#YzK!KnL#$n}wL_5O@)xV#SXq$T@ao^-Y>|)z zYffN7x4n%C!hbNrp2Jv%=2rX-dLJ4O<7lTtu!k|5nh9*>e0b;t9z>VE!<3CEtbY&B z#m6|<@E*3C8Ne=r!(U!O6W_-f7(dC=2ae%fgbu^@<5=zyD&zxdVMT!&8!gD~7K0N- zLTW%hVDe3*S--~-2)#^mKfue_6VOR4$7ZsnI?AZ?3d%^CjL#!#{Rmg4pcYth9sicO z&nwX927U*wjv+Cxb~jw)@V7ijM#E?PZxDKhe~c#ahI=vHO$-ls(5JOkhEdSa%V4Kd zs|+y>_#*?%@5wzH@G^W9YcJ8zrx$l+)sWsC`!muZz4_&Z_y-=|T*EiB<@VFswR{!x z)?662hEJ*OZxQM2b$p?KTY0G7#K%I93w8-gxA6DCLl^83rhpiSjDaz_WD_5c@!%YA zeI$2<;*0jK@O3>u5&z2oSzGvx`s6cMdx;0>Y5q(2v7R4A|8Fb*O^k4g7p(V`xcuW? 
z;7VZQ=hyIkAor@Vn2A+KW8UC5Ao$wI-%BH!_<;@hf{BivYg&c$5IRf0oM$@4Vb}A2r=T+%x=# zCSX6EP?l`7(E1goPcUR(HlC%$kD0!&#@`XhvyeYCi8jz_5#(f0qP14?TL<2008bCn zh2GhNM2|=N=y)%=Ut{?%)*pGxD&(N8J0WfzABQL#Qn`I^o2dOB*B{F zbroh66jYRsR%cff0jVWvTvUwUs2N!(!^{I@A$P@eo4M;6Ni4L~606jaN%B<4+Dc+z z|BECE&H#yrTp+*Wk{{N&8hlX!?h(G&oT;vo3RUsUQ3A`QyhN-T!E-1RmOe!i?8?J# zf3Z@iNv8NJ%F8`oKW(We0}!03BQl;5;6wjfM|yMI3PHF7F4mFcP)NIXk)Dwi<%Mj= z@I|AJ*w&LEk=zjiGuS}kPMG*C=>pg4h!~1+hK?|CGr7VgFY1U3rtc#WZFoj*CJId0 zDE_8z-}_mzRI&JY9#p{#mk}OJ3L!GBf3UotW~T zO7VBb0UBx;u}$avgL3H4B5Gu7UIo1$?-Hbn6J1TSUBp4Ze1-HP z+*&h>yK)*HtStW|boAF>AbY8LK->&+w@n%9c9P)o8j6cUM-P3`B0!y6_H z(IQa>>p?;~0SWiOe-7KN@b+$(`7gQS3p(VeOkF^>!$4#pHjQ$vMP* z!Le+}p!K|?qt8WI1)(jEvFvITP^JDT4b8RJagawi(!s*rw~#tR4H>O@Z8W5{5Q$4p z@94D12N}02dTm_<_KeS{U1C`x`kpa)w9O6!(__Oh0m?Mj-GjkJkW0Ys>6Z-zC9w! zP(H;FO=lhqs7AZv0AHOrkAk<117|bI%>iMs6C8E>K=3>O}Gzvm!UG@svMbB8w zEYZ%oW-MN{HPTGCMhZJ8#xz9==J6G*Nxa`vFfYn17)!O6XgL+Qky9~BaB6m_Xa1^^gjw)Qt`h`aNKA)$_AgOdwI(iR#J}F%DP0@Vzm?-gv(~I zS}ab85G-G?`s%WYvezkwo3kjJbs_?-15cdgJ2qZtfbuwtlWvT+Fkz2cE%DTvV7VBH zTe?72nk5d-%90Gq-Ii=Pi1`C(109)WImzqAX2qSBDg*v93PujHbcKk!E&pa5Pe_4R zI&=_wg^88qUi##4%N30O;?xG>>CP+*(MrDqvi|<%D9eT*!%xRrat!$6SbZRCyd_JU zc!d=c8sQtcmN=rX%qI3XXocb!w1e(p^`bWKJ0rY2!y;2@rsaG<@i}mIw&itviqMEW z#)|1|9WBnc*sx}m9Lfg0_2>YMzhODe#y)>_ctj(1{q1>7in_~eEiq=*8Rz}c$DW$YnIy=tV zh-se$>-l*6C5Ff$)*@z~>j*XuvF@erq1N|_u$9x8l&dbk&SaazHjP_CTlDl-e!FU)^E3tfC2SZsph59v2=I6 zwdMm%&5%_sI$@Yww1y_u>S8gCi*28DaI2|pdU)Q^HtX9;qlnj&>p-x~6Fc9@=PwfNh>k1~%^Y{m%;ea< zhGnN8>PS>5sy%)&8P=4DksYCNUzplcg@e;JH3lvQuGWQZ9I3PPq&i|XLnxJrNpQ4O zY~oUvb==;d?@BQ;&`Jb=v59&sfd+%*5qoyJYv{mM`NgdU?q7rz^8sM%E5a)h4Oj@K z$4u=B!Z8hE6-xlqHoqu^Dr$R$*vh3Y?FfMxDYr8tX}tj&1YcE(J$?d4&nj^o!3P6A z*9EpL7Q;G%fOU?sfsRP<`vJ$(X~J;?9WNT{7mK0oYdi0|@{4^AmWt1@!g1)bpK>>L zexOA|CXA~UC3tnYxEgJOk=5crGt;YHxI@MQ(E&RfNIczLExv@g<7Q#FZbI#2sLG#T zE_Uk(oI7o5PY~O&LSTI@+v`J@3JpKE?wm)e#a3hL>X4d}8FWTx2EAO(oU{57v4)I! 
zK=pgwY9-ugB+lD+47#IE|Ft-paDOK^V`=dI@CamiC8Q}>ek=C0`Qi2r#$GgN!=don zVz2(^BJRLy@dfkXARZO2ZM#kLr(@Z&M*Psgy=-Q6I}5tBP-ArxZ_TFQ+;9k4!U!$Y zqG8!uF(#DN2W!RC#?)2erg<>jG>6bANM9$W|3sL1){7q_?mdoWEGBq7tXm(R+_$V3 z6GB|=Pq;lnIE3NC&|45lfN7{Hg0+3a22tWtJFo5Om?y-e2JM(5n934s!c&>&2{9=| zxL0qbGL0ZyxL_)ahR*o0R5lXs*eE7KbB(y+7bi7Fp(V9XHvVK%lWN5>1J_De(H+Rm zeL(CMQpv}L^i>@;RqhPmo-`cVIAN@=ca3wX%IE9B?FMn$&v}2_elq~>drIuQOVM3_ zX`NUq;RXpV?-a{%lSuP+iHXRwHZpg@lrc)ae@?nrtz<`CYQ9qJ@+oDWhn0e2S6Pu- z$S!V*U6pQ+S8U1&$0SDBfwxP3}Nh1bhE@R9|Jt4wi~ zxEV`-F{^lNO?FwDtuj(onR1U#F3{yKisRhqM>r-+N5s8YuuOJUs-s*Igt( zL^-9sbWV(P%8KgP`LvX()xG~P(CQRv9-@37sWpL}_#YiBRmPu;7tE7cxylaiatkFC zYiaii+mFf9GLr8SjJ{%5xf-QEVtZwzjZRml6st<$vv%IeE*%uF>Z>U6v+FGQ=Y7&@ zGz1nOm!mnQOnn%hJ}xVU-^XObACAj+!ZJaWX9&I$cY*2)oE0)nxfH)z=3^@A?Jjfs z-L4X*i-oG!6Hr@OMXBm_7xY!s(sKWTz$YI|$z??;Y;8eXyNF%ulqvZEdB8STa`sAB zn1oEVo^q8LKr@G?Qi5+Ragk|d7lzgZiFJ;$$Ag3fSo^G!aJdage%up`v__>0e!ZT-p9=XzHSxM9G^9o!Q zK2>1K^0DI<1*8d5!ic^K!!}zxGxb%JmNEjSibD6CIjZIkipN{1dWBNg0%d_)Eh%g_ zqU+j6lirrEB2!RckS0mpBimFtdw~GwUX{DS%ENLTJT^&+fcV$sSlT#A`Vm3ayYgF9 zogyV}=YHUXh?*tm6?ox$X%#E3kJJ0ENQHt+{PkqhSjI4 zg?UA4nQmvHl@p}N#T9erl)$zuOM)iD?f@;mh-ojumJuR=mDXEqpP5-Vv(#Z*$OKqP z?M_>7q}^G>(6TPJ1}8d3yQbUT);hzL^lCqwm*E~7z`bf)e;eJ_KkA#;(POaTa@0XO z`KzcC{W$H85GHlA+hIt8{Wb1wlMoNf6YcR}OSF$PunQlUlxR=AHLWYHPqbGa!T;ev zzF?oM_eW3D6&LKgSc@}OZ*kaN(4ctfiJp%6%xk`7opvur9pq&4)9Kvajh^;?XWZ#~nMEnX2hq z#~n@s{pTMXmw9SB<8X@f-b;=aBmKwsjwdG2F=L%;jHXA8Lf2^}E{L4yjD?3h&Nzsi z;EZA2$h?Wpp52x4OwC$Ct}1aUvpp4Mg=%5u8l(jO6)q@ql>(aYlwrvPXR@UDSnsdp zqu@APn&=#4X6FUC-6DpG&Z3{&RqN&rsa#B1GTN;2~PVS1H+a&OPF{`i=FoyM%^42 zW*7tK3Bpbut}J(!!q#f%INXFlS>|-Yyk9$aXzLlE=cCT8jCfnMbF5)h`(gUq70zc& z_%#!3Sm(6UQEQz!2$g_R<1As6mkfc=0^57oB{I`>I;~mfWS7VK40~|(8SIF|?avFG miO{9B&eui!nGpuEp&GByJM diff --git a/tensorflow-core/tensorflow-core-api/src/gen/resources/ops.pbtxt b/tensorflow-core/tensorflow-core-api/src/gen/resources/ops.pbtxt index d1ca721acbb..91501df0c6f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/resources/ops.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/gen/resources/ops.pbtxt @@ -748,6 +748,22 @@ op { } } } +op { + name: "AnonymousHashTable" + output_arg { + name: "table_handle" + type: DT_RESOURCE + } + attr { + name: "key_dtype" + type: "type" + } + attr { + name: "value_dtype" + type: "type" + } + is_stateful: true +} op { name: "AnonymousIterator" output_arg { @@ -2641,6 +2657,41 @@ op { } is_stateful: true } +op { + name: "AssignVariableXlaConcatND" + input_arg { + name: "resource" + type: DT_RESOURCE + } + input_arg { + name: "inputs" + type_attr: "T" + number_attr: "N" + } + attr { + name: "T" + type: "type" + } + attr { + name: "N" + type: "int" + has_minimum: true + minimum: 1 + } + attr { + name: "num_concats" + type: "list(int)" + } + attr { + name: "paddings" + type: "list(int)" + default_value { + list { + } + } + } + is_stateful: true +} op { name: "Atan" input_arg { @@ -3370,6 +3421,7 @@ op { has_minimum: true minimum: 1 } + is_distributed_communication: true } op { name: "BatchCholesky" @@ -3451,6 +3503,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "BatchDatasetV2" @@ -3489,6 +3548,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "BatchFFT" @@ -3625,6 +3691,7 @@ op { b: false } } + is_distributed_communication: true } op { name: "BatchIFFT" @@ -7116,6 +7183,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + 
default_value { + s: "" + } + } } op { name: "CacheDatasetV2" @@ -7147,6 +7221,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } is_stateful: true } op { @@ -7489,76 +7570,19 @@ op { is_stateful: true } op { - name: "CollectiveBcastRecv" - output_arg { - name: "data" - type_attr: "T" - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_BOOL - type: DT_FLOAT - type: DT_HALF - type: DT_DOUBLE - type: DT_INT32 - type: DT_INT64 - } - } - } - attr { - name: "group_size" - type: "int" - } - attr { - name: "group_key" - type: "int" - } - attr { - name: "instance_key" - type: "int" - } - attr { - name: "shape" - type: "shape" - } - attr { - name: "communication_hint" - type: "string" - default_value { - s: "auto" - } - } - attr { - name: "timeout_seconds" - type: "float" - default_value { - f: 0 - } - } - is_stateful: true - is_distributed_communication: true -} -op { - name: "CollectiveBcastRecvV2" + name: "CollectiveAllToAllV3" input_arg { - name: "group_size" - type: DT_INT32 + name: "input" + type_attr: "T" } input_arg { - name: "group_key" - type: DT_INT32 + name: "communicator" + type: DT_RESOURCE } input_arg { - name: "instance_key" + name: "group_assignment" type: DT_INT32 } - input_arg { - name: "shape" - type_attr: "Tshape" - } output_arg { name: "data" type_attr: "T" @@ -7568,7 +7592,7 @@ op { type: "type" allowed_values { list { - type: DT_BOOL + type: DT_BFLOAT16 type: DT_FLOAT type: DT_HALF type: DT_DOUBLE @@ -7577,26 +7601,6 @@ op { } } } - attr { - name: "Tshape" - type: "type" - default_value { - type: DT_INT32 - } - allowed_values { - list { - type: DT_INT32 - type: DT_INT64 - } - } - } - attr { - name: "communication_hint" - type: "string" - default_value { - s: "auto" - } - } attr { name: "timeout_seconds" type: "float" @@ -7608,11 +7612,7 @@ op { is_distributed_communication: true } op { - name: "CollectiveBcastSend" - input_arg { - name: "input" - type_attr: "T" - } + name: "CollectiveBcastRecv" output_arg { name: "data" type_attr: "T" @@ -7665,11 +7665,7 @@ op { is_distributed_communication: true } op { - name: "CollectiveBcastSendV2" - input_arg { - name: "input" - type_attr: "T" - } + name: "CollectiveBcastRecvV2" input_arg { name: "group_size" type: DT_INT32 @@ -7682,6 +7678,10 @@ op { name: "instance_key" type: DT_INT32 } + input_arg { + name: "shape" + type_attr: "Tshape" + } output_arg { name: "data" type_attr: "T" @@ -7700,6 +7700,19 @@ op { } } } + attr { + name: "Tshape" + type: "type" + default_value { + type: DT_INT32 + } + allowed_values { + list { + type: DT_INT32 + type: DT_INT64 + } + } + } attr { name: "communication_hint" type: "string" @@ -7718,7 +7731,7 @@ op { is_distributed_communication: true } op { - name: "CollectiveGather" + name: "CollectiveBcastSend" input_arg { name: "input" type_attr: "T" @@ -7732,6 +7745,7 @@ op { type: "type" allowed_values { list { + type: DT_BOOL type: DT_FLOAT type: DT_HALF type: DT_DOUBLE @@ -7774,7 +7788,7 @@ op { is_distributed_communication: true } op { - name: "CollectiveGatherV2" + name: "CollectiveBcastSendV2" input_arg { name: "input" type_attr: "T" @@ -7791,11 +7805,6 @@ op { name: "instance_key" type: DT_INT32 } - input_arg { - name: "ordering_token" - type: DT_RESOURCE - number_attr: "Nordering_token" - } output_arg { name: "data" type_attr: "T" @@ -7805,6 +7814,7 @@ op { type: "type" allowed_values { list { + type: DT_BOOL type: DT_FLOAT type: DT_HALF type: DT_DOUBLE @@ -7827,59 +7837,11 @@ op { f: 0 } } - attr { - 
name: "Nordering_token" - type: "int" - default_value { - i: 0 - } - has_minimum: true - } is_stateful: true is_distributed_communication: true } op { - name: "CollectivePermute" - input_arg { - name: "input" - type_attr: "T" - } - input_arg { - name: "source_target_pairs" - type: DT_INT32 - } - output_arg { - name: "output" - type_attr: "T" - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT32 - type: DT_UINT8 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_INT64 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - type: DT_BFLOAT16 - type: DT_UINT16 - type: DT_COMPLEX128 - type: DT_HALF - type: DT_UINT32 - type: DT_UINT64 - } - } - } -} -op { - name: "CollectiveReduce" + name: "CollectiveGather" input_arg { name: "input" type_attr: "T" @@ -7893,7 +7855,203 @@ op { type: "type" allowed_values { list { - type: DT_BFLOAT16 + type: DT_FLOAT + type: DT_HALF + type: DT_DOUBLE + type: DT_INT32 + type: DT_INT64 + } + } + } + attr { + name: "group_size" + type: "int" + } + attr { + name: "group_key" + type: "int" + } + attr { + name: "instance_key" + type: "int" + } + attr { + name: "shape" + type: "shape" + } + attr { + name: "communication_hint" + type: "string" + default_value { + s: "auto" + } + } + attr { + name: "timeout_seconds" + type: "float" + default_value { + f: 0 + } + } + is_stateful: true + is_distributed_communication: true +} +op { + name: "CollectiveGatherV2" + input_arg { + name: "input" + type_attr: "T" + } + input_arg { + name: "group_size" + type: DT_INT32 + } + input_arg { + name: "group_key" + type: DT_INT32 + } + input_arg { + name: "instance_key" + type: DT_INT32 + } + input_arg { + name: "ordering_token" + type: DT_RESOURCE + number_attr: "Nordering_token" + } + output_arg { + name: "data" + type_attr: "T" + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_HALF + type: DT_DOUBLE + type: DT_INT32 + type: DT_INT64 + } + } + } + attr { + name: "communication_hint" + type: "string" + default_value { + s: "auto" + } + } + attr { + name: "timeout_seconds" + type: "float" + default_value { + f: 0 + } + } + attr { + name: "Nordering_token" + type: "int" + default_value { + i: 0 + } + has_minimum: true + } + is_stateful: true + is_distributed_communication: true +} +op { + name: "CollectiveInitializeCommunicator" + input_arg { + name: "group_key" + type: DT_INT32 + } + input_arg { + name: "rank" + type: DT_INT32 + } + input_arg { + name: "group_size" + type: DT_INT32 + } + output_arg { + name: "communicator" + type: DT_RESOURCE + } + attr { + name: "communication_hint" + type: "string" + default_value { + s: "auto" + } + } + attr { + name: "timeout_seconds" + type: "float" + default_value { + f: 0 + } + } + is_stateful: true + is_distributed_communication: true +} +op { + name: "CollectivePermute" + input_arg { + name: "input" + type_attr: "T" + } + input_arg { + name: "source_target_pairs" + type: DT_INT32 + } + output_arg { + name: "output" + type_attr: "T" + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT32 + type: DT_UINT8 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_INT64 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + type: DT_BFLOAT16 + type: DT_UINT16 + type: DT_COMPLEX128 + type: DT_HALF + type: DT_UINT32 + type: DT_UINT64 + } + } + } +} +op { + name: "CollectiveReduce" + input_arg { + name: "input" + type_attr: "T" + } + output_arg { + name: "data" 
+ type_attr: "T" + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_BFLOAT16 type: DT_FLOAT type: DT_HALF type: DT_DOUBLE @@ -8060,6 +8218,60 @@ op { is_stateful: true is_distributed_communication: true } +op { + name: "CollectiveReduceV3" + input_arg { + name: "input" + type_attr: "T" + } + input_arg { + name: "communicator" + type: DT_RESOURCE + } + input_arg { + name: "group_assignment" + type: DT_INT32 + } + output_arg { + name: "data" + type_attr: "T" + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_BFLOAT16 + type: DT_FLOAT + type: DT_HALF + type: DT_DOUBLE + type: DT_INT32 + type: DT_INT64 + } + } + } + attr { + name: "reduction" + type: "string" + allowed_values { + list { + s: "Min" + s: "Max" + s: "Mul" + s: "Add" + } + } + } + attr { + name: "timeout_seconds" + type: "float" + default_value { + f: 0 + } + } + is_stateful: true + is_distributed_communication: true +} op { name: "CombinedNonMaxSuppression" input_arg { @@ -8418,6 +8630,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "ConditionalAccumulator" @@ -8525,6 +8744,13 @@ op { b: true } } + attr { + name: "tpu_cancellation_closes_chips" + type: "int" + default_value { + i: 0 + } + } is_stateful: true } op { @@ -9630,6 +9856,7 @@ op { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT + type: DT_DOUBLE type: DT_INT32 type: DT_UINT32 } @@ -11537,6 +11764,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } is_stateful: true } op { @@ -16802,6 +17036,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "FinalizeDataset" @@ -16878,6 +17119,13 @@ op { name: "handle" type: DT_VARIANT } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } is_stateful: true } op { @@ -16910,6 +17158,13 @@ op { name: "handle" type: DT_VARIANT } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } is_stateful: true } op { @@ -17155,6 +17410,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "Floor" @@ -18615,8 +18877,42 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } is_stateful: true } +op { + name: "GetElementAtIndex" + input_arg { + name: "dataset" + type: DT_VARIANT + } + input_arg { + name: "index" + type: DT_INT64 + } + output_arg { + name: "components" + type_list_attr: "output_types" + } + attr { + name: "output_types" + type: "list(type)" + has_minimum: true + minimum: 1 + } + attr { + name: "output_shapes" + type: "list(shape)" + has_minimum: true + minimum: 1 + } +} op { name: "GetOptions" input_arg { @@ -18883,6 +19179,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "GuaranteeConst" @@ -20192,6 +20495,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "Inv" @@ -20395,6 +20705,13 @@ op { } } } +op { + name: "IsTPUEmbeddingInitialized" + output_arg { + name: "is_tpu_embedding_initialized" + type: DT_BOOL + } +} op { name: "IsVariableInitialized" input_arg { @@ -21348,6 +21665,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } 
+ } } op { name: "Less" @@ -21657,55 +21981,6 @@ op { } is_stateful: true } -op { - name: "LoadTPUEmbeddingADAMParametersGradAccumDebug" - input_arg { - name: "parameters" - type: DT_FLOAT - } - input_arg { - name: "momenta" - type: DT_FLOAT - } - input_arg { - name: "velocities" - type: DT_FLOAT - } - input_arg { - name: "gradient_accumulators" - type: DT_FLOAT - } - attr { - name: "table_id" - type: "int" - default_value { - i: -1 - } - } - attr { - name: "table_name" - type: "string" - default_value { - s: "" - } - } - attr { - name: "num_shards" - type: "int" - } - attr { - name: "shard_id" - type: "int" - } - attr { - name: "config" - type: "string" - default_value { - s: "" - } - } - is_stateful: true -} op { name: "LoadTPUEmbeddingAdadeltaParameters" input_arg { @@ -21752,7 +22027,7 @@ op { is_stateful: true } op { - name: "LoadTPUEmbeddingAdadeltaParametersGradAccumDebug" + name: "LoadTPUEmbeddingAdagradMomentumParameters" input_arg { name: "parameters" type: DT_FLOAT @@ -21762,11 +22037,7 @@ op { type: DT_FLOAT } input_arg { - name: "updates" - type: DT_FLOAT - } - input_arg { - name: "gradient_accumulators" + name: "momenta" type: DT_FLOAT } attr { @@ -21841,51 +22112,6 @@ op { } is_stateful: true } -op { - name: "LoadTPUEmbeddingAdagradParametersGradAccumDebug" - input_arg { - name: "parameters" - type: DT_FLOAT - } - input_arg { - name: "accumulators" - type: DT_FLOAT - } - input_arg { - name: "gradient_accumulators" - type: DT_FLOAT - } - attr { - name: "table_id" - type: "int" - default_value { - i: -1 - } - } - attr { - name: "table_name" - type: "string" - default_value { - s: "" - } - } - attr { - name: "num_shards" - type: "int" - } - attr { - name: "shard_id" - type: "int" - } - attr { - name: "config" - type: "string" - default_value { - s: "" - } - } - is_stateful: true -} op { name: "LoadTPUEmbeddingCenteredRMSPropParameters" input_arg { @@ -21981,21 +22207,13 @@ op { is_stateful: true } op { - name: "LoadTPUEmbeddingFTRLParametersGradAccumDebug" + name: "LoadTPUEmbeddingFrequencyEstimatorParameters" input_arg { name: "parameters" type: DT_FLOAT } input_arg { - name: "accumulators" - type: DT_FLOAT - } - input_arg { - name: "linears" - type: DT_FLOAT - } - input_arg { - name: "gradient_accumulators" + name: "last_hit_step" type: DT_FLOAT } attr { @@ -22030,58 +22248,21 @@ op { is_stateful: true } op { - name: "LoadTPUEmbeddingFrequencyEstimatorParameters" + name: "LoadTPUEmbeddingMDLAdagradLightParameters" input_arg { name: "parameters" type: DT_FLOAT } input_arg { - name: "last_hit_step" - type: DT_FLOAT - } - attr { - name: "table_id" - type: "int" - default_value { - i: -1 - } - } - attr { - name: "table_name" - type: "string" - default_value { - s: "" - } - } - attr { - name: "num_shards" - type: "int" - } - attr { - name: "shard_id" - type: "int" - } - attr { - name: "config" - type: "string" - default_value { - s: "" - } - } - is_stateful: true -} -op { - name: "LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug" - input_arg { - name: "parameters" + name: "accumulators" type: DT_FLOAT } input_arg { - name: "last_hit_step" + name: "weights" type: DT_FLOAT } input_arg { - name: "gradient_accumulators" + name: "benefits" type: DT_FLOAT } attr { @@ -22116,21 +22297,13 @@ op { is_stateful: true } op { - name: "LoadTPUEmbeddingMDLAdagradLightParameters" + name: "LoadTPUEmbeddingMomentumParameters" input_arg { name: "parameters" type: DT_FLOAT } input_arg { - name: "accumulators" - type: DT_FLOAT - } - input_arg { - name: "weights" - type: DT_FLOAT - } - 
input_arg { - name: "benefits" + name: "momenta" type: DT_FLOAT } attr { @@ -22165,144 +22338,13 @@ op { is_stateful: true } op { - name: "LoadTPUEmbeddingMomentumParameters" + name: "LoadTPUEmbeddingProximalAdagradParameters" input_arg { name: "parameters" type: DT_FLOAT } input_arg { - name: "momenta" - type: DT_FLOAT - } - attr { - name: "table_id" - type: "int" - default_value { - i: -1 - } - } - attr { - name: "table_name" - type: "string" - default_value { - s: "" - } - } - attr { - name: "num_shards" - type: "int" - } - attr { - name: "shard_id" - type: "int" - } - attr { - name: "config" - type: "string" - default_value { - s: "" - } - } - is_stateful: true -} -op { - name: "LoadTPUEmbeddingMomentumParametersGradAccumDebug" - input_arg { - name: "parameters" - type: DT_FLOAT - } - input_arg { - name: "momenta" - type: DT_FLOAT - } - input_arg { - name: "gradient_accumulators" - type: DT_FLOAT - } - attr { - name: "table_id" - type: "int" - default_value { - i: -1 - } - } - attr { - name: "table_name" - type: "string" - default_value { - s: "" - } - } - attr { - name: "num_shards" - type: "int" - } - attr { - name: "shard_id" - type: "int" - } - attr { - name: "config" - type: "string" - default_value { - s: "" - } - } - is_stateful: true -} -op { - name: "LoadTPUEmbeddingProximalAdagradParameters" - input_arg { - name: "parameters" - type: DT_FLOAT - } - input_arg { - name: "accumulators" - type: DT_FLOAT - } - attr { - name: "table_id" - type: "int" - default_value { - i: -1 - } - } - attr { - name: "table_name" - type: "string" - default_value { - s: "" - } - } - attr { - name: "num_shards" - type: "int" - } - attr { - name: "shard_id" - type: "int" - } - attr { - name: "config" - type: "string" - default_value { - s: "" - } - } - is_stateful: true -} -op { - name: "LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug" - input_arg { - name: "parameters" - type: DT_FLOAT - } - input_arg { - name: "accumulators" - type: DT_FLOAT - } - input_arg { - name: "gradient_accumulators" + name: "accumulators" type: DT_FLOAT } attr { @@ -22381,55 +22423,6 @@ op { } is_stateful: true } -op { - name: "LoadTPUEmbeddingProximalYogiParametersGradAccumDebug" - input_arg { - name: "parameters" - type: DT_FLOAT - } - input_arg { - name: "v" - type: DT_FLOAT - } - input_arg { - name: "m" - type: DT_FLOAT - } - input_arg { - name: "gradient_accumulators" - type: DT_FLOAT - } - attr { - name: "table_id" - type: "int" - default_value { - i: -1 - } - } - attr { - name: "table_name" - type: "string" - default_value { - s: "" - } - } - attr { - name: "num_shards" - type: "int" - } - attr { - name: "shard_id" - type: "int" - } - attr { - name: "config" - type: "string" - default_value { - s: "" - } - } - is_stateful: true -} op { name: "LoadTPUEmbeddingRMSPropParameters" input_arg { @@ -22475,55 +22468,6 @@ op { } is_stateful: true } -op { - name: "LoadTPUEmbeddingRMSPropParametersGradAccumDebug" - input_arg { - name: "parameters" - type: DT_FLOAT - } - input_arg { - name: "ms" - type: DT_FLOAT - } - input_arg { - name: "mom" - type: DT_FLOAT - } - input_arg { - name: "gradient_accumulators" - type: DT_FLOAT - } - attr { - name: "table_id" - type: "int" - default_value { - i: -1 - } - } - attr { - name: "table_name" - type: "string" - default_value { - s: "" - } - } - attr { - name: "num_shards" - type: "int" - } - attr { - name: "shard_id" - type: "int" - } - attr { - name: "config" - type: "string" - default_value { - s: "" - } - } - is_stateful: true -} op { name: 
"LoadTPUEmbeddingStochasticGradientDescentParameters" input_arg { @@ -22561,47 +22505,6 @@ op { } is_stateful: true } -op { - name: "LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug" - input_arg { - name: "parameters" - type: DT_FLOAT - } - input_arg { - name: "gradient_accumulators" - type: DT_FLOAT - } - attr { - name: "table_id" - type: "int" - default_value { - i: -1 - } - } - attr { - name: "table_name" - type: "string" - default_value { - s: "" - } - } - attr { - name: "num_shards" - type: "int" - } - attr { - name: "shard_id" - type: "int" - } - attr { - name: "config" - type: "string" - default_value { - s: "" - } - } - is_stateful: true -} op { name: "Log" input_arg { @@ -23203,6 +23106,13 @@ op { b: false } } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "MapClear" @@ -23291,6 +23201,13 @@ op { b: false } } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "MapDefun" @@ -26259,6 +26176,9 @@ op { output_arg { name: "mutex_lock" type: DT_VARIANT + experimental_full_type { + type_id: TFT_MUTEX_LOCK + } } is_stateful: true } @@ -27244,6 +27164,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "OrderedMapClear" @@ -27833,6 +27760,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "PaddedBatchDatasetV2" @@ -27886,6 +27820,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "PaddingFIFOQueue" @@ -28024,6 +27965,13 @@ op { s: "default" } } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "ParallelConcat" @@ -28133,6 +28081,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "ParallelInterleaveDatasetV2" @@ -28188,6 +28143,13 @@ op { b: false } } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "ParallelInterleaveDatasetV3" @@ -28243,6 +28205,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "ParallelInterleaveDatasetV4" @@ -28306,6 +28275,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "ParallelMapDataset" @@ -28367,6 +28343,13 @@ op { b: false } } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "ParallelMapDatasetV2" @@ -28428,6 +28411,13 @@ op { b: false } } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "ParameterizedTruncatedNormal" @@ -29899,6 +29889,13 @@ op { i: 0 } } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "Prelinearize" @@ -31155,64 +31152,114 @@ op { } } op { - name: "QuantizedConcatV2" + name: "QuantizedConv2D" input_arg { - name: "values" - type_attr: "T" - number_attr: "N" + name: "input" + type_attr: "Tinput" } input_arg { - name: "axis" - type_attr: "Tidx" + name: "filter" + type_attr: "Tfilter" } input_arg { - name: "input_mins" + name: "min_input" type: DT_FLOAT - number_attr: "N" } input_arg { - name: "input_maxes" + name: "max_input" + type: DT_FLOAT + } + input_arg { + name: "min_filter" + type: DT_FLOAT + } + input_arg { + name: "max_filter" type: 
DT_FLOAT - number_attr: "N" } output_arg { name: "output" - type_attr: "T" + type_attr: "out_type" } output_arg { - name: "output_min" + name: "min_output" type: DT_FLOAT } output_arg { - name: "output_max" + name: "max_output" type: DT_FLOAT } attr { - name: "N" - type: "int" - has_minimum: true - minimum: 2 + name: "Tinput" + type: "type" + allowed_values { + list { + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + type: DT_QINT16 + type: DT_QUINT16 + } + } } attr { - name: "T" + name: "Tfilter" type: "type" + allowed_values { + list { + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + type: DT_QINT16 + type: DT_QUINT16 + } + } } attr { - name: "Tidx" + name: "out_type" type: "type" default_value { - type: DT_INT32 + type: DT_QINT32 } allowed_values { list { - type: DT_INT32 - type: DT_INT64 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + type: DT_QINT16 + type: DT_QUINT16 + } + } + } + attr { + name: "strides" + type: "list(int)" + } + attr { + name: "padding" + type: "string" + allowed_values { + list { + s: "SAME" + s: "VALID" + } + } + } + attr { + name: "dilations" + type: "list(int)" + default_value { + list { + i: 1 + i: 1 + i: 1 + i: 1 } } } } op { - name: "QuantizedConv2D" + name: "QuantizedConv2DAndRelu" input_arg { name: "input" type_attr: "Tinput" @@ -31317,9 +31364,17 @@ op { } } } + attr { + name: "padding_list" + type: "list(int)" + default_value { + list { + } + } + } } op { - name: "QuantizedConv2DAndRelu" + name: "QuantizedConv2DAndReluAndRequantize" input_arg { name: "input" type_attr: "Tinput" @@ -31344,6 +31399,14 @@ op { name: "max_filter" type: DT_FLOAT } + input_arg { + name: "min_freezed_output" + type: DT_FLOAT + } + input_arg { + name: "max_freezed_output" + type: DT_FLOAT + } output_arg { name: "output" type_attr: "out_type" @@ -31386,7 +31449,7 @@ op { name: "out_type" type: "type" default_value { - type: DT_QINT32 + type: DT_QUINT8 } allowed_values { list { @@ -31434,7 +31497,7 @@ op { } } op { - name: "QuantizedConv2DAndReluAndRequantize" + name: "QuantizedConv2DAndRequantize" input_arg { name: "input" type_attr: "Tinput" @@ -31509,7 +31572,7 @@ op { name: "out_type" type: "type" default_value { - type: DT_QUINT8 + type: DT_QINT8 } allowed_values { list { @@ -31557,7 +31620,7 @@ op { } } op { - name: "QuantizedConv2DAndRequantize" + name: "QuantizedConv2DPerChannel" input_arg { name: "input" type_attr: "Tinput" @@ -31582,14 +31645,6 @@ op { name: "max_filter" type: DT_FLOAT } - input_arg { - name: "min_freezed_output" - type: DT_FLOAT - } - input_arg { - name: "max_freezed_output" - type: DT_FLOAT - } output_arg { name: "output" type_attr: "out_type" @@ -31632,7 +31687,7 @@ op { name: "out_type" type: "type" default_value { - type: DT_QINT8 + type: DT_QINT32 } allowed_values { list { @@ -31670,17 +31725,9 @@ op { } } } - attr { - name: "padding_list" - type: "list(int)" - default_value { - list { - } - } - } } op { - name: "QuantizedConv2DPerChannel" + name: "QuantizedConv2DWithBias" input_arg { name: "input" type_attr: "Tinput" @@ -31689,6 +31736,10 @@ op { name: "filter" type_attr: "Tfilter" } + input_arg { + name: "bias" + type: DT_FLOAT + } input_arg { name: "min_input" type: DT_FLOAT @@ -31785,9 +31836,17 @@ op { } } } + attr { + name: "padding_list" + type: "list(int)" + default_value { + list { + } + } + } } op { - name: "QuantizedConv2DWithBias" + name: "QuantizedConv2DWithBiasAndRelu" input_arg { name: "input" type_attr: "Tinput" @@ -31906,7 +31965,7 @@ op { } } op { - name: "QuantizedConv2DWithBiasAndRelu" + name: 
"QuantizedConv2DWithBiasAndReluAndRequantize" input_arg { name: "input" type_attr: "Tinput" @@ -31917,7 +31976,7 @@ op { } input_arg { name: "bias" - type: DT_FLOAT + type_attr: "Tbias" } input_arg { name: "min_input" @@ -31933,133 +31992,14 @@ op { } input_arg { name: "max_filter" - type: DT_FLOAT - } - output_arg { - name: "output" - type_attr: "out_type" - } - output_arg { - name: "min_output" - type: DT_FLOAT - } - output_arg { - name: "max_output" - type: DT_FLOAT - } - attr { - name: "Tinput" - type: "type" - allowed_values { - list { - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - type: DT_QINT16 - type: DT_QUINT16 - } - } - } - attr { - name: "Tfilter" - type: "type" - allowed_values { - list { - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - type: DT_QINT16 - type: DT_QUINT16 - } - } - } - attr { - name: "out_type" - type: "type" - default_value { - type: DT_QINT32 - } - allowed_values { - list { - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - type: DT_QINT16 - type: DT_QUINT16 - } - } - } - attr { - name: "strides" - type: "list(int)" - } - attr { - name: "padding" - type: "string" - allowed_values { - list { - s: "SAME" - s: "VALID" - } - } - } - attr { - name: "dilations" - type: "list(int)" - default_value { - list { - i: 1 - i: 1 - i: 1 - i: 1 - } - } - } - attr { - name: "padding_list" - type: "list(int)" - default_value { - list { - } - } - } -} -op { - name: "QuantizedConv2DWithBiasAndReluAndRequantize" - input_arg { - name: "input" - type_attr: "Tinput" - } - input_arg { - name: "filter" - type_attr: "Tfilter" - } - input_arg { - name: "bias" - type_attr: "Tbias" - } - input_arg { - name: "min_input" - type: DT_FLOAT - } - input_arg { - name: "max_input" - type: DT_FLOAT - } - input_arg { - name: "min_filter" - type: DT_FLOAT - } - input_arg { - name: "max_filter" - type: DT_FLOAT - } - input_arg { - name: "min_freezed_output" - type: DT_FLOAT - } - input_arg { - name: "max_freezed_output" + type: DT_FLOAT + } + input_arg { + name: "min_freezed_output" + type: DT_FLOAT + } + input_arg { + name: "max_freezed_output" type: DT_FLOAT } output_arg { @@ -35585,6 +35525,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } is_stateful: true } op { @@ -36170,6 +36117,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } is_stateful: true } op { @@ -36214,6 +36168,41 @@ op { } is_stateful: true } +op { + name: "ReadVariableXlaSplitND" + input_arg { + name: "resource" + type: DT_RESOURCE + } + output_arg { + name: "outputs" + type_attr: "T" + number_attr: "N" + } + attr { + name: "T" + type: "type" + } + attr { + name: "N" + type: "int" + has_minimum: true + minimum: 1 + } + attr { + name: "num_splits" + type: "list(int)" + } + attr { + name: "paddings" + type: "list(int)" + default_value { + list { + } + } + } + is_stateful: true +} op { name: "ReaderNumRecordsProduced" input_arg { @@ -36774,6 +36763,13 @@ op { b: true } } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } is_stateful: true } op { @@ -37038,6 +37034,13 @@ op { name: "external_state_policy" type: "int" } + attr { + name: "element_spec" + type: "string" + default_value { + s: "" + } + } } op { name: "Relu" @@ -37230,6 +37233,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "RequantizationRange" @@ -40929,21 +40939,17 @@ op { is_stateful: true } op { - 
name: "RetrieveTPUEmbeddingADAMParametersGradAccumDebug" + name: "RetrieveTPUEmbeddingAdadeltaParameters" output_arg { name: "parameters" type: DT_FLOAT } output_arg { - name: "momenta" - type: DT_FLOAT - } - output_arg { - name: "velocities" + name: "accumulators" type: DT_FLOAT } output_arg { - name: "gradient_accumulators" + name: "updates" type: DT_FLOAT } attr { @@ -40978,7 +40984,7 @@ op { is_stateful: true } op { - name: "RetrieveTPUEmbeddingAdadeltaParameters" + name: "RetrieveTPUEmbeddingAdagradMomentumParameters" output_arg { name: "parameters" type: DT_FLOAT @@ -40988,7 +40994,7 @@ op { type: DT_FLOAT } output_arg { - name: "updates" + name: "momenta" type: DT_FLOAT } attr { @@ -41023,7 +41029,7 @@ op { is_stateful: true } op { - name: "RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug" + name: "RetrieveTPUEmbeddingAdagradParameters" output_arg { name: "parameters" type: DT_FLOAT @@ -41032,12 +41038,53 @@ op { name: "accumulators" type: DT_FLOAT } + attr { + name: "table_id" + type: "int" + default_value { + i: -1 + } + } + attr { + name: "table_name" + type: "string" + default_value { + s: "" + } + } + attr { + name: "num_shards" + type: "int" + } + attr { + name: "shard_id" + type: "int" + } + attr { + name: "config" + type: "string" + default_value { + s: "" + } + } + is_stateful: true +} +op { + name: "RetrieveTPUEmbeddingCenteredRMSPropParameters" + output_arg { + name: "parameters" + type: DT_FLOAT + } output_arg { - name: "updates" + name: "ms" + type: DT_FLOAT + } + output_arg { + name: "mom" type: DT_FLOAT } output_arg { - name: "gradient_accumulators" + name: "mg" type: DT_FLOAT } attr { @@ -41072,201 +41119,17 @@ op { is_stateful: true } op { - name: "RetrieveTPUEmbeddingAdagradParameters" + name: "RetrieveTPUEmbeddingFTRLParameters" output_arg { name: "parameters" type: DT_FLOAT } output_arg { name: "accumulators" - type: DT_FLOAT - } - attr { - name: "table_id" - type: "int" - default_value { - i: -1 - } - } - attr { - name: "table_name" - type: "string" - default_value { - s: "" - } - } - attr { - name: "num_shards" - type: "int" - } - attr { - name: "shard_id" - type: "int" - } - attr { - name: "config" - type: "string" - default_value { - s: "" - } - } - is_stateful: true -} -op { - name: "RetrieveTPUEmbeddingAdagradParametersGradAccumDebug" - output_arg { - name: "parameters" - type: DT_FLOAT - } - output_arg { - name: "accumulators" - type: DT_FLOAT - } - output_arg { - name: "gradient_accumulators" - type: DT_FLOAT - } - attr { - name: "table_id" - type: "int" - default_value { - i: -1 - } - } - attr { - name: "table_name" - type: "string" - default_value { - s: "" - } - } - attr { - name: "num_shards" - type: "int" - } - attr { - name: "shard_id" - type: "int" - } - attr { - name: "config" - type: "string" - default_value { - s: "" - } - } - is_stateful: true -} -op { - name: "RetrieveTPUEmbeddingCenteredRMSPropParameters" - output_arg { - name: "parameters" - type: DT_FLOAT - } - output_arg { - name: "ms" - type: DT_FLOAT - } - output_arg { - name: "mom" - type: DT_FLOAT - } - output_arg { - name: "mg" - type: DT_FLOAT - } - attr { - name: "table_id" - type: "int" - default_value { - i: -1 - } - } - attr { - name: "table_name" - type: "string" - default_value { - s: "" - } - } - attr { - name: "num_shards" - type: "int" - } - attr { - name: "shard_id" - type: "int" - } - attr { - name: "config" - type: "string" - default_value { - s: "" - } - } - is_stateful: true -} -op { - name: "RetrieveTPUEmbeddingFTRLParameters" - output_arg { - name: "parameters" - 
type: DT_FLOAT - } - output_arg { - name: "accumulators" - type: DT_FLOAT - } - output_arg { - name: "linears" - type: DT_FLOAT - } - attr { - name: "table_id" - type: "int" - default_value { - i: -1 - } - } - attr { - name: "table_name" - type: "string" - default_value { - s: "" - } - } - attr { - name: "num_shards" - type: "int" - } - attr { - name: "shard_id" - type: "int" - } - attr { - name: "config" - type: "string" - default_value { - s: "" - } - } - is_stateful: true -} -op { - name: "RetrieveTPUEmbeddingFTRLParametersGradAccumDebug" - output_arg { - name: "parameters" - type: DT_FLOAT - } - output_arg { - name: "accumulators" - type: DT_FLOAT - } - output_arg { - name: "linears" - type: DT_FLOAT - } - output_arg { - name: "gradient_accumulators" + type: DT_FLOAT + } + output_arg { + name: "linears" type: DT_FLOAT } attr { @@ -41341,51 +41204,6 @@ op { } is_stateful: true } -op { - name: "RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug" - output_arg { - name: "parameters" - type: DT_FLOAT - } - output_arg { - name: "last_hit_step" - type: DT_FLOAT - } - output_arg { - name: "gradient_accumulators" - type: DT_FLOAT - } - attr { - name: "table_id" - type: "int" - default_value { - i: -1 - } - } - attr { - name: "table_name" - type: "string" - default_value { - s: "" - } - } - attr { - name: "num_shards" - type: "int" - } - attr { - name: "shard_id" - type: "int" - } - attr { - name: "config" - type: "string" - default_value { - s: "" - } - } - is_stateful: true -} op { name: "RetrieveTPUEmbeddingMDLAdagradLightParameters" output_arg { @@ -41476,51 +41294,6 @@ op { } is_stateful: true } -op { - name: "RetrieveTPUEmbeddingMomentumParametersGradAccumDebug" - output_arg { - name: "parameters" - type: DT_FLOAT - } - output_arg { - name: "momenta" - type: DT_FLOAT - } - output_arg { - name: "gradient_accumulators" - type: DT_FLOAT - } - attr { - name: "table_id" - type: "int" - default_value { - i: -1 - } - } - attr { - name: "table_name" - type: "string" - default_value { - s: "" - } - } - attr { - name: "num_shards" - type: "int" - } - attr { - name: "shard_id" - type: "int" - } - attr { - name: "config" - type: "string" - default_value { - s: "" - } - } - is_stateful: true -} op { name: "RetrieveTPUEmbeddingProximalAdagradParameters" output_arg { @@ -41563,205 +41336,17 @@ op { is_stateful: true } op { - name: "RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug" - output_arg { - name: "parameters" - type: DT_FLOAT - } - output_arg { - name: "accumulators" - type: DT_FLOAT - } - output_arg { - name: "gradient_accumulators" - type: DT_FLOAT - } - attr { - name: "table_id" - type: "int" - default_value { - i: -1 - } - } - attr { - name: "table_name" - type: "string" - default_value { - s: "" - } - } - attr { - name: "num_shards" - type: "int" - } - attr { - name: "shard_id" - type: "int" - } - attr { - name: "config" - type: "string" - default_value { - s: "" - } - } - is_stateful: true -} -op { - name: "RetrieveTPUEmbeddingProximalYogiParameters" - output_arg { - name: "parameters" - type: DT_FLOAT - } - output_arg { - name: "v" - type: DT_FLOAT - } - output_arg { - name: "m" - type: DT_FLOAT - } - attr { - name: "table_id" - type: "int" - default_value { - i: -1 - } - } - attr { - name: "table_name" - type: "string" - default_value { - s: "" - } - } - attr { - name: "num_shards" - type: "int" - } - attr { - name: "shard_id" - type: "int" - } - attr { - name: "config" - type: "string" - default_value { - s: "" - } - } - is_stateful: true -} -op { - name: 
"RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug" - output_arg { - name: "parameters" - type: DT_FLOAT - } - output_arg { - name: "v" - type: DT_FLOAT - } - output_arg { - name: "m" - type: DT_FLOAT - } - output_arg { - name: "gradient_accumulators" - type: DT_FLOAT - } - attr { - name: "table_id" - type: "int" - default_value { - i: -1 - } - } - attr { - name: "table_name" - type: "string" - default_value { - s: "" - } - } - attr { - name: "num_shards" - type: "int" - } - attr { - name: "shard_id" - type: "int" - } - attr { - name: "config" - type: "string" - default_value { - s: "" - } - } - is_stateful: true -} -op { - name: "RetrieveTPUEmbeddingRMSPropParameters" - output_arg { - name: "parameters" - type: DT_FLOAT - } - output_arg { - name: "ms" - type: DT_FLOAT - } - output_arg { - name: "mom" - type: DT_FLOAT - } - attr { - name: "table_id" - type: "int" - default_value { - i: -1 - } - } - attr { - name: "table_name" - type: "string" - default_value { - s: "" - } - } - attr { - name: "num_shards" - type: "int" - } - attr { - name: "shard_id" - type: "int" - } - attr { - name: "config" - type: "string" - default_value { - s: "" - } - } - is_stateful: true -} -op { - name: "RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug" + name: "RetrieveTPUEmbeddingProximalYogiParameters" output_arg { name: "parameters" type: DT_FLOAT } output_arg { - name: "ms" - type: DT_FLOAT - } - output_arg { - name: "mom" + name: "v" type: DT_FLOAT } output_arg { - name: "gradient_accumulators" + name: "m" type: DT_FLOAT } attr { @@ -41796,11 +41381,19 @@ op { is_stateful: true } op { - name: "RetrieveTPUEmbeddingStochasticGradientDescentParameters" + name: "RetrieveTPUEmbeddingRMSPropParameters" output_arg { name: "parameters" type: DT_FLOAT } + output_arg { + name: "ms" + type: DT_FLOAT + } + output_arg { + name: "mom" + type: DT_FLOAT + } attr { name: "table_id" type: "int" @@ -41833,15 +41426,11 @@ op { is_stateful: true } op { - name: "RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug" + name: "RetrieveTPUEmbeddingStochasticGradientDescentParameters" output_arg { name: "parameters" type: DT_FLOAT } - output_arg { - name: "gradient_accumulators" - type: DT_FLOAT - } attr { name: "table_id" type: "int" @@ -44083,6 +43672,61 @@ op { } is_stateful: true } +op { + name: "SaveDatasetV2" + input_arg { + name: "input_dataset" + type: DT_VARIANT + } + input_arg { + name: "path" + type: DT_STRING + } + input_arg { + name: "shard_func_other_args" + type_list_attr: "Tshard_func_args" + } + output_arg { + name: "handle" + type: DT_VARIANT + } + attr { + name: "compression" + type: "string" + default_value { + s: "" + } + } + attr { + name: "shard_func" + type: "func" + } + attr { + name: "use_shard_func" + type: "bool" + default_value { + b: true + } + } + attr { + name: "Tshard_func_args" + type: "list(type)" + has_minimum: true + } + attr { + name: "output_types" + type: "list(type)" + has_minimum: true + minimum: 1 + } + attr { + name: "output_shapes" + type: "list(shape)" + has_minimum: true + minimum: 1 + } + is_stateful: true +} op { name: "SaveSlices" input_arg { @@ -44330,6 +43974,13 @@ op { b: true } } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "ScatterAdd" @@ -46130,6 +45781,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "ShardedFilename" @@ -46210,6 +45868,13 @@ op { b: true } } + attr { + name: "metadata" + type: "string" + default_value { 
+ s: "" + } + } } op { name: "ShuffleAndRepeatDatasetV2" @@ -46260,6 +45925,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } is_stateful: true } op { @@ -46303,6 +45975,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "ShuffleDatasetV2" @@ -46334,6 +46013,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } is_stateful: true } op { @@ -46381,6 +46067,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } is_stateful: true } op { @@ -46574,6 +46267,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "Skipgram" @@ -47000,6 +46700,13 @@ op { type: "list(type)" has_minimum: true } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "SnapshotNestedDatasetReader" @@ -51685,6 +51392,7 @@ op { } } is_stateful: true + is_distributed_communication: true } op { name: "StatefulRandomBinomial" @@ -52348,7 +52056,6 @@ op { } } } - is_stateful: true } op { name: "StatelessRandomGetKeyCounterAlg" @@ -54075,6 +53782,13 @@ op { name: "handle" type: DT_VARIANT } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } is_stateful: true } op { @@ -54589,6 +54303,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "TakeManySparseFromTensorsMap" @@ -54663,6 +54384,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "Tan" @@ -55759,6 +55487,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } is_stateful: true } op { @@ -56607,6 +56342,20 @@ op { has_minimum: true minimum: 1 } + attr { + name: "is_files" + type: "bool" + default_value { + b: false + } + } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } is_stateful: true } op { @@ -56763,6 +56512,13 @@ op { name: "handle" type: DT_VARIANT } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } is_stateful: true } op { @@ -57447,6 +57203,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "UnbatchGrad" @@ -57871,6 +57634,13 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "UniqueV2" @@ -58723,10 +58493,26 @@ op { is_stateful: true } op { - name: "Window" + name: "WindowDataset" input_arg { - name: "inputs" - type_list_attr: "Tinputs" + name: "input_dataset" + type: DT_VARIANT + } + input_arg { + name: "size" + type: DT_INT64 + } + input_arg { + name: "shift" + type: DT_INT64 + } + input_arg { + name: "stride" + type: DT_INT64 + } + input_arg { + name: "drop_remainder" + type: DT_BOOL } output_arg { name: "handle" @@ -58745,33 +58531,18 @@ op { minimum: 1 } attr { - name: "Tinputs" - type: "list(type)" - has_minimum: true - minimum: 1 + name: "metadata" + type: "string" + default_value { + s: "" + } } } op { - name: "WindowDataset" - input_arg { - name: "input_dataset" - type: DT_VARIANT - } - input_arg { - name: "size" - type: DT_INT64 - } - input_arg { - name: "shift" - type: DT_INT64 - } - 
input_arg { - name: "stride" - type: DT_INT64 - } + name: "WindowOp" input_arg { - name: "drop_remainder" - type: DT_BOOL + name: "inputs" + type_list_attr: "Tinputs" } output_arg { name: "handle" @@ -58789,6 +58560,12 @@ op { has_minimum: true minimum: 1 } + attr { + name: "Tinputs" + type: "list(type)" + has_minimum: true + minimum: 1 + } } op { name: "WorkerHeartbeat" @@ -59078,6 +58855,52 @@ op { } } } +op { + name: "XlaAllReduce" + input_arg { + name: "input" + description: "Array or a non-empty tuple of arrays to reduce across replicas." + type_attr: "T" + } + input_arg { + name: "group_assignment" + description: "Groups between which the reductions are performed." + type: DT_INT32 + } + output_arg { + name: "output" + type_attr: "T" + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_HALF + type: DT_BFLOAT16 + type: DT_FLOAT + type: DT_INT32 + type: DT_UINT32 + } + } + } + attr { + name: "reduce_op" + type: "string" + description: "Reduction computation." + allowed_values { + list { + s: "Min" + s: "Max" + s: "Mul" + s: "Add" + s: "Mean" + } + } + } + summary: "Wraps the XLA AllReduce operator" + description: " documented at https://www.tensorflow.org/xla/operation_semantics#allreduce." +} op { name: "XlaBroadcastHelper" input_arg { @@ -59159,6 +58982,40 @@ op { } summary: "Operator that connects the output of an XLA computation to other consumer graph nodes." } +op { + name: "XlaConcatND" + input_arg { + name: "inputs" + type_attr: "T" + number_attr: "N" + } + output_arg { + name: "output" + type_attr: "T" + } + attr { + name: "T" + type: "type" + } + attr { + name: "N" + type: "int" + has_minimum: true + minimum: 1 + } + attr { + name: "num_concats" + type: "list(int)" + } + attr { + name: "paddings" + type: "list(int)" + default_value { + list { + } + } + } +} op { name: "XlaConv" input_arg { @@ -60118,6 +59975,57 @@ op { summary: "Wraps the XLA Reduce operator, documented at" description: " https://www.tensorflow.org/performance/xla/operation_semantics#reduce ." } +op { + name: "XlaReduceScatter" + input_arg { + name: "input" + description: "Array or a non-empty tuple of arrays to reduce across replicas." + type_attr: "T" + } + input_arg { + name: "group_assignment" + description: "Groups between which the reductions are performed." + type: DT_INT32 + } + input_arg { + name: "scatter_dimension" + description: "Dimension to scatter." + type: DT_INT32 + } + output_arg { + name: "output" + type_attr: "T" + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_HALF + type: DT_BFLOAT16 + type: DT_FLOAT + type: DT_INT32 + type: DT_UINT32 + } + } + } + attr { + name: "reduce_op" + type: "string" + description: "Reduction computation." + allowed_values { + list { + s: "Min" + s: "Max" + s: "Mul" + s: "Add" + s: "Mean" + } + } + } + summary: "Wraps the XLA ReduceScatter operator" + description: " documented at https://www.tensorflow.org/xla/operation_semantics#reducescatter." +} op { name: "XlaReduceWindow" input_arg { @@ -60219,8 +60127,8 @@ op { name: "T" type: "type" } - summary: "Inverse of XlaSetDynamicDimensionSize. Make an xla bounded" - description: " dynamic dimension into a static dimension. The bound of the size of\n dimension `dim_index` becomes the static dimension size." + summary: "Inverse of XlaSetDynamicDimensionSize." + description: "Make an xla bounded dynamic dimension into a static dimension. The bound of the\nsize of dimension `dim_index` becomes the static dimension size." 
} op { name: "XlaReplicaId" @@ -60230,6 +60138,63 @@ op { } summary: "Replica ID." } +op { + name: "XlaRngBitGenerator" + input_arg { + name: "algorithm" + description: "The PRNG algorithm to use, one of\ntf.random.Algorithm.{PHILOX, THREEFRY, AUTO_SELECT}." + type: DT_INT32 + } + input_arg { + name: "initial_state" + description: "Initial state for the PRNG algorithm. For THREEFRY, it should be\na u64[2] and for PHILOX a u64[3]." + type: DT_UINT64 + } + input_arg { + name: "shape" + description: "The output shape of the generated data." + type_attr: "Tshape" + } + output_arg { + name: "output_key" + type: DT_UINT64 + } + output_arg { + name: "output" + type_attr: "dtype" + } + attr { + name: "dtype" + type: "type" + default_value { + type: DT_UINT64 + } + description: "The type of the tensor." + allowed_values { + list { + type: DT_INT32 + type: DT_INT64 + type: DT_UINT32 + type: DT_UINT64 + } + } + } + attr { + name: "Tshape" + type: "type" + default_value { + type: DT_INT32 + } + allowed_values { + list { + type: DT_INT32 + type: DT_INT64 + } + } + } + summary: "Stateless PRNG bit generator." + description: "Wraps the XLA RngBitGenerator operator, documented at\n https://www.tensorflow.org/performance/xla/operation_semantics#rngbitgenerator." +} op { name: "XlaScatter" input_arg { @@ -60548,7 +60513,16 @@ op { s: "" } } - summary: "An op which shards the input based on the given sharding attribute." + attr { + name: "unspecified_dims" + type: "list(int)" + default_value { + list { + } + } + } + summary: "An op which shards the input based on the given sharding attribute. It can" + description: "selectively annotate a subset of tensor dimensions by skipping unspecified_dims,\nand the sharding annotation should be replicated in those dims." } op { name: "XlaSort" @@ -60569,6 +60543,40 @@ op { summary: "Wraps the XLA Sort operator, documented at" description: " https://www.tensorflow.org/performance/xla/operation_semantics#sort\n.\n\nSorts a tensor. Currently only sorts in ascending order are supported." } +op { + name: "XlaSplitND" + input_arg { + name: "input" + type_attr: "T" + } + output_arg { + name: "outputs" + type_attr: "T" + number_attr: "N" + } + attr { + name: "T" + type: "type" + } + attr { + name: "N" + type: "int" + has_minimum: true + minimum: 1 + } + attr { + name: "num_splits" + type: "list(int)" + } + attr { + name: "paddings" + type: "list(int)" + default_value { + list { + } + } + } +} op { name: "XlaSpmdFullToShardShape" input_arg { @@ -60587,8 +60595,23 @@ op { name: "manual_sharding" type: "string" } + attr { + name: "dim" + type: "int" + default_value { + i: -1 + } + } + attr { + name: "unspecified_dims" + type: "list(int)" + default_value { + list { + } + } + } summary: "An op used by XLA SPMD partitioner to switch from automatic partitioning to" - description: "manual partitioning. It annotates the input (full-shape, to be automatically\npartitioned) with the same sharding used by manual partitioning, and outputs a\nshard-shaped tensor to be consumed by later manually-partitioned ops. If the\nshape is not evenly partitionable, the padding region will be masked with 0s." + description: "manual partitioning. It annotates the input (full-shape, to be automatically\npartitioned) with the same sharding used by manual partitioning, and outputs a\nshard-shaped tensor to be consumed by later manually-partitioned ops. 
If the\nshape is not evenly partitionable, the padding region will be masked with 0s.\nThe conversion can happen partially in subgroups, by specifying the dim\nattribute, where only that dim will be converted." } op { name: "XlaSpmdShardToFullShape" @@ -60612,8 +60635,23 @@ op { name: "full_shape" type: "shape" } + attr { + name: "dim" + type: "int" + default_value { + i: -1 + } + } + attr { + name: "unspecified_dims" + type: "list(int)" + default_value { + list { + } + } + } summary: "An op used by XLA SPMD partitioner to switch from manual partitioning to" - description: "automatic partitioning. It converts the shard-shaped, manually partitioned input\ninto full-shaped tensor to be partitioned automatically with the same sharding\nused by manual partitioning." + description: "automatic partitioning. It converts the shard-shaped, manually partitioned input\ninto full-shaped tensor to be partitioned automatically with the same sharding\nused by manual partitioning. The conversion can happen partially in subgroups,\nby specifying the dim attribute, where only that dim will be converted." } op { name: "XlaSvd" @@ -60742,7 +60780,42 @@ op { description: "a reducer function to apply" } summary: "Wraps the variadic XLA Reduce operator." - description: "Semantics are documented at\n https://www.tensorflow.org/performance/xla/operation_semantics#variadic_reduce." + description: "Semantics are documented at\n https://www.tensorflow.org/performance/xla/operation_semantics#variadic_reduce.\n\nThis version is limited to operands of the same dtype.\nXlaVariadicReduceV2 is a version that supports heterogeneous operands." +} +op { + name: "XlaVariadicReduceV2" + input_arg { + name: "inputs" + description: "the input tensor(s)" + type_list_attr: "T" + } + input_arg { + name: "init_values" + description: "scalar initial value(s) for the reduction" + type_list_attr: "T" + } + output_arg { + name: "outputs" + type_list_attr: "T" + } + attr { + name: "T" + type: "list(type)" + has_minimum: true + minimum: 1 + } + attr { + name: "dimensions_to_reduce" + type: "list(int)" + description: "dimension numbers over which to reduce" + } + attr { + name: "reducer" + type: "func" + description: "a reducer function to apply" + } + summary: "Wraps the variadic XLA Reduce operator." + description: "Semantics are documented at\n https://www.tensorflow.org/performance/xla/operation_semantics#variadic_reduce.\n\nThis is an expanded version of XlaVariadicReduce, with support for\noperands of different dtypes, and improved shape inference." 
} op { name: "XlaVariadicSort" @@ -60935,4 +61008,11 @@ op { has_minimum: true minimum: 1 } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/presets/tensorflow.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/presets/tensorflow.java index da9921daea3..9b3258fb08c 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/presets/tensorflow.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/presets/tensorflow.java @@ -39,7 +39,7 @@ value = { @Platform( value = {"linux", "macosx", "windows"}, - compiler = "cpp11", + compiler = "cpp14", include = { "tensorflow/core/platform/ctstring_internal.h", "tensorflow/core/platform/ctstring.h", @@ -317,6 +317,7 @@ public void map(InfoMap infoMap) { "gtl::iterator_range", "tensorflow::DataType", "tensorflow::DataTypeVector", + "tensorflow::Node::set_original_func_names", "tensorflow::Node::set_original_node_names", "tensorflow::Node::AddAttr", "tensorflow::Node::ClearAttr", @@ -511,6 +512,7 @@ public void map(InfoMap infoMap) { "tensorflow::Status::ErasePayload", "tensorflow::Status::SetPayload", "tensorflow::Status::GetPayload", + "tensorflow::Status::ForEachPayload", "tensorflow::Node::SetStackTrace", "tensorflow::Node::GetStackTrace") .skip()); diff --git a/tensorflow-core/tensorflow-core-api/tensorflow.bazelrc b/tensorflow-core/tensorflow-core-api/tensorflow.bazelrc index 7724d2a6e69..f454087354f 100644 --- a/tensorflow-core/tensorflow-core-api/tensorflow.bazelrc +++ b/tensorflow-core/tensorflow-core-api/tensorflow.bazelrc @@ -70,15 +70,17 @@ # rbe_cpu_linux: RBE options to build with only CPU support. # rbe_linux_cuda_nvcc_py*: RBE options to build with GPU support using nvcc. # -# rbe_linux_py2: Linux Python 2 RBE config. # rbe_linux_py3: Linux Python 3 RBE config # # rbe_win_py37: Windows Python 3.7 RBE config # rbe_win_py38: Windows Python 3.8 RBE config +# rbe_win_py39: Windows Python 3.9 RBE config # # tensorflow_testing_rbe_linux: RBE options to use RBE with tensorflow-testing project on linux # tensorflow_testing_rbe_win: RBE options to use RBE with tensorflow-testing project on windows # +# rbe_lite_linux: RBE options to build TF Lite. +# # Embedded Linux options (experimental and only tested with TFLite build yet) # elinux: General Embedded Linux options shared by all flavors. # elinux_aarch64: Embedded Linux options for aarch64 (ARM64) CPU support. @@ -254,7 +256,7 @@ build:tpu --define=with_tpu_support=true build:tensorrt --repo_env TF_NEED_TENSORRT=1 build:rocm --crosstool_top=@local_config_rocm//crosstool:toolchain -build:rocm --define=using_rocm=true --define=using_rocm_hipcc=true +build:rocm --define=using_rocm_hipcc=true build:rocm --repo_env TF_NEED_ROCM=1 # Options extracted from configure script @@ -347,11 +349,19 @@ build:windows --host_linkopt=/OPT:ICF # Verbose failure logs when something goes wrong build:windows --verbose_failures +# Work around potential issues with large command lines on windows. +# See: https://github.com/bazelbuild/bazel/issues/5163 +build:windows --features=compiler_param_file + # On windows, we never cross compile build:windows --distinct_host_configuration=false # On linux, don't cross compile by default build:linux --distinct_host_configuration=false +# Do not risk cache corruption. 
See: +# https://github.com/bazelbuild/bazel/issues/3360 +build:linux --experimental_guard_against_concurrent_changes + # Configure short or long logs build:short_logs --output_filter=DONT_MATCH_ANYTHING build:verbose_logs --output_filter= @@ -371,7 +381,7 @@ build:v1 --define=tf_api_version=1 --action_env=TF2_BEHAVIOR=0 build:v2 --define=tf_api_version=2 --action_env=TF2_BEHAVIOR=1 # Disable XLA on mobile. -build:xla --define=with_xla_supprt=true # TODO: remove, it's on by default. +build:xla --define=with_xla_support=true # TODO: remove, it's on by default. build:android --define=with_xla_support=false build:ios --define=with_xla_support=false @@ -399,13 +409,14 @@ test:rbe --test_env=USER=anon # workers: build:rbe --remote_download_toplevel -build:rbe_linux --config=rbe -build:rbe_linux --action_env=PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin" -build:rbe_linux --host_javabase=@bazel_toolchains//configs/ubuntu16_04_clang/1.1:jdk8 -build:rbe_linux --javabase=@bazel_toolchains//configs/ubuntu16_04_clang/1.1:jdk8 -build:rbe_linux --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8 -build:rbe_linux --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8 +build:rbe_linux_base --config=rbe +build:rbe_linux_base --action_env=PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin" +build:rbe_linux_base --host_javabase=@bazel_toolchains//configs/ubuntu16_04_clang/1.1:jdk8 +build:rbe_linux_base --javabase=@bazel_toolchains//configs/ubuntu16_04_clang/1.1:jdk8 +build:rbe_linux_base --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8 +build:rbe_linux_base --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8 +build:rbe_linux --config=rbe_linux_base # Non-rbe settings we should include because we do not run configure build:rbe_linux --config=avx_linux # TODO(gunan): Check why we need this specified in rbe, but not in other builds. @@ -414,14 +425,22 @@ build:rbe_linux --host_linkopt=-lrt build:rbe_linux --linkopt=-lm build:rbe_linux --host_linkopt=-lm +# Use the GPU toolchain until the CPU one is ready. 
+# https://github.com/bazelbuild/bazel/issues/13623 +build:rbe_cpu_linux_base --host_crosstool_top="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain" +build:rbe_cpu_linux_base --crosstool_top="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain" +build:rbe_cpu_linux_base --extra_toolchains="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain-linux-x86_64" +build:rbe_cpu_linux_base --extra_execution_platforms="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform" +build:rbe_cpu_linux_base --host_platform="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform" +build:rbe_cpu_linux_base --platforms="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform" + build:rbe_cpu_linux --config=rbe_linux -build:rbe_cpu_linux --host_crosstool_top="@org_tensorflow//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010:toolchain" -build:rbe_cpu_linux --crosstool_top="@org_tensorflow//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010:toolchain" -build:rbe_cpu_linux --extra_toolchains="@org_tensorflow//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010:cc-toolchain-k8" -build:rbe_cpu_linux --extra_execution_platforms="@ubuntu16.04-manylinux2010-py3_config_platform//:platform" -build:rbe_cpu_linux --extra_execution_platforms="@ubuntu16.04-manylinux2010-py3_config_platform//:platform" -build:rbe_cpu_linux --host_platform="@ubuntu16.04-manylinux2010-py3_config_platform//:platform" -build:rbe_cpu_linux --platforms="@ubuntu16.04-manylinux2010-py3_config_platform//:platform" +build:rbe_cpu_linux --config=rbe_cpu_linux_base + +build:rbe_lite_linux --config=rbe_linux_base +build:rbe_lite_linux --config=rbe_cpu_linux_base +build:rbe_lite_linux --config=rbe_linux_py3_base +build:rbe_lite_linux --noexperimental_check_desugar_deps build:rbe_linux_cuda_base --config=rbe_linux build:rbe_linux_cuda_base --config=cuda @@ -429,7 +448,10 @@ build:rbe_linux_cuda_base --config=tensorrt build:rbe_linux_cuda_base --action_env=TF_CUDA_VERSION=11 build:rbe_linux_cuda_base --action_env=TF_CUDNN_VERSION=8 build:rbe_linux_cuda_base --repo_env=REMOTE_GPU_TESTING=1 -test:rbe_linux_cuda_base --test_env=LD_LIBRARY_PATH="/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64" +# TensorRT 7 for CUDA 11.1 is compatible with CUDA 11.2, but requires +# libnvrtc.so.11.1. See https://github.com/NVIDIA/TensorRT/issues/1064. +# TODO(b/187962120): Remove when upgrading to TensorRT 8. 
+test:rbe_linux_cuda_base --test_env=LD_LIBRARY_PATH="/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda-11.1/lib64" build:rbe_linux_cuda11.2_nvcc_base --config=rbe_linux_cuda_base build:rbe_linux_cuda11.2_nvcc_base --host_crosstool_top="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain" @@ -441,13 +463,11 @@ build:rbe_linux_cuda11.2_nvcc_base --platforms="@ubuntu18.04-gcc7_manylinux2010- build:rbe_linux_cuda11.2_nvcc_base --repo_env=TF_CUDA_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda" build:rbe_linux_cuda11.2_nvcc_base --repo_env=TF_TENSORRT_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_tensorrt" build:rbe_linux_cuda11.2_nvcc_base --repo_env=TF_NCCL_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_nccl" -build:rbe_linux_cuda11.2_nvcc_py3.6 --config=rbe_linux_cuda11.2_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.6" build:rbe_linux_cuda11.2_nvcc_py3.7 --config=rbe_linux_cuda11.2_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.7" build:rbe_linux_cuda11.2_nvcc_py3.8 --config=rbe_linux_cuda11.2_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.8" build:rbe_linux_cuda11.2_nvcc_py3.9 --config=rbe_linux_cuda11.2_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.9" # Map default to CUDA 11.2. -build:rbe_linux_cuda_nvcc_py36 --config=rbe_linux_cuda11.2_nvcc_py3.6 build:rbe_linux_cuda_nvcc_py37 --config=rbe_linux_cuda11.2_nvcc_py3.7 build:rbe_linux_cuda_nvcc_py38 --config=rbe_linux_cuda11.2_nvcc_py3.8 build:rbe_linux_cuda_nvcc_py39 --config=rbe_linux_cuda11.2_nvcc_py3.9 @@ -467,46 +487,39 @@ build:rbe_linux_cuda_clang_base --platforms="@ubuntu18.04-clang_manylinux2010-cu build:rbe_linux_cuda_clang_base --repo_env=TF_CUDA_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda" build:rbe_linux_cuda_clang_base --repo_env=TF_TENSORRT_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_tensorrt" build:rbe_linux_cuda_clang_base --repo_env=TF_NCCL_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_nccl" -build:rbe_linux_cuda_clang_py27 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python2.7" -build:rbe_linux_cuda_clang_py35 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.5" -build:rbe_linux_cuda_clang_py36 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.6" build:rbe_linux_cuda_clang_py37 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.7" build:rbe_linux_cuda_clang_py38 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.8" +build:rbe_linux_cuda_clang_py39 --config=rbe_linux_cuda_clang_base 
--repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.9" # ROCm +build:rbe_linux_rocm_base --config=rocm build:rbe_linux_rocm_base --config=rbe_linux -build:rbe_linux_rocm_base --repo_env=TF_NEED_ROCM=1 build:rbe_linux_rocm_base --crosstool_top="@ubuntu18.04-gcc7_manylinux2010-rocm_config_rocm//crosstool:toolchain" build:rbe_linux_rocm_base --extra_toolchains="@ubuntu18.04-gcc7_manylinux2010-rocm_config_rocm//crosstool:toolchain-linux-x86_64" build:rbe_linux_rocm_base --extra_execution_platforms="@ubuntu18.04-gcc7_manylinux2010-rocm_config_platform//:platform" build:rbe_linux_rocm_base --host_platform="@ubuntu18.04-gcc7_manylinux2010-rocm_config_platform//:platform" build:rbe_linux_rocm_base --platforms="@ubuntu18.04-gcc7_manylinux2010-rocm_config_platform//:platform" build:rbe_linux_rocm_base --action_env=TF_ROCM_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_rocm" -build:rbe_linux_rocm_base --define=using_rocm_hipcc=true -build:rbe_linux_rocm_py2.7 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python2.7" -build:rbe_linux_rocm_py3.5 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python3.5" build:rbe_linux_rocm_py3.6 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python3.6" build:rbe_linux_rocm_py3.7 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python3.7" build:rbe_linux_rocm_py3.8 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python3.8" +build:rbe_linux_rocm_py3.9 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python3.9" # Linux CPU -build:rbe_linux_py2 --config=rbe_linux -build:rbe_linux_py2 --repo_env=PYTHON_BIN_PATH="/usr/bin/python2" -build:rbe_linux_py2 --python_path="/usr/bin/python2" -build:rbe_linux_py2 --repo_env=TF_PYTHON_CONFIG_REPO="@org_tensorflow//third_party/toolchains/preconfig/ubuntu16.04/py" build:rbe_linux_py3 --config=rbe_linux -build:rbe_linux_py3 --python_path="/usr/bin/python3" -build:rbe_linux_py3 --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu16.04-manylinux2010-py3_config_python" +build:rbe_linux_py3 --config=rbe_linux_py3_base +build:rbe_linux_py3_base --python_path="/usr/local/bin/python3.9" +build:rbe_linux_py3_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.9" build:rbe_win --config=rbe -build:rbe_win --crosstool_top="@org_tensorflow//third_party/toolchains/preconfig/win/tf_win_06242021:toolchain" -build:rbe_win --extra_toolchains="@org_tensorflow//third_party/toolchains/preconfig/win/tf_win_06242021:cc-toolchain-x64_windows" -build:rbe_win --host_javabase="@org_tensorflow//third_party/toolchains/preconfig/win:windows_jdk8" -build:rbe_win --javabase="@org_tensorflow//third_party/toolchains/preconfig/win:windows_jdk8" -build:rbe_win --extra_execution_platforms="@org_tensorflow//third_party/toolchains/preconfig/win:rbe_windows_ltsc2019" -build:rbe_win --host_platform="@org_tensorflow//third_party/toolchains/preconfig/win:rbe_windows_ltsc2019" -build:rbe_win --platforms="@org_tensorflow//third_party/toolchains/preconfig/win:rbe_windows_ltsc2019" +build:rbe_win --crosstool_top="@tf_toolchains//toolchains/win/tf_win_06242021:toolchain" +build:rbe_win 
--extra_toolchains="@tf_toolchains//toolchains/win/tf_win_06242021:cc-toolchain-x64_windows" +build:rbe_win --host_javabase="@tf_toolchains//toolchains/win:windows_jdk8" +build:rbe_win --javabase="@tf_toolchains//toolchains/win:windows_jdk8" +build:rbe_win --extra_execution_platforms="@tf_toolchains//toolchains/win:rbe_windows_ltsc2019" +build:rbe_win --host_platform="@tf_toolchains//toolchains/win:rbe_windows_ltsc2019" +build:rbe_win --platforms="@tf_toolchains//toolchains/win:rbe_windows_ltsc2019" build:rbe_win --shell_executable=C:\\tools\\msys64\\usr\\bin\\bash.exe build:rbe_win --experimental_strict_action_env=true @@ -526,9 +539,15 @@ build:rbe_win_py37 --python_path=C:\\Python37\\python.exe build:rbe_win_py38 --config=rbe build:rbe_win_py38 --repo_env=PYTHON_BIN_PATH=C:\\Python38\\python.exe build:rbe_win_py38 --repo_env=PYTHON_LIB_PATH=C:\\Python38\\lib\\site-packages -build:rbe_win_py38 --repo_env=TF_PYTHON_CONFIG_REPO=@org_tensorflow//third_party/toolchains/preconfig/win_1803/py38 +build:rbe_win_py38 --repo_env=TF_PYTHON_CONFIG_REPO=@tf_toolchains//toolchains/win_1803/py38 build:rbe_win_py38 --python_path=C:\\Python38\\python.exe +build:rbe_win_py39 --config=rbe +build:rbe_win_py39 --repo_env=PYTHON_BIN_PATH=C:\\Python39\\python.exe +build:rbe_win_py39 --repo_env=PYTHON_LIB_PATH=C:\\Python39\\lib\\site-packages +build:rbe_win_py39 --repo_env=TF_PYTHON_CONFIG_REPO=@tf_toolchains//toolchains/win_1803/py39 +build:rbe_win_py39 --python_path=C:\\Python39\\python.exe + # These you may need to change for your own GCP project. build:tensorflow_testing_rbe --project_id=tensorflow-testing common:tensorflow_testing_rbe_linux --remote_instance_name=projects/tensorflow-testing/instances/default_instance @@ -564,7 +583,7 @@ test:release_base --test_size_filters=small,medium build:release_cpu_linux --config=release_base build:release_cpu_linux --config=avx_linux -build:release_cpu_linux --crosstool_top=@org_tensorflow//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda11.2:toolchain +build:release_cpu_linux --crosstool_top="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain" test:release_cpu_linux --test_env=LD_LIBRARY_PATH build:release_cpu_macos --config=release_base @@ -580,8 +599,14 @@ build:release_gpu_linux --config=release_gpu_base build:release_gpu_linux --config=tensorrt build:release_gpu_linux --action_env=CUDA_TOOLKIT_PATH="/usr/local/cuda-11.2" build:release_gpu_linux --action_env=LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/tensorrt/lib" -build:release_gpu_linux --action_env=GCC_HOST_COMPILER_PATH="/usr/bin/gcc-5" -build:release_gpu_linux --crosstool_top=@org_tensorflow//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda11.2:toolchain +build:release_gpu_linux --action_env=GCC_HOST_COMPILER_PATH="/dt7/usr/bin/gcc" +build:release_gpu_linux --crosstool_top=@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain + +build:release_gpu_linux_11_4 --config=release_gpu_linux +build:release_gpu_linux_11_4 --action_env CUDA_TOOLKIT_PATH="/usr/local/cuda-11.4" +build:release_gpu_linux_11_4 --action_env=TF_CUDA_VERSION="11.4" +build:release_gpu_linux_11_4 --action_env=TF_CUDNN_VERSION="8.2" +build:release_gpu_linux_11_4 --crosstool_top=@ubuntu18.04-gcc7_manylinux2010-cuda11.4-cudnn8.2-tensorrt7.2_config_cuda//crosstool:toolchain build:release_cpu_windows --config=release_base build:release_cpu_windows 
--config=avx_win @@ -623,3 +648,19 @@ build:ubsan --copt -O3 build:ubsan --copt -fno-omit-frame-pointer build:ubsan --linkopt -fsanitize=undefined build:ubsan --linkopt -lubsan + +# Disable TFRT integration for now unless --config=tfrt is specified. +build --deleted_packages=tensorflow/compiler/mlir/tfrt,tensorflow/compiler/mlir/tfrt/benchmarks,tensorflow/compiler/mlir/tfrt/jit/python_binding,tensorflow/compiler/mlir/tfrt/jit/transforms,tensorflow/compiler/mlir/tfrt/python_tests,tensorflow/compiler/mlir/tfrt/tests,tensorflow/compiler/mlir/tfrt/tests/saved_model,tensorflow/compiler/mlir/tfrt/transforms/lhlo_gpu_to_tfrt_gpu,tensorflow/core/runtime_fallback,tensorflow/core/runtime_fallback/conversion,tensorflow/core/runtime_fallback/kernel,tensorflow/core/runtime_fallback/opdefs,tensorflow/core/runtime_fallback/runtime,tensorflow/core/runtime_fallback/util,tensorflow/core/tfrt/common,tensorflow/core/tfrt/eager,tensorflow/core/tfrt/eager/backends/cpu,tensorflow/core/tfrt/eager/backends/gpu,tensorflow/core/tfrt/eager/core_runtime,tensorflow/core/tfrt/eager/cpp_tests/core_runtime,tensorflow/core/tfrt/fallback,tensorflow/core/tfrt/gpu,tensorflow/core/tfrt/run_handler_thread_pool,tensorflow/core/tfrt/runtime,tensorflow/core/tfrt/saved_model,tensorflow/core/tfrt/saved_model/tests,tensorflow/core/tfrt/tpu,tensorflow/core/tfrt/utils +build:tfrt --deleted_packages= + +# Experimental configuration for testing XLA GPU lowering to TFRT BEF thunks. +# bazel test --config=experimental_enable_bef_thunk \ +# //tensorflow/compiler/xla/service/gpu/tests:mlir_gemm_test +build:experimental_enable_bef_thunk --config=tfrt +build:experimental_enable_bef_thunk --//tensorflow/compiler/xla/service/gpu:enable_bef_thunk +build:experimental_enable_bef_thunk --@tf_runtime//:enable_gpu +build:experimental_enable_bef_thunk --@rules_cuda//cuda:enable_cuda +build:experimental_enable_bef_thunk --@rules_cuda//cuda:cuda_runtime=//tensorflow/compiler/xla/service/gpu:cuda_runtime_for_xlir +build:experimental_enable_bef_thunk --nocheck_visibility +build:experimental_enable_bef_thunk --incompatible_strict_action_env +build:experimental_enable_bef_thunk --config=monolithic \ No newline at end of file From e823d852a8f92a0ad5aa5749bac198757443515a Mon Sep 17 00:00:00 2001 From: Karl Lessard Date: Thu, 18 Nov 2021 21:26:05 -0500 Subject: [PATCH 03/21] Update ci.yml Trying devtool-9 to fix gcc bug for linux gpu --- .github/workflows/ci.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1c79ea46728..0fd044979af 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -26,14 +26,14 @@ jobs: run: | yum -y update yum -y install centos-release-scl-rh epel-release - yum -y install java-11-openjdk-devel devtoolset-7 + yum -y install java-11-openjdk-devel devtoolset-9 echo Downloading Maven curl -L https://archive.apache.org/dist/maven/maven-3/3.6.3/binaries/apache-maven-3.6.3-bin.tar.gz -o $HOME/apache-maven-3.6.3-bin.tar.gz tar xzf $HOME/apache-maven-3.6.3-bin.tar.gz -C /opt/ ln -sf /opt/apache-maven-3.6.3/bin/mvn /usr/bin/mvn - name: Build project run: | - source scl_source enable devtoolset-7 || true + source scl_source enable devtoolset-9 || true export JAVA_HOME=$(dirname $(dirname $(readlink $(readlink $(which javac))))) echo $JAVA_HOME mvn -version @@ -52,14 +52,14 @@ jobs: run: | yum -y update yum -y install centos-release-scl-rh epel-release - yum -y install java-11-openjdk-devel devtoolset-7 + yum -y install java-11-openjdk-devel 
devtoolset-9 echo Downloading Maven curl -L https://archive.apache.org/dist/maven/maven-3/3.6.3/binaries/apache-maven-3.6.3-bin.tar.gz -o $HOME/apache-maven-3.6.3-bin.tar.gz tar xzf $HOME/apache-maven-3.6.3-bin.tar.gz -C /opt/ ln -sf /opt/apache-maven-3.6.3/bin/mvn /usr/bin/mvn - name: Build project run: | - source scl_source enable devtoolset-7 || true + source scl_source enable devtoolset-9 || true export JAVA_HOME=$(dirname $(dirname $(readlink $(readlink $(which javac))))) echo $JAVA_HOME mvn -version @@ -99,7 +99,7 @@ jobs: yum --disablerepo updates -y install $GLIBC yum -x "$GLIBC" -y update yum -x "$GLIBC" -y install centos-release-scl-rh epel-release - yum -x "$GLIBC" -y install java-1.8.0-openjdk-devel devtoolset-7 rh-git218 patch perl-Data-Dumper python36-devel python36-numpy python36-pip python36-six + yum -x "$GLIBC" -y install java-1.8.0-openjdk-devel devtoolset-9 rh-git218 patch perl-Data-Dumper python36-devel python36-numpy python36-pip python36-six echo Downloading Maven curl -L https://archive.apache.org/dist/maven/maven-3/3.6.3/binaries/apache-maven-3.6.3-bin.tar.gz -o $HOME/apache-maven-3.6.3-bin.tar.gz tar xzf $HOME/apache-maven-3.6.3-bin.tar.gz -C /opt/ @@ -128,7 +128,7 @@ jobs: uses: actions/checkout@v1 - name: Build project run: | - source scl_source enable devtoolset-7 rh-git218 || true + source scl_source enable devtoolset-9 rh-git218 || true git --version gcc --version mvn -version From 36d3ea37acd53d222b12e37fb221bdf74438c360 Mon Sep 17 00:00:00 2001 From: Karl Lessard Date: Mon, 22 Nov 2021 10:49:26 -0500 Subject: [PATCH 04/21] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5c117740db8..b7b5e842ce4 100644 --- a/README.md +++ b/README.md @@ -148,7 +148,7 @@ This table shows the mapping between different version of TensorFlow for Java an | 0.3.1 | 2.4.1 | | 0.3.2 | 2.4.1 | | 0.3.3 | 2.4.1 | -| 0.4.0-SNAPSHOT | 2.6.0 +| 0.4.0-SNAPSHOT | 2.7.0 ## How to Contribute? 
From e123e242619930dd2e6b32f4605846c6670c296f Mon Sep 17 00:00:00 2001 From: Karl Lessard Date: Sat, 27 Nov 2021 21:20:50 -0500 Subject: [PATCH 05/21] Releasing 0.4.0 --- README.md | 16 ++++++++-------- pom.xml | 2 +- tensorflow-core/pom.xml | 2 +- tensorflow-core/tensorflow-core-api/pom.xml | 2 +- .../tensorflow-core-generator/pom.xml | 2 +- .../tensorflow-core-platform-gpu/pom.xml | 2 +- tensorflow-core/tensorflow-core-platform/pom.xml | 2 +- tensorflow-framework/pom.xml | 2 +- 8 files changed, 15 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index b7b5e842ce4..d482990c499 100644 --- a/README.md +++ b/README.md @@ -56,12 +56,12 @@ systems, you should add the following dependencies: org.tensorflow tensorflow-core-api - 0.3.3 + 0.4.0 org.tensorflow tensorflow-core-api - 0.3.3 + 0.4.0 linux-x86_64${javacpp.platform.extension} ``` @@ -72,24 +72,24 @@ native dependencies as follows: org.tensorflow tensorflow-core-api - 0.3.3 + 0.4.0 org.tensorflow tensorflow-core-api - 0.3.3 + 0.4.0 linux-x86_64${javacpp.platform.extension} org.tensorflow tensorflow-core-api - 0.3.3 + 0.4.0 macosx-x86_64${javacpp.platform.extension} org.tensorflow tensorflow-core-api - 0.3.3 + 0.4.0 windows-x86_64${javacpp.platform.extension} ``` @@ -102,7 +102,7 @@ artifact includes transitively all the artifacts above as a single dependency: org.tensorflow tensorflow-core-platform${javacpp.platform.extension} - 0.3.3 + 0.4.0 ``` @@ -148,7 +148,7 @@ This table shows the mapping between different version of TensorFlow for Java an | 0.3.1 | 2.4.1 | | 0.3.2 | 2.4.1 | | 0.3.3 | 2.4.1 | -| 0.4.0-SNAPSHOT | 2.7.0 +| 0.4.0 | 2.7.0 | ## How to Contribute? diff --git a/pom.xml b/pom.xml index ed06b80c231..4c9b019a003 100644 --- a/pom.xml +++ b/pom.xml @@ -7,7 +7,7 @@ org.tensorflow tensorflow-java - 0.4.0-SNAPSHOT + 0.4.0 pom TensorFlow Java Parent diff --git a/tensorflow-core/pom.xml b/tensorflow-core/pom.xml index 2d98332fa9f..407f7f690b5 100644 --- a/tensorflow-core/pom.xml +++ b/tensorflow-core/pom.xml @@ -22,7 +22,7 @@ org.tensorflow tensorflow-java - 0.4.0-SNAPSHOT + 0.4.0 tensorflow-core pom diff --git a/tensorflow-core/tensorflow-core-api/pom.xml b/tensorflow-core/tensorflow-core-api/pom.xml index dc990be8850..4d61c0b8cc0 100644 --- a/tensorflow-core/tensorflow-core-api/pom.xml +++ b/tensorflow-core/tensorflow-core-api/pom.xml @@ -6,7 +6,7 @@ org.tensorflow tensorflow-core - 0.4.0-SNAPSHOT + 0.4.0 tensorflow-core-api jar diff --git a/tensorflow-core/tensorflow-core-generator/pom.xml b/tensorflow-core/tensorflow-core-generator/pom.xml index 25608fe7e24..14786145f65 100644 --- a/tensorflow-core/tensorflow-core-generator/pom.xml +++ b/tensorflow-core/tensorflow-core-generator/pom.xml @@ -5,7 +5,7 @@ org.tensorflow tensorflow-core - 0.4.0-SNAPSHOT + 0.4.0 tensorflow-core-generator jar diff --git a/tensorflow-core/tensorflow-core-platform-gpu/pom.xml b/tensorflow-core/tensorflow-core-platform-gpu/pom.xml index ef65acc87ca..50a4c4cace5 100644 --- a/tensorflow-core/tensorflow-core-platform-gpu/pom.xml +++ b/tensorflow-core/tensorflow-core-platform-gpu/pom.xml @@ -22,7 +22,7 @@ org.tensorflow tensorflow-core - 0.4.0-SNAPSHOT + 0.4.0 tensorflow-core-platform-gpu TensorFlow Core API Library Platform GPU diff --git a/tensorflow-core/tensorflow-core-platform/pom.xml b/tensorflow-core/tensorflow-core-platform/pom.xml index 4a5b370ca8f..165079d2e74 100644 --- a/tensorflow-core/tensorflow-core-platform/pom.xml +++ b/tensorflow-core/tensorflow-core-platform/pom.xml @@ -22,7 +22,7 @@ org.tensorflow tensorflow-core - 
0.4.0-SNAPSHOT + 0.4.0 tensorflow-core-platform TensorFlow Core API Library Platform diff --git a/tensorflow-framework/pom.xml b/tensorflow-framework/pom.xml index af7f47815d5..238b2610622 100644 --- a/tensorflow-framework/pom.xml +++ b/tensorflow-framework/pom.xml @@ -22,7 +22,7 @@ org.tensorflow tensorflow-java - 0.4.0-SNAPSHOT + 0.4.0 tensorflow-framework jar From b577f3988c7f66c919be53dbdd9cb5e62798a491 Mon Sep 17 00:00:00 2001 From: Karl Lessard Date: Sun, 28 Nov 2021 13:04:25 -0500 Subject: [PATCH 06/21] Increase version to next iteration --- README.md | 1 + pom.xml | 2 +- tensorflow-core/pom.xml | 2 +- tensorflow-core/tensorflow-core-api/pom.xml | 2 +- .../gen/annotations/org/tensorflow/op/Ops.java | 16 ++++++++-------- .../tensorflow-core-generator/pom.xml | 2 +- .../tensorflow-core-platform-gpu/pom.xml | 2 +- tensorflow-core/tensorflow-core-platform/pom.xml | 2 +- tensorflow-framework/pom.xml | 2 +- 9 files changed, 16 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index d482990c499..305fb1e759a 100644 --- a/README.md +++ b/README.md @@ -149,6 +149,7 @@ This table shows the mapping between different version of TensorFlow for Java an | 0.3.2 | 2.4.1 | | 0.3.3 | 2.4.1 | | 0.4.0 | 2.7.0 | +| 0.5.0-SNAPSHOT | 2.7.0 | ## How to Contribute? diff --git a/pom.xml b/pom.xml index 4c9b019a003..f4f1b18928b 100644 --- a/pom.xml +++ b/pom.xml @@ -7,7 +7,7 @@ org.tensorflow tensorflow-java - 0.4.0 + 0.5.0-SNAPSHOT pom TensorFlow Java Parent diff --git a/tensorflow-core/pom.xml b/tensorflow-core/pom.xml index 407f7f690b5..d2a3e9d393d 100644 --- a/tensorflow-core/pom.xml +++ b/tensorflow-core/pom.xml @@ -22,7 +22,7 @@ org.tensorflow tensorflow-java - 0.4.0 + 0.5.0-SNAPSHOT tensorflow-core pom diff --git a/tensorflow-core/tensorflow-core-api/pom.xml b/tensorflow-core/tensorflow-core-api/pom.xml index 4d61c0b8cc0..b6f9da1a2bd 100644 --- a/tensorflow-core/tensorflow-core-api/pom.xml +++ b/tensorflow-core/tensorflow-core-api/pom.xml @@ -6,7 +6,7 @@ org.tensorflow tensorflow-core - 0.4.0 + 0.5.0-SNAPSHOT tensorflow-core-api jar diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java index fc6b447ab31..223754b0480 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java @@ -367,20 +367,20 @@ public final class Ops { public final SparseOps sparse; - public final TpuOps tpu; - public final BitwiseOps bitwise; - public final MathOps math; + public final TpuOps tpu; public final AudioOps audio; - public final SignalOps signal; + public final MathOps math; - public final QuantizationOps quantization; + public final SignalOps signal; public final TrainOps train; + public final QuantizationOps quantization; + private final Scope scope; Ops(Scope scope) { @@ -398,13 +398,13 @@ public final class Ops { random = new RandomOps(this); strings = new StringsOps(this); sparse = new SparseOps(this); - tpu = new TpuOps(this); bitwise = new BitwiseOps(this); - math = new MathOps(this); + tpu = new TpuOps(this); audio = new AudioOps(this); + math = new MathOps(this); signal = new SignalOps(this); - quantization = new QuantizationOps(this); train = new TrainOps(this); + quantization = new QuantizationOps(this); } /** diff --git a/tensorflow-core/tensorflow-core-generator/pom.xml b/tensorflow-core/tensorflow-core-generator/pom.xml index 
14786145f65..52e87a0619a 100644 --- a/tensorflow-core/tensorflow-core-generator/pom.xml +++ b/tensorflow-core/tensorflow-core-generator/pom.xml @@ -5,7 +5,7 @@ org.tensorflow tensorflow-core - 0.4.0 + 0.5.0-SNAPSHOT tensorflow-core-generator jar diff --git a/tensorflow-core/tensorflow-core-platform-gpu/pom.xml b/tensorflow-core/tensorflow-core-platform-gpu/pom.xml index 50a4c4cace5..f6628b64da1 100644 --- a/tensorflow-core/tensorflow-core-platform-gpu/pom.xml +++ b/tensorflow-core/tensorflow-core-platform-gpu/pom.xml @@ -22,7 +22,7 @@ org.tensorflow tensorflow-core - 0.4.0 + 0.5.0-SNAPSHOT tensorflow-core-platform-gpu TensorFlow Core API Library Platform GPU diff --git a/tensorflow-core/tensorflow-core-platform/pom.xml b/tensorflow-core/tensorflow-core-platform/pom.xml index 165079d2e74..5717b1f98a8 100644 --- a/tensorflow-core/tensorflow-core-platform/pom.xml +++ b/tensorflow-core/tensorflow-core-platform/pom.xml @@ -22,7 +22,7 @@ org.tensorflow tensorflow-core - 0.4.0 + 0.5.0-SNAPSHOT tensorflow-core-platform TensorFlow Core API Library Platform diff --git a/tensorflow-framework/pom.xml b/tensorflow-framework/pom.xml index 238b2610622..026bf227afe 100644 --- a/tensorflow-framework/pom.xml +++ b/tensorflow-framework/pom.xml @@ -22,7 +22,7 @@ org.tensorflow tensorflow-java - 0.4.0 + 0.5.0-SNAPSHOT tensorflow-framework jar From f07e89e5c132594178b0fb3be9829c09e0f24eef Mon Sep 17 00:00:00 2001 From: Karl Lessard Date: Sun, 28 Nov 2021 23:13:53 -0500 Subject: [PATCH 07/21] Updating versions in install instructions --- docs/install.md | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/docs/install.md b/docs/install.md index b102782ed4f..b1831a05ace 100644 --- a/docs/install.md +++ b/docs/install.md @@ -41,14 +41,8 @@ TensorFlow Java to your project. The easiest one is to add a dependency on the Core API and the native dependencies it requires to run on all supported platforms. -You can also select one of the following extensions instead of the pure CPU -version: - -* `tensorflow-core-platform-mkl`: Support for Intel® MKL-DNN on all platforms -* `tensorflow-core-platform-gpu`: Support for CUDA® on Linux and Windows - platforms -* `tensorflow-core-platform-mkl-gpu`: Support for Intel® MKL-DNN and CUDA® on - Linux platform. +You can also select the `tensorflow-core-platform-gpu` extension instead, which +supports CUDA® on Linux and Windows platforms. In addition, a separate dependency on the `tensorflow-framework` library can be added to benefit from a rich set of utilities for TensorFlow-based machine @@ -64,7 +58,7 @@ For example, org.tensorflow tensorflow-core-platform - 0.3.3 + 0.4.0 ``` @@ -107,7 +101,7 @@ snapshots repository in your `pom.xml`. org.tensorflow tensorflow-core-platform - 0.4.0-SNAPSHOT + 0.5.0-SNAPSHOT ``` @@ -124,7 +118,7 @@ repositories { } dependencies { - compile group: 'org.tensorflow', name: 'tensorflow-core-platform', version: '0.3.3' + compile group: 'org.tensorflow', name: 'tensorflow-core-platform', version: '0.4.0' } ``` @@ -170,7 +164,7 @@ add the TensorFlow dependency to the project's `pom.xml` file: org.tensorflow tensorflow-core-platform - 0.3.3 + 0.4.0 From 1f691925187017346b19ec944a6ca6ebd83d2451 Mon Sep 17 00:00:00 2001 From: Frank Liu Date: Fri, 14 Jan 2022 05:18:26 -0800 Subject: [PATCH 08/21] Fix NullPointerException issue. 
(#407) --- .../src/main/java/org/tensorflow/EagerOperation.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerOperation.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerOperation.java index 29449847be8..bf0ed87586d 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerOperation.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerOperation.java @@ -190,9 +190,9 @@ private static Tensor resolveTensorHandle(TFE_TensorHandle handle, EagerSession requireTensorHandle(handle); try (PointerScope scope = new PointerScope()) { TF_Status status = TF_Status.newStatus(); - TF_Tensor tensor = TFE_TensorHandleResolve(handle, status).withDeallocator(); + TF_Tensor tensor = TFE_TensorHandleResolve(handle, status); status.throwExceptionIfNotOK(); - return RawTensor.fromHandle(tensor, session).asTypedTensor(); + return RawTensor.fromHandle(tensor.withDeallocator(), session).asTypedTensor(); } } From d518678c632778bd8805fee16939a7b6afe53597 Mon Sep 17 00:00:00 2001 From: Karl Lessard Date: Wed, 26 Jan 2022 21:21:44 -0500 Subject: [PATCH 09/21] Add sparse tensor mappings (#405) --- tensorflow-core/tensorflow-core-api/pom.xml | 2 +- .../main/java/org/tensorflow/RawTensor.java | 51 +++-- .../java/org/tensorflow/SparseTensor.java | 165 ++++++++++++++ .../src/main/java/org/tensorflow/Tensor.java | 100 +++++---- .../java/org/tensorflow/TensorMapper.java | 21 +- .../internal/types/SparseHelpers.java | 49 +++++ .../internal/types/TBfloat16Mapper.java | 86 +++++++- .../internal/types/TBoolMapper.java | 83 ++++++- .../internal/types/TFloat16Mapper.java | 86 +++++++- .../internal/types/TFloat32Mapper.java | 83 ++++++- .../internal/types/TFloat64Mapper.java | 83 ++++++- .../internal/types/TInt32Mapper.java | 83 ++++++- .../internal/types/TInt64Mapper.java | 82 ++++++- .../internal/types/TStringMapper.java | 113 +++++++++- .../internal/types/TUint8Mapper.java | 83 ++++++- .../java/org/tensorflow/types/TBfloat16.java | 31 ++- .../main/java/org/tensorflow/types/TBool.java | 25 +++ .../java/org/tensorflow/types/TFloat16.java | 30 ++- .../java/org/tensorflow/types/TFloat32.java | 28 ++- .../java/org/tensorflow/types/TFloat64.java | 29 ++- .../java/org/tensorflow/types/TInt32.java | 26 ++- .../java/org/tensorflow/types/TInt64.java | 25 +++ .../java/org/tensorflow/types/TString.java | 29 ++- .../java/org/tensorflow/types/TUint8.java | 25 +++ .../org/tensorflow/types/family/TType.java | 46 ++-- .../java/org/tensorflow/SparseTensorTest.java | 207 ++++++++++++++++++ 26 files changed, 1535 insertions(+), 136 deletions(-) create mode 100644 tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SparseTensor.java create mode 100644 tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/SparseHelpers.java create mode 100644 tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SparseTensorTest.java diff --git a/tensorflow-core/tensorflow-core-api/pom.xml b/tensorflow-core/tensorflow-core-api/pom.xml index b6f9da1a2bd..142aac1065f 100644 --- a/tensorflow-core/tensorflow-core-api/pom.xml +++ b/tensorflow-core/tensorflow-core-api/pom.xml @@ -20,7 +20,7 @@ ${native.build.skip} ${native.build.skip} org.tensorflow.core.api - 0.3.3 + 0.4.0-SNAPSHOT 1.0.1 diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/RawTensor.java 
b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/RawTensor.java index 2a4a21face3..5693e23c560 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/RawTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/RawTensor.java @@ -34,12 +34,12 @@ * A tensor which memory has not been mapped to a data space directly accessible from the JVM. * *

    A raw tensor is a minimalist representation of a tensor allocated in native memory by the - * TensorFlow runtime library and it controls its lifetime within the current process. The data - * is represented by a flat {@link ByteDataBuffer buffer of bytes}, until it is mapped in a - * n-dimensional typed space by a {@link TType typed tensor}.

    + * TensorFlow runtime library and it controls its lifetime within the current process. The data is + * represented by a flat {@link ByteDataBuffer buffer of bytes}, until it is mapped in a + * n-dimensional typed space by a {@link TType typed tensor}. * - *

    Instances of a RawTensor are not thread-safe and their resource must be released - * by calling {@link #close()} explicitly or implicitly via try-with-resources.

    + *

    Instances of a RawTensor are not thread-safe and their resource must be released by + * calling {@link #close()} explicitly or implicitly via try-with-resources. */ public final class RawTensor implements Tensor { @@ -81,9 +81,7 @@ public ByteDataBuffer data() { return buffer; } - /** - * Returns a string describing the type and shape of the tensor. - */ + /** Returns a string describing the type and shape of the tensor. */ @Override public String toString() { return String.format("%s tensor with shape %s", typeInfo.dataType(), shape); @@ -92,20 +90,20 @@ public String toString() { /** * Allocates a new tensor in native memory of the given type, shape and size. * - *

    The size of the tensor must be at least large enough to contain all scalars for the - * given type and shape. More memory can also be allocated to store also metadata within the - * tensor itself, e.g. a lookup table in a string tensor. + *

    The size of the tensor must be at least large enough to contain all scalars for the given + * type and shape. More memory can also be allocated to store also metadata within the tensor + * itself, e.g. a lookup table in a string tensor. * * @param type tensor type class * @param shape shape of the tensor * @param size size in bytes of the tensor, or -1 to compute the size from the shape * @return allocated tensor * @throws IllegalArgumentException if {@code size} is smaller than the minimum space required to - * store the tensor data - * @throws IllegalArgumentException if {@code size} is set to -1 but elements of the given - * {@code type} are of variable length (e.g. strings) - * @throws IllegalArgumentException if {@code shape} is totally or partially - * {@link Shape#hasUnknownDimension() unknown} + * store the tensor data + * @throws IllegalArgumentException if {@code size} is set to -1 but elements of the given {@code + * type} are of variable length (e.g. strings) + * @throws IllegalArgumentException if {@code shape} is totally or partially {@link + * Shape#hasUnknownDimension() unknown} * @throws IllegalStateException if tensor failed to be allocated */ static RawTensor allocate(Class type, Shape shape, long size) { @@ -123,12 +121,14 @@ static RawTensor allocate(Class type, Shape shape, long size) { allocatedSize = shape.size() * typeInfo.byteSize(); } else if (!typeInfo.isVariableLength() && shape.size() * typeInfo.byteSize() > allocatedSize) { - // Minimum requirements for datatypes of variable length cannot be verified in a relevant way so + // Minimum requirements for datatypes of variable length cannot be verified in a relevant way + // so // we only validate them for fixed length datatypes throw new IllegalArgumentException( "Tensor size is not large enough to contain all scalar values"); } - TF_Tensor nativeHandle = allocate(typeInfo.dataType().getNumber(), shape.asArray(), allocatedSize); + TF_Tensor nativeHandle = + allocate(typeInfo.dataType().getNumber(), shape.asArray(), allocatedSize); try (PointerScope scope = new PointerScope()) { scope.attach(nativeHandle); RawTensor t = new RawTensor(typeInfo, shape); @@ -147,9 +147,9 @@ static RawTensor fromHandle(TF_Tensor handle) { TensorTypeInfo typeInfo = TensorTypeRegistry.find(DataType.forNumber(dtype(handle))); RawTensor t = new RawTensor(typeInfo, Shape.of(shape(handle))); try (PointerScope scope = new PointerScope()) { - scope.attach(handle); - t.tensorHandle = handle; - t.tensorScope = scope.extend(); + scope.attach(handle); + t.tensorHandle = handle; + t.tensorScope = scope.extend(); } return t; } @@ -168,6 +168,7 @@ static RawTensor fromHandle(TF_Tensor handle, EagerSession session) { /** * Returns the native handle to this tensor + * * @throws IllegalStateException if tensor has been closed */ TF_Tensor nativeHandle() { @@ -178,7 +179,8 @@ TF_Tensor nativeHandle() { * Returns a typed reference to this tensor * *

    In some cases, it is more useful to keep a typed reference to a tensor rather than its raw - * nature to prevent mapping its memory on every access (e.g. when calling {@link Operand#asTensor()}). + * nature to prevent mapping its memory on every access (e.g. when calling {@link + * Operand#asTensor()}). * * @return typed reference to this tensor */ @@ -186,6 +188,11 @@ TType asTypedTensor() { return typeInfo.mapper().mapDense(this); } + /** @return metadata about the type of this tensor. */ + TensorTypeInfo typeInfo() { + return typeInfo; + } + private static TF_Tensor requireHandle(TF_Tensor handle) { if (handle == null || handle.isNull()) { throw new IllegalStateException("close() was called on the Tensor"); diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SparseTensor.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SparseTensor.java new file mode 100644 index 00000000000..752b1786354 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SparseTensor.java @@ -0,0 +1,165 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +package org.tensorflow; + +import org.bytedeco.javacpp.PointerScope; +import org.tensorflow.types.TInt64; +import org.tensorflow.types.family.TType; + +/** + * A virtual type of {@link Tensor} composed of three dense tensors (indices, values and dimensions) + * used to represent the sparse data into a multi-dimensional dense space. + * + *

    Any tensor returned by a sparse tensor factory (e.g. {@link TInt64#sparseTensorOf(TInt64, + * TInt64, TInt64)}) can be cast back to this interface to directly access the dense tensors it is + * composed of. + * + *

    A sparse tensor will keep strong references to its dense tensors to prevent them from being + * released before it is itself closed. Likewise, closing a sparse tensor won't release the memory + * of its dense tensors until they in turn are closed. It is therefore important to protect not only the + * dense tensors within a try-with-resources block but also the sparse tensor itself. + * + *

    For example, this code is perfectly safe: + * + *

    {@code
    + * TFloat64 createSparseTensor() {
    + *     try (TInt64 indices = TInt64.tensorOf(...);
    + *         TFloat64 values = TFloat64.vectorOf(...);
    + *         TInt64 denseShape = TInt64.vectorOf(...)) {
    + *         return TFloat64.sparseTensorOf(indices, values, denseShape);
    + *     }
    + * }
    + * try (TFloat64 sparseTensor = createSparseTensor()) {
    + *     ...
    + * }
    + * }
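    The snippet below expands the example above into a standalone sketch. It relies only on the API introduced by this patch ({@code SparseTensor.of}, {@code asTypedTensor()}, {@code isSparse()}, {@code values()}, {@code denseShape()}) plus the pre-existing ndarray factories {@code StdArrays.ndCopyOf}, {@code TInt64.tensorOf}, {@code TInt64.vectorOf} and {@code TFloat64.vectorOf}; the generic form {@code SparseTensor<TFloat64>} and the sample values are assumptions made for illustration only.

    import org.tensorflow.SparseTensor;
    import org.tensorflow.ndarray.StdArrays;
    import org.tensorflow.types.TFloat64;
    import org.tensorflow.types.TInt64;

    public class SparseTensorExample {
      public static void main(String[] args) {
        // Two non-default values in a 3x4 dense space: [1,3] = 18.0 and [2,0] = 3.8 (illustrative data)
        try (TInt64 indices = TInt64.tensorOf(StdArrays.ndCopyOf(new long[][] {{1, 3}, {2, 0}}));
            TFloat64 values = TFloat64.vectorOf(18.0, 3.8);
            TInt64 denseShape = TInt64.vectorOf(3, 4);
            // The sparse tensor keeps its own references to the three dense tensors above,
            // so closing everything through this single try-with-resources block is safe.
            SparseTensor<TFloat64> sparse = SparseTensor.of(indices, values, denseShape)) {

          TFloat64 typed = sparse.asTypedTensor();            // same instance, viewed as a TFloat64
          System.out.println(typed.isSparse());               // true
          System.out.println(sparse.values().getDouble(1));   // 3.8
          System.out.println(sparse.denseShape().getLong(1)); // 4
        }
      }
    }

    Whatever the closing order, the native memory backing the three dense tensors is only released once both they and {@code sparse} have been closed, which is the lifetime rule described above.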
    + * + * @param type of data stored in the tensor + */ +public interface SparseTensor extends Tensor { + + /** + * Creates a sparse tensor from {@code indices}, {@code values} and {@code denseShape} dense + * tensors. + * + * @param indices A 2-D tensor of shape {@code [N, ndims]}, that specifies the indices of the + * elements in the sparse tensor that contain non-default values (elements are zero-indexed). + * For example, {@code indices=[[1,3,1], [2,4,0]]} specifies that the elements with indexes of + * {@code [1,3,1]} and {@code [2,4,0]} have non-default values. + * @param values A 1-D tensor of shape {@code [N]}, which supplies the values for each element in + * indices. For example, given {@code indices=[[1,3,1], [2,4,0]]}, the parameter {@code + * values=[18, 3.8]} specifies that element {@code [1,3,1]} of the sparse tensor has a value + * of {@code 18}, and element {@code [2,4,0]} of the tensor has a value of {@code 3.8}. + * @param denseShape A 1-D tensor of shape {@code [ndims]} where each the value at index {@code i} + * represents the size of dimension {@code i} in a dense version of that tensor. + * @return the new sparse tensor + * @throws IllegalArgumentException if shapes of the dense tensors are not compatible + */ + static SparseTensor of(TInt64 indices, T values, TInt64 denseShape) { + if (indices.rank() != 2) { + throw new IllegalArgumentException("Sparse indices must be a rank-2 tensor"); + } + if (values.rank() != 1) { + throw new IllegalArgumentException("Sparse values must be a rank-1 tensor"); + } + if (denseShape.rank() != 1) { + throw new IllegalArgumentException("Sparse shape must be a rank-1 tensor"); + } + if (indices.shape().get(0) != values.shape().get(0)) { + throw new IllegalArgumentException( + "Number of indices must be equal to the number of values [" + + indices.shape().get(0) + + " != " + + values.shape().get(0) + + "]"); + } + if (indices.shape().get(1) != denseShape.shape().get(0)) { + throw new IllegalArgumentException( + "Indices must have a coordinate for each dimensions of the tensor [" + + indices.shape().get(1) + + " != " + + denseShape.shape().get(0) + + "]"); + } + // Use mapper of the values tensor as this is the one giving the type of the sparse tensor as + // well + TensorMapper mapper = (TensorMapper) values.asRawTensor().typeInfo().mapper(); + + // Attach all tensors to a new pointer scope (this will increment their reference count) and + // preserve a strong reference to that scope inside the sparse tensor. This is done by + // extending this scope in the sparse tensor constructors, via mapSparse() + try (PointerScope scope = new PointerScope()) { + scope.attach(indices.asRawTensor().nativeHandle()); + scope.attach(values.asRawTensor().nativeHandle()); + scope.attach(denseShape.asRawTensor().nativeHandle()); + return mapper.mapSparse(indices, values, denseShape, scope); + } + } + + @Override + default RawTensor asRawTensor() { + throw new UnsupportedOperationException( + "Sparse tensors cannot be converted to a single raw tensor"); + } + + /** + * Returns this instance as a typed tensor. + * + *

    This method is equivalent to casting the {@code SparseTensor} instance directly to {@code + * T}. + * + * @return the typed tensor + */ + default T asTypedTensor() { + return (T) this; + } + + /** + * Gets the indices of the sparse values. + * + *

    Indices are a 2-D long array of shape {@code [N, ndims]}, that specifies the indices of the + * elements in the sparse tensor that contain nonzero values (elements are zero-indexed). + * + *

    For example, {@code indices=[[1,3], [2,4]]} specifies that the elements with indexes of + * coordinates {@code [1,3]} and {@code [2,4]} have nonzero values. + * + * @return the indices + */ + TInt64 indices(); + + /** + * Gets the sparse values. + * + *

    Values are a 1-D array of type {@code T} and shape {@code [N]}, that supplies the values for + * each element in indices. + * + *

    For example, given {@code indices=[[1,3], [2,4]]}, the parameter {@code values=[18, 3.6]} specifies + * that element {@code [1,3]} of the sparse tensor has a value of {@code 18}, and element {@code + * [2,4]} of the sparse tensor has a value of {@code 3.6}. + * + * @return the values + */ + T values(); + + /** + * Gets the sparse tensor dimensions defining the shape of that tensor in a dense space. + * + *

    The dense shape is a 1-D tensor of shape {@code [ndims]} where the value at index {@code i} + * represents the total number of elements in dimension {@code i} in a dense version of that tensor. + * + * @return the dense shape + */ + TInt64 denseShape(); +} diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Tensor.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Tensor.java index fc1275229bf..3b9deff9cd4 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Tensor.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Tensor.java @@ -25,10 +25,10 @@ /** * A statically typed multi-dimensional array. * - *

    There are two categories of tensors in TensorFlow Java: {@link TType typed tensors} and - * {@link RawTensor raw tensors}. The former maps the tensor native memory to an - * n-dimensional typed data space, allowing direct I/O operations from the JVM, while the latter - * is only a reference to a native tensor allowing basic operations and flat data access.

    + *

    There are two categories of tensors in TensorFlow Java: {@link TType typed tensors} and {@link + * RawTensor raw tensors}. The former maps the tensor native memory to an n-dimensional typed data + * space, allowing direct I/O operations from the JVM, while the latter is only a reference to a + * native tensor allowing basic operations and flat data access. * *

    WARNING: Resources consumed by the Tensor object must be explicitly freed by * invoking the {@link #close()} method when the object is no longer needed. For example, using a @@ -39,6 +39,7 @@ * doSomethingWith(t); * } * } + * *

    Instances of a Tensor are not thread-safe. */ public interface Tensor extends Shaped, AutoCloseable { @@ -54,9 +55,9 @@ public interface Tensor extends Shaped, AutoCloseable { * @param shape shape of the tensor * @return an allocated but uninitialized tensor * @throws IllegalArgumentException if elements of the given {@code type} are of variable length - * (e.g. strings) - * @throws IllegalArgumentException if {@code shape} is totally or partially - * {@link Shape#hasUnknownDimension() unknown} + * (e.g. strings) + * @throws IllegalArgumentException if {@code shape} is totally or partially {@link + * Shape#hasUnknownDimension() unknown} * @throws IllegalStateException if tensor failed to be allocated */ static T of(Class type, Shape shape) { @@ -67,8 +68,8 @@ static T of(Class type, Shape shape) { * Allocates a tensor of a given datatype, shape and size. * *

    This method is identical to {@link #of(Class, Shape)}, except that the final size of the - * tensor can be explicitly set instead of computing it from the datatype and shape, which could be - * larger than the actual space required to store the data but not smaller. + * tensor can be explicitly set instead of computing it from the datatype and shape, which could + * be larger than the actual space required to store the data but not smaller. * * @param the tensor type * @param type the tensor type class @@ -77,17 +78,17 @@ static T of(Class type, Shape shape) { * @return an allocated but uninitialized tensor * @see #of(Class, Shape) * @throws IllegalArgumentException if {@code size} is smaller than the minimum space required to - * store the tensor data - * @throws IllegalArgumentException if {@code size} is set to -1 but elements of the given - * {@code type} are of variable length (e.g. strings) - * @throws IllegalArgumentException if {@code shape} is totally or partially - * {@link Shape#hasUnknownDimension() unknown} + * store the tensor data + * @throws IllegalArgumentException if {@code size} is set to -1 but elements of the given {@code + * type} are of variable length (e.g. strings) + * @throws IllegalArgumentException if {@code shape} is totally or partially {@link + * Shape#hasUnknownDimension() unknown} * @throws IllegalStateException if tensor failed to be allocated */ static T of(Class type, Shape shape, long size) { RawTensor tensor = RawTensor.allocate(type, shape, size); try { - return (T)tensor.asTypedTensor(); + return (T) tensor.asTypedTensor(); } catch (Exception e) { tensor.close(); throw e; @@ -114,12 +115,13 @@ static T of(Class type, Shape shape, long size) { * @param the tensor type * @param type the tensor type class * @param shape shape of the tensor - * @param dataInitializer method receiving accessor to the allocated tensor data for initialization + * @param dataInitializer method receiving accessor to the allocated tensor data for + * initialization * @return an allocated and initialized tensor * @throws IllegalArgumentException if elements of the given {@code type} are of variable length - * (e.g. strings) - * @throws IllegalArgumentException if {@code shape} is totally or partially - * {@link Shape#hasUnknownDimension() unknown} + * (e.g. strings) + * @throws IllegalArgumentException if {@code shape} is totally or partially {@link + * Shape#hasUnknownDimension() unknown} * @throws IllegalStateException if tensor failed to be allocated */ static T of(Class type, Shape shape, Consumer dataInitializer) { @@ -129,28 +131,30 @@ static T of(Class type, Shape shape, Consumer dataInitia /** * Allocates a tensor of a given datatype, shape and size. * - *

    This method is identical to {@link #of(Class, Shape, Consumer)}, except that the final - * size for the tensor can be explicitly set instead of being computed from the datatype and shape. + *

    This method is identical to {@link #of(Class, Shape, Consumer)}, except that the final size + * for the tensor can be explicitly set instead of being computed from the datatype and shape. * - *

    This could be useful for tensor types that stores data but also metadata in the tensor memory, - * such as the lookup table in a tensor of strings. + *

    This could be useful for tensor types that stores data but also metadata in the tensor + * memory, such as the lookup table in a tensor of strings. * * @param the tensor type * @param type the tensor type class * @param shape shape of the tensor * @param size size in bytes of the tensor or -1 to compute the size from the shape - * @param dataInitializer method receiving accessor to the allocated tensor data for initialization + * @param dataInitializer method receiving accessor to the allocated tensor data for + * initialization * @return an allocated and initialized tensor * @see #of(Class, Shape, long, Consumer) * @throws IllegalArgumentException if {@code size} is smaller than the minimum space required to - * store the tensor data - * @throws IllegalArgumentException if {@code size} is set to -1 but elements of the given - * {@code type} are of variable length (e.g. strings) - * @throws IllegalArgumentException if {@code shape} is totally or partially - * {@link Shape#hasUnknownDimension() unknown} + * store the tensor data + * @throws IllegalArgumentException if {@code size} is set to -1 but elements of the given {@code + * type} are of variable length (e.g. strings) + * @throws IllegalArgumentException if {@code shape} is totally or partially {@link + * Shape#hasUnknownDimension() unknown} * @throws IllegalStateException if tensor failed to be allocated */ - static T of(Class type, Shape shape, long size, Consumer dataInitializer) { + static T of( + Class type, Shape shape, long size, Consumer dataInitializer) { T tensor = of(type, shape, size); try { dataInitializer.accept(tensor); @@ -172,36 +176,46 @@ static T of(Class type, Shape shape, long size, Consumer * @param shape the tensor shape. * @param rawData a buffer containing the tensor raw data. * @throws IllegalArgumentException if {@code rawData} is not large enough to contain the tensor - * data - * @throws IllegalArgumentException if {@code shape} is totally or partially - * {@link Shape#hasUnknownDimension() unknown} + * data + * @throws IllegalArgumentException if {@code shape} is totally or partially {@link + * Shape#hasUnknownDimension() unknown} * @throws IllegalStateException if tensor failed to be allocated with the given parameters */ static T of(Class type, Shape shape, ByteDataBuffer rawData) { - return of(type, shape, rawData.size(), t -> rawData.copyTo(t.asRawTensor().data(), rawData.size())); + return of( + type, shape, rawData.size(), t -> rawData.copyTo(t.asRawTensor().data(), rawData.size())); } - /** - * Returns the {@link DataType} of elements stored in the tensor. - */ + /** Returns the {@link DataType} of elements stored in the tensor. */ DataType dataType(); - /** - * Returns the size, in bytes, of the tensor data. - */ + /** Returns the size, in bytes, of the tensor data. */ long numBytes(); - /** - * Returns the shape of the tensor. - */ + /** Returns the shape of the tensor. */ @Override Shape shape(); /** * Returns a raw (untyped) representation of this tensor + * + * @throws UnsupportedOperationException if this tensor is composed of other tensors, such as + * {@link SparseTensor sparse tensors}. */ RawTensor asRawTensor(); + /** + * Check if this tensor is sparse or not. + * + *

    When this method returns {@code true}, the tensor could be cast to a {@link SparseTensor + * SparseTensor} to access its indices, values and denseShape tensors. + * + * @return true if this tensor is sparse + */ + default boolean isSparse() { + return false; + } + /** * Release resources associated with the Tensor. * diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorMapper.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorMapper.java index 9896f55b55b..8660395b702 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorMapper.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorMapper.java @@ -16,12 +16,14 @@ */ package org.tensorflow; +import org.bytedeco.javacpp.PointerScope; import org.tensorflow.internal.c_api.TF_Tensor; +import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TType; /** - * Maps the native memory of a {@link RawTensor} to a n-dimensional typed data space - * accessible from the JVM. + * Maps the native memory of a {@link RawTensor} to a n-dimensional typed data space accessible from + * the JVM. * *

    Usage of this class is reserved for internal purposes only. * @@ -38,6 +40,21 @@ public abstract class TensorMapper { */ protected abstract T mapDense(RawTensor tensor); + /** + * Maps the provided dense {@code tensors} as a sparse tensor of type {@code T}. + * + * @param indices indices of the non-default values in a dense space + * @param values non-default values of the tensor + * @param denseShape size of the dimensions definining the shape of the sparse tensor in a dense + * space. + * @param tensorScope scope to extend to keep a reference on the sub-tensors composing this sparse + * tensor + * @return an instance of {@code T}, that could also be casted to a {@link SparseTensor + * SparseTensor} + */ + protected abstract SparseTensor mapSparse( + TInt64 indices, T values, TInt64 denseShape, PointerScope tensorScope); + /** * Helper for retrieving the native handle of a raw tensor * diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/SparseHelpers.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/SparseHelpers.java new file mode 100644 index 00000000000..0aa808e9a4a --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/SparseHelpers.java @@ -0,0 +1,49 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +package org.tensorflow.internal.types; + +import org.tensorflow.SparseTensor; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.ndarray.StdArrays; +import org.tensorflow.ndarray.impl.dimension.DimensionalSpace; +import org.tensorflow.types.TInt64; + +/** Internal helper class for sparse tensor mappers */ +abstract class SparseHelpers { + + /** + * Convert a 1-D dense tensor, where each scalar represents the size of a dimension, to a {@link + * DimensionalSpace} instance as expected by the NdArray library. + * + * @param denseShape 1-D dense tensor holding the size of each dimensions + * @return a {@link DimensionalSpace} with these dimensions + */ + static DimensionalSpace toDimensionalSpace(TInt64 denseShape) { + return DimensionalSpace.create(Shape.of(StdArrays.array1dCopyOf(denseShape))); + } + + /** + * Compute the total number of bytes required to store a sparse tensor by adding the size of each + * of its dense sub-tensors. 
+ * + * @param sparseTensor the sparse tensor + * @return the total number of bytes + */ + static long numBytes(SparseTensor sparseTensor) { + return sparseTensor.indices().numBytes() + + sparseTensor.values().numBytes() + + sparseTensor.denseShape().numBytes(); + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TBfloat16Mapper.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TBfloat16Mapper.java index 27688e55779..b03a12d3f76 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TBfloat16Mapper.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TBfloat16Mapper.java @@ -16,26 +16,38 @@ */ package org.tensorflow.internal.types; +import org.bytedeco.javacpp.PointerScope; import org.tensorflow.RawTensor; +import org.tensorflow.SparseTensor; import org.tensorflow.TensorMapper; import org.tensorflow.internal.buffer.TensorBuffers; import org.tensorflow.ndarray.buffer.FloatDataBuffer; import org.tensorflow.ndarray.buffer.layout.DataLayouts; import org.tensorflow.ndarray.impl.dense.FloatDenseNdArray; +import org.tensorflow.ndarray.impl.sparse.FloatSparseNdArray; +import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBfloat16; +import org.tensorflow.types.TInt64; /** - * Maps memory of {@link org.tensorflow.proto.framework.DataType#DT_BFLOAT16} tensors - * to a n-dimensional data space. + * Maps memory of {@link org.tensorflow.proto.framework.DataType#DT_BFLOAT16} tensors to a + * n-dimensional data space. */ public final class TBfloat16Mapper extends TensorMapper { @Override protected TBfloat16 mapDense(RawTensor tensor) { - FloatDataBuffer buffer = DataLayouts.BFLOAT16.applyTo(TensorBuffers.toShorts(nativeHandle(tensor))); + FloatDataBuffer buffer = + DataLayouts.BFLOAT16.applyTo(TensorBuffers.toShorts(nativeHandle(tensor))); return new DenseTBfloat16(tensor, buffer); } + @Override + protected SparseTensor mapSparse( + TInt64 indices, TBfloat16 values, TInt64 denseShape, PointerScope tensorScope) { + return new SparseTBfloat16(indices, values, denseShape, tensorScope); + } + private static final class DenseTBfloat16 extends FloatDenseNdArray implements TBfloat16 { @Override @@ -43,6 +55,21 @@ public Class type() { return TBfloat16.class; } + @Override + public DataType dataType() { + return asRawTensor().dataType(); + } + + @Override + public long numBytes() { + return asRawTensor().numBytes(); + } + + @Override + public void close() { + asRawTensor().close(); + } + @Override public RawTensor asRawTensor() { return rawTensor; @@ -55,4 +82,57 @@ public RawTensor asRawTensor() { this.rawTensor = rawTensor; } } + + private static final class SparseTBfloat16 extends FloatSparseNdArray + implements TBfloat16, SparseTensor { + + @Override + public Class type() { + return TBfloat16.class; + } + + @Override + public DataType dataType() { + return values().dataType(); + } + + @Override + public long numBytes() { + return SparseHelpers.numBytes(this); + } + + @Override + public void close() { + tensorScope.close(); + } + + @Override + public boolean isSparse() { + return true; + } + + @Override + public TInt64 indices() { + return (TInt64) getIndices(); + } + + @Override + public TBfloat16 values() { + return (TBfloat16) getValues(); + } + + @Override + public TInt64 denseShape() { + return denseShape; + } + + SparseTBfloat16(TInt64 indices, TBfloat16 values, TInt64 denseShape, PointerScope tensorScope) { + 
super(indices, values, 0.0f, SparseHelpers.toDimensionalSpace(denseShape)); + this.denseShape = denseShape; + this.tensorScope = tensorScope.extend(); + } + + private final TInt64 denseShape; + private final PointerScope tensorScope; + } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TBoolMapper.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TBoolMapper.java index ff4c11a521b..6620d7fcb55 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TBoolMapper.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TBoolMapper.java @@ -16,16 +16,21 @@ */ package org.tensorflow.internal.types; +import org.bytedeco.javacpp.PointerScope; import org.tensorflow.RawTensor; +import org.tensorflow.SparseTensor; import org.tensorflow.TensorMapper; import org.tensorflow.internal.buffer.TensorBuffers; import org.tensorflow.ndarray.buffer.BooleanDataBuffer; import org.tensorflow.ndarray.impl.dense.BooleanDenseNdArray; +import org.tensorflow.ndarray.impl.sparse.BooleanSparseNdArray; +import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TBool; +import org.tensorflow.types.TInt64; /** - * Maps memory of {@link org.tensorflow.proto.framework.DataType#DT_BOOL} tensors - * to a n-dimensional data space. + * Maps memory of {@link org.tensorflow.proto.framework.DataType#DT_BOOL} tensors to a n-dimensional + * data space. */ public final class TBoolMapper extends TensorMapper { @@ -35,6 +40,12 @@ protected TBool mapDense(RawTensor tensor) { return new DenseTBool(tensor, buffer); } + @Override + protected SparseTensor mapSparse( + TInt64 indices, TBool values, TInt64 denseShape, PointerScope tensorScope) { + return new SparseTBool(indices, values, denseShape, tensorScope); + } + private static final class DenseTBool extends BooleanDenseNdArray implements TBool { @Override @@ -42,6 +53,21 @@ public Class type() { return TBool.class; } + @Override + public DataType dataType() { + return asRawTensor().dataType(); + } + + @Override + public long numBytes() { + return asRawTensor().numBytes(); + } + + @Override + public void close() { + asRawTensor().close(); + } + @Override public RawTensor asRawTensor() { return rawTensor; @@ -54,4 +80,57 @@ public RawTensor asRawTensor() { this.rawTensor = rawTensor; } } + + private static final class SparseTBool extends BooleanSparseNdArray + implements TBool, SparseTensor { + + @Override + public Class type() { + return TBool.class; + } + + @Override + public DataType dataType() { + return values().dataType(); + } + + @Override + public long numBytes() { + return SparseHelpers.numBytes(this); + } + + @Override + public void close() { + tensorScope.close(); + } + + @Override + public boolean isSparse() { + return true; + } + + @Override + public TInt64 indices() { + return (TInt64) getIndices(); + } + + @Override + public TBool values() { + return (TBool) getValues(); + } + + @Override + public TInt64 denseShape() { + return denseShape; + } + + SparseTBool(TInt64 indices, TBool values, TInt64 denseShape, PointerScope tensorScope) { + super(indices, values, false, SparseHelpers.toDimensionalSpace(denseShape)); + this.denseShape = denseShape; + this.tensorScope = tensorScope.extend(); + } + + private final TInt64 denseShape; + private final PointerScope tensorScope; + } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TFloat16Mapper.java 
b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TFloat16Mapper.java index fec84843f57..db22c81fe44 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TFloat16Mapper.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TFloat16Mapper.java @@ -16,26 +16,38 @@ */ package org.tensorflow.internal.types; +import org.bytedeco.javacpp.PointerScope; import org.tensorflow.RawTensor; +import org.tensorflow.SparseTensor; import org.tensorflow.TensorMapper; import org.tensorflow.internal.buffer.TensorBuffers; import org.tensorflow.ndarray.buffer.FloatDataBuffer; import org.tensorflow.ndarray.buffer.layout.DataLayouts; import org.tensorflow.ndarray.impl.dense.FloatDenseNdArray; +import org.tensorflow.ndarray.impl.sparse.FloatSparseNdArray; +import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat16; +import org.tensorflow.types.TInt64; /** - * Maps memory of {@link org.tensorflow.proto.framework.DataType#DT_HALF} tensors - * to a n-dimensional data space. + * Maps memory of {@link org.tensorflow.proto.framework.DataType#DT_HALF} tensors to a n-dimensional + * data space. */ public final class TFloat16Mapper extends TensorMapper { @Override protected TFloat16 mapDense(RawTensor tensor) { - FloatDataBuffer buffer = DataLayouts.FLOAT16.applyTo(TensorBuffers.toShorts(nativeHandle(tensor))); + FloatDataBuffer buffer = + DataLayouts.FLOAT16.applyTo(TensorBuffers.toShorts(nativeHandle(tensor))); return new DenseTFloat16(tensor, buffer); } + @Override + protected SparseTensor mapSparse( + TInt64 indices, TFloat16 values, TInt64 denseShape, PointerScope tensorScope) { + return new TFloat16Mapper.SparseTFloat16(indices, values, denseShape, tensorScope); + } + private static final class DenseTFloat16 extends FloatDenseNdArray implements TFloat16 { @Override @@ -43,6 +55,21 @@ public Class type() { return TFloat16.class; } + @Override + public DataType dataType() { + return asRawTensor().dataType(); + } + + @Override + public long numBytes() { + return asRawTensor().numBytes(); + } + + @Override + public void close() { + asRawTensor().close(); + } + @Override public RawTensor asRawTensor() { return rawTensor; @@ -55,4 +82,57 @@ public RawTensor asRawTensor() { this.rawTensor = rawTensor; } } + + private static final class SparseTFloat16 extends FloatSparseNdArray + implements TFloat16, SparseTensor { + + @Override + public Class type() { + return TFloat16.class; + } + + @Override + public DataType dataType() { + return values().dataType(); + } + + @Override + public long numBytes() { + return SparseHelpers.numBytes(this); + } + + @Override + public void close() { + tensorScope.close(); + } + + @Override + public boolean isSparse() { + return true; + } + + @Override + public TInt64 indices() { + return (TInt64) getIndices(); + } + + @Override + public TFloat16 values() { + return (TFloat16) getValues(); + } + + @Override + public TInt64 denseShape() { + return denseShape; + } + + SparseTFloat16(TInt64 indices, TFloat16 values, TInt64 denseShape, PointerScope tensorScope) { + super(indices, values, 0.0f, SparseHelpers.toDimensionalSpace(denseShape)); + this.denseShape = denseShape; + this.tensorScope = tensorScope.extend(); + } + + private final TInt64 denseShape; + private final PointerScope tensorScope; + } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TFloat32Mapper.java 
b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TFloat32Mapper.java index 62fc0d226ac..33f9735d8dd 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TFloat32Mapper.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TFloat32Mapper.java @@ -16,16 +16,21 @@ */ package org.tensorflow.internal.types; +import org.bytedeco.javacpp.PointerScope; import org.tensorflow.RawTensor; +import org.tensorflow.SparseTensor; import org.tensorflow.TensorMapper; import org.tensorflow.internal.buffer.TensorBuffers; import org.tensorflow.ndarray.buffer.FloatDataBuffer; import org.tensorflow.ndarray.impl.dense.FloatDenseNdArray; +import org.tensorflow.ndarray.impl.sparse.FloatSparseNdArray; +import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TInt64; /** - * Maps memory of {@link org.tensorflow.proto.framework.DataType#DT_FLOAT} tensors - * to a n-dimensional data space. + * Maps memory of {@link org.tensorflow.proto.framework.DataType#DT_FLOAT} tensors to a + * n-dimensional data space. */ public final class TFloat32Mapper extends TensorMapper { @@ -35,6 +40,12 @@ protected TFloat32 mapDense(RawTensor tensor) { return new DenseTFloat32(tensor, buffer); } + @Override + protected SparseTensor mapSparse( + TInt64 indices, TFloat32 values, TInt64 denseShape, PointerScope tensorScope) { + return new SparseTFloat32(indices, values, denseShape, tensorScope); + } + private static final class DenseTFloat32 extends FloatDenseNdArray implements TFloat32 { @Override @@ -42,6 +53,21 @@ public Class type() { return TFloat32.class; } + @Override + public DataType dataType() { + return asRawTensor().dataType(); + } + + @Override + public long numBytes() { + return asRawTensor().numBytes(); + } + + @Override + public void close() { + asRawTensor().close(); + } + @Override public RawTensor asRawTensor() { return rawTensor; @@ -54,4 +80,57 @@ public RawTensor asRawTensor() { this.rawTensor = rawTensor; } } + + private static final class SparseTFloat32 extends FloatSparseNdArray + implements TFloat32, SparseTensor { + + @Override + public Class type() { + return TFloat32.class; + } + + @Override + public DataType dataType() { + return values().dataType(); + } + + @Override + public long numBytes() { + return SparseHelpers.numBytes(this); + } + + @Override + public void close() { + tensorScope.close(); + } + + @Override + public boolean isSparse() { + return true; + } + + @Override + public TInt64 indices() { + return (TInt64) getIndices(); + } + + @Override + public TFloat32 values() { + return (TFloat32) getValues(); + } + + @Override + public TInt64 denseShape() { + return denseShape; + } + + SparseTFloat32(TInt64 indices, TFloat32 values, TInt64 denseShape, PointerScope tensorScope) { + super(indices, values, 0.0f, SparseHelpers.toDimensionalSpace(denseShape)); + this.denseShape = denseShape; + this.tensorScope = tensorScope.extend(); + } + + private final TInt64 denseShape; + private final PointerScope tensorScope; + } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TFloat64Mapper.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TFloat64Mapper.java index 375a7429950..e6e291bbfcf 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TFloat64Mapper.java +++ 
b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TFloat64Mapper.java @@ -16,16 +16,21 @@ */ package org.tensorflow.internal.types; +import org.bytedeco.javacpp.PointerScope; import org.tensorflow.RawTensor; +import org.tensorflow.SparseTensor; import org.tensorflow.TensorMapper; import org.tensorflow.internal.buffer.TensorBuffers; import org.tensorflow.ndarray.buffer.DoubleDataBuffer; import org.tensorflow.ndarray.impl.dense.DoubleDenseNdArray; +import org.tensorflow.ndarray.impl.sparse.DoubleSparseNdArray; +import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TFloat64; +import org.tensorflow.types.TInt64; /** - * Maps memory of {@link org.tensorflow.proto.framework.DataType#DT_DOUBLE} tensors - * to a n-dimensional data space. + * Maps memory of {@link org.tensorflow.proto.framework.DataType#DT_DOUBLE} tensors to a + * n-dimensional data space. */ public final class TFloat64Mapper extends TensorMapper { @@ -35,6 +40,12 @@ protected TFloat64 mapDense(RawTensor tensor) { return new DenseTFloat64(tensor, buffer); } + @Override + protected SparseTensor mapSparse( + TInt64 indices, TFloat64 values, TInt64 denseShape, PointerScope tensorScope) { + return new SparseTFloat64(indices, values, denseShape, tensorScope); + } + private static final class DenseTFloat64 extends DoubleDenseNdArray implements TFloat64 { @Override @@ -42,6 +53,21 @@ public Class type() { return TFloat64.class; } + @Override + public DataType dataType() { + return asRawTensor().dataType(); + } + + @Override + public long numBytes() { + return asRawTensor().numBytes(); + } + + @Override + public void close() { + asRawTensor().close(); + } + @Override public RawTensor asRawTensor() { return rawTensor; @@ -54,4 +80,57 @@ public RawTensor asRawTensor() { this.rawTensor = rawTensor; } } + + private static final class SparseTFloat64 extends DoubleSparseNdArray + implements TFloat64, SparseTensor { + + @Override + public Class type() { + return TFloat64.class; + } + + @Override + public DataType dataType() { + return values().dataType(); + } + + @Override + public long numBytes() { + return SparseHelpers.numBytes(this); + } + + @Override + public void close() { + tensorScope.close(); + } + + @Override + public boolean isSparse() { + return true; + } + + @Override + public TInt64 indices() { + return (TInt64) getIndices(); + } + + @Override + public TFloat64 values() { + return (TFloat64) getValues(); + } + + @Override + public TInt64 denseShape() { + return denseShape; + } + + SparseTFloat64(TInt64 indices, TFloat64 values, TInt64 denseShape, PointerScope tensorScope) { + super(indices, values, 0L, SparseHelpers.toDimensionalSpace(denseShape)); + this.denseShape = denseShape; + this.tensorScope = tensorScope.extend(); + } + + private final TInt64 denseShape; + private final PointerScope tensorScope; + } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TInt32Mapper.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TInt32Mapper.java index fa0852a8b09..f316782eaea 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TInt32Mapper.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TInt32Mapper.java @@ -16,16 +16,21 @@ */ package org.tensorflow.internal.types; +import org.bytedeco.javacpp.PointerScope; import org.tensorflow.RawTensor; +import org.tensorflow.SparseTensor; import org.tensorflow.TensorMapper; import 
org.tensorflow.internal.buffer.TensorBuffers; import org.tensorflow.ndarray.buffer.IntDataBuffer; import org.tensorflow.ndarray.impl.dense.IntDenseNdArray; +import org.tensorflow.ndarray.impl.sparse.IntSparseNdArray; +import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt32; +import org.tensorflow.types.TInt64; /** - * Maps memory of {@link org.tensorflow.proto.framework.DataType#DT_INT32} tensors - * to a n-dimensional data space. + * Maps memory of {@link org.tensorflow.proto.framework.DataType#DT_INT32} tensors to a + * n-dimensional data space. */ public final class TInt32Mapper extends TensorMapper { @@ -35,6 +40,12 @@ protected TInt32 mapDense(RawTensor tensor) { return new DenseTInt32(tensor, buffer); } + @Override + protected SparseTensor mapSparse( + TInt64 indices, TInt32 values, TInt64 denseShape, PointerScope tensorScope) { + return new SparseTInt32(indices, values, denseShape, tensorScope); + } + private static final class DenseTInt32 extends IntDenseNdArray implements TInt32 { @Override @@ -42,6 +53,21 @@ public Class type() { return TInt32.class; } + @Override + public DataType dataType() { + return asRawTensor().dataType(); + } + + @Override + public long numBytes() { + return asRawTensor().numBytes(); + } + + @Override + public void close() { + asRawTensor().close(); + } + @Override public RawTensor asRawTensor() { return rawTensor; @@ -54,4 +80,57 @@ public RawTensor asRawTensor() { this.rawTensor = rawTensor; } } + + private static final class SparseTInt32 extends IntSparseNdArray + implements TInt32, SparseTensor { + + @Override + public Class type() { + return TInt32.class; + } + + @Override + public DataType dataType() { + return values().dataType(); + } + + @Override + public long numBytes() { + return SparseHelpers.numBytes(this); + } + + @Override + public void close() { + tensorScope.close(); + } + + @Override + public boolean isSparse() { + return true; + } + + @Override + public TInt64 indices() { + return (TInt64) getIndices(); + } + + @Override + public TInt32 values() { + return (TInt32) getValues(); + } + + @Override + public TInt64 denseShape() { + return denseShape; + } + + SparseTInt32(TInt64 indices, TInt32 values, TInt64 denseShape, PointerScope tensorScope) { + super(indices, values, 0, SparseHelpers.toDimensionalSpace(denseShape)); + this.denseShape = denseShape; + this.tensorScope = tensorScope.extend(); + } + + private final TInt64 denseShape; + private final PointerScope tensorScope; + } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TInt64Mapper.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TInt64Mapper.java index c5f2325e25a..6ac46da3f31 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TInt64Mapper.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TInt64Mapper.java @@ -16,16 +16,20 @@ */ package org.tensorflow.internal.types; +import org.bytedeco.javacpp.PointerScope; import org.tensorflow.RawTensor; +import org.tensorflow.SparseTensor; import org.tensorflow.TensorMapper; import org.tensorflow.internal.buffer.TensorBuffers; import org.tensorflow.ndarray.buffer.LongDataBuffer; import org.tensorflow.ndarray.impl.dense.LongDenseNdArray; +import org.tensorflow.ndarray.impl.sparse.LongSparseNdArray; +import org.tensorflow.proto.framework.DataType; import org.tensorflow.types.TInt64; /** - * Maps memory of {@link 
org.tensorflow.proto.framework.DataType#DT_INT64} tensors - * to a n-dimensional data space. + * Maps memory of {@link org.tensorflow.proto.framework.DataType#DT_INT64} tensors to a + * n-dimensional data space. */ public final class TInt64Mapper extends TensorMapper { @@ -35,6 +39,12 @@ protected TInt64 mapDense(RawTensor tensor) { return new DenseTInt64(tensor, buffer); } + @Override + protected SparseTensor mapSparse( + TInt64 indices, TInt64 values, TInt64 denseShape, PointerScope tensorScope) { + return new TInt64Mapper.SparseTInt64(indices, values, denseShape, tensorScope); + } + private static final class DenseTInt64 extends LongDenseNdArray implements TInt64 { @Override @@ -42,6 +52,21 @@ public Class type() { return TInt64.class; } + @Override + public DataType dataType() { + return asRawTensor().dataType(); + } + + @Override + public long numBytes() { + return asRawTensor().numBytes(); + } + + @Override + public void close() { + asRawTensor().close(); + } + @Override public RawTensor asRawTensor() { return rawTensor; @@ -54,4 +79,57 @@ public RawTensor asRawTensor() { this.rawTensor = rawTensor; } } + + private static final class SparseTInt64 extends LongSparseNdArray + implements TInt64, SparseTensor { + + @Override + public Class type() { + return TInt64.class; + } + + @Override + public DataType dataType() { + return values().dataType(); + } + + @Override + public long numBytes() { + return SparseHelpers.numBytes(this); + } + + @Override + public void close() { + tensorScope.close(); + } + + @Override + public boolean isSparse() { + return true; + } + + @Override + public TInt64 indices() { + return (TInt64) getIndices(); + } + + @Override + public TInt64 values() { + return (TInt64) getValues(); + } + + @Override + public TInt64 denseShape() { + return denseShape; + } + + SparseTInt64(TInt64 indices, TInt64 values, TInt64 denseShape, PointerScope tensorScope) { + super(indices, values, 0L, SparseHelpers.toDimensionalSpace(denseShape)); + this.denseShape = denseShape; + this.tensorScope = tensorScope.extend(); + } + + private final TInt64 denseShape; + private final PointerScope tensorScope; + } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TStringMapper.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TStringMapper.java index de7c6016e0e..796794fc3c2 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TStringMapper.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TStringMapper.java @@ -18,7 +18,9 @@ import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; +import org.bytedeco.javacpp.PointerScope; import org.tensorflow.RawTensor; +import org.tensorflow.SparseTensor; import org.tensorflow.TensorMapper; import org.tensorflow.internal.buffer.ByteSequenceProvider; import org.tensorflow.internal.buffer.ByteSequenceTensorBuffer; @@ -29,11 +31,14 @@ import org.tensorflow.ndarray.buffer.layout.DataLayout; import org.tensorflow.ndarray.buffer.layout.DataLayouts; import org.tensorflow.ndarray.impl.dense.DenseNdArray; +import org.tensorflow.ndarray.impl.sparse.SparseNdArray; +import org.tensorflow.proto.framework.DataType; +import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; /** - * Maps memory of {@link org.tensorflow.proto.framework.DataType#DT_STRING} tensors - * to a n-dimensional data space. 
+ * Maps memory of {@link org.tensorflow.proto.framework.DataType#DT_STRING} tensors to a + * n-dimensional data space. */ public final class TStringMapper extends TensorMapper { @@ -42,13 +47,18 @@ public final class TStringMapper extends TensorMapper { @Override protected TString mapDense(RawTensor tensor) { - ByteSequenceTensorBuffer buffer = TensorBuffers.toStrings(nativeHandle(tensor), tensor.shape().size()); + ByteSequenceTensorBuffer buffer = + TensorBuffers.toStrings(nativeHandle(tensor), tensor.shape().size()); return new DenseTString(tensor, buffer, UTF_8_LAYOUT); } - /** - * Adds package-private methods to all instances of {@code TString} - */ + @Override + protected SparseTensor mapSparse( + TInt64 indices, TString values, TInt64 denseShape, PointerScope tensorScope) { + return new SparseTString(indices, values, denseShape, tensorScope); + } + + /** Adds package-private methods to all instances of {@code TString} */ interface TStringInternal extends TString { /** @@ -82,6 +92,21 @@ public Class type() { return TString.class; } + @Override + public DataType dataType() { + return asRawTensor().dataType(); + } + + @Override + public long numBytes() { + return asRawTensor().numBytes(); + } + + @Override + public void close() { + asRawTensor().close(); + } + @Override public RawTensor asRawTensor() { return rawTensor; @@ -93,11 +118,83 @@ public RawTensor asRawTensor() { DenseTString( RawTensor rawTensor, ByteSequenceTensorBuffer buffer, - DataLayout, String> layout - ) { + DataLayout, String> layout) { super(layout.applyTo(buffer), rawTensor.shape()); this.rawTensor = rawTensor; this.buffer = buffer; } } + + private static final class SparseTString extends SparseNdArray + implements TString, SparseTensor { + + @Override + public Class type() { + return TString.class; + } + + @Override + public DataType dataType() { + return values().dataType(); + } + + @Override + public long numBytes() { + return SparseHelpers.numBytes(this); + } + + @Override + public void close() { + tensorScope.close(); + } + + @Override + public boolean isSparse() { + return true; + } + + @Override + public TInt64 indices() { + return (TInt64) getIndices(); + } + + @Override + public TString values() { + return (TString) getValues(); + } + + @Override + public TInt64 denseShape() { + return denseShape; + } + + @Override + public TString using(Charset charset) { + return new SparseTString( + indices(), values().using(charset), denseShape(), tensorScope, false); + } + + @Override + public NdArray asBytes() { + return SparseNdArray.create(byte[].class, indices(), values().asBytes(), dimensions()); + } + + SparseTString(TInt64 indices, TString values, TInt64 denseShape, PointerScope tensorScope) { + this(indices, values, denseShape, tensorScope, true); + } + + private SparseTString( + TInt64 indices, + TString values, + TInt64 denseShape, + PointerScope tensorScope, + boolean extendScope) { + super(String.class, indices, values, "", SparseHelpers.toDimensionalSpace(denseShape)); + this.denseShape = denseShape; + this.tensorScope = extendScope ? 
tensorScope.extend() : tensorScope; + } + + private final TInt64 denseShape; + private final PointerScope tensorScope; + } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TUint8Mapper.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TUint8Mapper.java index 427debd1ac8..24950fd456a 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TUint8Mapper.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/types/TUint8Mapper.java @@ -16,16 +16,21 @@ */ package org.tensorflow.internal.types; +import org.bytedeco.javacpp.PointerScope; import org.tensorflow.RawTensor; +import org.tensorflow.SparseTensor; import org.tensorflow.TensorMapper; import org.tensorflow.internal.buffer.TensorBuffers; import org.tensorflow.ndarray.buffer.ByteDataBuffer; import org.tensorflow.ndarray.impl.dense.ByteDenseNdArray; +import org.tensorflow.ndarray.impl.sparse.ByteSparseNdArray; +import org.tensorflow.proto.framework.DataType; +import org.tensorflow.types.TInt64; import org.tensorflow.types.TUint8; /** - * Maps memory of {@link org.tensorflow.proto.framework.DataType#DT_UINT8} tensors - * to a n-dimensional data space. + * Maps memory of {@link org.tensorflow.proto.framework.DataType#DT_UINT8} tensors to a + * n-dimensional data space. */ public final class TUint8Mapper extends TensorMapper { @@ -35,6 +40,12 @@ protected TUint8 mapDense(RawTensor tensor) { return new DenseTUint8(tensor, buffer); } + @Override + protected SparseTensor mapSparse( + TInt64 indices, TUint8 values, TInt64 denseShape, PointerScope tensorScope) { + return new TUint8Mapper.SparseTUint8(indices, values, denseShape, tensorScope); + } + private static final class DenseTUint8 extends ByteDenseNdArray implements TUint8 { @Override @@ -42,6 +53,21 @@ public Class type() { return TUint8.class; } + @Override + public DataType dataType() { + return asRawTensor().dataType(); + } + + @Override + public long numBytes() { + return asRawTensor().numBytes(); + } + + @Override + public void close() { + asRawTensor().close(); + } + @Override public RawTensor asRawTensor() { return rawTensor; @@ -54,4 +80,57 @@ public RawTensor asRawTensor() { this.rawTensor = rawTensor; } } + + private static final class SparseTUint8 extends ByteSparseNdArray + implements TUint8, SparseTensor { + + @Override + public Class type() { + return TUint8.class; + } + + @Override + public DataType dataType() { + return values().dataType(); + } + + @Override + public long numBytes() { + return SparseHelpers.numBytes(this); + } + + @Override + public void close() { + tensorScope.close(); + } + + @Override + public boolean isSparse() { + return true; + } + + @Override + public TInt64 indices() { + return (TInt64) getIndices(); + } + + @Override + public TUint8 values() { + return (TUint8) getValues(); + } + + @Override + public TInt64 denseShape() { + return denseShape; + } + + SparseTUint8(TInt64 indices, TUint8 values, TInt64 denseShape, PointerScope tensorScope) { + super(indices, values, (byte) 0, SparseHelpers.toDimensionalSpace(denseShape)); + this.denseShape = denseShape; + this.tensorScope = tensorScope.extend(); + } + + private final TInt64 denseShape; + private final PointerScope tensorScope; + } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TBfloat16.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TBfloat16.java index ef20b5ec2b6..da8774ce365 100644 --- 
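
(Illustrative aside, not part of the patch: each SparseT* wrapper above extends the PointerScope handed to mapSparse(), so the dense indices/values/denseShape tensors backing the sparse view stay alive until the sparse tensor itself is closed. Assuming the sparseTensorOf factories introduced further down in this patch, the lifetime behaves roughly as in this sketch, which mirrors the SparseTensorTest added at the end of this change; imports are omitted.)

try (TInt64 indices = TInt64.tensorOf(StdArrays.ndCopyOf(new long[][] {{0}, {2}}));
    TFloat32 values = TFloat32.vectorOf(1.5f, 2.5f);
    TInt64 denseShape = TInt64.vectorOf(4)) {
  TFloat32 sparse = TFloat32.sparseTensorOf(indices, values, denseShape);
  float x = sparse.getFloat(2); // 2.5f, read through the sparse view
  sparse.close(); // releases the extended scope; the backing tensors remain owned by the try block
}
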
a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TBfloat16.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TBfloat16.java @@ -18,6 +18,7 @@ package org.tensorflow.types; import java.util.function.Consumer; +import org.tensorflow.SparseTensor; import org.tensorflow.Tensor; import org.tensorflow.exceptions.TensorFlowException; import org.tensorflow.internal.types.TBfloat16Mapper; @@ -40,7 +41,7 @@ *

    Since there is no floating-point type that fits in 16 bits in Java, a conversion (with * potentially a precision loss) is required for each 32 bits value written or read on a tensor of * this type from the JVM. Therefore, if a lot of I/O operations are to be expected on a tensor, - performances will be improved by working with {@link TFloat32} or {@link TFloat64} data types + performances will be improved by working with {@link TFloat32} or {@link TFloat64} data types * whenever possible. * *

    Note that some CPUs support the bfloat16 format natively, which can result in faster @@ -69,7 +70,8 @@ static TBfloat16 vectorOf(float... values) { if (values == null) { throw new IllegalArgumentException(); } - return Tensor.of(TBfloat16.class, Shape.of(values.length), data -> StdArrays.copyTo(values, data)); + return Tensor.of( + TBfloat16.class, Shape.of(values.length), data -> StdArrays.copyTo(values, data)); } /** @@ -116,5 +118,28 @@ static TBfloat16 tensorOf(Shape shape, FloatDataBuffer data) { static TBfloat16 tensorOf(Shape shape, Consumer dataInit) { return Tensor.of(TBfloat16.class, shape, dataInit); } -} + /** + * Create a sparse tensors from {@code indices}, {@code values} and {@code denseShape} dense + * tensors, with a default value of zero. + * + *

    The returned instance also implements the {@link SparseTensor SparseTensor} + * interface, allowing a user to access directly the dense tensors when needed. + * + * @param indices A 2-D tensor of shape {@code [N, ndims]}, that specifies the indices of the + * elements in the sparse tensor that contain non-default values (elements are zero-indexed). + * For example, {@code indices=[[1,3,1], [2,4,0]]} specifies that the elements with indexes of + * {@code [1,3,1]} and {@code [2,4,0]} have non-default values. + * @param values A 1-D tensor of shape {@code [N]}, which supplies the values for each element in + * indices. For example, given {@code indices=[[1,3,1], [2,4,0]]}, the parameter {@code + * values=[18f, 3.8f]} specifies that element {@code [1,3,1]} of the sparse tensor has a value + * of {@code 18f}, and element {@code [2,4,0]} of the tensor has a value of {@code 3.8f}. + * @param denseShape A 1-D tensor of shape {@code [ndims]} where each the value at index {@code i} + * represents the size of dimension {@code i} in a dense version of that tensor. + * @return the new sparse tensor + * @see SparseTensor for more details on sparse tensors and how to release their memory properly + */ + static TBfloat16 sparseTensorOf(TInt64 indices, TBfloat16 values, TInt64 denseShape) { + return SparseTensor.of(indices, values, denseShape).asTypedTensor(); + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TBool.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TBool.java index 0158c12b910..d1a1e4957ad 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TBool.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TBool.java @@ -18,6 +18,7 @@ package org.tensorflow.types; import java.util.function.Consumer; +import org.tensorflow.SparseTensor; import org.tensorflow.Tensor; import org.tensorflow.exceptions.TensorFlowException; import org.tensorflow.internal.types.TBoolMapper; @@ -108,4 +109,28 @@ static TBool tensorOf(Shape shape, BooleanDataBuffer data) { static TBool tensorOf(Shape shape, Consumer dataInit) { return Tensor.of(TBool.class, shape, dataInit); } + + /** + * Create a sparse tensors from {@code indices}, {@code values} and {@code denseShape} dense + * tensors, with a default value of {@code false}. + * + *

    The returned instance also implements the {@link SparseTensor SparseTensor} + * interface, allowing a user to access directly the dense tensors when needed. + * + * @param indices A 2-D tensor of shape {@code [N, ndims]}, that specifies the indices of the + * elements in the sparse tensor that contain non-default values (elements are zero-indexed). + * For example, {@code indices=[[1,3,1], [2,4,0]]} specifies that the elements with indexes of + * {@code [1,3,1]} and {@code [2,4,0]} have non-default values. + * @param values A 1-D tensor of shape {@code [N]}, which supplies the values for each element in + * indices. For example, given {@code indices=[[1,3,1], [2,4,0]]}, the parameter {@code + * values=[true, true]} specifies that element {@code [1,3,1]} and element {@code [2,4,0]} of + * the tensor have a value of {@code true}. + * @param denseShape A 1-D tensor of shape {@code [ndims]} where each the value at index {@code i} + * represents the size of dimension {@code i} in a dense version of that tensor. + * @return the new sparse tensor + * @see SparseTensor for more details on sparse tensors and how to release their memory properly + */ + static TBool sparseTensorOf(TInt64 indices, TBool values, TInt64 denseShape) { + return SparseTensor.of(indices, values, denseShape).asTypedTensor(); + } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat16.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat16.java index a43a0831f10..6ecab9d0591 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat16.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat16.java @@ -18,6 +18,7 @@ package org.tensorflow.types; import java.util.function.Consumer; +import org.tensorflow.SparseTensor; import org.tensorflow.Tensor; import org.tensorflow.exceptions.TensorFlowException; import org.tensorflow.internal.types.TFloat16Mapper; @@ -36,7 +37,7 @@ *

    Since there is no floating-point type that fits in 16 bits in Java, a conversion (with * potentially a precision loss) is required for each 32 bits value written or read on a tensor of * this type from the JVM. Therefore, if a lot of I/O operations are to be expected on a tensor, - performances will be improved by working with {@link TFloat32} or {@link TFloat64} data types + performances will be improved by working with {@link TFloat32} or {@link TFloat64} data types * whenever possible. * *

    Also, {@code TFloat16} tensors normally perform better if they are located in GPU memory since @@ -66,7 +67,8 @@ static TFloat16 vectorOf(float... values) { if (values == null) { throw new IllegalArgumentException(); } - return Tensor.of(TFloat16.class, Shape.of(values.length), data -> StdArrays.copyTo(values, data)); + return Tensor.of( + TFloat16.class, Shape.of(values.length), data -> StdArrays.copyTo(values, data)); } /** @@ -113,4 +115,28 @@ static TFloat16 tensorOf(Shape shape, FloatDataBuffer data) { static TFloat16 tensorOf(Shape shape, Consumer dataInit) { return Tensor.of(TFloat16.class, shape, dataInit); } + + /** + * Create a sparse tensors from {@code indices}, {@code values} and {@code denseShape} dense + * tensors, with a default value of zero. + * + *

    The returned instance also implements the {@link SparseTensor SparseTensor} + * interface, allowing a user to access directly the dense tensors when needed. + * + * @param indices A 2-D tensor of shape {@code [N, ndims]}, that specifies the indices of the + * elements in the sparse tensor that contain non-default values (elements are zero-indexed). + * For example, {@code indices=[[1,3,1], [2,4,0]]} specifies that the elements with indexes of + * {@code [1,3,1]} and {@code [2,4,0]} have non-default values. + * @param values A 1-D tensor of shape {@code [N]}, which supplies the values for each element in + * indices. For example, given {@code indices=[[1,3,1], [2,4,0]]}, the parameter {@code + * values=[18f, 3.8f]} specifies that element {@code [1,3,1]} of the sparse tensor has a value + * of {@code 18f}, and element {@code [2,4,0]} of the tensor has a value of {@code 3.8f}. + * @param denseShape A 1-D tensor of shape {@code [ndims]} where each the value at index {@code i} + * represents the size of dimension {@code i} in a dense version of that tensor. + * @return the new sparse tensor + * @see SparseTensor for more details on sparse tensors and how to release their memory properly + */ + static TFloat16 sparseTensorOf(TInt64 indices, TFloat16 values, TInt64 denseShape) { + return SparseTensor.of(indices, values, denseShape).asTypedTensor(); + } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat32.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat32.java index 35208f7de43..88fcc08cb5d 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat32.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat32.java @@ -18,6 +18,7 @@ package org.tensorflow.types; import java.util.function.Consumer; +import org.tensorflow.SparseTensor; import org.tensorflow.Tensor; import org.tensorflow.exceptions.TensorFlowException; import org.tensorflow.internal.types.TFloat32Mapper; @@ -54,7 +55,8 @@ static TFloat32 vectorOf(float... values) { if (values == null) { throw new IllegalArgumentException(); } - return Tensor.of(TFloat32.class, Shape.of(values.length), data -> StdArrays.copyTo(values, data)); + return Tensor.of( + TFloat32.class, Shape.of(values.length), data -> StdArrays.copyTo(values, data)); } /** @@ -101,4 +103,28 @@ static TFloat32 tensorOf(Shape shape, FloatDataBuffer data) { static TFloat32 tensorOf(Shape shape, Consumer dataInit) { return Tensor.of(TFloat32.class, shape, dataInit); } + + /** + * Create a sparse tensors from {@code indices}, {@code values} and {@code denseShape} dense + * tensors, with a default value of zero. + * + *

    The returned instance also implements the {@link SparseTensor SparseTensor} + * interface, allowing a user to access directly the dense tensors when needed. + * + * @param indices A 2-D tensor of shape {@code [N, ndims]}, that specifies the indices of the + * elements in the sparse tensor that contain non-default values (elements are zero-indexed). + * For example, {@code indices=[[1,3,1], [2,4,0]]} specifies that the elements with indexes of + * {@code [1,3,1]} and {@code [2,4,0]} have non-default values. + * @param values A 1-D tensor of shape {@code [N]}, which supplies the values for each element in + * indices. For example, given {@code indices=[[1,3,1], [2,4,0]]}, the parameter {@code + * values=[18f, 3.8f]} specifies that element {@code [1,3,1]} of the sparse tensor has a value + * of {@code 18f}, and element {@code [2,4,0]} of the tensor has a value of {@code 3.8f}. + * @param denseShape A 1-D tensor of shape {@code [ndims]} where each the value at index {@code i} + * represents the size of dimension {@code i} in a dense version of that tensor. + * @return the new sparse tensor + * @see SparseTensor for more details on sparse tensors and how to release their memory properly + */ + static TFloat32 sparseTensorOf(TInt64 indices, TFloat32 values, TInt64 denseShape) { + return SparseTensor.of(indices, values, denseShape).asTypedTensor(); + } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat64.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat64.java index 957612691e5..9960662fd79 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat64.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat64.java @@ -18,6 +18,7 @@ package org.tensorflow.types; import java.util.function.Consumer; +import org.tensorflow.SparseTensor; import org.tensorflow.Tensor; import org.tensorflow.exceptions.TensorFlowException; import org.tensorflow.internal.types.TFloat64Mapper; @@ -30,7 +31,6 @@ import org.tensorflow.types.annotation.TensorType; import org.tensorflow.types.family.TFloating; - /** IEEE-754 double-precision 64-bit float tensor type. */ @TensorType(dataType = DataType.DT_DOUBLE, byteSize = 8, mapperClass = TFloat64Mapper.class) public interface TFloat64 extends DoubleNdArray, TFloating { @@ -55,7 +55,8 @@ static TFloat64 vectorOf(double... values) { if (values == null) { throw new IllegalArgumentException(); } - return Tensor.of(TFloat64.class, Shape.of(values.length), data -> StdArrays.copyTo(values, data)); + return Tensor.of( + TFloat64.class, Shape.of(values.length), data -> StdArrays.copyTo(values, data)); } /** @@ -102,4 +103,28 @@ static TFloat64 tensorOf(Shape shape, DoubleDataBuffer data) { static TFloat64 tensorOf(Shape shape, Consumer dataInit) { return Tensor.of(TFloat64.class, shape, dataInit); } + + /** + * Create a sparse tensors from {@code indices}, {@code values} and {@code denseShape} dense + * tensors, with a default value of zero. + * + *
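
(Illustrative aside, not part of the patch: a hypothetical usage sketch for the sparseTensorOf(...) factories added to each tensor type in this change. The class name, variable names and literal values are made up. Unset coordinates read back as the type's default value, and the backing dense tensors remain reachable through the SparseTensor interface.)

import org.tensorflow.SparseTensor;
import org.tensorflow.ndarray.StdArrays;
import org.tensorflow.types.TFloat32;
import org.tensorflow.types.TInt64;

public class SparseTensorOfSketch {
  public static void main(String[] args) {
    // A 3x4 dense space with two non-default values, at [0,1] and [2,3]
    try (TInt64 indices = TInt64.tensorOf(StdArrays.ndCopyOf(new long[][] {{0, 1}, {2, 3}}));
        TFloat32 values = TFloat32.vectorOf(4.5f, 9.0f);
        TInt64 denseShape = TInt64.vectorOf(3, 4);
        TFloat32 sparse = TFloat32.sparseTensorOf(indices, values, denseShape)) {

      float present = sparse.getFloat(0, 1); // 4.5f
      float absent = sparse.getFloat(1, 1);  // 0.0f, the default value for numeric types

      SparseTensor view = (SparseTensor) sparse; // the same instance, seen as a SparseTensor
      TInt64 backingIndices = view.indices();    // direct access to the dense tensors
    }
  }
}

The same pattern applies to TBfloat16, TBool, TFloat16, TFloat64, TInt32, TInt64 and TUint8, and (with an empty-string default) to TString.
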

    The returned instance also implements the {@link SparseTensor SparseTensor} + * interface, allowing a user to access directly the dense tensors when needed. + * + * @param indices A 2-D tensor of shape {@code [N, ndims]}, that specifies the indices of the + * elements in the sparse tensor that contain non-default values (elements are zero-indexed). + * For example, {@code indices=[[1,3,1], [2,4,0]]} specifies that the elements with indexes of + * {@code [1,3,1]} and {@code [2,4,0]} have non-default values. + * @param values A 1-D tensor of shape {@code [N]}, which supplies the values for each element in + * indices. For example, given {@code indices=[[1,3,1], [2,4,0]]}, the parameter {@code + * values=[18, 3.8]} specifies that element {@code [1,3,1]} of the sparse tensor has a value + * of {@code 18}, and element {@code [2,4,0]} of the tensor has a value of {@code 3.8}. + * @param denseShape A 1-D tensor of shape {@code [ndims]} where each the value at index {@code i} + * represents the size of dimension {@code i} in a dense version of that tensor. + * @return the new sparse tensor + * @see SparseTensor for more details on sparse tensors and how to release their memory properly + */ + static TFloat64 sparseTensorOf(TInt64 indices, TFloat64 values, TInt64 denseShape) { + return SparseTensor.of(indices, values, denseShape).asTypedTensor(); + } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TInt32.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TInt32.java index 8f6b587795b..9857154402a 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TInt32.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TInt32.java @@ -18,6 +18,7 @@ package org.tensorflow.types; import java.util.function.Consumer; +import org.tensorflow.SparseTensor; import org.tensorflow.Tensor; import org.tensorflow.internal.types.TInt32Mapper; import org.tensorflow.ndarray.IntNdArray; @@ -100,5 +101,28 @@ static TInt32 tensorOf(Shape shape, IntDataBuffer data) { static TInt32 tensorOf(Shape shape, Consumer dataInit) { return Tensor.of(TInt32.class, shape, dataInit); } -} + /** + * Create a sparse tensors from {@code indices}, {@code values} and {@code denseShape} dense + * tensors, with a default value of zero. + * + *

    The returned instance also implements the {@link SparseTensor SparseTensor} + * interface, allowing a user to access directly the dense tensors when needed. + * + * @param indices A 2-D tensor of shape {@code [N, ndims]}, that specifies the indices of the + * elements in the sparse tensor that contain non-default values (elements are zero-indexed). + * For example, {@code indices=[[1,3,1], [2,4,0]]} specifies that the elements with indexes of + * {@code [1,3,1]} and {@code [2,4,0]} have non-default values. + * @param values A 1-D tensor of shape {@code [N]}, which supplies the values for each element in + * indices. For example, given {@code indices=[[1,3,1], [2,4,0]]}, the parameter {@code + * values=[18, 3]} specifies that element {@code [1,3,1]} of the sparse tensor has a value of + * {@code 18}, and element {@code [2,4,0]} of the tensor has a value of {@code 3}. + * @param denseShape A 1-D tensor of shape {@code [ndims]} where each the value at index {@code i} + * represents the size of dimension {@code i} in a dense version of that tensor. + * @return the new sparse tensor + * @see SparseTensor for more details on sparse tensors and how to release their memory properly + */ + static TInt32 sparseTensorOf(TInt64 indices, TInt32 values, TInt64 denseShape) { + return SparseTensor.of(indices, values, denseShape).asTypedTensor(); + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TInt64.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TInt64.java index 867248c5392..63b1e583376 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TInt64.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TInt64.java @@ -18,6 +18,7 @@ package org.tensorflow.types; import java.util.function.Consumer; +import org.tensorflow.SparseTensor; import org.tensorflow.Tensor; import org.tensorflow.exceptions.TensorFlowException; import org.tensorflow.internal.types.TInt64Mapper; @@ -101,4 +102,28 @@ static TInt64 tensorOf(Shape shape, LongDataBuffer data) { static TInt64 tensorOf(Shape shape, Consumer dataInit) { return Tensor.of(TInt64.class, shape, dataInit); } + + /** + * Create a sparse tensors from {@code indices}, {@code values} and {@code denseShape} dense + * tensors, with a default value of zero. + * + *

    The returned instance also implements the {@link SparseTensor SparseTensor} + * interface, allowing a user to access directly the dense tensors when needed. + * + * @param indices A 2-D tensor of shape {@code [N, ndims]}, that specifies the indices of the + * elements in the sparse tensor that contain non-default values (elements are zero-indexed). + * For example, {@code indices=[[1,3,1], [2,4,0]]} specifies that the elements with indexes of + * {@code [1,3,1]} and {@code [2,4,0]} have non-default values. + * @param values A 1-D tensor of shape {@code [N]}, which supplies the values for each element in + * indices. For example, given {@code indices=[[1,3,1], [2,4,0]]}, the parameter {@code + * values=[18L, 3L]} specifies that element {@code [1,3,1]} of the sparse tensor has a value + * of {@code 18L}, and element {@code [2,4,0]} of the tensor has a value of {@code 3L}. + * @param denseShape A 1-D tensor of shape {@code [ndims]} where each the value at index {@code i} + * represents the size of dimension {@code i} in a dense version of that tensor. + * @return the new sparse tensor + * @see SparseTensor for more details on sparse tensors and how to release their memory properly + */ + static TInt64 sparseTensorOf(TInt64 indices, TInt64 values, TInt64 denseShape) { + return SparseTensor.of(indices, values, denseShape).asTypedTensor(); + } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TString.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TString.java index b3000cc2f8a..33109e77fc0 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TString.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TString.java @@ -20,6 +20,7 @@ import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.util.function.Function; +import org.tensorflow.SparseTensor; import org.tensorflow.Tensor; import org.tensorflow.internal.types.TStringInitializer; import org.tensorflow.internal.types.TStringMapper; @@ -105,7 +106,8 @@ static TString tensorOf(NdArray src) { * @return the new tensor */ static TString tensorOf(Charset charset, NdArray src) { - TStringInitializer initializer = new TStringInitializer<>(src, s -> s.getBytes(charset)); + TStringInitializer initializer = + new TStringInitializer<>(src, s -> s.getBytes(charset)); return Tensor.of(TString.class, src.shape(), initializer.computeRequiredSize(), initializer); } @@ -190,6 +192,31 @@ static TString tensorOfBytes(Shape shape, DataBuffer data) { return tensorOfBytes(NdArrays.wrap(shape, data)); } + /** + * Create a sparse tensors from {@code indices}, {@code values} and {@code denseShape} dense + * tensors, with an empty string as the default value. + * + *

    The returned instance also implements the {@link SparseTensor SparseTensor} + * interface, allowing a user to access directly the dense tensors when needed. + * + * @param indices A 2-D tensor of shape {@code [N, ndims]}, that specifies the indices of the + * elements in the sparse tensor that contain non-default values (elements are zero-indexed). + * For example, {@code indices=[[1,3,1], [2,4,0]]} specifies that the elements with indexes of + * {@code [1,3,1]} and {@code [2,4,0]} have non-default values. + * @param values A 1-D tensor of shape {@code [N]}, which supplies the values for each element in + * indices. For example, given {@code indices=[[1,3,1], [2,4,0]]}, the parameter {@code + * values=["one", "two"]} specifies that element {@code [1,3,1]} of the sparse tensor has a + * value of {@code "one"}, and element {@code [2,4,0]} of the tensor has a value of {@code + * "two"}. + * @param denseShape A 1-D tensor of shape {@code [ndims]} where each the value at index {@code i} + * represents the size of dimension {@code i} in a dense version of that tensor. + * @return the new sparse tensor + * @see SparseTensor for more details on sparse tensors and how to release their memory properly + */ + static TString sparseTensorOf(TInt64 indices, TString values, TInt64 denseShape) { + return SparseTensor.of(indices, values, denseShape).asTypedTensor(); + } + /** * Use a specific charset for decoding data from a string tensor, instead of the default UTF-8. * diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TUint8.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TUint8.java index eae86414cb4..056de2537a9 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TUint8.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TUint8.java @@ -18,6 +18,7 @@ package org.tensorflow.types; import java.util.function.Consumer; +import org.tensorflow.SparseTensor; import org.tensorflow.Tensor; import org.tensorflow.exceptions.TensorFlowException; import org.tensorflow.internal.types.TUint8Mapper; @@ -101,4 +102,28 @@ static TUint8 tensorOf(Shape shape, ByteDataBuffer data) { static TUint8 tensorOf(Shape shape, Consumer dataInit) { return Tensor.of(TUint8.class, shape, dataInit); } + + /** + * Create a sparse tensors from {@code indices}, {@code values} and {@code denseShape} dense + * tensors, with a default value of zero. + * + *
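
(Illustrative aside, not part of the patch: a rough sketch of the TString variant just above, using the existing TString.vectorOf factory; names and values are made up, imports and charset constants as in the surrounding TString sources. The default value is the empty string, and the sparse view keeps TString's charset re-interpretation.)

try (TInt64 indices = TInt64.tensorOf(StdArrays.ndCopyOf(new long[][] {{0}, {2}}));
    TString values = TString.vectorOf("alpha", "beta");
    TInt64 denseShape = TInt64.vectorOf(3);
    TString sparse = TString.sparseTensorOf(indices, values, denseShape)) {
  String present = sparse.getObject(0); // "alpha"
  String absent = sparse.getObject(1);  // "", the empty-string default
  TString latin1 = sparse.using(StandardCharsets.ISO_8859_1); // a view decoded with another charset
}
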

    The returned instance also implements the {@link SparseTensor SparseTensor} + * interface, allowing a user to access directly the dense tensors when needed. + * + * @param indices A 2-D tensor of shape {@code [N, ndims]}, that specifies the indices of the + * elements in the sparse tensor that contain non-default values (elements are zero-indexed). + * For example, {@code indices=[[1,3,1], [2,4,0]]} specifies that the elements with indexes of + * {@code [1,3,1]} and {@code [2,4,0]} have non-default values. + * @param values A 1-D tensor of shape {@code [N]}, which supplies the values for each element in + * indices. For example, given {@code indices=[[1,3,1], [2,4,0]]}, the parameter {@code + * values=[18, 3]} specifies that element {@code [1,3,1]} of the sparse tensor has a value of + * {@code 18}, and element {@code [2,4,0]} of the tensor has a value of {@code 3}. + * @param denseShape A 1-D tensor of shape {@code [ndims]} where each the value at index {@code i} + * represents the size of dimension {@code i} in a dense version of that tensor. + * @return the new sparse tensor + * @see SparseTensor for more details on sparse tensors and how to release their memory properly + */ + static TUint8 sparseTensorOf(TInt64 indices, TUint8 values, TInt64 denseShape) { + return SparseTensor.of(indices, values, denseShape).asTypedTensor(); + } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/family/TType.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/family/TType.java index 9349fbb59ea..b5b3126bf74 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/family/TType.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/family/TType.java @@ -18,17 +18,16 @@ package org.tensorflow.types.family; import org.tensorflow.Tensor; -import org.tensorflow.proto.framework.DataType; /** * Common interface for all typed tensors. * *

    Typed tensors wrap a {@link org.tensorflow.RawTensor RawTensor} by mapping their native memory - * to a n-dimensional data space allowing direct I/O access from the JVM.

    + * to a n-dimensional data space allowing direct I/O access from the JVM. * *

    Subinterfaces of {@code TType} are propagated as a generic parameter to various entities of - * TensorFlow to identify the type of the tensor they carry. For example, a - * {@link org.tensorflow.Operand Operand<TFloat32>} is an operand which outputs a 32-bit floating + * TensorFlow to identify the type of the tensor they carry. For example, a {@link + * org.tensorflow.Operand Operand<TFloat32>} is an operand which outputs a 32-bit floating * point tensor. This parameter ensure type-compatibility between operands of a computation at * compile-time. For example: * @@ -43,41 +42,24 @@ * tf.math.add(c1, c3); // Compilation failure * } * - *

    Even if all typed tensors implements somehow {@link org.tensorflow.ndarray.NdArray NdArray} - * to provide access to their data, {@code TType} deliberately does not extend directly from this + *

    Even if all typed tensors implements somehow {@link org.tensorflow.ndarray.NdArray NdArray} to + * provide access to their data, {@code TType} deliberately does not extend directly from this * interface, for the following reasons: + * *

      *
    • Implementing {@code NdArray} at this level could only expose boxed-type accessors, which - * are less performant than their primitive equivalent, only exposed by subinterfaces of - * {@code NdArray} (e.g. {@code FloatNdArray}). - *
    • + * are less performant than their primitive equivalent, only exposed by subinterfaces of + * {@code NdArray} (e.g. {@code FloatNdArray}). *
    • {@code TType} would need to carry a new generic parameter for typing the {@code NdArray}, - * which will increase the verbosity in the signature of any method accepting or returning - * an instance of this interface, which is very common. - *
    • + * which will increase the verbosity in the signature of any method accepting or returning an + * instance of this interface, which is very common. *
    - * Therefore, enforcing the user to cast a reference of {@code TType} in a concrete tensor type before - * accessing its data guarantees better performance and improves readability. + * + * Therefore, enforcing the user to cast a reference of {@code TType} in a concrete tensor type + * before accessing its data guarantees better performance and improves readability. */ public interface TType extends Tensor { - /** - * Returns the type of this tensor as a registered subclass of {@code TType} - */ + /** Returns the type of this tensor as a registered subclass of {@code TType} */ Class type(); - - @Override - default DataType dataType() { - return asRawTensor().dataType(); - } - - @Override - default long numBytes() { - return asRawTensor().numBytes(); - } - - @Override - default void close() { - asRawTensor().close(); - } } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SparseTensorTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SparseTensorTest.java new file mode 100644 index 00000000000..753c3af7ac1 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SparseTensorTest.java @@ -0,0 +1,207 @@ +/* + * Copyright 2022 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
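
(Illustrative aside, not part of the patch: the default dataType()/numBytes()/close() methods removed from TType above are now supplied by each mapper's dense and sparse implementations shown earlier in this change, so calling code should be unaffected. A minimal sketch, with the literal value chosen arbitrarily.)

try (TFloat32 t = TFloat32.scalarOf(42.0f)) {
  DataType dt = t.dataType(); // DT_FLOAT, now answered by the mapper's dense class
  long bytes = t.numBytes();  // delegated to the underlying raw tensor
} // close() releases the raw tensor (dense) or the extended pointer scope (sparse)
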
+ * ======================================================================= + */ +package org.tensorflow; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.util.Arrays; +import java.util.List; +import org.junit.jupiter.api.Test; +import org.tensorflow.ndarray.LongNdArray; +import org.tensorflow.ndarray.NdArrays; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.ndarray.StdArrays; +import org.tensorflow.op.Ops; +import org.tensorflow.op.sparse.SparseSplit; +import org.tensorflow.types.TFloat64; +import org.tensorflow.types.TInt64; +import org.tensorflow.types.family.TType; + +public class SparseTensorTest { + + @Test + public void createSparseTensor() { + long[][] indicesArray = new long[][] {{0, 0, 3}, {0, 2, 3}, {1, 0, 0}, {2, 2, 1}}; + try (TInt64 indices = TInt64.tensorOf(StdArrays.ndCopyOf(indicesArray)); + TFloat64 values = TFloat64.vectorOf(10.0, 20.0, 30.0, 40.0); + TInt64 denseShape = TInt64.vectorOf(3, 3, 4); + TFloat64 tensor = TFloat64.sparseTensorOf(indices, values, denseShape)) { + + assertNotNull(tensor); + assertEquals(Shape.of(3, 3, 4), tensor.shape()); + + tensor + .scalars() + .forEachIndexed( + (coords, scalar) -> { + if (Arrays.equals(coords, indicesArray[0])) { + assertEquals(10.0, scalar.getDouble()); + } else if (Arrays.equals(coords, indicesArray[1])) { + assertEquals(20.0, scalar.getDouble()); + } else if (Arrays.equals(coords, indicesArray[2])) { + assertEquals(30.0, scalar.getDouble()); + } else if (Arrays.equals(coords, indicesArray[3])) { + assertEquals(40.0, scalar.getDouble()); + } else { + assertEquals(0.0, scalar.getDouble()); + } + }); + + assertTrue(SparseTensor.class.isAssignableFrom(tensor.getClass())); + SparseTensor sparseTensor = (SparseTensor) tensor; + assertEquals(indices, sparseTensor.indices()); + assertEquals(values, sparseTensor.values()); + assertEquals(denseShape, sparseTensor.denseShape()); + } + } + + @Test + public void splitSparseTensor() { + Ops tf = Ops.create(); + LongNdArray indicesArray = + StdArrays.ndCopyOf(new long[][] {{0, 0}, {0, 2}, {1, 0}, {1, 1}, {1, 3}, {2, 2}}); + try (TInt64 indices = TInt64.tensorOf(indicesArray); + TFloat64 values = TFloat64.vectorOf(10.0, 20.0, 30.0, 40.0, 50.0, 60.0); + TInt64 dimensions = TInt64.vectorOf(3, 4); + SparseTensor sparseTensor = SparseTensor.of(indices, values, dimensions)) { + + // [10.0 0.0 20.0 0.0] [10.0 0.0] [20.0 0.0] + // [30.0 40.0 0.0 50.0] ==> [30.0 40.0] + [ 0.0 50.0] + // [ 0.0 0.0 60.0 0.0] [ 0.0 0.0] [60.0 0.0] + SparseSplit split = + tf.sparse.sparseSplit( + tf.constant(1L), + tf.constant(sparseTensor.indices()), + tf.constant(sparseTensor.values()), + tf.constant(sparseTensor.denseShape()), + 2L); + List> splitIndices = split.outputIndices(); + List> splitValues = split.outputValues(); + List> splitDenseShape = split.outputShape(); + + assertEquals(2, splitIndices.size()); + assertEquals(2, splitValues.size()); + assertEquals(2, splitDenseShape.size()); + + SparseTensor sparsePart1 = + SparseTensor.of( + splitIndices.get(0).asTensor(), + splitValues.get(0).asTensor(), + splitDenseShape.get(0).asTensor()); + assertEquals( + StdArrays.ndCopyOf(new long[][] {{0, 0}, {1, 0}, {1, 1}}), sparsePart1.indices()); + assertEquals(NdArrays.vectorOf(10.0, 30.0, 40.0), sparsePart1.values()); + assertEquals(NdArrays.vectorOf(3L, 2L), sparsePart1.denseShape()); + + 
SparseTensor sparsePart2 = + SparseTensor.of( + splitIndices.get(1).asTensor(), + splitValues.get(1).asTensor(), + splitDenseShape.get(1).asTensor()); + assertEquals( + StdArrays.ndCopyOf(new long[][] {{0, 0}, {1, 1}, {2, 0}}), sparsePart2.indices()); + assertEquals(NdArrays.vectorOf(20.0, 50.0, 60.0), sparsePart2.values()); + assertEquals(NdArrays.vectorOf(3L, 2L), sparsePart2.denseShape()); + } + } + + @Test + void releaseSparseTensorBeforeTensors() { + TensorState state = null; + + try (TInt64 indices = TInt64.tensorOf(StdArrays.ndCopyOf(new long[][] {{3}, {6}, {9}})); + TFloat64 values = TFloat64.vectorOf(30.0, 60.0, 90.0); + TInt64 denseShape = TInt64.vectorOf(10); + TFloat64 sparseTensor = TFloat64.sparseTensorOf(indices, values, denseShape)) { + state = new TensorState(sparseTensor); + assertFalse(state.isClosed()); + } + assertTrue(state.isClosed()); + + try (TInt64 indices = TInt64.tensorOf(StdArrays.ndCopyOf(new long[][] {{3}, {6}, {9}})); + TFloat64 values = TFloat64.vectorOf(30.0, 60.0, 90.0); + TInt64 denseShape = TInt64.vectorOf(10)) { + try (TFloat64 sparseTensor = TFloat64.sparseTensorOf(indices, values, denseShape)) { + state = new TensorState(sparseTensor); + assertFalse(state.isClosed()); + } + assertFalse(state.isClosed()); + } + assertTrue(state.isClosed()); + } + + @Test + void releaseTensorsBeforeSparseTensor() { + TensorState state = null; + + try (TFloat64 sparseTensor = createSparseTensorForTest()) { + state = new TensorState(sparseTensor); + assertFalse(state.isClosed()); + } + assertTrue(state.isClosed()); + + TFloat64 sparseTensor; + try (TInt64 indices = TInt64.tensorOf(StdArrays.ndCopyOf(new long[][] {{3}, {6}, {9}})); + TFloat64 values = TFloat64.vectorOf(30.0, 60.0, 90.0); + TInt64 denseShape = TInt64.vectorOf(10)) { + sparseTensor = TFloat64.sparseTensorOf(indices, values, denseShape); + state = new TensorState(sparseTensor); + assertFalse(state.isClosed()); + } + assertFalse(state.isClosed()); // Not closing the sparse tensor would leak + sparseTensor.close(); + assertTrue(state.isClosed()); + } + + private TFloat64 createSparseTensorForTest() { + try (TInt64 indices = TInt64.tensorOf(StdArrays.ndCopyOf(new long[][] {{3}, {6}, {9}})); + TFloat64 values = TFloat64.vectorOf(30.0, 60.0, 90.0); + TInt64 denseShape = TInt64.vectorOf(10)) { + return TFloat64.sparseTensorOf(indices, values, denseShape); + } + } + + private static final class TensorState { + + TensorState(TFloat64 tensor) { + SparseTensor sparseTensor = (SparseTensor) tensor; + this.indicesRef = sparseTensor.indices(); + this.valuesRef = sparseTensor.values(); + this.denseShapeRef = sparseTensor.denseShape(); + } + + boolean isClosed() { + try { + // nativeHandle() will throw if the tensor has been closed + indicesRef.asRawTensor().nativeHandle(); + valuesRef.asRawTensor().nativeHandle(); + denseShapeRef.asRawTensor().nativeHandle(); + return false; + + } catch (IllegalStateException e) { + return true; + } + } + + private TInt64 indicesRef = null; + private TFloat64 valuesRef = null; + private TInt64 denseShapeRef = null; + } +} From 8fd9362870a7fd768c1f18989159ee1fefbd1def Mon Sep 17 00:00:00 2001 From: Adam Pocock Date: Mon, 7 Feb 2022 23:29:03 -0500 Subject: [PATCH 10/21] Adds a closeable session result (#411) --- .../java/org/tensorflow/ConcreteFunction.java | 10 +- .../src/main/java/org/tensorflow/Result.java | 199 ++++++++++++++++++ .../java/org/tensorflow/SavedModelBundle.java | 2 +- .../src/main/java/org/tensorflow/Session.java | 62 +++--- .../java/org/tensorflow/SessionFunction.java | 
34 +-- .../src/main/java/org/tensorflow/Tensor.java | 2 +- .../java/org/tensorflow/TensorFunction.java | 29 ++- .../org/tensorflow/AutoCloseableList.java | 27 --- .../org/tensorflow/ConcreteFunctionTest.java | 23 +- .../org/tensorflow/CustomGradientTest.java | 3 +- .../java/org/tensorflow/DeviceSpecTest.java | 130 ++++++------ .../test/java/org/tensorflow/GraphTest.java | 39 ++-- .../org/tensorflow/SavedModelBundleTest.java | 15 +- .../test/java/org/tensorflow/SessionTest.java | 32 ++- .../op/core/BooleanMaskUpdateTest.java | 9 +- .../org/tensorflow/op/core/ConstantTest.java | 18 +- .../org/tensorflow/op/core/GradientsTest.java | 25 +-- .../org/tensorflow/op/core/ZerosTest.java | 4 +- .../framework/data/DatasetIteratorTest.java | 14 +- .../framework/data/MapDatasetTest.java | 15 +- .../metrics/impl/AssertBroadcastableTest.java | 15 +- .../metrics/impl/BroadcastWeightsTest.java | 91 ++++---- .../optimizers/GradientDescentTest.java | 20 +- 23 files changed, 477 insertions(+), 341 deletions(-) create mode 100644 tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Result.java delete mode 100644 tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/AutoCloseableList.java diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ConcreteFunction.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ConcreteFunction.java index 4d07b678811..c822678fda6 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ConcreteFunction.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ConcreteFunction.java @@ -1,4 +1,4 @@ -/* Copyright 2020-2021 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2020-2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -295,8 +295,8 @@ public Operand call(Scope scope, Operand argument) { } @Override - public Map call(Map arguments) { - // FIXME need to manage input/output operand lifetimes + public Result call(Map arguments) { + // FIXME need to manage input operand lifetimes Ops tf = Ops.create(); Map> inputs = new LinkedHashMap<>(arguments.size()); @@ -305,11 +305,11 @@ public Map call(Map arguments) { inputs.put(inputName, tf.constantOf((TType) argument)); } Map> outputs = tf.call(this, inputs); - Map tensorOutputs = new LinkedHashMap<>(outputs.size()); + LinkedHashMap tensorOutputs = new LinkedHashMap<>(outputs.size()); for (String outputName : outputs.keySet()) { tensorOutputs.put(outputName, outputs.get(outputName).asTensor()); } - return tensorOutputs; + return new Result(tensorOutputs); } /** diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Result.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Result.java new file mode 100644 index 00000000000..a3560b068b1 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Result.java @@ -0,0 +1,199 @@ +/* +Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +======================================================================= +*/ +package org.tensorflow; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.logging.Level; +import java.util.logging.Logger; +import org.tensorflow.exceptions.TensorFlowException; +import org.tensorflow.proto.framework.RunMetadata; + +/** + * An {@link AutoCloseable} wrapper around a {@link Map} containing {@link Tensor}s. + * + *
<p>
    When this is closed it closes all the {@link Tensor}s inside it. If you maintain a reference + * to a value after this object has been closed it will throw an {@link IllegalStateException} upon + * access. + * + *
<p>
    This class is not thread-safe with respect to the close operation. Multiple closers or one + * thread closing a tensor while another is reading may throw exceptions. + * + *
<p>
    Note this class is used to manage the lifetimes of tensors produced by the TensorFlow runtime, + * from sessions and function calls. It is not used as an argument to {@code session.run} or + * function calls as users are in control of the creation of input tensors. + */ +public final class Result implements AutoCloseable, Iterable> { + @Override + public void close() { + if (!closed) { + for (Tensor t : list) { + try { + t.close(); + } catch (TensorFlowException e) { + logger.log(Level.WARNING, "Exception raised when closing tensor inside result.", e); + } + } + closed = true; + } else { + logger.warning("Closing an already closed Result"); + } + } + + @Override + public Iterator> iterator() { + if (!closed) { + return map.entrySet().iterator(); + } else { + throw new IllegalStateException("Result is closed"); + } + } + + /** + * Returns the number of outputs in this Result. + * + * @return The number of outputs. + */ + public int size() { + return map.size(); + } + + /** + * Gets the set containing all the tensor names. + * + * @return The tensor names set. + */ + public Set keySet() { + return Collections.unmodifiableSet(map.keySet()); + } + + /** + * Does this result object have a tensor for the supplied key? + * + * @param key The key to check. + * @return True if this result object has a tensor for this key. + */ + public boolean containsKey(String key) { + return map.containsKey(key); + } + + /** + * Gets the value from the container at the specified index. + * + *
<p>
    Throws {@link IllegalStateException} if the container has been closed, and {@link + * IndexOutOfBoundsException} if the index is invalid. + * + * @param index The index to lookup. + * @return The value at the index. + */ + public Tensor get(int index) { + if (!closed) { + return list.get(index); + } else { + throw new IllegalStateException("Result is closed"); + } + } + + /** + * Gets the value from the container assuming it's not been closed. + * + *
<p>
    Throws {@link IllegalStateException} if the container has been closed. + * + * @param key The key to lookup. + * @return Optional.of the value if it exists. + */ + public Optional get(String key) { + if (!closed) { + return Optional.ofNullable(map.get(key)); + } else { + throw new IllegalStateException("Result is closed"); + } + } + + /** + * Metadata about the run. + * + *
<p>
    A RunMetadata + * protocol buffer. + */ + public Optional getMetadata() { + return Optional.ofNullable(metadata); + } + + /** + * Creates a Result from the names and values produced by {@link Session.Runner#run()}. + * + * @param names The output names. + * @param values The output values. + * @param metadata The run metadata, may be null. + */ + Result(List names, List values, RunMetadata metadata) { + this.map = new LinkedHashMap<>(); + this.list = new ArrayList<>(values); + + if (names.size() != values.size()) { + throw new IllegalArgumentException( + "Expected same number of names and values, found names.length = " + + names.size() + + ", values.length = " + + values.size()); + } + + for (int i = 0; i < names.size(); i++) { + Tensor old = this.map.put(names.get(i), values.get(i)); + if (old != null) { + throw new IllegalArgumentException( + "Name collision in the result set, two outputs are named '" + names.get(i) + "'"); + } + } + this.metadata = metadata; + this.closed = false; + } + + /** + * Creates a Result from the names and values. + * + * @param outputs The run outputs. + */ + Result(LinkedHashMap outputs) { + this.map = outputs; + this.list = new ArrayList<>(outputs.size()); + for (Map.Entry e : outputs.entrySet()) { + list.add(e.getValue()); + } + this.metadata = null; + this.closed = false; + } + + private final Map map; + + private final List list; + + private final RunMetadata metadata; + + private boolean closed; + + private static final Logger logger = Logger.getLogger(Result.class.getName()); +} diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SavedModelBundle.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SavedModelBundle.java index 4295dbb6c4a..35d81e7bc16 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SavedModelBundle.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SavedModelBundle.java @@ -468,7 +468,7 @@ public List functions() { * @return list of output tensors, mapped by the signature name * @throws IllegalArgumentException if no function can be selected by default */ - public Map call(Map arguments) { + public Result call(Map arguments) { SessionFunction function = null; if (functions.size() == 1) { function = functions.values().iterator().next(); diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Session.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Session.java index 71fdcec3f41..76be5597cc1 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Session.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Session.java @@ -1,4 +1,4 @@ -/* Copyright 2019-2021 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2019-2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -306,7 +306,9 @@ public Runner feed(Operand operand, Tensor t) { * @throws IllegalArgumentException if no output exists with the provided name */ public Runner fetch(String operation) { - return fetch(graph.outputOrThrow(operation)); + Runner r = fetch(graph.outputOrThrow(operation), false); + outputNames.add(operation); + return r; } /** @@ -336,6 +338,20 @@ public Runner fetch(String operation, int index) { * @return this session runner */ public Runner fetch(Output output) { + return fetch(output, true); + } + + /** + * Makes {@link #run()} return the Tensor referred to by {@code output}. + * + *
<p>
    If {@code output} is a resource variable, will fetch the value. + * + * @param output the node to fetch the tensor from + * @param recordName Records the output name. If false the output name must be recorded by the + * calling method as otherwise the result object will throw on construction. + * @return this session runner + */ + private Runner fetch(Output output, boolean recordName) { if (output.env() != graph) { throw new IllegalStateException( "Can't fetch output " @@ -378,6 +394,9 @@ public Runner fetch(Output output) { } else { outputs.add(output); } + if (recordName) { + outputNames.add(output.name()); + } return this; } @@ -490,13 +509,13 @@ private void doInit() { * * @return list of resulting tensors fetched by this session runner */ - public List run() { + public Result run() { doInit(); return runNoInit(); } - List runNoInit() { - return runHelper(false).outputs; + Result runNoInit() { + return runHelper(false); } /** @@ -509,12 +528,12 @@ List runNoInit() { * * @return list of resulting tensors fetched by this session runner, with execution metadata */ - public Run runAndFetchMetadata() { + public Result runAndFetchMetadata() { doInit(); return runHelper(true); } - private Run runHelper(boolean wantMetadata) { + private Result runHelper(boolean wantMetadata) { TF_Tensor[] inputTensorHandles = new TF_Tensor[inputTensors.size()]; TF_Operation[] inputOpHandles = new TF_Operation[inputs.size()]; int[] inputOpIndices = new int[inputs.size()]; @@ -569,10 +588,7 @@ private Run runHelper(boolean wantMetadata) { } finally { runRef.close(); } - Run ret = new Run(); - ret.outputs = outputs; - ret.metadata = metadata; - return ret; + return new Result(outputNames, outputs, metadata); } private class Reference implements AutoCloseable { @@ -602,6 +618,7 @@ public void close() { private final ArrayList> inputs = new ArrayList<>(); private final ArrayList inputTensors = new ArrayList<>(); private final ArrayList> outputs = new ArrayList<>(); + private final ArrayList outputNames = new ArrayList<>(); private final ArrayList targets = new ArrayList<>(); private RunOptions runOptions = null; } @@ -648,8 +665,9 @@ public SessionFunction function(Signature signature) { * * @param signature the signature of the function * @param arguments the arguments to call with. + * @return The results of the function call. */ - public Map run(Signature signature, Map arguments) { + public Result run(Signature signature, Map arguments) { return function(signature).call(arguments); } @@ -698,26 +716,6 @@ public void restore(String prefix) { setInitialized(); } - /** - * Output tensors and metadata obtained when executing a session. - * - *
<p>
    See {@link Runner#runAndFetchMetadata()} - */ - public static final class Run { - - /** Tensors from requested fetches. */ - public List outputs; - - /** - * Metadata about the run. - * - *
<p>
    A RunMetadata - * protocol buffer. - */ - public RunMetadata metadata; - } - Graph graph() { return graph; } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SessionFunction.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SessionFunction.java index 07bc418ac51..877ba1b2f2c 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SessionFunction.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/SessionFunction.java @@ -1,23 +1,22 @@ -/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021-2022 The TensorFlow Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ======================================================================= - */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +======================================================================= +*/ package org.tensorflow; import java.io.IOException; import java.util.LinkedHashMap; -import java.util.List; import java.util.Map; /** @@ -89,7 +88,7 @@ public SessionFunction withNewSession(Session session) { } @Override - public Map call(Map arguments) { + public Result call(Map arguments) { Session.Runner runner = session.runner(); signature .getInputs() @@ -113,15 +112,16 @@ public Map call(Map arguments) { signature.getOutputs().values().forEach(x -> runner.fetch(x.name)); - List results = runner.run(); + Result results = runner.run(); - Map outputs = new LinkedHashMap<>(results.size()); + // Unpack the result object and rebuild it with the expected names. + LinkedHashMap outputs = new LinkedHashMap<>(results.size()); int i = 0; for (String outputName : signature.outputNames()) { outputs.put(outputName, results.get(i)); i++; } - return outputs; + return new Result(outputs); } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Tensor.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Tensor.java index 3b9deff9cd4..2ba3dc0a906 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Tensor.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Tensor.java @@ -210,7 +210,7 @@ static T of(Class type, Shape shape, ByteDataBuffer rawData *
<p>
    When this methods retuns {@code true}, the tensor could be cast to a {@link SparseTensor * SparseTensor} to access its indices, values and denseShape tensors. * - * @retrun true if this tensor is a sparse + * @return true if this tensor is a sparse */ default boolean isSparse() { return false; diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorFunction.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorFunction.java index 0304d786494..1b83a1176ca 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorFunction.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/TensorFunction.java @@ -1,18 +1,18 @@ /* Copyright 2021 The TensorFlow Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ======================================================================= - */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +======================================================================= +*/ package org.tensorflow; import java.util.LinkedHashMap; @@ -28,7 +28,7 @@ public interface TensorFunction { /** * Invokes a function using the default eager session. * - *
<p>
    Caller is responsible for closing all Tensors. + *
<p>
    Caller is responsible for close the result object. * * @param arguments list of tensors to pass in input to the function, mapped by their signature * name @@ -37,7 +37,7 @@ public interface TensorFunction { * @throws IllegalArgumentException if the passed arguments don't match up to the function's * parameters. */ - Map call(Map arguments); + Result call(Map arguments); /** * Invokes a function with a single input and output using the default eager session. @@ -76,12 +76,11 @@ default Tensor call(Tensor tensor) { } String inputName = signature().inputNames().iterator().next(); - String outputName = signature().outputNames().iterator().next(); Map inputMap = new LinkedHashMap<>(); inputMap.put(inputName, tensor); - return call(inputMap).get(outputName); + return call(inputMap).get(0); } static Operand validateDescription( diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/AutoCloseableList.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/AutoCloseableList.java deleted file mode 100644 index 330a40bae6b..00000000000 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/AutoCloseableList.java +++ /dev/null @@ -1,27 +0,0 @@ -package org.tensorflow; - -import java.util.ArrayList; -import java.util.Collection; - -public final class AutoCloseableList extends ArrayList - implements AutoCloseable { - - public AutoCloseableList(Collection c) { - super(c); - } - - @Override - public void close() { - Exception toThrow = null; - for (AutoCloseable c : this) { - try { - c.close(); - } catch (Exception e) { - toThrow = e; - } - } - if (toThrow != null) { - throw new RuntimeException(toThrow); - } - } -} diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/ConcreteFunctionTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/ConcreteFunctionTest.java index 250ff9cc383..b303618eae2 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/ConcreteFunctionTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/ConcreteFunctionTest.java @@ -162,9 +162,9 @@ public void testFunctionWithTwoOutputs() { Map inputs = new HashMap<>(); inputs.put("x", TInt32.scalarOf(2)); - Map outputs = cf.call(inputs); - assertEquals(4, ((TInt32) outputs.get("dbl")).getInt()); - assertEquals(6, ((TInt32) outputs.get("trpl")).getInt()); + Result outputs = cf.call(inputs); + assertEquals(4, ((TInt32) outputs.get("dbl").get()).getInt()); + assertEquals(6, ((TInt32) outputs.get("trpl").get()).getInt()); } private static Signature square(Ops tf) { @@ -205,15 +205,14 @@ public void testGradientsGraph() { try (TFloat32 c1 = TFloat32.scalarOf(3.0f); TFloat32 c2 = TFloat32.scalarOf(2.0f); - AutoCloseableList outputs = - new AutoCloseableList<>( - s.runner() - .feed(x1, c1) - .feed(x2, c2) - .fetch(grads0[0]) - .fetch(grads1[0]) - .fetch(grads1[1]) - .run())) { + Result outputs = + s.runner() + .feed(x1, c1) + .feed(x2, c2) + .fetch(grads0[0]) + .fetch(grads1[0]) + .fetch(grads1[1]) + .run()) { assertEquals(3, outputs.size()); assertEquals(108.0f, ((TFloat32) outputs.get(0)).getFloat(), 0.0f); diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/CustomGradientTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/CustomGradientTest.java index 62626c35641..0ad94ad2130 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/CustomGradientTest.java +++ 
b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/CustomGradientTest.java @@ -66,8 +66,7 @@ public void testCustomGradient() { assertEquals(DataType.DT_FLOAT, grads0[0].dataType()); try (TFloat32 c1 = TFloat32.vectorOf(3.0f, 2.0f, 1.0f, 0.0f); - AutoCloseableList outputs = - new AutoCloseableList<>(s.runner().feed(x, c1).fetch(grads0[0]).run())) { + Result outputs = s.runner().feed(x, c1).fetch(grads0[0]).run()) { assertEquals(1, outputs.size()); assertEquals(0.0f, ((TFloat32) outputs.get(0)).getFloat(), 0.0f); diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/DeviceSpecTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/DeviceSpecTest.java index e4340da3275..28a549d72ef 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/DeviceSpecTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/DeviceSpecTest.java @@ -14,6 +14,11 @@ ==============================================================================*/ package org.tensorflow; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; +import static org.tensorflow.DeviceSpec.DeviceType; + import org.junit.jupiter.api.Test; import org.tensorflow.exceptions.TFInvalidArgumentException; import org.tensorflow.op.Ops; @@ -21,92 +26,87 @@ import org.tensorflow.proto.framework.ConfigProto; import org.tensorflow.types.TInt32; -import static com.google.common.truth.Truth.assertThat; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; -import static org.tensorflow.DeviceSpec.DeviceType; - /** Tests for {@link DeviceSpec}. 
*/ public class DeviceSpecTest { @Test public void withDeviceMethod() { - ConfigProto config = ConfigProto.newBuilder(ConfigProto.getDefaultInstance()) + ConfigProto config = + ConfigProto.newBuilder(ConfigProto.getDefaultInstance()) .setLogDevicePlacement(true) .build(); - try (Graph g = new Graph(); Session session = new Session(g, config)) { + try (Graph g = new Graph(); + Session session = new Session(g, config)) { Ops tf = Ops.create(g).withSubScope("testScope"); Constant aOps = tf.constant(-1); - DeviceSpec deviceSpec = DeviceSpec.newBuilder() + DeviceSpec deviceSpec = + DeviceSpec.newBuilder() .job("localhost") .replica(0) .task(0) .deviceType(DeviceSpec.DeviceType.CPU) .build(); - Output absOps = tf - .withName("absWithDevice") - .withDevice(deviceSpec) - .math - .abs(aOps) - .asOutput(); + Output absOps = + tf.withName("absWithDevice").withDevice(deviceSpec).math.abs(aOps).asOutput(); - try (AutoCloseableList t = - new AutoCloseableList<>(session.runner().fetch(absOps).run())) { - assertEquals(1, ((TInt32)t.get(0)).getInt()); + try (Result t = session.runner().fetch(absOps).run()) { + assertEquals(1, ((TInt32) t.get(0)).getInt()); } } } @Test public void withEmptyDeviceSpec() { - ConfigProto config = ConfigProto.newBuilder(ConfigProto.getDefaultInstance()) + ConfigProto config = + ConfigProto.newBuilder(ConfigProto.getDefaultInstance()) .setLogDevicePlacement(true) .build(); - try (Graph g = new Graph(); Session session = new Session(g, config)) { + try (Graph g = new Graph(); + Session session = new Session(g, config)) { Ops tf = Ops.create(g).withSubScope("testScope"); Constant aOps = tf.constant(-1); - DeviceSpec deviceSpec = DeviceSpec.newBuilder() + DeviceSpec deviceSpec = + DeviceSpec.newBuilder() .job("localhost") .replica(0) .task(0) .deviceType(DeviceSpec.DeviceType.CPU) .build(); - Output absOps = tf - .withName("absWithDevice") - .withDevice(deviceSpec) - .math - .abs(aOps) - .asOutput(); + Output absOps = + tf.withName("absWithDevice").withDevice(deviceSpec).math.abs(aOps).asOutput(); - try (AutoCloseableList t = - new AutoCloseableList<>(session.runner().fetch(absOps).run())) { - assertEquals(1, ((TInt32)t.get(0)).getInt()); + try (Result t = session.runner().fetch(absOps).run()) { + assertEquals(1, ((TInt32) t.get(0)).getInt()); } } } @Test public void withTwoScopes() { - ConfigProto config = ConfigProto.newBuilder(ConfigProto.getDefaultInstance()) + ConfigProto config = + ConfigProto.newBuilder(ConfigProto.getDefaultInstance()) .setLogDevicePlacement(true) .build(); - try (Graph g = new Graph(); Session session = new Session(g, config)) { - DeviceSpec deviceSpec1 = DeviceSpec.newBuilder() + try (Graph g = new Graph(); + Session session = new Session(g, config)) { + DeviceSpec deviceSpec1 = + DeviceSpec.newBuilder() .job("localhost") .replica(0) .task(0) .deviceType(DeviceSpec.DeviceType.CPU) .build(); - DeviceSpec deviceSpec2 = DeviceSpec.newBuilder() + DeviceSpec deviceSpec2 = + DeviceSpec.newBuilder() .job("localhost") .replica(0) .task(0) @@ -119,33 +119,27 @@ public void withTwoScopes() { Constant aOps = tf1.constant(-1); Constant bOps = tf2.constant(10); - Output absOps = tf1 - .withName("absWithDevice") - .math - .abs(aOps) - .asOutput(); + Output absOps = tf1.withName("absWithDevice").math.abs(aOps).asOutput(); - Output mulOps = tf2 - .withName("mulWithDevice") - .math - .mul(absOps, bOps) - .asOutput(); + Output mulOps = tf2.withName("mulWithDevice").math.mul(absOps, bOps).asOutput(); - try (AutoCloseableList t = - new 
AutoCloseableList<>(session.runner().fetch(mulOps).run())) { - assertEquals(10, ((TInt32)t.get(0)).getInt()); + try (Result t = session.runner().fetch(mulOps).run()) { + assertEquals(10, ((TInt32) t.get(0)).getInt()); } } } @Test public void withIncorrectDeviceSpec() { - ConfigProto config = ConfigProto.newBuilder(ConfigProto.getDefaultInstance()) + ConfigProto config = + ConfigProto.newBuilder(ConfigProto.getDefaultInstance()) .setLogDevicePlacement(true) .build(); - try (Graph g = new Graph(); Session session = new Session(g, config)) { - DeviceSpec correctDeviceSpec = DeviceSpec.newBuilder() + try (Graph g = new Graph(); + Session session = new Session(g, config)) { + DeviceSpec correctDeviceSpec = + DeviceSpec.newBuilder() .job("localhost") .replica(0) .task(0) @@ -153,7 +147,8 @@ public void withIncorrectDeviceSpec() { .build(); // Incorrect device spec, it will never be executed - DeviceSpec incorrectDeviceSpec = DeviceSpec.newBuilder() + DeviceSpec incorrectDeviceSpec = + DeviceSpec.newBuilder() .job("UNKNOWN") .replica(1) .task(1000) @@ -165,22 +160,17 @@ public void withIncorrectDeviceSpec() { Constant aOps = tf.constant(-1); Constant bOps = tf.constant(10); - Output absOps = tf - .withName("absWithDevice") - .withDevice(incorrectDeviceSpec) - .math - .abs(aOps) - .asOutput(); + Output absOps = + tf.withName("absWithDevice").withDevice(incorrectDeviceSpec).math.abs(aOps).asOutput(); - Output mulOps = tf - .withName("mulWithDevice") + Output mulOps = + tf.withName("mulWithDevice") .withDevice(correctDeviceSpec) .math .mul(absOps, bOps) .asOutput(); - try (AutoCloseableList t = - new AutoCloseableList<>(session.runner().fetch(mulOps).run())) { + try (Result t = session.runner().fetch(mulOps).run()) { fail(); } catch (TFInvalidArgumentException e) { // ok @@ -190,12 +180,15 @@ public void withIncorrectDeviceSpec() { @Test public void withDeviceSpecInScope() { - ConfigProto config = ConfigProto.newBuilder(ConfigProto.getDefaultInstance()) + ConfigProto config = + ConfigProto.newBuilder(ConfigProto.getDefaultInstance()) .setLogDevicePlacement(true) .build(); - try (Graph g = new Graph(); Session session = new Session(g, config)) { - DeviceSpec deviceSpec = DeviceSpec.newBuilder() + try (Graph g = new Graph(); + Session session = new Session(g, config)) { + DeviceSpec deviceSpec = + DeviceSpec.newBuilder() .job("localhost") .replica(0) .task(0) @@ -206,15 +199,10 @@ public void withDeviceSpecInScope() { Constant aOps = tf.constant(-1); - Output absOps = tf - .withName("absWithDevice") - .math - .abs(aOps) - .asOutput(); + Output absOps = tf.withName("absWithDevice").math.abs(aOps).asOutput(); - try (AutoCloseableList t = - new AutoCloseableList<>(session.runner().fetch(absOps).run())) { - assertEquals(1, ((TInt32)t.get(0)).getInt()); + try (Result t = session.runner().fetch(absOps).run()) { + assertEquals(1, ((TInt32) t.get(0)).getInt()); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphTest.java index 154d3903dcd..ff691e30adb 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphTest.java @@ -25,7 +25,6 @@ import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashSet; -import java.util.List; import java.util.Set; import org.junit.jupiter.api.Test; import org.tensorflow.exceptions.TFInvalidArgumentException; @@ -84,15 +83,13 @@ 
public void graphDefRoundTripWithInit() { Operand variable2 = init.withName("var2").variable(init.constant(4)); - try (Session s = new Session(g, true)) { - List results = s.runner().fetch("result").fetch("var2").run(); + try (Session s = new Session(g, true); + Result results = s.runner().fetch("result").fetch("var2").run()) { TInt32 result = (TInt32) results.get(0); assertEquals(6, result.getInt()); TInt32 var2Result = (TInt32) results.get(1); assertEquals(4, var2Result.getInt()); - - results.forEach(Tensor::close); } } } @@ -266,15 +263,14 @@ public void addGradientsToGraph() { try (TFloat32 c1 = TFloat32.scalarOf(3.0f); TFloat32 c2 = TFloat32.scalarOf(2.0f); - AutoCloseableList outputs = - new AutoCloseableList<>( - s.runner() - .feed(x1, c1) - .feed(x2, c2) - .fetch(grads0[0]) - .fetch(grads1[0]) - .fetch(grads1[1]) - .run())) { + Result outputs = + s.runner() + .feed(x1, c1) + .feed(x2, c2) + .fetch(grads0[0]) + .fetch(grads1[0]) + .fetch(grads1[1]) + .run()) { assertEquals(3, outputs.size()); assertEquals(108.0f, ((TFloat32) outputs.get(0)).getFloat(), 0.0f); assertEquals(6.0f, ((TFloat32) outputs.get(1)).getFloat(), 0.0f); @@ -418,14 +414,13 @@ public void buildWhileLoopMultipleInputs() { try (TInt32 c1 = TInt32.scalarOf(2); TInt32 c2 = TInt32.scalarOf(5); - AutoCloseableList outputs = - new AutoCloseableList<>( - s.runner() - .feed(input1, c1) - .feed(input2, c2) - .fetch(loopOutputs[0]) - .fetch(loopOutputs[1]) - .run())) { + Result outputs = + s.runner() + .feed(input1, c1) + .feed(input2, c2) + .fetch(loopOutputs[0]) + .fetch(loopOutputs[1]) + .run()) { assertEquals(2, outputs.size()); assertEquals(16, ((TInt32) outputs.get(0)).getInt()); // ((2^2)^2) assertEquals(625, ((TInt32) outputs.get(1)).getInt()); // ((5^2)^2) diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SavedModelBundleTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SavedModelBundleTest.java index be6f952fb6a..deff52ffbeb 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SavedModelBundleTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SavedModelBundleTest.java @@ -215,7 +215,10 @@ public void exportFunctionWithVariables() throws IOException { // Now call the same function directly from the model try (TFloat32 zTensor = (TFloat32) - savedModel.call(Collections.singletonMap("input", xTensor)).get("reducedSum")) { + savedModel + .call(Collections.singletonMap("input", xTensor)) + .get("reducedSum") + .get()) { assertEquals(reducedSum, zTensor.getFloat(), EPSILON); } } @@ -293,9 +296,9 @@ public void pythonTfFunction() { System.out.println(add.signature()); args.put("a", a); args.put("b", b); - Map result = add.call(args); + Result result = add.call(args); assertEquals(result.size(), 1); - try (TFloat32 c = (TFloat32) result.values().iterator().next()) { + try (TFloat32 c = (TFloat32) result.get(0)) { assertEquals(25.5f, c.getFloat()); } } @@ -307,11 +310,7 @@ public void pythonTfFunction() { args.put("dummy", dummy); // TF functions always require an input, so we supply a dummy one here // This test actually checks that resource variables can be loaded correctly. 
- try (TFloat32 v = - (TFloat32) - getVariable - .call(args) - .get(getVariable.signature().outputNames().iterator().next())) { + try (TFloat32 v = (TFloat32) getVariable.call(args).get(0)) { assertEquals(2f, v.getFloat()); } } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SessionTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SessionTest.java index 95da0520f7d..918ccac5fe2 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SessionTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SessionTest.java @@ -16,7 +16,6 @@ package org.tensorflow; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -26,6 +25,7 @@ import java.nio.file.Path; import java.util.Comparator; import java.util.Iterator; +import java.util.Optional; import org.junit.jupiter.api.Test; import org.tensorflow.ndarray.NdArrays; import org.tensorflow.ndarray.Shape; @@ -38,6 +38,7 @@ import org.tensorflow.op.math.Add; import org.tensorflow.proto.framework.ConfigProto; import org.tensorflow.proto.framework.GraphDef; +import org.tensorflow.proto.framework.RunMetadata; import org.tensorflow.proto.framework.RunOptions; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt32; @@ -69,8 +70,7 @@ public void runUsingOperationNames() { Ops tf = Ops.create(g); transpose_A_times_X(tf, new int[][] {{2}, {3}}); try (TInt32 x = TInt32.tensorOf(StdArrays.ndCopyOf(new int[][] {{5}, {7}})); - AutoCloseableList outputs = - new AutoCloseableList<>(s.runner().feed("X", x).fetch("Y").run())) { + Result outputs = s.runner().feed("X", x).fetch("Y").run()) { assertEquals(1, outputs.size()); assertEquals(31, ((TInt32) outputs.get(0)).getInt(0, 0)); } @@ -86,8 +86,7 @@ public void runUsingOperationHandles() { Output feed = g.operation("X").output(0); Output fetch = g.operation("Y").output(0); try (TInt32 x = TInt32.tensorOf(StdArrays.ndCopyOf(new int[][] {{5}, {7}})); - AutoCloseableList outputs = - new AutoCloseableList<>(s.runner().feed(feed, x).fetch(fetch).run())) { + Result outputs = s.runner().feed(feed, x).fetch(fetch).run()) { assertEquals(1, outputs.size()); assertEquals(31, ((TInt32) outputs.get(0)).getInt(0, 0)); } @@ -124,20 +123,20 @@ public void runWithMetadata() { Ops tf = Ops.create(g); transpose_A_times_X(tf, new int[][] {{2}, {3}}); try (TInt32 x = TInt32.tensorOf(StdArrays.ndCopyOf(new int[][] {{5}, {7}}))) { - Session.Run result = + Result result = s.runner() .feed("X", x) .fetch("Y") .setOptions(fullTraceRunOptions()) .runAndFetchMetadata(); // Sanity check on outputs. 
- AutoCloseableList outputs = new AutoCloseableList<>(result.outputs); - assertEquals(1, outputs.size()); - assertEquals(31, ((TInt32) outputs.get(0)).getInt(0, 0)); + assertEquals(1, result.size()); + assertEquals(31, ((TInt32) result.get(0)).getInt(0, 0)); // Sanity check on metadata - assertNotNull(result.metadata); - assertTrue(result.metadata.hasStepStats(), result.metadata.toString()); - outputs.close(); + Optional metadata = result.getMetadata(); + assertTrue(metadata.isPresent()); + assertTrue(metadata.get().hasStepStats(), metadata.get().toString()); + result.close(); } } } @@ -149,8 +148,7 @@ public void runMultipleOutputs() { Ops tf = Ops.create(g); tf.withName("c1").constant(2718); tf.withName("c2").constant(31415); - AutoCloseableList outputs = - new AutoCloseableList<>(s.runner().fetch("c2").fetch("c1").run()); + Result outputs = s.runner().fetch("c2").fetch("c1").run(); assertEquals(2, outputs.size()); assertEquals(31415, ((TInt32) outputs.get(0)).getInt()); assertEquals(2718, ((TInt32) outputs.get(1)).getInt()); @@ -227,10 +225,8 @@ public void saveAndRestore() throws IOException { restoredGraph.importGraphDef(graphDef); try (Session restoredSession = new Session(restoredGraph)) { restoredSession.restore(testFolder.resolve("checkpoint").toString()); - try (AutoCloseableList oldList = - new AutoCloseableList<>(s.runner().fetch("x").fetch("y").run()); - AutoCloseableList newList = - new AutoCloseableList<>(restoredSession.runner().fetch("x").fetch("y").run())) { + try (Result oldList = s.runner().fetch("x").fetch("y").run(); + Result newList = restoredSession.runner().fetch("x").fetch("y").run()) { assertEquals(oldList.get(0), newList.get(0)); assertEquals(oldList.get(1), newList.get(1)); } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskUpdateTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskUpdateTest.java index 16c14f7a9a3..4edbea33b0d 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskUpdateTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/BooleanMaskUpdateTest.java @@ -18,12 +18,11 @@ import static org.junit.jupiter.api.Assertions.assertEquals; -import java.util.List; import org.junit.jupiter.api.Test; import org.tensorflow.Graph; import org.tensorflow.Operand; +import org.tensorflow.Result; import org.tensorflow.Session; -import org.tensorflow.Tensor; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.OpScope; import org.tensorflow.op.Scope; @@ -50,7 +49,7 @@ public void testBooleanMaskUpdateSlice() { Operand bcastOutput = BooleanMaskUpdate.create(scope, input, mask, Constant.scalarOf(scope, -1)); - List results = sess.runner().fetch(output).fetch(bcastOutput).run(); + Result results = sess.runner().fetch(output).fetch(bcastOutput).run(); try (TInt32 result = (TInt32) results.get(0); TInt32 bcastResult = (TInt32) results.get(1)) { @@ -89,7 +88,7 @@ public void testBooleanMaskUpdateSliceWithBroadcast() { Operand bcastOutput = BooleanMaskUpdate.create(scope, input, mask, Constant.scalarOf(scope, -1)); - List results = sess.runner().fetch(output).fetch(bcastOutput).run(); + Result results = sess.runner().fetch(output).fetch(bcastOutput).run(); try (TInt32 result = (TInt32) results.get(0); TInt32 bcastResult = (TInt32) results.get(1)) { @@ -131,7 +130,7 @@ public void testBooleanMaskUpdateAxis() { BooleanMaskUpdate.create( scope, input, mask, Constant.scalarOf(scope, -1), 
BooleanMaskUpdate.axis(2)); - List results = sess.runner().fetch(output).fetch(bcastOutput).run(); + Result results = sess.runner().fetch(output).fetch(bcastOutput).run(); try (TInt32 result = (TInt32) results.get(0); TInt32 bcastResult = (TInt32) results.get(1)) { diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ConstantTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ConstantTest.java index 5c413b3abeb..5194fccd707 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ConstantTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ConstantTest.java @@ -19,12 +19,11 @@ import java.io.IOException; import org.junit.jupiter.api.Test; -import org.tensorflow.AutoCloseableList; import org.tensorflow.EagerSession; import org.tensorflow.Graph; import org.tensorflow.Operand; +import org.tensorflow.Result; import org.tensorflow.Session; -import org.tensorflow.Tensor; import org.tensorflow.ndarray.DoubleNdArray; import org.tensorflow.ndarray.FloatNdArray; import org.tensorflow.ndarray.IntNdArray; @@ -66,8 +65,7 @@ public void createInts() { Scope scope = new OpScope(g); Constant op1 = Constant.tensorOf(scope, shape, buffer); Constant op2 = Constant.tensorOf(scope, array); - try (AutoCloseableList t = - new AutoCloseableList<>(sess.runner().fetch(op1).fetch(op2).run())) { + try (Result t = sess.runner().fetch(op1).fetch(op2).run()) { assertEquals(array, t.get(0)); assertEquals(array, t.get(1)); } @@ -85,8 +83,7 @@ public void createFloats() { Scope scope = new OpScope(g); Constant op1 = Constant.tensorOf(scope, shape, buffer); Constant op2 = Constant.tensorOf(scope, array); - try (AutoCloseableList t = - new AutoCloseableList<>(sess.runner().fetch(op1).fetch(op2).run())) { + try (Result t = sess.runner().fetch(op1).fetch(op2).run()) { assertEquals(array, t.get(0)); assertEquals(array, t.get(1)); } @@ -104,8 +101,7 @@ public void createDoubles() { Scope scope = new OpScope(g); Constant op1 = Constant.tensorOf(scope, shape, buffer); Constant op2 = Constant.tensorOf(scope, array); - try (AutoCloseableList t = - new AutoCloseableList<>(sess.runner().fetch(op1).fetch(op2).run())) { + try (Result t = sess.runner().fetch(op1).fetch(op2).run()) { assertEquals(array, t.get(0)); assertEquals(array, t.get(1)); } @@ -123,8 +119,7 @@ public void createLongs() { Scope scope = new OpScope(g); Constant op1 = Constant.tensorOf(scope, shape, buffer); Constant op2 = Constant.tensorOf(scope, array); - try (AutoCloseableList t = - new AutoCloseableList<>(sess.runner().fetch(op1).fetch(op2).run())) { + try (Result t = sess.runner().fetch(op1).fetch(op2).run()) { assertEquals(array, t.get(0)); assertEquals(array, t.get(1)); } @@ -142,8 +137,7 @@ public void createStrings() throws IOException { Scope scope = new OpScope(g); Constant op1 = Constant.tensorOf(scope, shape, buffer); Constant op2 = Constant.tensorOf(scope, array); - try (AutoCloseableList t = - new AutoCloseableList<>(sess.runner().fetch(op1).fetch(op2).run())) { + try (Result t = sess.runner().fetch(op1).fetch(op2).run()) { assertEquals(array, t.get(0)); assertEquals(array, t.get(1)); } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/GradientsTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/GradientsTest.java index 80150b64bb6..fb52b2d1059 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/GradientsTest.java +++ 
b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/GradientsTest.java @@ -21,11 +21,10 @@ import java.util.Arrays; import org.junit.jupiter.api.Test; -import org.tensorflow.AutoCloseableList; import org.tensorflow.Graph; import org.tensorflow.Output; +import org.tensorflow.Result; import org.tensorflow.Session; -import org.tensorflow.Tensor; import org.tensorflow.op.Ops; import org.tensorflow.types.TFloat32; @@ -48,12 +47,10 @@ public void createGradients() { assertEquals(2, grads.dy().size()); try (TFloat32 c = TFloat32.scalarOf(3.0f); - AutoCloseableList outputs = - new AutoCloseableList<>( - sess.runner().feed(x, c).fetch(grads.dy(0)).fetch(grads.dy(1)).run())) { + Result outputs = sess.runner().feed(x, c).fetch(grads.dy(0)).fetch(grads.dy(1)).run()) { - assertEquals(108.0f, ((TFloat32)outputs.get(0)).getFloat(), 0.0f); - assertEquals(18.0f, ((TFloat32)outputs.get(1)).getFloat(), 0.0f); + assertEquals(108.0f, ((TFloat32) outputs.get(0)).getFloat(), 0.0f); + assertEquals(18.0f, ((TFloat32) outputs.get(1)).getFloat(), 0.0f); } } } @@ -75,10 +72,9 @@ public void createGradientsWithSum() { assertEquals(1, grads.dy().size()); try (TFloat32 c = TFloat32.scalarOf(3.0f); - AutoCloseableList outputs = - new AutoCloseableList<>(sess.runner().feed(x, c).fetch(grads.dy(0)).run())) { + Result outputs = sess.runner().feed(x, c).fetch(grads.dy(0)).run()) { - assertEquals(114.0f, ((TFloat32)outputs.get(0)).getFloat(), 0.0f); + assertEquals(114.0f, ((TFloat32) outputs.get(0)).getFloat(), 0.0f); } } } @@ -94,18 +90,17 @@ public void createGradientsWithInitialValues() { Output y1 = tf.math.square(y0).y(); Gradients grads0 = Gradients.create(tf.scope(), y1, Arrays.asList(y0)); - Gradients grads1 = Gradients.create(tf.scope(), y0, Arrays.asList(x), Gradients.dx(grads0.dy())); + Gradients grads1 = + Gradients.create(tf.scope(), y0, Arrays.asList(x), Gradients.dx(grads0.dy())); assertNotNull(grads1); assertNotNull(grads1.dy()); assertEquals(1, grads1.dy().size()); try (TFloat32 c = TFloat32.scalarOf(3.0f); - AutoCloseableList outputs = - new AutoCloseableList<>( - sess.runner().feed(x, c).fetch(grads1.dy(0)).run())) { + Result outputs = sess.runner().feed(x, c).fetch(grads1.dy(0)).run()) { - assertEquals(108.0f, ((TFloat32)outputs.get(0)).getFloat(), 0.0f); + assertEquals(108.0f, ((TFloat32) outputs.get(0)).getFloat(), 0.0f); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ZerosTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ZerosTest.java index b4d36702c93..73b7e0a551c 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ZerosTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ZerosTest.java @@ -19,9 +19,9 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; -import java.util.List; import org.junit.jupiter.api.Test; import org.tensorflow.Graph; +import org.tensorflow.Result; import org.tensorflow.Session; import org.tensorflow.op.OpScope; import org.tensorflow.op.Scope; @@ -134,7 +134,7 @@ public void operationsComposingZerosAreCorrectlyNamed() { long[] shape = {2, 2}; Zeros zeros = Zeros.create(scope.withSubScope("test"), Constant.vectorOf(scope, shape), TFloat32.class); - List results = + Result results = sess.runner().addTarget("test/Zeros/Zero").addTarget("test/Zeros/Fill").run(); } } diff --git 
a/tensorflow-framework/src/test/java/org/tensorflow/framework/data/DatasetIteratorTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/data/DatasetIteratorTest.java index 1f8503829b7..1bbeb1a3f0a 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/data/DatasetIteratorTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/data/DatasetIteratorTest.java @@ -22,6 +22,7 @@ import org.junit.jupiter.api.Test; import org.tensorflow.Graph; import org.tensorflow.Operand; +import org.tensorflow.Result; import org.tensorflow.Session; import org.tensorflow.exceptions.TFOutOfRangeException; import org.tensorflow.op.Ops; @@ -51,15 +52,10 @@ public void testGraphIteration() { int batches = 0; while (true) { - try { - List outputs = session.runner().fetch(x).fetch(y).run(); - - try (TInt32 xBatch = (TInt32) outputs.get(0); - TInt32 yBatch = (TInt32) outputs.get(1)) { - assertEquals(testMatrix1.get(batches), xBatch); - assertEquals(testMatrix2.get(batches), yBatch); - batches++; - } + try (Result outputs = session.runner().fetch(x).fetch(y).run()) { + assertEquals(testMatrix1.get(batches), outputs.get(0)); + assertEquals(testMatrix2.get(batches), outputs.get(1)); + batches++; } catch (TFOutOfRangeException e) { break; } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/data/MapDatasetTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/data/MapDatasetTest.java index afa38e04ee8..e75bdde766e 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/data/MapDatasetTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/data/MapDatasetTest.java @@ -23,6 +23,7 @@ import org.junit.jupiter.api.Test; import org.tensorflow.Graph; import org.tensorflow.Operand; +import org.tensorflow.Result; import org.tensorflow.Session; import org.tensorflow.exceptions.TFOutOfRangeException; import org.tensorflow.ndarray.IntNdArray; @@ -76,17 +77,11 @@ public void testGraphIteration() { int batches = 0; while (true) { - try { - List outputs = session.runner().fetch(X).fetch(y).run(); + try (Result outputs = session.runner().fetch(X).fetch(y).run()) { + assertEquals(mapped1.get(batches), outputs.get(0)); + assertEquals(mapped2.get(batches), outputs.get(1)); - try (TInt32 XBatch = (TInt32) outputs.get(0); - TInt32 yBatch = (TInt32) outputs.get(1)) { - - assertEquals(mapped1.get(batches), XBatch); - assertEquals(mapped2.get(batches), yBatch); - - batches++; - } + batches++; } catch (TFOutOfRangeException e) { break; } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/AssertBroadcastableTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/AssertBroadcastableTest.java index 4330fa0aed7..fc1e2fe9573 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/AssertBroadcastableTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/AssertBroadcastableTest.java @@ -14,8 +14,11 @@ =======================================================================*/ package org.tensorflow.framework.metrics.impl; +import static org.junit.jupiter.api.Assertions.assertThrows; + import org.junit.jupiter.api.Test; import org.tensorflow.Operand; +import org.tensorflow.Result; import org.tensorflow.Tensor; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.op.Op; @@ -26,10 +29,6 @@ import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; -import 
java.util.List; - -import static org.junit.jupiter.api.Assertions.assertThrows; - public class AssertBroadcastableTest { private final TestSession.Mode tfMode = TestSession.Mode.GRAPH; @@ -69,10 +68,10 @@ private void testValid( Operand weightsPlaceholder = tf.placeholder(type); Operand valuesPlaceholder = tf.placeholder(type); - List tensors = - testSession.getGraphSession().runner().fetch(weights).fetch(values).run(); - try (Tensor weightsTensor = tensors.get(0); - Tensor valuesTensor = tensors.get(1)) { + try (Result tensors = + testSession.getGraphSession().runner().fetch(weights).fetch(values).run()) { + Tensor weightsTensor = tensors.get(0); + Tensor valuesTensor = tensors.get(1); Op dynamicOp = MetricsHelper.assertBroadcastable(tf, weightsPlaceholder, valuesPlaceholder); testSession diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/BroadcastWeightsTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/BroadcastWeightsTest.java index 3322a81fe5b..9df29436e31 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/BroadcastWeightsTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/impl/BroadcastWeightsTest.java @@ -14,8 +14,13 @@ =======================================================================*/ package org.tensorflow.framework.metrics.impl; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +import java.util.concurrent.atomic.AtomicInteger; import org.junit.jupiter.api.Test; import org.tensorflow.Operand; +import org.tensorflow.Result; import org.tensorflow.Tensor; import org.tensorflow.framework.utils.TestSession; import org.tensorflow.op.Ops; @@ -25,12 +30,6 @@ import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; -import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; - public class BroadcastWeightsTest { private final TestSession.Mode tfMode = TestSession.Mode.GRAPH; @@ -78,55 +77,57 @@ private void testValid( Operand weightsPlaceholder = tf.placeholder(type); Operand valuesPlaceholder = tf.placeholder(type); - List tensors = - testSession.getGraphSession().runner().fetch(weights).fetch(values).run(); - try (Tensor weightsTensor = tensors.get(0); - Tensor valuesTensor = tensors.get(1)) { + try (Result tensors = + testSession.getGraphSession().runner().fetch(weights).fetch(values).run()) { + Tensor weightsTensor = tensors.get(0); + Tensor valuesTensor = tensors.get(1); Operand dynamicOp = MetricsHelper.broadcastWeights(tf, weightsPlaceholder, valuesPlaceholder); - List result = + try (Result result = testSession .getGraphSession() .runner() .feed(weightsPlaceholder, weightsTensor) .feed(valuesPlaceholder, valuesTensor) .fetch(dynamicOp) - .run(); - - if (expected != null) { - if (type.equals(TInt32.class)) { - TInt32 intT = (TInt32) result.get(0); - AtomicInteger i = new AtomicInteger(); - intT.scalars() - .forEachIndexed( - (idx, f) -> assertEquals(expected[i.getAndIncrement()].intValue(), f.getInt())); - } else if (type.equals(TInt64.class)) { - TInt64 floatT = (TInt64) result.get(0); - AtomicInteger i = new AtomicInteger(); - floatT - .scalars() - .forEachIndexed( - (idx, f) -> assertEquals(expected[i.getAndIncrement()].longValue(), f.getLong())); - } else if (type.equals(TFloat32.class)) { - TFloat32 
floatT = (TFloat32) result.get(0); - AtomicInteger i = new AtomicInteger(); - floatT - .scalars() - .forEachIndexed( - (idx, f) -> - assertEquals( - expected[i.getAndIncrement()].floatValue(), f.getFloat(), 1e-5F)); - } else if (type.equals(TFloat64.class)) { - TFloat64 doubleT = (TFloat64) result.get(0); - AtomicInteger i = new AtomicInteger(); - doubleT - .scalars() - .forEachIndexed( - (idx, f) -> - assertEquals( - expected[i.getAndIncrement()].doubleValue(), f.getDouble(), 1e-5F)); + .run()) { + + if (expected != null) { + if (type.equals(TInt32.class)) { + TInt32 intT = (TInt32) result.get(0); + AtomicInteger i = new AtomicInteger(); + intT.scalars() + .forEachIndexed( + (idx, f) -> assertEquals(expected[i.getAndIncrement()].intValue(), f.getInt())); + } else if (type.equals(TInt64.class)) { + TInt64 floatT = (TInt64) result.get(0); + AtomicInteger i = new AtomicInteger(); + floatT + .scalars() + .forEachIndexed( + (idx, f) -> + assertEquals(expected[i.getAndIncrement()].longValue(), f.getLong())); + } else if (type.equals(TFloat32.class)) { + TFloat32 floatT = (TFloat32) result.get(0); + AtomicInteger i = new AtomicInteger(); + floatT + .scalars() + .forEachIndexed( + (idx, f) -> + assertEquals( + expected[i.getAndIncrement()].floatValue(), f.getFloat(), 1e-5F)); + } else if (type.equals(TFloat64.class)) { + TFloat64 doubleT = (TFloat64) result.get(0); + AtomicInteger i = new AtomicInteger(); + doubleT + .scalars() + .forEachIndexed( + (idx, f) -> + assertEquals( + expected[i.getAndIncrement()].doubleValue(), f.getDouble(), 1e-5F)); + } } } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/optimizers/GradientDescentTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/optimizers/GradientDescentTest.java index 909fd53ca27..a59f67f5a99 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/optimizers/GradientDescentTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/optimizers/GradientDescentTest.java @@ -4,6 +4,7 @@ import java.util.ArrayList; import java.util.List; +import java.util.Map; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; @@ -11,6 +12,7 @@ import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; import org.tensorflow.Graph; +import org.tensorflow.Result; import org.tensorflow.Session; import org.tensorflow.Tensor; import org.tensorflow.framework.initializers.Glorot; @@ -189,13 +191,18 @@ public void testDeterminism() { g.importGraphDef(def); s.initialize(); - initialized.add( + Result initializationRes = s.runner() .fetch(fcWeightName) .fetch(fcBiasName) .fetch(outputWeightName) .fetch(outputBiasName) - .run()); + .run(); + List initializedRun = new ArrayList<>(); + for (Map.Entry e : initializationRes) { + initializedRun.add(e.getValue()); + } + initialized.add(initializedRun); TFloat32 lossVal = (TFloat32) @@ -209,13 +216,18 @@ public void testDeterminism() { initialLoss[i] = lossVal.getFloat(); lossVal.close(); - trained.add( + Result trainedRes = s.runner() .fetch(fcWeightName) .fetch(fcBiasName) .fetch(outputWeightName) .fetch(outputBiasName) - .run()); + .run(); + List trainedRun = new ArrayList<>(); + for (Map.Entry e : trainedRes) { + trainedRun.add(e.getValue()); + } + trained.add(trainedRun); lossVal = (TFloat32) From 9ab7fb654d0049a7cd24376102586330c79e1aaa Mon Sep 17 00:00:00 2001 From: Adam Pocock Date: Tue, 8 Feb 2022 20:28:09 -0500 Subject: [PATCH 11/21] Bumping to TF 2.7.1, protobuf 
3.19.4, error-prone 2.10.0 (#412) --- pom.xml | 2 +- tensorflow-core/pom.xml | 4 +++- tensorflow-core/tensorflow-core-api/WORKSPACE | 6 +++--- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/pom.xml b/pom.xml index f4f1b18928b..102d51e0149 100644 --- a/pom.xml +++ b/pom.xml @@ -42,7 +42,7 @@ 5.6.2 1.21 2.7 - 2.6.0 + 2.10.0 true true true diff --git a/tensorflow-core/pom.xml b/tensorflow-core/pom.xml index d2a3e9d393d..54b8ab8372f 100644 --- a/tensorflow-core/pom.xml +++ b/tensorflow-core/pom.xml @@ -40,8 +40,10 @@ Match version used by TensorFlow, in https://github.com/tensorflow/tensorflow/blob/master/tensorflow/workspace2.bzl#L567 (but for the currently used release, not master) + + Bumped to newer version to patch a CVE only present in protobuf-java --> - 3.9.2 + 3.19.2 ${javacpp.platform}${javacpp.platform.extension} false diff --git a/tensorflow-core/tensorflow-core-api/WORKSPACE b/tensorflow-core/tensorflow-core-api/WORKSPACE index 8be7695be78..0ac4d82a193 100644 --- a/tensorflow-core/tensorflow-core-api/WORKSPACE +++ b/tensorflow-core/tensorflow-core-api/WORKSPACE @@ -18,10 +18,10 @@ http_archive( patch_args = ["-p1"], patch_cmds = ["grep -rl 'java_package' tensorflow/core | xargs sed -i.bak 's/^\(.* java_package = \"org\.tensorflow\.\)\(.*\"\)/\\1proto.\\2'/"], urls = [ - "https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.7.0.tar.gz", + "https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.7.1.tar.gz", ], - sha256 = "bb124905c7fdacd81e7c842b287c169bbf377d29c74c9dacc04f96c9793747bb", - strip_prefix = "tensorflow-2.7.0" + sha256 = "abebe2cf5ca379e18071693ca5f45b88ade941b16258a21cc1f12d77d5387a21", + strip_prefix = "tensorflow-2.7.1" ) # START: Upstream TensorFlow dependencies From 5210c7e7aff5d0fb615ffb5a40769c632a37db3a Mon Sep 17 00:00:00 2001 From: Adam Pocock Date: Fri, 11 Feb 2022 21:12:58 -0500 Subject: [PATCH 12/21] Fix metric test failures (#414) * Migrate metric tests from randomUniform to statelessRandomUniform * pom updates. * Spotless changes. 
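Concretely, the migration swaps the op-level seed option for an explicit seed tensor passed to the stateless op. A minimal sketch of the new pattern, as applied in the metric tests below; tf is assumed to be a graph-bound Ops instance, and the shape and seed values are illustrative only:

    // Requires: org.tensorflow.Operand, org.tensorflow.ndarray.Shape,
    // org.tensorflow.op.Ops (bound to a Graph), org.tensorflow.types.TFloat32.
    // Determinism now comes from the explicit two-element seed tensor,
    // not from an op-level RandomUniform.seed(...) option.
    Operand<TFloat32> predictions =
        tf.random.statelessRandomUniform(
            tf.constant(Shape.of(10, 3)),      // output shape
            tf.constant(new long[] {1L, 0L}),  // seed
            TFloat32.class);

Because the output is a pure function of the seed tensor, re-running the same graph yields identical values, which is what the idempotency tests rely on.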
--- pom.xml | 6 ++++-- tensorflow-core/pom.xml | 2 +- tensorflow-framework/pom.xml | 2 +- .../framework/metrics/PrecisionAtRecallTest.java | 9 ++++----- .../framework/metrics/PrecisionTest.java | 15 +++++++++------ .../framework/metrics/RecallAtPrecisionTest.java | 9 ++++----- .../tensorflow/framework/metrics/RecallTest.java | 6 ++++-- .../metrics/SensitivityAtSpecificityTest.java | 9 ++++----- .../metrics/SpecificityAtSensitivityTest.java | 9 ++++----- 9 files changed, 35 insertions(+), 32 deletions(-) diff --git a/pom.xml b/pom.xml index 102d51e0149..66687aade72 100644 --- a/pom.xml +++ b/pom.xml @@ -46,7 +46,7 @@ true true true - 2.11.1 + 2.20.2 @@ -371,7 +371,9 @@ - + + 1.14.0 + diff --git a/tensorflow-core/pom.xml b/tensorflow-core/pom.xml index 54b8ab8372f..7d3dd3ca324 100644 --- a/tensorflow-core/pom.xml +++ b/tensorflow-core/pom.xml @@ -43,7 +43,7 @@ Bumped to newer version to patch a CVE only present in protobuf-java --> - 3.19.2 + 3.19.4 ${javacpp.platform}${javacpp.platform.extension} false diff --git a/tensorflow-framework/pom.xml b/tensorflow-framework/pom.xml index 026bf227afe..b0d4900fb1d 100644 --- a/tensorflow-framework/pom.xml +++ b/tensorflow-framework/pom.xml @@ -93,7 +93,7 @@ 1 false - -Xmx2G -XX:MaxPermSize=256m + -Xmx2G **/*Test.java diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/PrecisionAtRecallTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/PrecisionAtRecallTest.java index 8132b74d7cd..756a7651363 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/PrecisionAtRecallTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/PrecisionAtRecallTest.java @@ -24,7 +24,6 @@ import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Op; import org.tensorflow.op.Ops; -import org.tensorflow.op.random.RandomUniform; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; @@ -39,11 +38,11 @@ public void testValueIsIdempotent() { PrecisionAtRecall instance = new PrecisionAtRecall<>(0.7f, 1001L, TFloat32.class); Operand predictions = - tf.random.randomUniform( - tf.constant(Shape.of(10, 3)), TFloat32.class, RandomUniform.seed(1L)); + tf.random.statelessRandomUniform( + tf.constant(Shape.of(10, 3)), tf.constant(new long[] {1L, 0L}), TFloat32.class); Operand labels = - tf.random.randomUniform( - tf.constant(Shape.of(10, 3)), TFloat32.class, RandomUniform.seed(1L)); + tf.random.statelessRandomUniform( + tf.constant(Shape.of(10, 3)), tf.constant(new long[] {1L, 0L}), TFloat32.class); Op update = instance.updateState(tf, labels, predictions, null); diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/PrecisionTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/PrecisionTest.java index b195432115e..673a563f894 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/PrecisionTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/PrecisionTest.java @@ -22,7 +22,6 @@ import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Op; import org.tensorflow.op.Ops; -import org.tensorflow.op.random.RandomUniform; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; import org.tensorflow.types.TInt32; @@ -39,11 +38,11 @@ public void testValueIsIdempotent() { Precision instance = new Precision<>(new float[] {0.3f, 0.72f}, 1001L, TFloat64.class); Operand predictions = - tf.random.randomUniform( - tf.constant(Shape.of(10, 3)), 
TFloat32.class, RandomUniform.seed(1001L)); + tf.random.statelessRandomUniform( + tf.constant(Shape.of(10, 3)), tf.constant(new long[] {1001L, 0L}), TFloat32.class); Operand labels = - tf.random.randomUniform( - tf.constant(Shape.of(10, 3)), TFloat32.class, RandomUniform.seed(1001L)); + tf.random.statelessRandomUniform( + tf.constant(Shape.of(10, 3)), tf.constant(new long[] {1001L, 0L}), TFloat32.class); Op update = instance.updateState(tf, labels, predictions, null); @@ -81,7 +80,11 @@ public void testUnweightedAllIncorrect() { Precision instance = new Precision<>(0.5f, 1001L, TFloat32.class); Operand predictions = - tf.random.randomUniformInt(tf.constant(Shape.of(100, 1)), tf.constant(0), tf.constant(2)); + tf.random.statelessMultinomial( + tf.constant(new float[][] {{0.5f, 0.5f}}), + tf.constant(100), + tf.constant(new long[] {1001L, 0L}), + TInt32.class); Operand labels = tf.math.sub(tf.constant(1), predictions); Op update = instance.updateState(tf, labels, predictions, null); session.run(update); diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/RecallAtPrecisionTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/RecallAtPrecisionTest.java index 36dba3180b7..184c42b7326 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/RecallAtPrecisionTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/RecallAtPrecisionTest.java @@ -24,7 +24,6 @@ import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Op; import org.tensorflow.op.Ops; -import org.tensorflow.op.random.RandomUniform; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; @@ -39,11 +38,11 @@ public void testValueIsIdempotent() { RecallAtPrecision instance = new RecallAtPrecision<>(0.7f, 1001L, TFloat32.class); Operand predictions = - tf.random.randomUniform( - tf.constant(Shape.of(10, 3)), TFloat32.class, RandomUniform.seed(1L)); + tf.random.statelessRandomUniform( + tf.constant(Shape.of(10, 3)), tf.constant(new long[] {1L, 0L}), TFloat32.class); Operand labels = - tf.random.randomUniform( - tf.constant(Shape.of(10, 3)), TFloat32.class, RandomUniform.seed(1L)); + tf.random.statelessRandomUniform( + tf.constant(Shape.of(10, 3)), tf.constant(new long[] {1L, 0L}), TFloat32.class); labels = tf.math.mul(labels, tf.constant(2.0f)); Op update = instance.updateState(tf, labels, predictions); diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/RecallTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/RecallTest.java index e820cbe0d74..e862ffe280e 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/RecallTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/RecallTest.java @@ -36,9 +36,11 @@ public void testValueIsIdempotent() { Recall instance = new Recall<>(new float[] {0.3f, 0.72f}, 1001L, TFloat32.class); Operand predictions = - tf.random.randomUniform(tf.constant(Shape.of(10, 3)), TFloat32.class); + tf.random.statelessRandomUniform( + tf.constant(Shape.of(10, 3)), tf.constant(new long[] {1L, 0L}), TFloat32.class); Operand labels = - tf.random.randomUniform(tf.constant(Shape.of(10, 3)), TFloat32.class); + tf.random.statelessRandomUniform( + tf.constant(Shape.of(10, 3)), tf.constant(new long[] {1L, 0L}), TFloat32.class); Op update = instance.updateState(tf, labels, predictions, null); for (int i = 0; i < 10; i++) session.run(update); diff --git 
a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/SensitivityAtSpecificityTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/SensitivityAtSpecificityTest.java index d18ca9813fe..179dbf2b9fc 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/SensitivityAtSpecificityTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/SensitivityAtSpecificityTest.java @@ -24,7 +24,6 @@ import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Op; import org.tensorflow.op.Ops; -import org.tensorflow.op.random.RandomUniform; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; import org.tensorflow.types.TInt64; @@ -40,11 +39,11 @@ public void testValueIsIdempotent() { SensitivityAtSpecificity instance = new SensitivityAtSpecificity<>(0.7f, 1001L, TFloat32.class); Operand predictions = - tf.random.randomUniform( - tf.constant(Shape.of(10, 3)), TFloat32.class, RandomUniform.seed(1L)); + tf.random.statelessRandomUniform( + tf.constant(Shape.of(10, 3)), tf.constant(new long[] {1L, 0L}), TFloat32.class); Operand labels = - tf.random.randomUniform( - tf.constant(Shape.of(10, 3)), TFloat32.class, RandomUniform.seed(1L)); + tf.random.statelessRandomUniform( + tf.constant(Shape.of(10, 3)), tf.constant(new long[] {1L, 0L}), TFloat32.class); labels = tf.math.mul(labels, tf.constant(2.0f)); // instance.setDebug(session.getGraphSession()); diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/SpecificityAtSensitivityTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/SpecificityAtSensitivityTest.java index 676b443cd1c..6507345bbb4 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/SpecificityAtSensitivityTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/SpecificityAtSensitivityTest.java @@ -24,7 +24,6 @@ import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Op; import org.tensorflow.op.Ops; -import org.tensorflow.op.random.RandomUniform; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; import org.tensorflow.types.TInt32; @@ -42,11 +41,11 @@ public void testValueIsIdempotent() { new SpecificityAtSensitivity<>(0.7f, 1001L, TFloat32.class); Operand predictions = - tf.random.randomUniform( - tf.constant(Shape.of(10, 3)), TFloat32.class, RandomUniform.seed(1L)); + tf.random.statelessRandomUniform( + tf.constant(Shape.of(10, 3)), tf.constant(new long[] {1L, 0L}), TFloat32.class); Operand labels = - tf.random.randomUniform( - tf.constant(Shape.of(10, 3)), TFloat32.class, RandomUniform.seed(1L)); + tf.random.statelessRandomUniform( + tf.constant(Shape.of(10, 3)), tf.constant(new long[] {1L, 0L}), TFloat32.class); // instance.setDebug(session.getGraphSession()); Op update = instance.updateState(tf, labels, predictions, null); From 02ec49068c21fdd3c70316b380d938a4c480c22d Mon Sep 17 00:00:00 2001 From: Karl Lessard Date: Tue, 8 Mar 2022 11:12:41 -0500 Subject: [PATCH 13/21] Migrate on Java 11 as minimum supported version --- .github/workflows/ci.yml | 51 ++++++++++---- .mvn/jvm.config | 10 +++ README.md | 26 +++---- pom.xml | 47 ++++++++----- tensorflow-core/tensorflow-core-api/pom.xml | 4 +- .../src/main/java/module-info.java | 67 +++++++++++++++++++ .../src/main/java/module-info.java | 27 ++++++++ .../op/generator/AttributeType.java | 2 +- .../tensorflow/op/generator/FullOpDef.java | 2 +- .../tensorflow/op/generator/StatefulPair.java | 2 +- 
tensorflow-framework/pom.xml | 4 -- .../src/main/java/module-info.java | 35 ++++++++++ 12 files changed, 226 insertions(+), 51 deletions(-) create mode 100644 .mvn/jvm.config create mode 100644 tensorflow-core/tensorflow-core-api/src/main/java/module-info.java create mode 100644 tensorflow-core/tensorflow-core-generator/src/main/java/module-info.java create mode 100644 tensorflow-framework/src/main/java/module-info.java diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0fd044979af..4eeaf85de8a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -20,53 +20,61 @@ jobs: runs-on: ubuntu-latest container: centos:7 steps: - - name: Checkout repository - uses: actions/checkout@v1 - name: Install environment run: | yum -y update yum -y install centos-release-scl-rh epel-release - yum -y install java-11-openjdk-devel devtoolset-9 + yum -y install devtoolset-9 echo Downloading Maven curl -L https://archive.apache.org/dist/maven/maven-3/3.6.3/binaries/apache-maven-3.6.3-bin.tar.gz -o $HOME/apache-maven-3.6.3-bin.tar.gz tar xzf $HOME/apache-maven-3.6.3-bin.tar.gz -C /opt/ ln -sf /opt/apache-maven-3.6.3/bin/mvn /usr/bin/mvn + - name: Configure Java + uses: actions/setup-java@v2 + with: + distribution: 'adopt' + java-version: '17' + - name: Checkout repository + uses: actions/checkout@v1 - name: Build project run: | source scl_source enable devtoolset-9 || true - export JAVA_HOME=$(dirname $(dirname $(readlink $(readlink $(which javac))))) echo $JAVA_HOME mvn -version - mvn clean install -Pdev,jdk11 -B -U -e -Dlint.skip=true + mvn clean install -Pdev,jdk17 -B -U -e -Dlint.skip=true - name: Run lint checks run: | - mvn compiler:compile -Pdev,jdk11 -B -U -e + mvn compiler:compile -Pdev,jdk17 -B -U -e check-format: if: github.event_name == 'pull_request' runs-on: ubuntu-latest container: centos:7 steps: - - name: Checkout repository - uses: actions/checkout@v1 - name: Install environment run: | yum -y update yum -y install centos-release-scl-rh epel-release - yum -y install java-11-openjdk-devel devtoolset-9 + yum -y install devtoolset-9 echo Downloading Maven curl -L https://archive.apache.org/dist/maven/maven-3/3.6.3/binaries/apache-maven-3.6.3-bin.tar.gz -o $HOME/apache-maven-3.6.3-bin.tar.gz tar xzf $HOME/apache-maven-3.6.3-bin.tar.gz -C /opt/ ln -sf /opt/apache-maven-3.6.3/bin/mvn /usr/bin/mvn + - name: Configure Java + uses: actions/setup-java@v2 + with: + distribution: 'adopt' + java-version: '17' + - name: Checkout repository + uses: actions/checkout@v1 - name: Build project run: | source scl_source enable devtoolset-9 || true - export JAVA_HOME=$(dirname $(dirname $(readlink $(readlink $(which javac))))) echo $JAVA_HOME mvn -version - mvn clean install -Pdev,jdk11 -B -U -e -Dlint.skip=true -Dmaven.test.skip=true + mvn clean install -Pdev,jdk17 -B -U -e -Dlint.skip=true -Dmaven.test.skip=true - name: Run format checks run: | - mvn spotless:check -Pdev,jdk11 -B -U -e + mvn spotless:check -Pdev,jdk17 -B -U -e prepare: runs-on: ubuntu-latest outputs: @@ -99,7 +107,7 @@ jobs: yum --disablerepo updates -y install $GLIBC yum -x "$GLIBC" -y update yum -x "$GLIBC" -y install centos-release-scl-rh epel-release - yum -x "$GLIBC" -y install java-1.8.0-openjdk-devel devtoolset-9 rh-git218 patch perl-Data-Dumper python36-devel python36-numpy python36-pip python36-six + yum -x "$GLIBC" -y install devtoolset-9 rh-git218 patch perl-Data-Dumper python36-devel python36-numpy python36-pip python36-six echo Downloading Maven curl -L 
https://archive.apache.org/dist/maven/maven-3/3.6.3/binaries/apache-maven-3.6.3-bin.tar.gz -o $HOME/apache-maven-3.6.3-bin.tar.gz tar xzf $HOME/apache-maven-3.6.3-bin.tar.gz -C /opt/ @@ -124,6 +132,11 @@ jobs: rm -f $(find /usr/local/cuda/ -name '*.a' -and -not -name libcudart_static.a -and -not -name libcudadevrt.a) rm -rf /usr/local/cuda/doc* /usr/local/cuda/libnvvp* /usr/local/cuda/nsight* /usr/local/cuda/samples* fi + - name: Configure Java + uses: actions/setup-java@v2 + with: + distribution: 'adopt' + java-version: '11' - name: Checkout repository uses: actions/checkout@v1 - name: Build project @@ -163,6 +176,11 @@ jobs: curl -L https://github.com/bazelbuild/bazel/releases/download/3.7.2/bazel-3.7.2-installer-darwin-x86_64.sh -o bazel.sh --retry 10 bash bazel.sh brew install libomp perl + - name: Configure Java + uses: actions/setup-java@v2 + with: + distribution: 'adopt' + java-version: '11' - name: Checkout repository uses: actions/checkout@v1 - name: Build project @@ -186,7 +204,7 @@ jobs: df -h windows-x86_64: if: github.event_name == 'push' || contains(github.event.pull_request.labels.*.name, 'CI build') - runs-on: windows-latest + runs-on: windows-2019 needs: prepare strategy: matrix: @@ -223,6 +241,11 @@ jobs: cp.exe -a cuda/include cuda/lib cuda/bin "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.2/" ) echo %JAVA_HOME% + - name: Configure Java + uses: actions/setup-java@v2 + with: + distribution: 'adopt' + java-version: '11' - name: Checkout repository uses: actions/checkout@v1 - name: Build project diff --git a/.mvn/jvm.config b/.mvn/jvm.config new file mode 100644 index 00000000000..8488a4fce61 --- /dev/null +++ b/.mvn/jvm.config @@ -0,0 +1,10 @@ +--add-exports jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED +--add-exports jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED +--add-exports jdk.compiler/com.sun.tools.javac.main=ALL-UNNAMED +--add-exports jdk.compiler/com.sun.tools.javac.model=ALL-UNNAMED +--add-exports jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED +--add-exports jdk.compiler/com.sun.tools.javac.processing=ALL-UNNAMED +--add-exports jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED +--add-exports jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED +--add-opens jdk.compiler/com.sun.tools.javac.code=ALL-UNNAMED +--add-opens jdk.compiler/com.sun.tools.javac.comp=ALL-UNNAMED \ No newline at end of file diff --git a/README.md b/README.md index 305fb1e759a..33aed938816 100644 --- a/README.md +++ b/README.md @@ -137,19 +137,19 @@ to add Sonatype OSS repository in your pom.xml, like the following ``` -## TensorFlow Version Support - -This table shows the mapping between different version of TensorFlow for Java and the core runtime libraries. - -| TensorFlow Java Version | TensorFlow Version | -| ------------- | ------------- | -| 0.2.0 | 2.3.1 | -| 0.3.0 | 2.4.1 | -| 0.3.1 | 2.4.1 | -| 0.3.2 | 2.4.1 | -| 0.3.3 | 2.4.1 | -| 0.4.0 | 2.7.0 | -| 0.5.0-SNAPSHOT | 2.7.0 | +## TensorFlow/Java Version Support + +This table shows the mapping between TensorFlow, TensorFlow Java and minimum supported Java versions. + +| TensorFlow Java Version | TensorFlow Version | Minimum Java Version | +| ------------- | ------------- | --------------- | +| 0.2.0 | 2.3.1 | 8 | +| 0.3.0 | 2.4.1 | 8 | +| 0.3.1 | 2.4.1 | 8 | +| 0.3.2 | 2.4.1 | 8 | +| 0.3.3 | 2.4.1 | 8 | +| 0.4.0 | 2.7.0 | 8 | +| 0.5.0-SNAPSHOT | 2.7.0 | 11 | ## How to Contribute? 
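Since this migration patch only lists the new module-info.java descriptors in its diffstat, here is a minimal sketch of how a downstream application on Java 11+ might consume them from the module path. The module names below are assumptions for illustration only; the actual contents of the new module-info.java files are not shown in this hunk.

// Hypothetical consumer module descriptor; the TensorFlow Java module names are
// assumed here, since the new module-info.java files are only listed above.
module com.example.mlapp {
  requires org.tensorflow.core.api;  // assumed module name for tensorflow-core-api
  requires org.tensorflow.framework; // assumed module name for tensorflow-framework
}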
diff --git a/pom.xml b/pom.xml index 66687aade72..d87daefcb7b 100644 --- a/pom.xml +++ b/pom.xml @@ -37,8 +37,9 @@ UTF8 - 1.8 - 1.8 + 11 + 11 + 11 5.6.2 1.21 2.7 @@ -174,11 +175,11 @@ - jdk11 + jdk17 - 11 - 11 - 11 + 17 + 17 + 17 @@ -189,11 +190,9 @@ lint - - (1.9,) - !lint.skip + lint.skip !true @@ -205,13 +204,15 @@ 3.8.0 true - true -Xlint:all -XDcompilePolicy=simple -Xplugin:ErrorProne - + -J--add-opens=jdk.compiler/com.sun.tools.javac.code=ALL-UNNAMED + -J--add-opens=jdk.compiler/com.sun.tools.javac.comp=ALL-UNNAMED + + -J--add-exports=jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED -J--add-exports=jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED -J--add-exports=jdk.compiler/com.sun.tools.javac.main=ALL-UNNAMED @@ -220,8 +221,6 @@ -J--add-exports=jdk.compiler/com.sun.tools.javac.processing=ALL-UNNAMED -J--add-exports=jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED -J--add-exports=jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED - -J--add-opens=jdk.compiler/com.sun.tools.javac.code=ALL-UNNAMED - -J--add-opens=jdk.compiler/com.sun.tools.javac.comp=ALL-UNNAMED @@ -303,6 +302,14 @@ + + org.apache.maven.plugins + maven-compiler-plugin + 3.8.0 + + true + + org.apache.maven.plugins @@ -366,15 +373,12 @@ spotless-maven-plugin ${spotless.version} - origin/master - 1.14.0 - @@ -387,6 +391,17 @@ maven-jar-plugin 3.2.0 + + org.apache.maven.plugins + maven-surefire-plugin + 3.0.0-M5 + + + **/*Test.java + + false + + diff --git a/tensorflow-core/tensorflow-core-api/pom.xml b/tensorflow-core/tensorflow-core-api/pom.xml index 142aac1065f..4acb14dfecd 100644 --- a/tensorflow-core/tensorflow-core-api/pom.xml +++ b/tensorflow-core/tensorflow-core-api/pom.xml @@ -203,6 +203,9 @@ org/tensorflow/internal/c_api/presets/*.java + + 8 + @@ -433,7 +436,6 @@ maven-surefire-plugin - 3.0.0-M5 dev - - - org.tensorflow - tensorflow-core-api - ${project.version} - ${native.classifier} - - true + + + + org.apache.maven.plugins + maven-dependency-plugin + + + dev-unpack-native + initialize + + unpack + + + ${project.groupId}:${project.artifactId}:${project.version}:jar:${native.classifier} + ${project.build.directory}/native + + + + + + javacpp-compiler From 9793be09e5f4c0960251238e90fa7e336b798671 Mon Sep 17 00:00:00 2001 From: Karl Lessard Date: Mon, 14 Mar 2022 10:06:51 -0400 Subject: [PATCH 17/21] Update snapshots version to 0.5.0-SNAPSHOT --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 33aed938816..1809e116ac1 100644 --- a/README.md +++ b/README.md @@ -132,7 +132,7 @@ to add Sonatype OSS repository in your pom.xml, like the following org.tensorflow tensorflow-core-platform - 0.4.0-SNAPSHOT + 0.5.0-SNAPSHOT ``` @@ -149,7 +149,7 @@ This table shows the mapping between TensorFlow, TensorFlow Java and minimum sup | 0.3.2 | 2.4.1 | 8 | | 0.3.3 | 2.4.1 | 8 | | 0.4.0 | 2.7.0 | 8 | -| 0.5.0-SNAPSHOT | 2.7.0 | 11 | +| 0.5.0-SNAPSHOT | 2.8.0 | 11 | ## How to Contribute? From e1410a49200ff0689538beb576ec01133340d511 Mon Sep 17 00:00:00 2001 From: Adam Pocock Date: Sat, 26 Mar 2022 10:03:21 -0400 Subject: [PATCH 18/21] Release v0.4.1 (#432) * Bumping version for point release. * Fix NullPointerException issue. 
(#407) * Bumping to TF 2.7.1, protobuf 3.19.4, error-prone 2.10.0 (#412) * Fix metric test failures (#414) * Migrate metric tests from randomUniform to statelessRandomUniform Co-authored-by: Frank Liu --- README.md | 17 +++++++++-------- pom.xml | 10 ++++++---- tensorflow-core/pom.xml | 6 ++++-- tensorflow-core/tensorflow-core-api/WORKSPACE | 6 +++--- tensorflow-core/tensorflow-core-api/pom.xml | 2 +- .../java/org/tensorflow/EagerOperation.java | 4 ++-- .../tensorflow-core-generator/pom.xml | 2 +- .../tensorflow-core-platform-gpu/pom.xml | 2 +- .../tensorflow-core-platform/pom.xml | 2 +- tensorflow-framework/pom.xml | 4 ++-- .../metrics/PrecisionAtRecallTest.java | 9 ++++----- .../framework/metrics/PrecisionTest.java | 15 +++++++++------ .../metrics/RecallAtPrecisionTest.java | 9 ++++----- .../framework/metrics/RecallTest.java | 6 ++++-- .../metrics/SensitivityAtSpecificityTest.java | 9 ++++----- .../metrics/SpecificityAtSensitivityTest.java | 9 ++++----- 16 files changed, 59 insertions(+), 53 deletions(-) diff --git a/README.md b/README.md index d482990c499..e7ec2098707 100644 --- a/README.md +++ b/README.md @@ -56,12 +56,12 @@ systems, you should add the following dependencies: org.tensorflow tensorflow-core-api - 0.4.0 + 0.4.1 org.tensorflow tensorflow-core-api - 0.4.0 + 0.4.1 linux-x86_64${javacpp.platform.extension} ``` @@ -72,24 +72,24 @@ native dependencies as follows: org.tensorflow tensorflow-core-api - 0.4.0 + 0.4.1 org.tensorflow tensorflow-core-api - 0.4.0 + 0.4.1 linux-x86_64${javacpp.platform.extension} org.tensorflow tensorflow-core-api - 0.4.0 + 0.4.1 macosx-x86_64${javacpp.platform.extension} org.tensorflow tensorflow-core-api - 0.4.0 + 0.4.1 windows-x86_64${javacpp.platform.extension} ``` @@ -102,7 +102,7 @@ artifact includes transitively all the artifacts above as a single dependency: org.tensorflow tensorflow-core-platform${javacpp.platform.extension} - 0.4.0 + 0.4.1 ``` @@ -132,7 +132,7 @@ to add Sonatype OSS repository in your pom.xml, like the following org.tensorflow tensorflow-core-platform - 0.4.0-SNAPSHOT + 0.5.0-SNAPSHOT ``` @@ -149,6 +149,7 @@ This table shows the mapping between different version of TensorFlow for Java an | 0.3.2 | 2.4.1 | | 0.3.3 | 2.4.1 | | 0.4.0 | 2.7.0 | +| 0.4.1 | 2.7.1 | ## How to Contribute? 
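As a quick sanity check after adding the 0.4.1 artifacts shown in the README snippet above, the sketch below prints the bundled native runtime version. It assumes only that tensorflow-core-platform (or a per-platform pair of artifacts) is on the classpath; per the version table above, it should report the 2.7.1 runtime for this release.

import org.tensorflow.TensorFlow;

// Minimal smoke test: loading the TensorFlow class pulls in the native library,
// and version() reports the bundled runtime (2.7.1 for this release).
public final class VersionCheck {
  public static void main(String[] args) {
    System.out.println("TensorFlow runtime: " + TensorFlow.version());
  }
}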
diff --git a/pom.xml b/pom.xml index 4c9b019a003..38e35f46fa5 100644 --- a/pom.xml +++ b/pom.xml @@ -7,7 +7,7 @@ org.tensorflow tensorflow-java - 0.4.0 + 0.4.1 pom TensorFlow Java Parent @@ -42,11 +42,11 @@ 5.6.2 1.21 2.7 - 2.6.0 + 2.10.0 true true true - 2.11.1 + 2.20.2 @@ -371,7 +371,9 @@ - + + 1.14.0 + diff --git a/tensorflow-core/pom.xml b/tensorflow-core/pom.xml index 407f7f690b5..4cfa9dc6abd 100644 --- a/tensorflow-core/pom.xml +++ b/tensorflow-core/pom.xml @@ -22,7 +22,7 @@ org.tensorflow tensorflow-java - 0.4.0 + 0.4.1 tensorflow-core pom @@ -40,8 +40,10 @@ Match version used by TensorFlow, in https://github.com/tensorflow/tensorflow/blob/master/tensorflow/workspace2.bzl#L567 (but for the currently used release, not master) + + Bumped to newer version to patch a CVE only present in protobuf-java --> - 3.9.2 + 3.19.4 ${javacpp.platform}${javacpp.platform.extension} false diff --git a/tensorflow-core/tensorflow-core-api/WORKSPACE b/tensorflow-core/tensorflow-core-api/WORKSPACE index 8be7695be78..0ac4d82a193 100644 --- a/tensorflow-core/tensorflow-core-api/WORKSPACE +++ b/tensorflow-core/tensorflow-core-api/WORKSPACE @@ -18,10 +18,10 @@ http_archive( patch_args = ["-p1"], patch_cmds = ["grep -rl 'java_package' tensorflow/core | xargs sed -i.bak 's/^\(.* java_package = \"org\.tensorflow\.\)\(.*\"\)/\\1proto.\\2'/"], urls = [ - "https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.7.0.tar.gz", + "https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.7.1.tar.gz", ], - sha256 = "bb124905c7fdacd81e7c842b287c169bbf377d29c74c9dacc04f96c9793747bb", - strip_prefix = "tensorflow-2.7.0" + sha256 = "abebe2cf5ca379e18071693ca5f45b88ade941b16258a21cc1f12d77d5387a21", + strip_prefix = "tensorflow-2.7.1" ) # START: Upstream TensorFlow dependencies diff --git a/tensorflow-core/tensorflow-core-api/pom.xml b/tensorflow-core/tensorflow-core-api/pom.xml index 4d61c0b8cc0..331c62216dd 100644 --- a/tensorflow-core/tensorflow-core-api/pom.xml +++ b/tensorflow-core/tensorflow-core-api/pom.xml @@ -6,7 +6,7 @@ org.tensorflow tensorflow-core - 0.4.0 + 0.4.1 tensorflow-core-api jar diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerOperation.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerOperation.java index 29449847be8..bf0ed87586d 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerOperation.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerOperation.java @@ -190,9 +190,9 @@ private static Tensor resolveTensorHandle(TFE_TensorHandle handle, EagerSession requireTensorHandle(handle); try (PointerScope scope = new PointerScope()) { TF_Status status = TF_Status.newStatus(); - TF_Tensor tensor = TFE_TensorHandleResolve(handle, status).withDeallocator(); + TF_Tensor tensor = TFE_TensorHandleResolve(handle, status); status.throwExceptionIfNotOK(); - return RawTensor.fromHandle(tensor, session).asTypedTensor(); + return RawTensor.fromHandle(tensor.withDeallocator(), session).asTypedTensor(); } } diff --git a/tensorflow-core/tensorflow-core-generator/pom.xml b/tensorflow-core/tensorflow-core-generator/pom.xml index 14786145f65..3fbd73ca2c6 100644 --- a/tensorflow-core/tensorflow-core-generator/pom.xml +++ b/tensorflow-core/tensorflow-core-generator/pom.xml @@ -5,7 +5,7 @@ org.tensorflow tensorflow-core - 0.4.0 + 0.4.1 tensorflow-core-generator jar diff --git a/tensorflow-core/tensorflow-core-platform-gpu/pom.xml b/tensorflow-core/tensorflow-core-platform-gpu/pom.xml index 
50a4c4cace5..fca1af5b785 100644 --- a/tensorflow-core/tensorflow-core-platform-gpu/pom.xml +++ b/tensorflow-core/tensorflow-core-platform-gpu/pom.xml @@ -22,7 +22,7 @@ org.tensorflow tensorflow-core - 0.4.0 + 0.4.1 tensorflow-core-platform-gpu TensorFlow Core API Library Platform GPU diff --git a/tensorflow-core/tensorflow-core-platform/pom.xml b/tensorflow-core/tensorflow-core-platform/pom.xml index 165079d2e74..a84fc2c4436 100644 --- a/tensorflow-core/tensorflow-core-platform/pom.xml +++ b/tensorflow-core/tensorflow-core-platform/pom.xml @@ -22,7 +22,7 @@ org.tensorflow tensorflow-core - 0.4.0 + 0.4.1 tensorflow-core-platform TensorFlow Core API Library Platform diff --git a/tensorflow-framework/pom.xml b/tensorflow-framework/pom.xml index 238b2610622..5a33d5c14c4 100644 --- a/tensorflow-framework/pom.xml +++ b/tensorflow-framework/pom.xml @@ -22,7 +22,7 @@ org.tensorflow tensorflow-java - 0.4.0 + 0.4.1 tensorflow-framework jar @@ -93,7 +93,7 @@ 1 false - -Xmx2G -XX:MaxPermSize=256m + -Xmx2G **/*Test.java diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/PrecisionAtRecallTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/PrecisionAtRecallTest.java index 8132b74d7cd..756a7651363 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/PrecisionAtRecallTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/PrecisionAtRecallTest.java @@ -24,7 +24,6 @@ import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Op; import org.tensorflow.op.Ops; -import org.tensorflow.op.random.RandomUniform; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; @@ -39,11 +38,11 @@ public void testValueIsIdempotent() { PrecisionAtRecall instance = new PrecisionAtRecall<>(0.7f, 1001L, TFloat32.class); Operand predictions = - tf.random.randomUniform( - tf.constant(Shape.of(10, 3)), TFloat32.class, RandomUniform.seed(1L)); + tf.random.statelessRandomUniform( + tf.constant(Shape.of(10, 3)), tf.constant(new long[] {1L, 0L}), TFloat32.class); Operand labels = - tf.random.randomUniform( - tf.constant(Shape.of(10, 3)), TFloat32.class, RandomUniform.seed(1L)); + tf.random.statelessRandomUniform( + tf.constant(Shape.of(10, 3)), tf.constant(new long[] {1L, 0L}), TFloat32.class); Op update = instance.updateState(tf, labels, predictions, null); diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/PrecisionTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/PrecisionTest.java index b195432115e..673a563f894 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/PrecisionTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/PrecisionTest.java @@ -22,7 +22,6 @@ import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Op; import org.tensorflow.op.Ops; -import org.tensorflow.op.random.RandomUniform; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; import org.tensorflow.types.TInt32; @@ -39,11 +38,11 @@ public void testValueIsIdempotent() { Precision instance = new Precision<>(new float[] {0.3f, 0.72f}, 1001L, TFloat64.class); Operand predictions = - tf.random.randomUniform( - tf.constant(Shape.of(10, 3)), TFloat32.class, RandomUniform.seed(1001L)); + tf.random.statelessRandomUniform( + tf.constant(Shape.of(10, 3)), tf.constant(new long[] {1001L, 0L}), TFloat32.class); Operand labels = - tf.random.randomUniform( - tf.constant(Shape.of(10, 3)), 
TFloat32.class, RandomUniform.seed(1001L)); + tf.random.statelessRandomUniform( + tf.constant(Shape.of(10, 3)), tf.constant(new long[] {1001L, 0L}), TFloat32.class); Op update = instance.updateState(tf, labels, predictions, null); @@ -81,7 +80,11 @@ public void testUnweightedAllIncorrect() { Precision instance = new Precision<>(0.5f, 1001L, TFloat32.class); Operand predictions = - tf.random.randomUniformInt(tf.constant(Shape.of(100, 1)), tf.constant(0), tf.constant(2)); + tf.random.statelessMultinomial( + tf.constant(new float[][] {{0.5f, 0.5f}}), + tf.constant(100), + tf.constant(new long[] {1001L, 0L}), + TInt32.class); Operand labels = tf.math.sub(tf.constant(1), predictions); Op update = instance.updateState(tf, labels, predictions, null); session.run(update); diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/RecallAtPrecisionTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/RecallAtPrecisionTest.java index 36dba3180b7..184c42b7326 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/RecallAtPrecisionTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/RecallAtPrecisionTest.java @@ -24,7 +24,6 @@ import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Op; import org.tensorflow.op.Ops; -import org.tensorflow.op.random.RandomUniform; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TInt64; @@ -39,11 +38,11 @@ public void testValueIsIdempotent() { RecallAtPrecision instance = new RecallAtPrecision<>(0.7f, 1001L, TFloat32.class); Operand predictions = - tf.random.randomUniform( - tf.constant(Shape.of(10, 3)), TFloat32.class, RandomUniform.seed(1L)); + tf.random.statelessRandomUniform( + tf.constant(Shape.of(10, 3)), tf.constant(new long[] {1L, 0L}), TFloat32.class); Operand labels = - tf.random.randomUniform( - tf.constant(Shape.of(10, 3)), TFloat32.class, RandomUniform.seed(1L)); + tf.random.statelessRandomUniform( + tf.constant(Shape.of(10, 3)), tf.constant(new long[] {1L, 0L}), TFloat32.class); labels = tf.math.mul(labels, tf.constant(2.0f)); Op update = instance.updateState(tf, labels, predictions); diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/RecallTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/RecallTest.java index e820cbe0d74..e862ffe280e 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/RecallTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/RecallTest.java @@ -36,9 +36,11 @@ public void testValueIsIdempotent() { Recall instance = new Recall<>(new float[] {0.3f, 0.72f}, 1001L, TFloat32.class); Operand predictions = - tf.random.randomUniform(tf.constant(Shape.of(10, 3)), TFloat32.class); + tf.random.statelessRandomUniform( + tf.constant(Shape.of(10, 3)), tf.constant(new long[] {1L, 0L}), TFloat32.class); Operand labels = - tf.random.randomUniform(tf.constant(Shape.of(10, 3)), TFloat32.class); + tf.random.statelessRandomUniform( + tf.constant(Shape.of(10, 3)), tf.constant(new long[] {1L, 0L}), TFloat32.class); Op update = instance.updateState(tf, labels, predictions, null); for (int i = 0; i < 10; i++) session.run(update); diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/SensitivityAtSpecificityTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/SensitivityAtSpecificityTest.java index d18ca9813fe..179dbf2b9fc 100644 --- 
a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/SensitivityAtSpecificityTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/SensitivityAtSpecificityTest.java @@ -24,7 +24,6 @@ import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Op; import org.tensorflow.op.Ops; -import org.tensorflow.op.random.RandomUniform; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; import org.tensorflow.types.TInt64; @@ -40,11 +39,11 @@ public void testValueIsIdempotent() { SensitivityAtSpecificity instance = new SensitivityAtSpecificity<>(0.7f, 1001L, TFloat32.class); Operand predictions = - tf.random.randomUniform( - tf.constant(Shape.of(10, 3)), TFloat32.class, RandomUniform.seed(1L)); + tf.random.statelessRandomUniform( + tf.constant(Shape.of(10, 3)), tf.constant(new long[] {1L, 0L}), TFloat32.class); Operand labels = - tf.random.randomUniform( - tf.constant(Shape.of(10, 3)), TFloat32.class, RandomUniform.seed(1L)); + tf.random.statelessRandomUniform( + tf.constant(Shape.of(10, 3)), tf.constant(new long[] {1L, 0L}), TFloat32.class); labels = tf.math.mul(labels, tf.constant(2.0f)); // instance.setDebug(session.getGraphSession()); diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/SpecificityAtSensitivityTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/SpecificityAtSensitivityTest.java index 676b443cd1c..6507345bbb4 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/SpecificityAtSensitivityTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/metrics/SpecificityAtSensitivityTest.java @@ -24,7 +24,6 @@ import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Op; import org.tensorflow.op.Ops; -import org.tensorflow.op.random.RandomUniform; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; import org.tensorflow.types.TInt32; @@ -42,11 +41,11 @@ public void testValueIsIdempotent() { new SpecificityAtSensitivity<>(0.7f, 1001L, TFloat32.class); Operand predictions = - tf.random.randomUniform( - tf.constant(Shape.of(10, 3)), TFloat32.class, RandomUniform.seed(1L)); + tf.random.statelessRandomUniform( + tf.constant(Shape.of(10, 3)), tf.constant(new long[] {1L, 0L}), TFloat32.class); Operand labels = - tf.random.randomUniform( - tf.constant(Shape.of(10, 3)), TFloat32.class, RandomUniform.seed(1L)); + tf.random.statelessRandomUniform( + tf.constant(Shape.of(10, 3)), tf.constant(new long[] {1L, 0L}), TFloat32.class); // instance.setDebug(session.getGraphSession()); Op update = instance.updateState(tf, labels, predictions, null); From 54db82ba70a95ac31a62569eb25946e98a0b00d3 Mon Sep 17 00:00:00 2001 From: Jim Clarke Date: Sat, 26 Mar 2022 10:10:02 -0400 Subject: [PATCH 19/21] Add TF Python Config support to deserialize from Models saved with the TF Engine outside of Java. (#417) * Add the config Map<> activation constructors, add getConfig() and getName() for serializing the activationsto eitehr a Map<> or String name of the TF activation. Add GELU activation. Added static method to each activation to call the activation directly, like in TF Python (e.g. Elu.elu(tf, input)). Added look up tables in the Activation interface to resolve Object creation either vie activation name or activation Map * Update Activations to use an ENUM for config file lookups, add sanity checks in config CTOR to make sure the Map passed in is compatible with the Activation object that is being created. 
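To make the config support described in this commit message concrete, the following is a minimal sketch of the round trip, using only the API added in this patch (Activation.create, the per-activation config constructors, getConfig(), and the static helpers). The generic signatures shown here (Map<String, Object>, Operand<TFloat32>) are assumptions, since type parameters are elided in the quoted diff below.

import java.util.HashMap;
import java.util.Map;
import org.tensorflow.Operand;
import org.tensorflow.framework.activations.Activation;
import org.tensorflow.framework.activations.ELU;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TFloat32;

public final class ActivationConfigExample {

  public static void run(Ops tf) {
    // A config map such as one produced by a model saved with the TF Python engine.
    Map<String, Object> config = new HashMap<>();
    config.put("name", "elu");
    config.put("alpha", 2.0);

    // Generic path: resolve the activation by config (or by name) via the Activations enum.
    Activation byConfig = Activation.create(config);
    Activation byName = Activation.create("elu");

    Operand<TFloat32> x = tf.constant(new float[] {-1f, 0f, 1f});
    Operand<TFloat32> y = byConfig.call(tf, x); // alpha * (exp(x) - 1) for x < 0

    // Concrete path: the new config constructor and getConfig() round trip.
    ELU elu = new ELU(config);
    Map<String, Object> roundTrip = elu.getConfig(); // {"name"="elu", "alpha"=2.0}

    // Static helpers mirror the TF Python style, e.g. ELU.elu(tf, x, 2.0).
    Operand<TFloat32> z = ELU.elu(tf, x, 2.0);
  }
}

The enum-based lookup accepts the lower-case names produced by the TensorFlow engine, which is why "elu" resolves to the ELU entry.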
--- .../activations/AbstractActivation.java | 66 ++++++- .../framework/activations/Activation.java | 46 ++++- .../framework/activations/Activations.java | 98 +++++++++ .../tensorflow/framework/activations/ELU.java | 119 +++++++++-- .../framework/activations/Exponential.java | 68 ++++++- .../framework/activations/GELU.java | 158 +++++++++++++++ .../framework/activations/HardSigmoid.java | 81 ++++++-- .../framework/activations/Linear.java | 71 ++++++- .../framework/activations/ReLU.java | 186 +++++++++++++++--- .../framework/activations/SELU.java | 79 ++++++-- .../framework/activations/Sigmoid.java | 84 ++++++-- .../framework/activations/Softmax.java | 127 ++++++++++-- .../framework/activations/Softplus.java | 76 +++++-- .../framework/activations/Softsign.java | 76 +++++-- .../framework/activations/Swish.java | 92 +++++++-- .../framework/activations/Tanh.java | 77 ++++++-- .../org/tensorflow/framework/op/NnOps.java | 32 +++ .../org/tensorflow/framework/op/nn/GELU.java | 102 ++++++++++ .../framework/activations/ActivationTest.java | 93 +++++++++ .../framework/activations/ELUTest.java | 53 ++++- .../activations/ExponentialTest.java | 42 +++- .../framework/activations/GELUTest.java | 148 ++++++++++++++ .../activations/HardSigmoidTest.java | 42 +++- .../framework/activations/LinearTest.java | 43 +++- .../framework/activations/ReLUTest.java | 67 ++++++- .../framework/activations/SELUTest.java | 37 +++- .../framework/activations/SigmoidTest.java | 37 +++- .../framework/activations/SoftmaxTest.java | 43 +++- .../framework/activations/SoftplusTest.java | 37 +++- .../framework/activations/SoftsignTest.java | 37 +++- .../framework/activations/SwishTest.java | 37 +++- .../framework/activations/TanhTest.java | 37 +++- 32 files changed, 2151 insertions(+), 240 deletions(-) create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Activations.java create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/activations/GELU.java create mode 100644 tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/GELU.java create mode 100644 tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ActivationTest.java create mode 100644 tensorflow-framework/src/test/java/org/tensorflow/framework/activations/GELUTest.java diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/AbstractActivation.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/AbstractActivation.java index 335b8697273..0cb6bd41588 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/AbstractActivation.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/AbstractActivation.java @@ -14,11 +14,16 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; import org.tensorflow.op.Ops; -import org.tensorflow.types.family.TNumber; /** Abstract base class for Activations */ -public abstract class AbstractActivation implements Activation { +public abstract class AbstractActivation implements Activation { + protected static final String NAME_KEY = "name"; /** The TensorFlow Ops */ protected Ops tf; @@ -26,6 +31,31 @@ public abstract class AbstractActivation implements Activatio /** Creates the abstract class for an AbstractActivation */ protected AbstractActivation() 
{} + /** + * Gets a configuration map, this default implementation returns a singleton Map, with {@link + * #NAME_KEY} as the key, and the {@code name} parameter as its value; + * + * @param name the name of the Activation as known by TensorFlow engine. + * @return the configuration map + */ + protected Map getDefaultConfig(String name) { + return Collections.singletonMap(NAME_KEY, name); + } + + /** + * Gets a configuration map + * + * @return the configuration map + */ + public abstract Map getConfig(); + + /** + * Get the name of the activation as known by the TensorFlow Engine + * + * @return the name of the activation as known by the TensorFlow Engine + */ + public abstract String getName(); + /** * Gets the TensorFlow Ops * @@ -43,4 +73,36 @@ protected Ops getTF() { protected void setTF(Ops tf) { this.tf = tf; } + + /** + * Verifies that any key in keysToCheck is also in the allowedKeys set. + * + * @param keysToCheck the set with keys to check + * @param allowedKeys the set to check against. + * @throws IllegalArgumentException if there is an entry in set1 that is not in set 2. + */ + protected void checkConfigKeys(Set keysToCheck, Set allowedKeys) { + List mismatch = + keysToCheck.stream().filter(e -> !allowedKeys.contains(e)).collect(Collectors.toList()); + if (!mismatch.isEmpty()) { + throw new IllegalArgumentException( + String.format("Activation: Illegal Configuration keys: %s", mismatch)); + } + } + + /** + * Verifies that the configuration is for the same Activation class. + * + * @param config the configuration + * @throws IllegalArgumentException if the value for the name key does not match the name for the + * Activation + */ + protected void checkClassName(Map config) { + if (!config.get(NAME_KEY).equals(getName())) { + throw new IllegalArgumentException( + String.format( + "Configuration name: %s, does not match this class: %s", + config.get(NAME_KEY), getName())); + } + } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Activation.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Activation.java index f73c6678ab3..10d1f4df10f 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Activation.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Activation.java @@ -14,17 +14,48 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import java.util.Map; import org.tensorflow.Operand; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; -/** - * Interface for Activations - * - * @param the data type of the input and the result - */ +/** Interface for Activations */ @FunctionalInterface -public interface Activation { +public interface Activation { + + /** + * Creates an Activation instance based on the name as known to the TensorFlow engine. + * + * @param name the activation name + * @return the Activation + * @throws NullPointerException if name is null + * @throws IllegalArgumentException if the name is not a known ActivationType + */ + static Activation create(String name) { + Activations type = Activations.of(name); + return type.getInstance(); + } + + /** + * Creates an Activation getInstance based on a configuration as produced by TensorFlow. + * + * @param config a Map object containing the Activation's state. This Map object must contain at + * least a {@code name} key. + *

    {@code
    +   * "name" : String - this is the TensorFlow Engine's Activation name
    +   * }
    +   * }
    + * + * @return the Activation + * @throws NullPointerException if config is null, or the activation name is missing from the Map. + * @throws IllegalArgumentException if the name contained in the config map is not a known + * ActivationType + */ + static Activation create(Map config) { + String activationName = (String) config.get("name"); + Activations type = Activations.of(activationName); + return type.getInstance(config); + } /** * Gets the calculation operation for the activation. @@ -32,6 +63,7 @@ public interface Activation { * @param tf the TensorFlow Ops * @param input the input tensor * @return The operand for the activation + * @param the data type of the input and the result */ - Operand call(Ops tf, Operand input); + Operand call(Ops tf, Operand input); } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Activations.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Activations.java new file mode 100644 index 00000000000..4b1708882a8 --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Activations.java @@ -0,0 +1,98 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ +package org.tensorflow.framework.activations; + +import java.util.Map; +import java.util.function.Function; +import java.util.function.Supplier; + +/** + * The Enumerations for creating Activations based an activation name, with either an empty + * constructor or a constructor that takes a Map object that contains the Activation's state. + */ +public enum Activations { + ELU(ELU::new, ELU::new), + EXPONENTIAL(Exponential::new, Exponential::new), + GELU(GELU::new, GELU::new), + HARD_SIGMOID(HardSigmoid::new, HardSigmoid::new), + LINEAR(Linear::new, Linear::new), + RELU(ReLU::new, ReLU::new), + SELU(SELU::new, SELU::new), + SIGMOID(Sigmoid::new, Sigmoid::new), + SOFTMAX(Softmax::new, Softmax::new), + SOFTPLUS(Softplus::new, Softplus::new), + SOFTSIGN(Softsign::new, Softsign::new), + SWISH(Swish::new, Swish::new), + TANH(Tanh::new, Tanh::new); + + /** The constructor when no Config Map is available. */ + private final Supplier emptyCtor; + + /** The constructor to use with a Map object containing the Activation's state */ + private final Function, Activation> configCtor; + + /** + * Creates an ActivationType + * + * @param emptyCtor The constructor when no Config Map is available. + * @param configCtor The constructor to use with a Map object containing the Activation's state + */ + Activations( + Supplier emptyCtor, Function, Activation> configCtor) { + this.emptyCtor = emptyCtor; + this.configCtor = configCtor; + } + + /** + * Gets the ActivationType based on the TensorFlow name for the activation + * + *

    NOTE: this is similar to valueOf, but name can be either case, upper or lower. The + * TensorFlow engine produces names in lowwer case. + * + * @param name the TensorFlow name for the activation + * @return the ActivationType + */ + public static Activations of(final String name) { + return valueOf(name.toUpperCase()); + } + + /** + * Gets the activation name as known to the TensorFlow engine. + * + * @return the activation name as known to the TensorFlow engine. + */ + public String getTensorFlowName() { + return name().toLowerCase(); + } + + /** + * Gets an Activation Instance + * + * @return the new Activation Instance + */ + public Activation getInstance() { + return emptyCtor.get(); + } + + /** + * Gets an Activation Instance + * + * @param config a Map object containing the Activation's state + * @return the new Activation Instance + */ + public Activation getInstance(Map config) { + return configCtor.apply(config); + } +} diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/ELU.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/ELU.java index bd019a60df1..2145b1829f0 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/ELU.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/ELU.java @@ -16,21 +16,25 @@ import static org.tensorflow.framework.utils.CastHelper.cast; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; import org.tensorflow.Operand; import org.tensorflow.op.Ops; import org.tensorflow.types.TBool; -import org.tensorflow.types.family.TFloating; +import org.tensorflow.types.family.TNumber; /** * Exponential linear unit. * - *

    The exponential linear unit (ELU) with alpha > 0 is: + *

    The exponential linear unit (ELU) with {@code alpha > 0} is: * - *

    x if x > 0 and alpha * (exp(x) - - * 1) if x < 0. + *

    {@code x} if {@code x > 0} and {@code alpha * (exp(x) - 1)} if {@code x < 0}. * - *

    The ELU hyperparameter alpha controls the value to which an ELU saturates for - * negative net inputs. ELUs diminish the vanishing gradient effect. + *

    The ELU hyperparameter {@code alpha} controls the value to which an ELU saturates for negative + * net inputs. ELUs diminish the vanishing gradient effect. * *

    ELUs have negative values which pushes the mean of the activations closer to zero. Mean * activations that are closer to zero enable faster learning as they bring the gradient closer to @@ -40,20 +44,24 @@ * *

    Example Usage: * - *

    - *     Operand<TFloat32> input = ...;
    - *     ELU<TFloat32> elu = new ELU<>(tf, 2.0f);
    - *     Operand<TFloat32> result = elu.call(input);
    - * 
    + *
    {@code
    + * Operand input = ...;
    + * ELU elu = new ELU<>(tf, 2.0);
    + * Operand result = elu.call(input);
    + * }
    + * }
    * * @see Clevert et al, 2016, Fast and Accurate Deep * Network Learning by Exponential Linear Units (ELUs) */ -public class ELU extends AbstractActivation { +public class ELU extends AbstractActivation { + /** The activation name as known by TensorFlow */ + public static final String NAME = "elu"; + private static final Set allowedConfigKeys = + new HashSet<>(Arrays.asList(NAME_KEY, "alpha")); private static final double ALPHA_DEFAULT = 1.0; - /** A scalar, slope of negative section. */ private final double alpha; /** Creates a new ELU with alpha={@link #ALPHA_DEFAULT}. */ @@ -72,16 +80,91 @@ public ELU(double alpha) { this.alpha = alpha; } - /** {@inheritDoc} */ - @Override - public Operand call(Ops tf, Operand input) { + /** + * Creates a new ELU from a configuration Map + * + * @param config the configuration map, if the map contains an entry for {@code alpha} that value + * is used, otherwise {@link #ALPHA_DEFAULT} is used. + * @throws IllegalArgumentException if the configuration contains unsupported keys for this class + * or if the value for the name key does not match the name for the Activation + */ + public ELU(Map config) { + checkConfigKeys(config.keySet(), allowedConfigKeys); + checkClassName(config); + this.alpha = ((Number) config.getOrDefault("alpha", ALPHA_DEFAULT)).doubleValue(); + } + + /** + * Computes the Exponential linear unit. + * + *

    The exponential linear unit (ELU) with {@code alpha > 0} is: + * + *

    {@code x} if {@code x > 0} and {@code alpha * (exp(x) - 1)} if {@code x < 0}.}
    + * + *

    The ELU hyperparameter {@code alpha} controls the value to which an ELU saturates for + * negative net inputs. ELUs diminish the vanishing gradient effect. + * + *

    ELUs have negative values which pushes the mean of the activations closer to zero. Mean + * activations that are closer to zero enable faster learning as they bring the gradient closer to + * the natural gradient. ELUs saturate to a negative value when the argument gets smaller. + * Saturation means a small derivative which decreases the variation and the information that is + * propagated to the next layer. + * + *

    Example Usage: + * + *

    {@code
    +   * Operand input = ...;
    +   * Operand result = ELU.elu(tf, input, 2.0);
    +   * }
    +   * }
    + * + * @param tf the TensorFlow Ops + * @param input the input + * @param alpha scalar, slope of negative section. {@code alpha} controls the value to which an + * ELU saturates for negative net inputs. + * @param the data type for the input + * @return The exponential linear unit (ELU) activation function: {@code x} if {@code x > 0} and + * {@code alpha * (exp(x) - 1)} if {@code x < 0} . + */ + public static Operand elu(Ops tf, Operand input, double alpha) { Operand result = tf.nn.elu(input); - if (alpha == 1.0) return result; - else { + if (alpha == 1.0) { + return result; + } else { Class inputType = input.type(); Operand y = tf.math.mul(result, cast(tf, tf.constant(alpha), inputType)); Operand cond = tf.math.greater(result, cast(tf, tf.constant(0), inputType)); return tf.select(cond, result, y); } } + + /** {@inheritDoc} */ + @Override + public Map getConfig() { + Map config = new HashMap<>(); + config.put("name", ELU.NAME); + config.put("alpha", alpha); + return config; + } + + /** {@inheritDoc} */ + @Override + public Operand call(Ops tf, Operand input) { + return elu(tf, input, alpha); + } + + /** {@inheritDoc} */ + @Override + public String getName() { + return NAME; + } + + /** + * Gets the slope of negative section. + * + * @return the slope of negative section. + */ + public double getAlpha() { + return alpha; + } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Exponential.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Exponential.java index 8398ada6362..e64d0351103 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Exponential.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Exponential.java @@ -14,33 +14,87 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import java.util.Collections; +import java.util.Map; +import java.util.Set; import org.tensorflow.Operand; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TFloating; +import org.tensorflow.types.family.TNumber; /** * Exponential activation function. * *

    For example: * - *

    - *   Operand<TFloat32> input = tf.constant(
    + * 
    {@code
    + *   Operand input = tf.constant(
      *          new float[] {-3.0f,-1.0f, 0.0f,1.0f,3.0f});
    - *   Exponential<TFloat32> exp = new Exponential<>(tf);
    - *   Operand<TFloat32> result = exp.call(input);
    + *   Exponential exp = new Exponential<>(tf);
    + *   Operand result = exp.call(input);
      *   // result is [0.04978707f,  0.36787945f,  1.f,  2.7182817f, 20.085537f]
    - * 
    + * } + * }
    */ -public class Exponential extends AbstractActivation { +public class Exponential extends AbstractActivation { + /** The activation name as known by TensorFlow */ + public static final String NAME = "exponential"; + + private static final Set allowedConfigKeys = Collections.singleton(NAME_KEY); /** Creates an Exponential activation. */ public Exponential() { super(); } + /** + * Creates a new Exponential from a configuration Map + * + * @param config the configuration map, this class does not use any of the entries in the + * configuration map + * @throws IllegalArgumentException if the configuration contains unsupported keys for this class + * or if the value for the name key does not match the name for the Activation + */ + public Exponential(Map config) { + checkConfigKeys(config.keySet(), allowedConfigKeys); + checkClassName(config); + } + + /** + * Computes the Exponential activation function. + * + *

    Example Usage: + * + *

    {@code
    +   *      Operand input = ...;
    +   *      Operand result = Exponential.exponential(tf, input);
    +   * }
    +   * }
    + * + * @param tf the TensorFlow Ops + * @param input the input + * @param the data type for the input + * @return the exponential activation: {@code exp(x)}. + */ + public static Operand exponential(Ops tf, Operand input) { + return tf.math.exp(input); + } + + /** {@inheritDoc} */ + @Override + public Map getConfig() { + return super.getDefaultConfig(NAME); + } + /** {@inheritDoc} */ @Override - public Operand call(Ops tf, Operand input) { + public Operand call(Ops tf, Operand input) { return tf.math.exp(input); } + + /** {@inheritDoc} */ + @Override + public String getName() { + return NAME; + } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/GELU.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/GELU.java new file mode 100644 index 00000000000..4eb60ed2978 --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/GELU.java @@ -0,0 +1,158 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ +package org.tensorflow.framework.activations; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import org.tensorflow.Operand; +import org.tensorflow.framework.op.FrameworkOps; +import org.tensorflow.op.Ops; +import org.tensorflow.types.family.TNumber; + +/** + * Applies the Gaussian error linear unit (GELU) activation function. + * + *

    Gaussian error linear unit (GELU) computes {@code x * P(X <= x)}, where {@code P(X) ~ N(0, + * 1)}. The (GELU) nonlinearity weights inputs by their value, rather than gates inputs by their + * sign as in ReLU. + * + *

    For example: + * + *

    {@code
    + * x = tf.constant(new float[] {-3.0f, -1.0f, 0.0f, 1.0f, 3.f});
    + * GELU gelu = new GELU();
    + * y = gelu.call(tf, x);
    + * // output [-0.00404951f, -0.15865529f, 0.f , 0.8413447f , 2.9959507f ]
    + *
    + * }
    + * }
    + */ +public class GELU extends AbstractActivation { + /** The activation name as known by TensorFlow */ + public static final String NAME = "gelu"; + + private static final Set allowedConfigKeys = + new HashSet<>(Arrays.asList(GELU.NAME_KEY, "approximate")); + private final boolean approximate; + + /** Creates a Gaussian error linear unit (GELU) activation. */ + public GELU() { + this(false); + } + + /** + * Creates a Gaussian error linear unit (GELU) activation. + * + * @param approximate whether to enable approximation. + */ + public GELU(boolean approximate) { + super(); + this.approximate = approximate; + } + + /** + * Creates a GELU activation from a config map. + * + * @param config the configuration map, if the map contains an entry for {@code approximate} that + * value is used, otherwise false is used. + * @throws IllegalArgumentException if the configuration contains unsupported keys for this class + * or if the value for the name key does not match the name for the Activation + */ + public GELU(Map config) { + checkConfigKeys(config.keySet(), allowedConfigKeys); + checkClassName(config); + this.approximate = (Boolean) config.getOrDefault("approximate", false); + } + + /** + * Applies the Gaussian error linear unit (GELU) activation function with approximate set to + * false. + * + *

    Example Usage: + * + *

    {@code
    +   * Operand input = ...;
+   * Operand result = GELU.gelu(tf, input);
    +   * }
+ * + * @param tf the TensorFlow Ops + * @param input the input + * @param <T> the data type for the input + * @return the Gaussian error linear unit (GELU) activation. + */ + public static <T extends TNumber> Operand<T> gelu(Ops tf, Operand<T> input) { + return gelu(tf, input, false); + } + /** + * Applies the Gaussian error linear unit (GELU) activation function. + * + *

    Example Usage: + * + *

    {@code
    +   * Operand input = ...;
+   * Operand result = GELU.gelu(tf, input, true);
    +   * }
+ * + * @param tf the TensorFlow Ops + * @param input the input + * @param approximate whether to enable approximation. + * @param <T> the data type for the input + * @return the Gaussian error linear unit (GELU) activation. + */ + public static <T extends TNumber> Operand<T> gelu(Ops tf, Operand<T> input, boolean approximate) { + FrameworkOps fops = FrameworkOps.create(tf); + return fops.nn.gelu(input, approximate); + } + + /** + * Gets a configuration map with entries + * + *
      + *
    • {@code approximate} and value set with {@link #approximate}. + *
    + * + * @return config the configuration map + */ + @Override + public Map getConfig() { + Map config = new HashMap<>(); + config.put("name", NAME); + config.put("approximate", approximate); + return config; + } + /** {@inheritDoc} */ + @Override + public Operand call(Ops tf, Operand input) { + return gelu(tf, input, approximate); + } + + /** {@inheritDoc} */ + @Override + public String getName() { + return NAME; + } + + /** + * Gets the flag whether to enable approximation. + * + * @return the flag whether to enable approximation. + */ + public boolean isApproximate() { + return approximate; + } +} diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/HardSigmoid.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/HardSigmoid.java index 4365e0cd14a..0221059b72e 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/HardSigmoid.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/HardSigmoid.java @@ -16,9 +16,12 @@ import static org.tensorflow.framework.utils.CastHelper.cast; +import java.util.Collections; +import java.util.Map; +import java.util.Set; import org.tensorflow.Operand; import org.tensorflow.op.Ops; -import org.tensorflow.types.family.TFloating; +import org.tensorflow.types.family.TNumber; /** * Hard sigmoid activation. @@ -28,31 +31,61 @@ *

    Defined as: * *

      - *
    • if x < -2.5: return 0 - *
    • if x > 2.5: return 1 - *
    • if -2.5 <= x <= 2.5: return 0.2 * x + 0.5 + *
    • {@code if x < -2.5: return 0} + *
    • {@code if x > 2.5: return 1} + *
    • {@code if -2.5 <= x <= 2.5: return 0.2 * x + 0.5} *
    * *
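As a quick stand-alone illustration of the piecewise definition above (not part of the patch; the helper name is hypothetical):

  static double hardSigmoid(double x) {
    if (x < -2.5) return 0.0;
    if (x > 2.5) return 1.0;
    return 0.2 * x + 0.5; // e.g. hardSigmoid(1.0) == 0.7, matching the example below
  }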

    For example: * - *

    - *     Operand<TFloat32> input = tf.constant(
    - *              new float[] {-3.0f,-1.0f, 0.0f,1.0f,3.0f});
    - *     HardSigmoid<TFloat32> hardSigmoid = new HardSigmoid<>(tf);
    - *     Operand<TFloat32> result = hardSigmoid.call(input);
    - *     // result is [0.f , 0.3f, 0.5f, 0.7f, 1.f]
    - * 
    + *
    {@code
    + * Operand input = tf.constant(
    + *          new float[] {-3.0f,-1.0f, 0.0f,1.0f,3.0f});
+ * HardSigmoid hardSigmoid = new HardSigmoid();
+ * Operand result = hardSigmoid.call(tf, input);
    + * // result is [0.f , 0.3f, 0.5f, 0.7f, 1.f]
    + * }
    */ -public class HardSigmoid extends AbstractActivation { +public class HardSigmoid extends AbstractActivation { + /** The activation name as known by TensorFlow */ + public static final String NAME = "hard_sigmoid"; + + private static final Set allowedConfigKeys = Collections.singleton(NAME_KEY); /** Creates Hard sigmoid activation. */ public HardSigmoid() { super(); } - /** {@inheritDoc} */ - @Override - public Operand call(Ops tf, Operand input) { + /** + * Creates a new Exponential from a configuration Map + * + * @param config the configuration map, this class does not use any of the entries in the + * configuration map + * @throws IllegalArgumentException if the configuration contains unsupported keys for this class + * or if the value for the name key does not match the name for the Activation + */ + public HardSigmoid(Map config) { + checkConfigKeys(config.keySet(), allowedConfigKeys); + checkClassName(config); + } + + /** + * Computes the hard sigmoid activation function. + * + *

    Example Usage: + * + *

    {@code
    +   * Operand input = ...;
    +   * Operand result = HardSigmoid.hardSigmoid(tf, input);
    +   * }
    + * + * @param tf the TensorFlow Ops + * @param input the input + * @param the data type for the input + * @return the exponential activation: {@code exp(x)}. + */ + public static Operand hardSigmoid(Ops tf, Operand input) { Class inputType = input.type(); Operand point2 = cast(tf, tf.constant(0.2), inputType); Operand point5 = cast(tf, tf.constant(0.5), inputType); @@ -61,4 +94,22 @@ public Operand call(Ops tf, Operand input) { return tf.clipByValue( x, cast(tf, tf.constant(0), inputType), cast(tf, tf.constant(1), inputType)); } + + /** {@inheritDoc} */ + @Override + public Map getConfig() { + return getDefaultConfig(getName()); + } + + /** {@inheritDoc} */ + @Override + public Operand call(Ops tf, Operand input) { + return hardSigmoid(tf, input); + } + + /** {@inheritDoc} */ + @Override + public String getName() { + return NAME; + } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Linear.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Linear.java index d1a5eede616..37de37e5120 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Linear.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Linear.java @@ -14,6 +14,9 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import java.util.Collections; +import java.util.Map; +import java.util.Set; import org.tensorflow.Operand; import org.tensorflow.op.Ops; import org.tensorflow.types.family.TNumber; @@ -25,24 +28,72 @@ * *

    For example: * - *

    - *    Operand<TFloat32> input = tf.constant(
    - *              new float[] {-3.0f,-1.0f, 0.0f,1.0f,3.0f});
    - *    Linear<TFloat32> linear = new Linear<>(tf);
    - *    Operand<TFloat32> result = linear.call(input);
    - *    // result is [-3.0f,-1.0f, 0.0f,1.0f,3.0f]
    - * 
    + *
    {@code
    + * Operand input = tf.constant(
    + *           new float[] {-3.0f,-1.0f, 0.0f,1.0f,3.0f});
+ * Linear linear = new Linear();
+ * Operand result = linear.call(tf, input);
    + * // result is [-3.0f,-1.0f, 0.0f,1.0f,3.0f]
    + * }
    */ -public class Linear extends AbstractActivation { +public class Linear extends AbstractActivation { + /** The activation name as known by TensorFlow */ + public static final String NAME = "linear"; + + private static final Set allowedConfigKeys = Collections.singleton(NAME_KEY); /** Creates a linear activation. */ public Linear() { super(); } + /** + * Creates a new Exponential from a configuration Map + * + * @param config the configuration map, this class does not use any of the entries in the + * configuration map + * @throws IllegalArgumentException if the configuration contains unsupported keys for this class + * or if the value for the name key does not match the name for the Activation + */ + public Linear(Map config) { + checkConfigKeys(config.keySet(), allowedConfigKeys); + checkClassName(config); + } + + /** + * Computes the linear activation function (pass-through). + * + *

    Example Usage: + * + *

    {@code
    +   * Operand input = ...;
    +   * Operand result = Linear.linear(tf, input);
    +   * }
    + * + * @param tf the TensorFlow Ops + * @param input the input + * @param the data type for the input + * @return the input, unmodified. + */ + @SuppressWarnings("unused") + public static Operand linear(Ops tf, Operand input) { + return input; + } + /** {@inheritDoc} */ @Override - public Operand call(Ops tf, Operand input) { - return input; + public Map getConfig() { + return getDefaultConfig(getName()); + } + + /** {@inheritDoc} */ + @Override + public Operand call(Ops tf, Operand input) { + return linear(tf, input); + } + /** {@inheritDoc} */ + @Override + public String getName() { + return NAME; } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/ReLU.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/ReLU.java index 44dd3bc3b46..6ce1f56f53a 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/ReLU.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/ReLU.java @@ -16,6 +16,11 @@ import static org.tensorflow.framework.utils.CastHelper.cast; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; import org.tensorflow.Operand; import org.tensorflow.op.Ops; import org.tensorflow.op.math.Greater; @@ -25,7 +30,7 @@ /** * Rectified Linear Unit(ReLU) activation. * - *

    With default values, this returns the standard ReLU activation: max(x, 0), the + *

    With default values, this returns the standard ReLU activation: {@code max(x, 0)}, the * element-wise maximum of 0 and the input tensor. * *

    Modifying default parameters allows you to use non-zero thresholds, change the max value of @@ -33,39 +38,40 @@ * *

    For example: * - *

    - *     Operand<TFloat32> input = tf.constant(
    - *              new float[] {-10f, -5f, 0.0f, 5f, 10f});
    + * 
    {@code
    + * Operand input = tf.constant(
    + *          new float[] {-10f, -5f, 0.0f, 5f, 10f});
      *
    - *     // With default parameters
    - *     ReLU<TFloat32> relu = new ReLU<>(tf);
    - *     Operand<TFloat32> result = relu.call(input);
    - *     // result is [0.f,  0.f,  0.f,  5.f, 10.f]
    + * // With default parameters
+ * ReLU relu = new ReLU();
+ * Operand result = relu.call(tf, input);
    + * // result is [0.f,  0.f,  0.f,  5.f, 10.f]
      *
    - *     // With alpha = 0.5
    - *     relu = new ReLU<>(tf, 0.5f, ReLU.MAX_VALUE_DEFAULT, ReLU.THRESHOLD_DEFAULT);
    - *     result = relu.call(input);
    - *     // result is [-5.f , -2.5f,  0.f ,  5.f , 10.f]
    + * // With alpha = 0.5
+ * relu = new ReLU(0.5f, ReLU.MAX_VALUE_DEFAULT, ReLU.THRESHOLD_DEFAULT);
+ * result = relu.call(tf, input);
    + * // result is [-5.f , -2.5f,  0.f ,  5.f , 10.f]
      *
    - *     // With maxValue = 5
    - *     relu = new ReLU<>(tf, ReLU.ALPHA_DEFAULT, 5f, ReLU.THRESHOLD_DEFAULT);
    - *     result = relu.call(input);
    - *     // result is [0.f, 0.f, 0.f, 5.f, 5.f]
    + * // With maxValue = 5
+ * relu = new ReLU(ReLU.ALPHA_DEFAULT, 5f, ReLU.THRESHOLD_DEFAULT);
+ * result = relu.call(tf, input);
    + * // result is [0.f, 0.f, 0.f, 5.f, 5.f]
      *
    - *     // With threshold = 5
    - *     relu = new ReLU<>(tf, ReLU.ALPHA_DEFAULT, ReLU.MAX_VALUE_DEFAULT, 5f);
    - *     result = relu.call(input);
    - *     // result is [-0.f, -0.f,  0.f,  0.f, 10.f]
    - * 
- * - * @param <T> the data type of the result + * // With threshold = 5 + * relu = new ReLU(ReLU.ALPHA_DEFAULT, ReLU.MAX_VALUE_DEFAULT, 5f); + * result = relu.call(tf, input); + * // result is [-0.f, -0.f,  0.f,  0.f, 10.f] + * }
    */ -public class ReLU extends AbstractActivation { +public class ReLU extends AbstractActivation { + /** The activation name as known by TensorFlow */ + public static final String NAME = "relu"; public static final float ALPHA_DEFAULT = 0.0f; public static final float MAX_VALUE_DEFAULT = Float.NaN; public static final float THRESHOLD_DEFAULT = 0.0f; - + private static final Set allowedConfigKeys = + new HashSet<>(Arrays.asList(ReLU.NAME_KEY, "alpha", "max_value", "threshold")); private final float alpha; private final float maxValue; private final float threshold; @@ -93,9 +99,70 @@ public ReLU(float alpha, float maxValue, float threshold) { this.threshold = threshold; } - /** {@inheritDoc} */ - @Override - public Operand call(Ops tf, Operand input) { + /** + * Creates a ReLU activation from a config map. + * + * @param config the configuration map, + *
      + *
    • if the map contains an entry for {@code alpha} that value is used, otherwise {@link + * #ALPHA_DEFAULT} is used. + *
    • if the map contains an entry for {@code max_value} that value is used, otherwise + * {@link #MAX_VALUE_DEFAULT} is used. + *
    • if the map contains an entry for {@code threshold} that value is used, otherwise + * {@link #THRESHOLD_DEFAULT} is used. + *
    + * + * @throws IllegalArgumentException if the configuration contains unsupported keys for this class + * or if the value for the name key does not match the name for the Activation + */ + public ReLU(Map config) { + checkConfigKeys(config.keySet(), allowedConfigKeys); + checkClassName(config); + this.alpha = ((Number) config.getOrDefault("alpha", ALPHA_DEFAULT)).floatValue(); + this.maxValue = ((Number) config.getOrDefault("max_value", MAX_VALUE_DEFAULT)).floatValue(); + this.threshold = ((Number) config.getOrDefault("threshold", THRESHOLD_DEFAULT)).floatValue(); + } + + /** + * Applies the rectified linear unit activation function with default values. + * + *

    Example Usage: + * + *

    {@code
    +   * Operand input = ...;
    +   * Operand result = ReLU.relu(tf, input);
    +   * }
+ * + * @param tf the TensorFlow Ops + * @param input the input + * @param <T> the data type for the input + * @return the rectified linear unit (ReLU) activation + */ + public static <T extends TNumber> Operand<T> relu(Ops tf, Operand<T> input) { + return relu(tf, input, ALPHA_DEFAULT, MAX_VALUE_DEFAULT, THRESHOLD_DEFAULT); + } + + /** + * Applies the rectified linear unit activation function. + * + *

    Example Usage: + * + *

    {@code
    +   * Operand input = ...;
    +   * Operand result = ReLU.relu(tf, input);
    +   * }
+ * + * @param tf the TensorFlow Ops + * @param input the input + * @param alpha governs the slope for values lower than the threshold. + * @param maxValue sets the saturation threshold (the largest value the function will return). + * @param threshold the threshold value of the activation function below which values will be + * damped or set to zero. + * @param <T> the data type for the input + * @return the rectified linear unit (ReLU) activation + */ + public static <T extends TNumber> Operand<T> relu( + Ops tf, Operand<T> input, float alpha, float maxValue, float threshold) { Class<T> inputType = input.type(); boolean clipMax = !Float.isNaN(maxValue); @@ -137,4 +204,67 @@ public Operand call(Ops tf, Operand input) { } return lInput; } + + /** + * Gets a configuration map with entries + * + *
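For reference, a scalar sketch (illustrative; not part of the patch; helper name hypothetical) of how {@code alpha}, {@code maxValue} and {@code threshold} interact; it mirrors the example outputs in the class javadoc above, up to the sign of zero:

  static double relu(double x, double alpha, double maxValue, double threshold) {
    // values above the threshold pass through; values at or below it are scaled by alpha
    double y = x > threshold ? x : alpha * (x - threshold);
    // when maxValue is set (not NaN), the output saturates at maxValue
    return Double.isNaN(maxValue) ? y : Math.min(y, maxValue);
  }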
      + *
    • {@code alpha} and value set with {@link #alpha}. + *
    • {@code max_value} and value set with {@link #maxValue}. + *
    • {@code threshold} and value set with {@link #threshold}. + *
    + * + * @return config the configuration map + */ + @Override + public Map getConfig() { + Map config = new HashMap<>(); + config.put("name", NAME); + config.put("alpha", alpha); + config.put("max_value", maxValue); + config.put("threshold", threshold); + return config; + } + + /** {@inheritDoc} */ + @Override + public Operand call(Ops tf, Operand input) { + return relu(tf, input, alpha, maxValue, threshold); + } + + /** {@inheritDoc} */ + @Override + public String getName() { + return NAME; + } + + /** + * Gets the value that governs the slope for values lower than the threshold. + * + * @return the value that governs the slope for values lower than the threshold. + */ + public float getAlpha() { + return alpha; + } + + /** + * Gets the saturation threshold (the largest value the function will return). + * + * @return the saturation threshold (the largest value the function will return). public float + * getMaxValue() { return maxValue; } + *

    /** Gets the threshold value of the activation function below which values will be + * damped or set to zero. + */ + public float getThreshold() { + return threshold; + } + + /** + * Gets the saturation threshold (the largest value the function will return). + * + * @return the saturation threshold (the largest value the function will return). + */ + public float getMaxValue() { + return maxValue; + } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/SELU.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/SELU.java index a28052486e5..9d0b45e8e18 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/SELU.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/SELU.java @@ -14,9 +14,12 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import java.util.Collections; +import java.util.Map; +import java.util.Set; import org.tensorflow.Operand; import org.tensorflow.op.Ops; -import org.tensorflow.types.family.TFloating; +import org.tensorflow.types.family.TNumber; /** * Scaled Exponential Linear Unit (SELU). @@ -24,37 +27,83 @@ *

    The Scaled Exponential Linear Unit (SELU) activation function is defined as: * *

      - *
    • if x > 0: return scale * x - *
    • if x < 0: return scale * alpha * (exp(x) - 1) + *
    • {@code if x > 0: return scale * x} + *
    • {@code if x < 0: return scale * alpha * (exp(x) - 1)} *
    * - *

    where alpha and scale are pre-defined constants ( - * alpha=1.67326324 and scale=1.05070098). + *

    where {@code alpha} and {@code scale} are pre-defined constants ({@code alpha=1.67326324} and + * {@code scale=1.05070098}). * - *

    Basically, the SELU activation function multiplies scale (> 1) with the output - * of the elu function to ensure a slope larger than one for positive inputs. + *

    Basically, the SELU activation function multiplies {@code scale} (> 1) with the output of the + * elu function to ensure a slope larger than one for positive inputs. * - *

    The values of alpha and scale are chosen so that the mean and - * variance of the inputs are preserved between two consecutive layers as long as the weights are - * initialized correctly (see {@link org.tensorflow.framework.initializers.LeCun} with Normal - * Distribution) and the number of input units is "large enough" + *

    The values of {@code alpha} and {@code scale} are chosen so that the mean and variance of the + * inputs are preserved between two consecutive layers as long as the weights are initialized + * correctly (see {@link org.tensorflow.framework.initializers.LeCun} with Normal Distribution) and + * the number of input units is "large enough" * *
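A stand-alone scalar sketch of the SELU definition above (illustrative; not part of the patch; helper name hypothetical), using the quoted constants:

  static double selu(double x) {
    final double alpha = 1.67326324;
    final double scale = 1.05070098;
    return x > 0 ? scale * x : scale * alpha * (Math.exp(x) - 1.0);
  }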

    Notes: To be used together with the {@link * org.tensorflow.framework.initializers.LeCun} initializer with Normal Distribution. * - * @param the data type of the activation * @see Klambauer et al., 2017 */ -public class SELU extends AbstractActivation { +public class SELU extends AbstractActivation { + /** The activation name as known by TensorFlow */ + public static final String NAME = "selu"; + + private static final Set allowedConfigKeys = Collections.singleton(NAME_KEY); /** Creates a Scaled Exponential Linear Unit (SELU) activation. */ public SELU() { super(); } + /** + * Creates a new Exponential from a configuration Map + * + * @param config the configuration map, this class does not use any of the entries in the + * configuration map + * @throws IllegalArgumentException if the configuration contains unsupported keys for this class + * or if the value for the name key does not match the name for the Activation + */ + public SELU(Map config) { + checkConfigKeys(config.keySet(), allowedConfigKeys); + checkClassName(config); + } + + /** + * Applies Scaled Exponential Linear Unit (SELU) activation function + * + *

    Example Usage: + * + *

    {@code
    +   * Operand input = ...;
    +   * Operand result = SELU.selu(tf, input);
    +   * }
    + * + * @param tf the TensorFlow Ops + * @param input the input + * @param the data type for the input + * @return the input, unmodified. + */ + public static Operand selu(Ops tf, Operand input) { + return tf.nn.selu(input); + } + /** {@inheritDoc} */ @Override - public Operand call(Ops tf, Operand input) { - return tf.nn.selu(input); + public Operand call(Ops tf, Operand input) { + return selu(tf, input); + } + + /** {@inheritDoc} */ + public Map getConfig() { + return getDefaultConfig(getName()); + } + + /** {@inheritDoc} */ + @Override + public String getName() { + return NAME; } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Sigmoid.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Sigmoid.java index 02b2daae4d6..e77f148a233 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Sigmoid.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Sigmoid.java @@ -14,43 +14,91 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import java.util.Collections; +import java.util.Map; +import java.util.Set; import org.tensorflow.Operand; import org.tensorflow.op.Ops; -import org.tensorflow.types.family.TFloating; +import org.tensorflow.types.family.TNumber; /** - * Sigmoid activation. sigmoid(x) = 1 / (1 + exp(-x)). + * Sigmoid activation. {@code sigmoid(x) = 1 / (1 + exp(-x))}. * - *

    Applies the sigmoid activation function. For small values (<-5), sigmoid - * returns a value close to zero, and for large values (>5) the result of the function gets close - * to 1. + *

    Applies the sigmoid activation function. For small values {@code (<-5)}, {@code sigmoid} + * returns a value close to zero, and for large values (>5) the result of the function gets close to + * 1. * *

    Sigmoid is equivalent to a 2-element Softmax, where the second element is assumed to be zero. * The sigmoid function always returns a value between 0 and 1. * *

    For example: * - *

    - *     Operand<TFloat32> input = tf.constant(
    - *              new float[] {-20f, -1.0f, 0.0f, 1.0f, 20f});
    - *     Sigmoid<TFloat32> sigmoid = new Sigmoid<>(tf);
    - *     Operand<TFloat32> result = sigmoid.call(input);
    - *     // result is [2.0611537e-09f, 2.6894143e-01f,
    - *     //                 5.0000000e-01f,7.3105860e-01f, 1.f]
    - * 
    - * - * @param the data type of the activation + *
    {@code
    + * Operand input = tf.constant(
    + *          new float[] {-20f, -1.0f, 0.0f, 1.0f, 20f});
+ * Sigmoid sigmoid = new Sigmoid();
+ * Operand result = sigmoid.call(tf, input);
    + * // result is [2.0611537e-09f, 2.6894143e-01f,
    + * //                 5.0000000e-01f,7.3105860e-01f, 1.f]
    + * }
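To illustrate the 2-element-softmax remark above (illustrative; not part of the patch; method names hypothetical): sigmoid(x) equals the first component of a softmax over the pair {x, 0}.

  static double sigmoid(double x) {
    return 1.0 / (1.0 + Math.exp(-x));
  }

  static double softmaxOverXAndZero(double x) {
    double ex = Math.exp(x);
    return ex / (ex + Math.exp(0.0)); // same value as sigmoid(x)
  }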
    */ -public class Sigmoid extends AbstractActivation { +public class Sigmoid extends AbstractActivation { + /** The activation name as known by TensorFlow */ + public static final String NAME = "sigmoid"; + private static final Set allowedConfigKeys = Collections.singleton(NAME_KEY); /** Creates a Sigmoid activation. */ public Sigmoid() { super(); } + /** + * Creates a new Exponential from a configuration Map + * + * @param config the configuration map, this class does not use any of the entries in the + * configuration map + * @throws IllegalArgumentException if the configuration contains unsupported keys for this class + * or if the value for the name key does not match the name for the Activation + */ + public Sigmoid(Map config) { + checkConfigKeys(config.keySet(), allowedConfigKeys); + checkClassName(config); + } + + /** + * Applies the Sigmoid activation function, {@code sigmoid(x) = 1 / (1 + exp(-x))}. + * + *

    Example Usage: + * + *

    {@code
    +   * Operand input = ...;
    +   * Operand result = Sigmoid.sigmoid(tf, input);
    +   * }
    + * + * @param tf the TensorFlow Ops + * @param input the input + * @param the data type for the input + * @return the input, unmodified. + */ + public static Operand sigmoid(Ops tf, Operand input) { + return tf.math.sigmoid(input); + } + /** {@inheritDoc} */ @Override - public Operand call(Ops tf, Operand input) { - return tf.math.sigmoid(input); + public Operand call(Ops tf, Operand input) { + return sigmoid(tf, input); + } + + /** {@inheritDoc} */ + @Override + public Map getConfig() { + return getDefaultConfig(getName()); + } + + /** {@inheritDoc} */ + @Override + public String getName() { + return NAME; } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Softmax.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Softmax.java index 3aa67a179ad..5d27fce5194 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Softmax.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Softmax.java @@ -14,32 +14,40 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; import org.tensorflow.Operand; import org.tensorflow.ndarray.Shape; import org.tensorflow.op.Ops; import org.tensorflow.op.core.ReduceMax; import org.tensorflow.op.core.ReduceSum; -import org.tensorflow.types.family.TFloating; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.family.TNumber; /** * Softmax converts a real vector to a vector of categorical probabilities. * *

    The elements of the output vector are in range (0, 1) and sum to 1. * - *

    Each vector is handled independently. The axis argument sets which axis of the - * input the function is applied along. + *

    Each vector is handled independently. The {@code axis} argument sets which axis of the input + * the function is applied along. * *

    Softmax is often used as the activation for the last layer of a classification network because * the result could be interpreted as a probability distribution. * - *

    The softmax of each vector x is computed as: exp(x) / tf.sum(exp(x)). + *

    The softmax of each vector x is computed as: {@code exp(x) / tf.sum(exp(x))}. * *

    The input values in are the log-odds of the resulting probability. - * - * @param the data type of the activation */ -public class Softmax extends AbstractActivation { +public class Softmax extends AbstractActivation { + /** The activation name as known by TensorFlow */ + public static final String NAME = "softmax"; + private static final Set allowedConfigKeys = + new HashSet<>(Arrays.asList(Softmax.NAME_KEY, "axis")); private static final int AXIS_DEFAULT = -1; private final int axis; @@ -62,19 +70,112 @@ public Softmax(int axis) { this.axis = axis; } - /** {@inheritDoc} */ - @Override - public Operand call(Ops tf, Operand input) { + /** + * Creates a Softmax activation from a config map. + * + * @param config the configuration map, if the map contains an entry for {@code axis} that value + * is used, otherwise {@link #AXIS_DEFAULT} is used. + * @throws IllegalArgumentException if the configuration contains unsupported keys for this class + * or if the value for the name key does not match the name for the Activation + */ + public Softmax(Map config) { + checkConfigKeys(config.keySet(), allowedConfigKeys); + checkClassName(config); + this.axis = (Integer) config.getOrDefault("axis", AXIS_DEFAULT); + } + + /** + * Converts a vector of values to a probability distribution along the last axis. + * + *

    The elements of the output vector are in range (0, 1) and sum to 1. + * + *

    Each vector is handled independently. The {@code axis} argument sets which axis of the input + * the function is applied along. + * + *

    Softmax is often used as the activation for the last layer of a classification network + * because the result could be interpreted as a probability distribution. + * + *

The softmax of each vector x is computed as {@code exp(x) / tf.reduce_sum(exp(x))}. The + * input values are the log-odds of the resulting probability. + * + * @param tf the TensorFlow Ops + * @param input the input + * @param <T> the data type of the input + * @return the output of the softmax transformation (all values are non-negative and sum to 1). + */ + public static <T extends TNumber> Operand<T> softmax(Ops tf, Operand<T> input) { + return softmax(tf, input, tf.constant(AXIS_DEFAULT)); + } + + /** + * Converts a vector of values to a probability distribution. + * + *

    The elements of the output vector are in range (0, 1) and sum to 1. + * + *

    Each vector is handled independently. The {@code axis} argument sets which axis of the input + * the function is applied along. + * + *

    Softmax is often used as the activation for the last layer of a classification network + * because the result could be interpreted as a probability distribution. + * + *

The softmax of each vector x is computed as {@code exp(x) / tf.reduce_sum(exp(x))}. The + * input values are the log-odds of the resulting probability. + * + * @param tf the TensorFlow Ops + * @param input the input + * @param axis Integer, axis along which the softmax normalization is applied. + * @param <T> the data type of the input + * @return the output of the softmax transformation (all values are non-negative and sum to 1). + */ + public static <T extends TNumber> Operand<T> softmax( + Ops tf, Operand<T> input, Operand<TInt32> axis) { Shape shape = input.shape(); int numDimensions = shape.numDimensions(); if (numDimensions == 2) { return tf.nn.softmax(input); } else { Operand<T> e = - tf.math.exp( - tf.math.sub(input, tf.reduceMax(input, tf.constant(axis), ReduceMax.keepDims(true)))); - Operand<T> s = tf.reduceSum(e, tf.constant(axis), ReduceSum.keepDims(true)); + tf.math.exp(tf.math.sub(input, tf.reduceMax(input, axis, ReduceMax.keepDims(true)))); + Operand<T> s = tf.reduceSum(e, axis, ReduceSum.keepDims(true)); return tf.math.div(e, s); } } + + /** + * Gets a configuration map with entries + * + *
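A stand-alone sketch of the same computation on a single {@code double[]} vector (illustrative; not part of the patch; helper name hypothetical), including the subtract-the-maximum step used above for numerical stability:

  static double[] softmax(double[] x) {
    double max = Double.NEGATIVE_INFINITY;
    for (double v : x) max = Math.max(max, v);
    double[] e = new double[x.length];
    double sum = 0.0;
    for (int i = 0; i < x.length; i++) {
      e[i] = Math.exp(x[i] - max); // exp(x - max(x)) avoids overflow; normalization cancels the shift
      sum += e[i];
    }
    for (int i = 0; i < e.length; i++) {
      e[i] /= sum;
    }
    return e; // non-negative values that sum to 1
  }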

      + *
    • {@code axis} and value set with {@link #axis}. + *
    + * + * @return config the configuration map + */ + @Override + public Map getConfig() { + Map config = new HashMap<>(); + config.put("name", NAME); + config.put("axis", axis); + return config; + } + + /** {@inheritDoc} */ + @Override + public Operand call(Ops tf, Operand input) { + return softmax(tf, input, tf.constant(axis)); + } + + /** {@inheritDoc} */ + @Override + public String getName() { + return NAME; + } + + /** + * Gets the axis along which the softmax normalization is applied. + * + * @return the axis along which the softmax normalization is applied. + */ + public int getAxis() { + return axis; + } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Softplus.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Softplus.java index 8533de7852c..6e6c85eccdb 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Softplus.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Softplus.java @@ -14,34 +14,84 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import java.util.Collections; +import java.util.Map; +import java.util.Set; import org.tensorflow.Operand; import org.tensorflow.op.Ops; -import org.tensorflow.types.family.TFloating; +import org.tensorflow.types.family.TNumber; /** - * Softplus activation function, softplus(x) = log(exp(x) + 1). + * Softplus activation function, {@code softplus(x) = log(exp(x) + 1)}. * *

    Example Usage: * - *

    - *     Operand<TFloat32> input = tf.constant(
    - *              new float[] {-20f, -1.0f, 0.0f, 1.0f, 20f});
    - *     Softplus<TFloat32> softplus = new Softplus<>(tf);
    - *     Operand<TFloat32> result = softplus.call(input);
    - *     // result is [2.0611537e-09f, 3.1326166e-01f, 6.9314718e-01f,
    - *     //                 1.3132616e+00f, 2.0000000e+01f]
    - * 
    + *
    {@code
    + * Operand input = tf.constant(
    + *          new float[] {-20f, -1.0f, 0.0f, 1.0f, 20f});
+ * Softplus softplus = new Softplus();
+ * Operand result = softplus.call(tf, input);
    + * // result is [2.0611537e-09f, 3.1326166e-01f, 6.9314718e-01f,
    + * //                 1.3132616e+00f, 2.0000000e+01f]
    + * }
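A stand-alone sketch (illustrative; not part of the patch; helper name hypothetical) of {@code softplus(x) = log(exp(x) + 1)} in a numerically safe form, which explains the example values above (softplus(-20) is about 2.06e-9 and softplus(20) is about 20):

  static double softplus(double x) {
    // equivalent to log(exp(x) + 1) but does not overflow for large positive x
    return Math.max(x, 0.0) + Math.log1p(Math.exp(-Math.abs(x)));
  }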
    */ -public class Softplus extends AbstractActivation { +public class Softplus extends AbstractActivation { + /** The activation name as known by TensorFlow */ + public static final String NAME = "softplus"; + private static final Set allowedConfigKeys = Collections.singleton(NAME_KEY); /** Creates a Softplus activation function. */ public Softplus() { super(); } + /** + * Creates a new Softplus from a configuration Map + * + * @param config the configuration map, this class does not use any of the entries in the + * configuration map + * @throws IllegalArgumentException if the configuration contains unsupported keys for this class + * or if the value for the name key does not match the name for the Activation + */ + public Softplus(Map config) { + checkConfigKeys(config.keySet(), allowedConfigKeys); + checkClassName(config); + } + + /** + * Applies the Softplus activation function, {@code softplus(x) = log(exp(x) + 1)}. + * + *

    Example Usage: + * + *

    {@code
    +   * Operand input = ...;
    +   * Operand result = Softplus.softplus(tf, input);
    +   * }
    + * + * @param tf the TensorFlow Ops + * @param input the input + * @param the data type for the input + * @return the Softplus activation, {@code softplus(x) = log(exp(x) + 1)}. + */ + public static Operand softplus(Ops tf, Operand input) { + return tf.math.softplus(input); + } + /** {@inheritDoc} */ @Override - public Operand call(Ops tf, Operand input) { - return tf.math.softplus(input); + public Operand call(Ops tf, Operand input) { + return softplus(tf, input); + } + + /** {@inheritDoc} */ + @Override + public Map getConfig() { + return getDefaultConfig(getName()); + } + + /** {@inheritDoc} */ + @Override + public String getName() { + return NAME; } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Softsign.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Softsign.java index 249fa6077cd..75df51409f0 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Softsign.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Softsign.java @@ -14,35 +14,83 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import java.util.Collections; +import java.util.Map; +import java.util.Set; import org.tensorflow.Operand; import org.tensorflow.op.Ops; -import org.tensorflow.types.family.TFloating; +import org.tensorflow.types.family.TNumber; /** - * Softsign activation function, softsign(x) = x / (abs(x) + 1). + * Softsign activation function, {@code softsign(x) = x / (abs(x) + 1)}. * *

    Example Usage: * - *

    - *     Operand<TFloat32> input = tf.constant(
    - *              new float[] {-1.0f, 0.0f, 1.0f});
    - *     Softsign<TFloat32> softsign = new Softsign<>(tf);
    - *     Operand<TFloat32> result = softsign.call(input);
    - *     // result is [-0.5f, 0.f, 0.5f]
    - * 
    - * - * @param the data type of the activation + *
    {@code
    + * Operand input = tf.constant(
    + *          new float[] {-1.0f, 0.0f, 1.0f});
+ * Softsign softsign = new Softsign();
+ * Operand result = softsign.call(tf, input);
    + * // result is [-0.5f, 0.f, 0.5f]
    + * }
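A stand-alone scalar sketch (illustrative; not part of the patch; helper name hypothetical) of {@code softsign(x) = x / (abs(x) + 1)}:

  static double softsign(double x) {
    return x / (Math.abs(x) + 1.0); // e.g. softsign(-1) = -0.5 and softsign(1) = 0.5, as in the example above
  }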
    */ -public class Softsign extends AbstractActivation { +public class Softsign extends AbstractActivation { + /** The activation name as known by TensorFlow */ + public static final String NAME = "softsign"; + private static final Set allowedConfigKeys = Collections.singleton(NAME_KEY); /** Creates a Softsign activation. */ public Softsign() { super(); } + /** + * Creates a new Softsign from a configuration Map + * + * @param config the configuration map, this class does not use any of the entries in the + * configuration map + * @throws IllegalArgumentException if the configuration contains unsupported keys for this class + * or if the value for the name key does not match the name for the Activation + */ + public Softsign(Map config) { + checkConfigKeys(config.keySet(), allowedConfigKeys); + checkClassName(config); + } + + /** + * Applies the Softsign activation function, {@code softsign(x) = x / (abs(x) + 1)}. + * + *

    Example Usage: + * + *

    {@code
    +   * Operand input = ...;
    +   * Operand result = Softsign.softsign(tf, input);
    +   * }
    + * + * @param tf the TensorFlow Ops + * @param input the input + * @param the data type for the input + * @return the Softsign activation, {@code softsign(x) = x / (abs(x) + 1)}. + */ + public static Operand softsign(Ops tf, Operand input) { + return tf.nn.softsign(input); + } + /** {@inheritDoc} */ @Override - public Operand call(Ops tf, Operand input) { - return tf.nn.softsign(input); + public Operand call(Ops tf, Operand input) { + return softsign(tf, input); + } + + /** {@inheritDoc} */ + @Override + public Map getConfig() { + return getDefaultConfig(getName()); + } + + /** {@inheritDoc} */ + @Override + public String getName() { + return NAME; } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Swish.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Swish.java index 5007dd34555..5b962f5687e 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Swish.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Swish.java @@ -14,49 +14,99 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import java.util.Collections; +import java.util.Map; +import java.util.Set; import org.tensorflow.Operand; import org.tensorflow.op.Ops; -import org.tensorflow.types.family.TFloating; +import org.tensorflow.types.family.TNumber; /** - * Swish activation function. swish(x) = x * sigmoid(x). + * Swish activation function. {@code swish(x) = x * sigmoid(x)}. * - *

    Swish activation function which returns x*sigmoid(x). It is a smooth, - * non-monotonic function that consistently matches or outperforms ReLU on deep - * networks, it is unbounded above and bounded below. + *

    Swish activation function which returns {@code x*sigmoid(x)}. It is a smooth, non-monotonic + * function that consistently matches or outperforms {@code ReLU} on deep networks, it is unbounded + * above and bounded below. * *

    Example Usage: * - *

    - *     Operand<TFloat32> input = tf.constant(new float[]
    - *                                        {-20, -1.0, 0.0, 1.0, 20});
    - *     Swish<TFloat32> swish = new Swish<>(tf);
    - *     Operand<TFloat32> result = swish.call(input);
    - *     // result = [-4.1223075e-08f, -2.6894143e-01f,  0.0000000e+00f,
    - *     //          7.3105860e-01f,  2.0000000e+01f ]
    + * 
    {@code
    + * Operand input = tf.constant(new float[]
    + *                                    {-20, -1.0, 0.0, 1.0, 20});
+ * Swish swish = new Swish();
+ * Operand result = swish.call(tf, input);
    + * // result = [-4.1223075e-08f, -2.6894143e-01f,  0.0000000e+00f,
    + * //          7.3105860e-01f,  2.0000000e+01f ]
      *
    - * 
    + * }
    * - * @param the data type of the activation * @see Ramachandran et al., 2017 */ -public class Swish extends AbstractActivation { +public class Swish extends AbstractActivation { + /** The activation name as known by TensorFlow */ + public static final String NAME = "swish"; + + private static final Set allowedConfigKeys = Collections.singleton(NAME_KEY); /** - * Creates a Swish activation, swish(x) = x * sigmoid(x). + * Creates a Swish activation, {@code swish(x) = x * sigmoid(x)}. * - *

    Swish activation function which returns x*sigmoid(x). It is a smooth, - * non-monotonic function that consistently matches or outperforms ReLU on deep networks, it is - * unbounded above and bounded below. + *

    Swish activation function which returns {@code x*sigmoid(x)}. It is a smooth, non-monotonic + * function that consistently matches or outperforms ReLU on deep networks, it is unbounded above + * and bounded below. */ public Swish() { super(); } + /** + * Creates a new Swish from a configuration Map + * + * @param config the configuration map, this class does not use any of the entries in the + * configuration map + * @throws IllegalArgumentException if the configuration contains unsupported keys for this class + * or if the value for the name key does not match the name for the Activation + */ + public Swish(Map config) { + checkConfigKeys(config.keySet(), allowedConfigKeys); + checkClassName(config); + } + + /** + * Applies the Swish activation function, {@code swish(x) = x * sigmoid(x)}. + * + *

    Example Usage: + * + *

    {@code
    +   * Operand input = ...;
    +   * Operand result = Swish.swish(tf, input);
    +   * }
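A stand-alone scalar sketch (illustrative; not part of the patch; helper name hypothetical) of {@code swish(x) = x * sigmoid(x)}:

  static double swish(double x) {
    return x * (1.0 / (1.0 + Math.exp(-x)));
  }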
    + * + * @param tf the TensorFlow Ops + * @param input the input + * @param the data type for the input + * @return the Swish activation , @code swish(x) = x * sigmoid(x)}. + */ + public static Operand swish(Ops tf, Operand input) { + return tf.math.mul(input, tf.math.sigmoid(input)); + } + /** {@inheritDoc} */ @Override - public Operand call(Ops tf, Operand input) { + public Operand call(Ops tf, Operand input) { // TODO Python Keras returns a "grad", which is an optimization not implemented in Java. - return tf.math.mul(input, tf.math.sigmoid(input)); + return swish(tf, input); + } + + /** {@inheritDoc} */ + @Override + public Map getConfig() { + return getDefaultConfig(getName()); + } + + /** {@inheritDoc} */ + @Override + public String getName() { + return NAME; } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Tanh.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Tanh.java index 37d4d811a0d..814570e9447 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Tanh.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/activations/Tanh.java @@ -14,35 +14,86 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import java.util.Collections; +import java.util.Map; +import java.util.Set; import org.tensorflow.Operand; import org.tensorflow.op.Ops; -import org.tensorflow.types.family.TFloating; +import org.tensorflow.types.family.TNumber; /** * Hyperbolic tangent activation function. * *

    For example: * - *

    - *     Operand<TFloat32> input = tf.constant(new float[]
    - *                                        {-3.0f,-1.0f, 0.0f, 1.0f, 3.0f});
    - *     Tanh<TFloat32> tanh = new Tanh<>(tf);
    - *     Operand<TFloat32> result = tanh.call(input);
    - *     // result = [-0.9950547f, -0.7615942f,  0.f,  0.7615942f,  0.9950547f]
    - * 
    - * - * @param the data type of the activation + *
    {@code
    + * Operand input = tf.constant(new float[]
    + *                                    {-3.0f,-1.0f, 0.0f, 1.0f, 3.0f});
+ * Tanh tanh = new Tanh();
+ * Operand result = tanh.call(tf, input);
    + * // result = [-0.9950547f, -0.7615942f,  0.f,  0.7615942f,  0.9950547f]
    + * }
    */ -public class Tanh extends AbstractActivation { +public class Tanh extends AbstractActivation { + /** The activation name as known by TensorFlow */ + public static final String NAME = "tanh"; + + private static final Set allowedConfigKeys = Collections.singleton(NAME_KEY); /** Creates a Hyperbolic tangent activation. */ public Tanh() { super(); } + /** + * Creates a new Tanh from a configuration Map + * + * @param config the configuration map, this class does not use any of the entries in the + * configuration map + * @throws IllegalArgumentException if the configuration contains unsupported keys for this class + * or if the value for the name key does not match the name for the Activation + */ + public Tanh(Map config) { + checkConfigKeys(config.keySet(), allowedConfigKeys); + checkClassName(config); + } + + /** + * Applies the Hyperbolic tangent activation function, {@code tanh(x) = sinh(x)/cosh(x) = ((exp(x) + * - exp(-x))/(exp(x) + exp(-x)))}. + * + *

    Example Usage: + * + *

    {@code
    +   * Operand input = ...;
    +   * Operand result = Tanh.tanh(tf, input);
    +   * }
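A stand-alone check (illustrative; not part of the patch; helper name hypothetical) of the identity quoted above; {@code Math.tanh(x)} agrees with it up to floating-point rounding:

  static double tanhViaExp(double x) {
    double ep = Math.exp(x);
    double en = Math.exp(-x);
    return (ep - en) / (ep + en); // tanh(x) = sinh(x) / cosh(x)
  }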
    + * + * @param tf the TensorFlow Ops + * @param input the input + * @param the data type for the input + * @return the Hyperbolic tangent activation, {@code tanh(x) = sinh(x)/cosh(x) = ((exp(x) - + * exp(-x))/(exp(x) + exp(-x)))}. + */ + public static Operand tanh(Ops tf, Operand input) { + return tf.math.tanh(input); + } + /** {@inheritDoc} */ @Override - public Operand call(Ops tf, Operand input) { - return tf.math.tanh(input); + public Operand call(Ops tf, Operand input) { + return tanh(tf, input); + } + + /** {@inheritDoc} */ + @Override + public Map getConfig() { + return getDefaultConfig(getName()); + } + + /** {@inheritDoc} */ + @Override + public String getName() { + return NAME; } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java index 96f023ffedf..e02f38286b0 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/NnOps.java @@ -15,6 +15,7 @@ package org.tensorflow.framework.op; import org.tensorflow.Operand; +import org.tensorflow.framework.op.nn.GELU; import org.tensorflow.framework.op.nn.SigmoidCrossEntropyWithLogits; import org.tensorflow.framework.op.nn.SoftmaxCrossEntropyWithLogits; import org.tensorflow.framework.op.nn.SparseSoftmaxCrossEntropyWithLogits; @@ -190,4 +191,35 @@ public Operand sparseSoftmaxCrossEntro return SparseSoftmaxCrossEntropyWithLogits.sparseSoftmaxCrossEntropyWithLogits( scope, labels, logits); } + + /** + * Compute the Gaussian Error Linear Unit (GELU) activation function without approximation. + * + *

    Gaussian error linear unit (GELU) computes {@code x * P(X <= x)}, where {@code P(X) ~ N(0, + * 1)}. The (GELU) nonlinearity weights inputs by their value, rather than gates inputs by their + * sign as in ReLU. + * + * @param input the input + * @param the data type for the input and result + * @return The Gaussian Error Linear Unit computation + */ + public Operand gelu(Operand input) { + return GELU.gelu(scope, input); + } + + /** + * Compute the Gaussian Error Linear Unit (GELU) activation function. + * + *

    Gaussian error linear unit (GELU) computes {@code x * P(X <= x)}, where {@code P(X) ~ N(0, + * 1)}. The (GELU) nonlinearity weights inputs by their value, rather than gates inputs by their + * sign as in ReLU. + * + * @param input the input + * @param approximate Whether to enable approximation. + * @param the data type for the input and result + * @return The Gaussian Error Linear Unit computation + */ + public Operand gelu(Operand input, boolean approximate) { + return GELU.gelu(scope, input, approximate); + } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/GELU.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/GELU.java new file mode 100644 index 00000000000..d1f583a31d3 --- /dev/null +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/GELU.java @@ -0,0 +1,102 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ +package org.tensorflow.framework.op.nn; + +import org.tensorflow.Operand; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.Operator; +import org.tensorflow.op.core.Constant; +import org.tensorflow.op.dtypes.Cast; +import org.tensorflow.op.math.Add; +import org.tensorflow.op.math.Div; +import org.tensorflow.op.math.Erf; +import org.tensorflow.op.math.Mul; +import org.tensorflow.op.math.Pow; +import org.tensorflow.op.math.Tanh; +import org.tensorflow.types.family.TNumber; + +/** + * The Gaussian Error Linear Unit (GELU) activation function. + * + *

    Gaussian error linear unit (GELU) computes {@code x * P(X <= x)}, where {@code P(X) ~ N(0, + * 1)}. The (GELU) nonlinearity weights inputs by their value, rather than gates inputs by their + * sign as in ReLU. + * + * @see Gaussian Error Linear Units (GELUs) + */ +@Operator(group = "nn") +public class GELU { + + /** + * Compute the Gaussian Error Linear Unit (GELU) activation function without approximation. + * + * @param scope The TensorFlow scope + * @param input the input + * @param the data type for the input and result + * @return The Gaussian Error Linear Unit + */ + @Endpoint(name = "gelu") + public static Operand gelu(Scope scope, Operand input) { + return gelu(scope, input, false); + } + + /** + * Compute the Gaussian Error Linear Unit (GELU) activation function. + * + * @param scope The TensorFlow scope + * @param input the input + * @param approximate Whether to enable approximation. + * @param the data type for the input and result + * @return The Gaussian Error Linear Unit computation + */ + @Endpoint(name = "gelu") + public static Operand gelu( + Scope scope, Operand input, boolean approximate) { + Cast point5 = Cast.create(scope, Constant.scalarOf(scope, 0.5), input.type()); + Cast one = Cast.create(scope, Constant.scalarOf(scope, 1.0), input.type()); + Mul inputMul = Mul.create(scope, point5, input); + if (approximate) { + Operand coeff = Cast.create(scope, Constant.scalarOf(scope, 0.044715), input.type()); + Operand tanhMul = + Cast.create(scope, Constant.scalarOf(scope, 0.7978845608028654), input.type()); + Operand three = Cast.create(scope, Constant.scalarOf(scope, 3), input.type()); + return Mul.create( + scope, + inputMul, + Add.create( + scope, + one, + Tanh.create( + scope, + Mul.create( + scope, + tanhMul, + Add.create( + scope, + input, + Mul.create(scope, coeff, Pow.create(scope, input, three))))))); + + } else { + Operand mulConstant = + Cast.create(scope, Constant.scalarOf(scope, 1.4142135623730951), input.type()); + + return Mul.create( + scope, + inputMul, + Add.create(scope, one, Erf.create(scope, Div.create(scope, input, mulConstant)))); + } + } +} diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ActivationTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ActivationTest.java new file mode 100644 index 00000000000..327d23f6033 --- /dev/null +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ActivationTest.java @@ -0,0 +1,93 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+=======================================================================*/ +package org.tensorflow.framework.activations; + +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.util.Collections; +import org.junit.jupiter.api.Test; + +public class ActivationTest { + + /** Test of Activation create method */ + @Test + public void testCreateActivation() { + assertTrue(Activation.create("elu") instanceof ELU); + assertTrue(Activation.create("exponential") instanceof Exponential); + assertTrue(Activation.create("gelu") instanceof GELU); + assertTrue(Activation.create("hard_sigmoid") instanceof HardSigmoid); + assertTrue(Activation.create("linear") instanceof Linear); + assertTrue(Activation.create("relu") instanceof ReLU); + assertTrue(Activation.create("selu") instanceof SELU); + assertTrue(Activation.create("sigmoid") instanceof Sigmoid); + assertTrue(Activation.create("softmax") instanceof Softmax); + assertTrue(Activation.create("softplus") instanceof Softplus); + assertTrue(Activation.create("softsign") instanceof Softsign); + assertTrue(Activation.create("swish") instanceof Swish); + assertTrue(Activation.create("tanh") instanceof Tanh); + } + + /** Test of Activation create method */ + @Test + public void testCreateActivationConfig() { + + assertTrue( + Activation.create(Collections.singletonMap(AbstractActivation.NAME_KEY, "elu")) + instanceof ELU); + assertTrue( + Activation.create(Collections.singletonMap(AbstractActivation.NAME_KEY, "exponential")) + instanceof Exponential); + assertTrue( + Activation.create(Collections.singletonMap(AbstractActivation.NAME_KEY, "gelu")) + instanceof GELU); + assertTrue( + Activation.create(Collections.singletonMap(AbstractActivation.NAME_KEY, "hard_sigmoid")) + instanceof HardSigmoid); + assertTrue( + Activation.create(Collections.singletonMap(AbstractActivation.NAME_KEY, "linear")) + instanceof Linear); + assertTrue( + Activation.create(Collections.singletonMap(AbstractActivation.NAME_KEY, "relu")) + instanceof ReLU); + assertTrue( + Activation.create(Collections.singletonMap(AbstractActivation.NAME_KEY, "selu")) + instanceof SELU); + assertTrue( + Activation.create(Collections.singletonMap(AbstractActivation.NAME_KEY, "sigmoid")) + instanceof Sigmoid); + assertTrue( + Activation.create(Collections.singletonMap(AbstractActivation.NAME_KEY, "softmax")) + instanceof Softmax); + assertTrue( + Activation.create(Collections.singletonMap(AbstractActivation.NAME_KEY, "softplus")) + instanceof Softplus); + assertTrue( + Activation.create(Collections.singletonMap(AbstractActivation.NAME_KEY, "softsign")) + instanceof Softsign); + assertTrue( + Activation.create(Collections.singletonMap(AbstractActivation.NAME_KEY, "swish")) + instanceof Swish); + assertTrue( + Activation.create(Collections.singletonMap(AbstractActivation.NAME_KEY, "tanh")) + instanceof Tanh); + } + + /** Test of Activation create method */ + @Test + public void testCreateUnknownActivation() { + assertThrows(IllegalArgumentException.class, () -> Activation.create("bogus")); + } +} diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ELUTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ELUTest.java index 9f3fa75e95d..3f54750a0cd 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ELUTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ELUTest.java @@ -14,6 +14,13 @@ 
=======================================================================*/ package org.tensorflow.framework.activations; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.util.HashMap; +import java.util.Map; import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; @@ -33,7 +40,7 @@ public void testCallFloat() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - ELU instance = new ELU<>(); + ELU instance = new ELU(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } @@ -47,7 +54,7 @@ public void testCallDouble() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - ELU instance = new ELU<>(); + ELU instance = new ELU(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } @@ -61,9 +68,49 @@ public void testAlpha() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - ELU instance = new ELU<>(2.0f); + ELU instance = new ELU(2.0f); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } + + @Test + public void testConfig() { + Activation instance = Activation.create(ELU.NAME); + assertTrue(instance instanceof ELU); + + Map config = new HashMap<>(); + config.put("alpha", 2.0f); + config.put(AbstractActivation.NAME_KEY, ELU.NAME); + + instance = Activation.create(config); + assertTrue(instance instanceof ELU); + assertEquals(2.0, ((ELU) instance).getAlpha()); + + instance = Activation.create("elu"); + assertNotNull(instance); + assertEquals(1.0f, ((ELU) instance).getAlpha()); + } + + @Test + public void testGetConfig() { + ELU instance = new ELU(2.0f); + Map config = instance.getConfig(); + assertEquals(ELU.NAME, config.get(AbstractActivation.NAME_KEY)); + assertEquals(2.0f, ((Number) config.get("alpha")).floatValue()); + } + + /** Test of Activation create method with bad data */ + @Test + public void testBadConfig() { + final Map configBadKey = new HashMap<>(); + configBadKey.put("beta", 2.0f); + configBadKey.put(AbstractActivation.NAME_KEY, ELU.NAME); + assertThrows(IllegalArgumentException.class, () -> Activation.create(configBadKey)); + + final Map configBadClass = new HashMap<>(); + configBadClass.put("alpha", 2.0f); + configBadClass.put(AbstractActivation.NAME_KEY, "bogus"); + assertThrows(IllegalArgumentException.class, () -> new ELU(configBadClass)); + } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ExponentialTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ExponentialTest.java index f82c19987d1..6f384ca531f 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ExponentialTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ExponentialTest.java @@ -14,6 +14,14 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; 
+import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; @@ -21,7 +29,6 @@ import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; -/** @author Jim Clarke */ public class ExponentialTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -42,7 +49,7 @@ public void testCallFloat() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Exponential instance = new Exponential<>(); + Exponential instance = new Exponential(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } @@ -60,9 +67,38 @@ public void testCallDouble() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Exponential instance = new Exponential<>(); + Exponential instance = new Exponential(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } + + @Test + public void testConfig() { + Activation instance = Activation.create(Exponential.NAME); + assertTrue(instance instanceof Exponential); + Exponential exponential = + new Exponential(Collections.singletonMap(Exponential.NAME_KEY, Exponential.NAME)); + assertNotNull(exponential); + } + + @Test + public void testGetConfig() { + Exponential instance = new Exponential(); + assertEquals(Exponential.NAME, instance.getConfig().get(Exponential.NAME_KEY)); + } + + /** Test of Activation create method with bad data */ + @Test + public void testBadConfig() { + + final Map configBadKey = new HashMap<>(); + configBadKey.put("beta", 2.0f); + configBadKey.put(Exponential.NAME_KEY, Exponential.NAME); + assertThrows(IllegalArgumentException.class, () -> Activation.create(configBadKey)); + + final Map configBadClass = new HashMap<>(); + configBadClass.put(Exponential.NAME_KEY, "bogus"); + assertThrows(IllegalArgumentException.class, () -> new Exponential(configBadClass)); + } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/GELUTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/GELUTest.java new file mode 100644 index 00000000000..f34344639d8 --- /dev/null +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/GELUTest.java @@ -0,0 +1,148 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+=======================================================================*/
+package org.tensorflow.framework.activations;
+
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import java.util.HashMap;
+import java.util.Map;
+import org.junit.jupiter.api.Test;
+import org.tensorflow.Operand;
+import org.tensorflow.framework.utils.TestSession;
+import org.tensorflow.op.Ops;
+import org.tensorflow.types.TFloat64;
+
+public class GELUTest {
+
+  private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH};
+
+  /** Test of GELU call method without the approximation */
+  @Test
+  public void testCallNoApproximate() {
+    double[][] input = {
+      {
+        0.18463433217903202,
+        0.7748109168364575,
+        0.5901821703159541,
+        0.11772865177047143,
+        0.39442705615436113
+      },
+      {
+        0.14569713527198846,
+        0.022968622140421502,
+        0.19299343598670116,
+        0.8063201076826957,
+        0.2908883528612243
+      }
+    };
+
+    double[][] expected = {
+      {0.10584016, 0.60495245, 0.42638642, 0.06438094, 0.2577057},
+      {0.08128731, 0.011694758, 0.11126418, 0.63696945, 0.178731}
+    };
+
+    for (TestSession.Mode tfMode : tfModes)
+      try (TestSession session = TestSession.createTestSession(tfMode)) {
+        Ops tf = session.getTF();
+        GELU instance = new GELU();
+        Operand result = instance.call(tf, tf.constant(input));
+        session.evaluate(tf.constant(expected), result);
+      }
+  }
+
+  /** Test of GELU call method with the approximation */
+  @Test
+  public void testCallApproximate() {
+    double[][] input = {
+      {
+        0.7528694935987742,
+        0.9496349687413689,
+        0.6676759543267402,
+        0.5424082144274655,
+        0.16766158529053699
+      },
+      {
+        0.1022611836463726,
+        0.7906577638705719,
+        0.9607832735098116,
+        0.5693112764986582,
+        0.731933160073661
+      }
+    };
+
+    double[][] expected = {
+      {
+        0.5828287788543643,
+        0.7869710854047693,
+        0.4992606099752831,
+        0.38304258917872785,
+        0.09499264800335701
+      },
+      {
+        0.055295175281590385,
+        0.620923776708609,
+        0.798915192168026,
+        0.40727355470673604,
+        0.5619842135196427
+      }
+    };
+
+    for (TestSession.Mode tfMode : tfModes)
+      try (TestSession session = TestSession.createTestSession(tfMode)) {
+        Ops tf = session.getTF();
+        GELU instance = new GELU(true);
+        Operand result = instance.call(tf, tf.constant(input));
+        session.evaluate(tf.constant(expected), result);
+      }
+  }
+
+  @Test
+  public void testConfig() {
+    Activation instance = Activation.create(GELU.NAME);
+    assertTrue(instance instanceof GELU);
+
+    Map config = new HashMap<>();
+    config.put("name", GELU.NAME);
+    config.put("approximate", true);
+    instance = Activation.create(config);
+    assertNotNull(instance);
+    assertTrue(((GELU) instance).isApproximate());
+  }
+
+  @Test
+  public void testGetConfig() {
+    GELU instance = new GELU(true);
+    Map config = instance.getConfig();
+    assertTrue(config.containsKey("approximate"));
+    assertTrue(config.get("approximate") instanceof Boolean);
+    assertTrue((Boolean) config.get("approximate"));
+  }
+
+  /** Test of Activation create method with bad data */
+  @Test
+  public void testBadConfig() {
+
+    final Map configBadKey = new HashMap<>();
+    configBadKey.put("beta", 2.0f);
+    configBadKey.put(GELU.NAME_KEY, GELU.NAME);
+    assertThrows(IllegalArgumentException.class, () -> Activation.create(configBadKey));
+
+    final Map configBadClass = new HashMap<>();
+    configBadClass.put(GELU.NAME_KEY, "bogus");
+    assertThrows(IllegalArgumentException.class, () -> new GELU(configBadClass));
+  }
+}
diff --git 
a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/HardSigmoidTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/HardSigmoidTest.java index 0e32201c3e6..9a059c12756 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/HardSigmoidTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/HardSigmoidTest.java @@ -14,6 +14,14 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; @@ -21,7 +29,6 @@ import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; -/** @author Jim Clarke */ public class HardSigmoidTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -33,7 +40,7 @@ public void testCallFloat() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - HardSigmoid instance = new HardSigmoid<>(); + HardSigmoid instance = new HardSigmoid(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } @@ -47,9 +54,38 @@ public void testCallDouble() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - HardSigmoid instance = new HardSigmoid<>(); + HardSigmoid instance = new HardSigmoid(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } + + @Test + public void testConfig() { + Activation instance = Activation.create(HardSigmoid.NAME); + assertTrue(instance instanceof HardSigmoid); + HardSigmoid hardSigmoid = + new HardSigmoid(Collections.singletonMap(HardSigmoid.NAME_KEY, HardSigmoid.NAME)); + assertNotNull(hardSigmoid); + } + + @Test + public void testGetConfig() { + HardSigmoid instance = new HardSigmoid(); + assertEquals(HardSigmoid.NAME, instance.getConfig().get("name")); + } + + /** Test of Activation create method with bad data */ + @Test + public void testBadConfig() { + + final Map configBadKey = new HashMap<>(); + configBadKey.put("beta", 2.0f); + configBadKey.put(HardSigmoid.NAME_KEY, HardSigmoid.NAME); + assertThrows(IllegalArgumentException.class, () -> Activation.create(configBadKey)); + + final Map configBadClass = new HashMap<>(); + configBadClass.put(HardSigmoid.NAME_KEY, "bogus"); + assertThrows(IllegalArgumentException.class, () -> new HardSigmoid(configBadClass)); + } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/LinearTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/LinearTest.java index 817940688e8..2541b903bd9 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/LinearTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/LinearTest.java @@ -14,6 +14,14 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import static 
org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; @@ -22,7 +30,6 @@ import org.tensorflow.types.TFloat64; import org.tensorflow.types.TInt32; -/** @author Jim Clarke */ public class LinearTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -34,7 +41,7 @@ public void testCallInt() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Linear instance = new Linear<>(); + Linear instance = new Linear(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } @@ -48,7 +55,7 @@ public void testCallFloat() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Linear instance = new Linear<>(); + Linear instance = new Linear(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } @@ -62,9 +69,37 @@ public void testCallDouble() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Linear instance = new Linear<>(); + Linear instance = new Linear(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } + + @Test + public void testConfig() { + Activation instance = Activation.create(Linear.NAME); + assertTrue(instance instanceof Linear); + Linear linear = new Linear(Collections.singletonMap(Linear.NAME_KEY, Linear.NAME)); + assertNotNull(linear); + } + + @Test + public void testGetConfig() { + Linear instance = new Linear(); + assertEquals(Linear.NAME, instance.getConfig().get(Linear.NAME_KEY)); + } + + /** Test of Activation create method with bad data */ + @Test + public void testBadConfig() { + + final Map configBadKey = new HashMap<>(); + configBadKey.put("beta", 2.0f); + configBadKey.put(Linear.NAME_KEY, Linear.NAME); + assertThrows(IllegalArgumentException.class, () -> Activation.create(configBadKey)); + + final Map configBadClass = new HashMap<>(); + configBadClass.put(Linear.NAME_KEY, "bogus"); + assertThrows(IllegalArgumentException.class, () -> new Linear(configBadClass)); + } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ReLUTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ReLUTest.java index 94f803d6b1c..584e674e58e 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ReLUTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/ReLUTest.java @@ -14,6 +14,13 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.util.HashMap; +import java.util.Map; import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import 
org.tensorflow.framework.utils.TestSession; @@ -24,7 +31,6 @@ import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; -/** @author Jim Clarke */ public class ReLUTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -36,7 +42,7 @@ public void testCallFloat() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - ReLU instance = new ReLU<>(); + ReLU instance = new ReLU(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(tf.constant(expected), result); } @@ -50,7 +56,7 @@ public void testCallInt() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - ReLU instance = new ReLU<>(); + ReLU instance = new ReLU(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(tf.constant(expected), result); } @@ -64,7 +70,7 @@ public void testCallLong() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - ReLU instance = new ReLU<>(); + ReLU instance = new ReLU(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(tf.constant(expected), result); } @@ -78,7 +84,7 @@ public void testCallFloat16() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - ReLU instance = new ReLU<>(); + ReLU instance = new ReLU(); Operand result = instance.call(tf, tf.dtypes.cast(tf.constant(input), TFloat16.class)); session.evaluate(tf.dtypes.cast(tf.constant(expected), TFloat16.class), result); @@ -93,7 +99,7 @@ public void testCallDouble() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - ReLU instance = new ReLU<>(); + ReLU instance = new ReLU(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(tf.constant(expected), result); } @@ -106,7 +112,7 @@ public void testAlpha() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - ReLU instance = new ReLU<>(0.5f, ReLU.MAX_VALUE_DEFAULT, ReLU.THRESHOLD_DEFAULT); + ReLU instance = new ReLU(0.5f, ReLU.MAX_VALUE_DEFAULT, ReLU.THRESHOLD_DEFAULT); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(tf.constant(expected), result); } @@ -119,7 +125,7 @@ public void testMaxValue() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - ReLU instance = new ReLU<>(ReLU.ALPHA_DEFAULT, 5, ReLU.THRESHOLD_DEFAULT); + ReLU instance = new ReLU(ReLU.ALPHA_DEFAULT, 5, ReLU.THRESHOLD_DEFAULT); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(tf.constant(expected), result); } @@ -132,9 +138,52 @@ public void testThreshold() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - ReLU instance = new ReLU<>(ReLU.ALPHA_DEFAULT, ReLU.MAX_VALUE_DEFAULT, 5.0f); + ReLU instance = new ReLU(ReLU.ALPHA_DEFAULT, ReLU.MAX_VALUE_DEFAULT, 5.0f); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(tf.constant(expected), result); } } + + @Test + public void testConfig() { + Activation instance = Activation.create(ReLU.NAME); + 
assertTrue(instance instanceof ReLU); + Map config = new HashMap<>(); + config.put("name", ReLU.NAME); + config.put("alpha", 2.0); + config.put("max_value", 25); + config.put("threshold", .95); + + instance = Activation.create(config); + assertNotNull(instance); + assertEquals(2.0f, ((ReLU) instance).getAlpha()); + assertEquals(25.0f, ((ReLU) instance).getMaxValue()); + assertEquals(.95f, ((ReLU) instance).getThreshold()); + } + + @Test + public void testGetConfig() { + ReLU instance = new ReLU(); + Map config = instance.getConfig(); + assertTrue(config.containsKey("alpha")); + assertEquals(instance.getAlpha(), ((Number) config.get("alpha")).floatValue()); + assertTrue(config.containsKey("max_value")); + assertEquals(instance.getMaxValue(), ((Number) config.get("max_value")).floatValue()); + assertTrue(config.containsKey("threshold")); + assertEquals(instance.getThreshold(), ((Number) config.get("threshold")).floatValue()); + } + + /** Test of Activation create method with bad data */ + @Test + public void testBadConfig() { + + final Map configBadKey = new HashMap<>(); + configBadKey.put("beta", 2.0f); + configBadKey.put(ReLU.NAME_KEY, ReLU.NAME); + assertThrows(IllegalArgumentException.class, () -> Activation.create(configBadKey)); + + final Map configBadClass = new HashMap<>(); + configBadClass.put(ReLU.NAME_KEY, Linear.NAME); + assertThrows(IllegalArgumentException.class, () -> new ReLU(configBadClass)); + } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SELUTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SELUTest.java index df1cfb9bd05..dc9e92709d4 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SELUTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SELUTest.java @@ -14,6 +14,12 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.util.HashMap; +import java.util.Map; import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; @@ -21,7 +27,6 @@ import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; -/** @author Jim Clarke */ public class SELUTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -35,7 +40,7 @@ public void testCallFloat() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - SELU instance = new SELU<>(); + SELU instance = new SELU(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } @@ -58,9 +63,35 @@ public void testCallDouble() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - SELU instance = new SELU<>(); + SELU instance = new SELU(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } + + @Test + public void testConfig() { + Activation instance = Activation.create(SELU.NAME); + assertTrue(instance instanceof SELU); + } + + @Test + public void testGetConfig() { + SELU instance = new SELU(); + assertEquals(SELU.NAME, instance.getConfig().get("name")); + } + + /** Test of 
Activation create method with bad data */ + @Test + public void testBadConfig() { + + final Map configBadKey = new HashMap<>(); + configBadKey.put("beta", 2.0f); + configBadKey.put(SELU.NAME_KEY, SELU.NAME); + assertThrows(IllegalArgumentException.class, () -> Activation.create(configBadKey)); + + final Map configBadClass = new HashMap<>(); + configBadClass.put(SELU.NAME_KEY, Linear.NAME); + assertThrows(IllegalArgumentException.class, () -> new SELU(configBadClass)); + } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SigmoidTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SigmoidTest.java index 0c59eeaba6e..3945b796aff 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SigmoidTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SigmoidTest.java @@ -14,6 +14,12 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.util.HashMap; +import java.util.Map; import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; @@ -21,7 +27,6 @@ import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; -/** @author Jim Clarke */ public class SigmoidTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -42,7 +47,7 @@ public void testCallFloat() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Sigmoid instance = new Sigmoid<>(); + Sigmoid instance = new Sigmoid(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } @@ -60,9 +65,35 @@ public void testCallDouble() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Sigmoid instance = new Sigmoid<>(); + Sigmoid instance = new Sigmoid(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } + + @Test + public void testConfig() { + Activation instance = Activation.create(Sigmoid.NAME); + assertTrue(instance instanceof Sigmoid); + } + + @Test + public void testGetConfig() { + Sigmoid instance = new Sigmoid(); + assertEquals(Sigmoid.NAME, instance.getConfig().get("name")); + } + + /** Test of Activation create method with bad data */ + @Test + public void testBadConfig() { + + final Map configBadKey = new HashMap<>(); + configBadKey.put("beta", 2.0f); + configBadKey.put(Sigmoid.NAME_KEY, Sigmoid.NAME); + assertThrows(IllegalArgumentException.class, () -> Activation.create(configBadKey)); + + final Map configBadClass = new HashMap<>(); + configBadClass.put(Sigmoid.NAME_KEY, Linear.NAME); + assertThrows(IllegalArgumentException.class, () -> new Sigmoid(configBadClass)); + } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SoftmaxTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SoftmaxTest.java index aeb971905a2..696030cafab 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SoftmaxTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SoftmaxTest.java 
@@ -14,6 +14,12 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.util.HashMap; +import java.util.Map; import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; @@ -21,7 +27,6 @@ import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; -/** @author Jim Clarke */ public class SoftmaxTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -37,7 +42,7 @@ public void testSoftmaxOpsOperandFloat() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Softmax instance = new Softmax<>(); + Softmax instance = new Softmax(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(tf.constant(expected), result); } @@ -54,7 +59,7 @@ public void testSoftmaxOpsOperandDouble() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Softmax instance = new Softmax<>(); + Softmax instance = new Softmax(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(tf.constant(expected), result); } @@ -71,7 +76,7 @@ public void testSoftmaxOpsOperandDoubleNegative() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Softmax instance = new Softmax<>(); + Softmax instance = new Softmax(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(tf.constant(expected), result); } @@ -88,7 +93,7 @@ public void testSoftmax1D() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Softmax instance = new Softmax<>(); + Softmax instance = new Softmax(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(tf.constant(expected), result); } @@ -105,9 +110,35 @@ public void testSoftmax3D() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Softmax instance = new Softmax<>(); + Softmax instance = new Softmax(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(tf.constant(expected), result); } } + + @Test + public void testConfig() { + Activation instance = Activation.create(Softmax.NAME); + assertTrue(instance instanceof Softmax); + } + + @Test + public void testGetConfig() { + Softmax instance = new Softmax(); + assertEquals(Softmax.NAME, instance.getConfig().get("name")); + } + + /** Test of Activation create method with bad data */ + @Test + public void testBadConfig() { + + final Map configBadKey = new HashMap<>(); + configBadKey.put("beta", 2.0f); + configBadKey.put(Softmax.NAME_KEY, Softmax.NAME); + assertThrows(IllegalArgumentException.class, () -> Activation.create(configBadKey)); + + final Map configBadClass = new HashMap<>(); + configBadClass.put(Softmax.NAME_KEY, Linear.NAME); + assertThrows(IllegalArgumentException.class, () -> new Softmax(configBadClass)); + } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SoftplusTest.java 
b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SoftplusTest.java index e896807d9f7..3407b5f7e11 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SoftplusTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SoftplusTest.java @@ -14,6 +14,12 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.util.HashMap; +import java.util.Map; import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; @@ -21,7 +27,6 @@ import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; -/** @author Jim Clarke */ public class SoftplusTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -36,7 +41,7 @@ public void testCallFloat() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Softplus instance = new Softplus<>(); + Softplus instance = new Softplus(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } @@ -54,9 +59,35 @@ public void testCallDouble() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Softplus instance = new Softplus<>(); + Softplus instance = new Softplus(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } + + @Test + public void testConfig() { + Activation instance = Activation.create(Softplus.NAME); + assertTrue(instance instanceof Softplus); + } + + @Test + public void testGetConfig() { + Softplus instance = new Softplus(); + assertEquals(Softplus.NAME, instance.getConfig().get("name")); + } + + /** Test of Activation create method with bad data */ + @Test + public void testBadConfig() { + + final Map configBadKey = new HashMap<>(); + configBadKey.put("beta", 2.0f); + configBadKey.put(Softplus.NAME_KEY, Softplus.NAME); + assertThrows(IllegalArgumentException.class, () -> Activation.create(configBadKey)); + + final Map configBadClass = new HashMap<>(); + configBadClass.put(Softplus.NAME_KEY, Linear.NAME); + assertThrows(IllegalArgumentException.class, () -> new Softplus(configBadClass)); + } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SoftsignTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SoftsignTest.java index 2f9a17caf59..27d9c4631e4 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SoftsignTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SoftsignTest.java @@ -14,6 +14,12 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.util.HashMap; +import java.util.Map; import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; @@ -21,7 +27,6 @@ import org.tensorflow.types.TFloat32; import 
org.tensorflow.types.TFloat64; -/** @author Jim Clarke */ public class SoftsignTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -34,7 +39,7 @@ public void testCallFloat() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Softsign instance = new Softsign<>(); + Softsign instance = new Softsign(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } @@ -57,9 +62,35 @@ public void testCallDouble() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Softsign instance = new Softsign<>(); + Softsign instance = new Softsign(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } + + @Test + public void testConfig() { + Activation instance = Activation.create(Softsign.NAME); + assertTrue(instance instanceof Softsign); + } + + @Test + public void testGetConfig() { + Softsign instance = new Softsign(); + assertEquals(Softsign.NAME, instance.getConfig().get("name")); + } + + /** Test of Activation create method with bad data */ + @Test + public void testBadConfig() { + + final Map configBadKey = new HashMap<>(); + configBadKey.put("beta", 2.0f); + configBadKey.put(Softsign.NAME_KEY, Softsign.NAME); + assertThrows(IllegalArgumentException.class, () -> Activation.create(configBadKey)); + + final Map configBadClass = new HashMap<>(); + configBadClass.put(Softsign.NAME_KEY, Linear.NAME); + assertThrows(IllegalArgumentException.class, () -> new Softsign(configBadClass)); + } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SwishTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SwishTest.java index 8dabfaf379a..4918e04589f 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SwishTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/SwishTest.java @@ -14,6 +14,12 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.util.HashMap; +import java.util.Map; import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; @@ -21,7 +27,6 @@ import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; -/** @author Jim Clarke */ public class SwishTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -42,7 +47,7 @@ public void testCallFloat() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Swish instance = new Swish<>(); + Swish instance = new Swish(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } @@ -65,9 +70,35 @@ public void testCallDouble() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Swish instance = new Swish<>(); + Swish instance = new Swish(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } + + @Test + public void 
testConfig() { + Activation instance = Activation.create(Swish.NAME); + assertTrue(instance instanceof Swish); + } + + @Test + public void testGetConfig() { + Swish instance = new Swish(); + assertEquals(Swish.NAME, instance.getConfig().get("name")); + } + + /** Test of Activation create method with bad data */ + @Test + public void testBadConfig() { + + final Map configBadKey = new HashMap<>(); + configBadKey.put("beta", 2.0f); + configBadKey.put(Swish.NAME_KEY, Swish.NAME); + assertThrows(IllegalArgumentException.class, () -> Activation.create(configBadKey)); + + final Map configBadClass = new HashMap<>(); + configBadClass.put(Swish.NAME_KEY, Linear.NAME); + assertThrows(IllegalArgumentException.class, () -> new Swish(configBadClass)); + } } diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/TanhTest.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/TanhTest.java index 696f96a367e..dc458f6bf52 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/TanhTest.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/activations/TanhTest.java @@ -14,6 +14,12 @@ =======================================================================*/ package org.tensorflow.framework.activations; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.util.HashMap; +import java.util.Map; import org.junit.jupiter.api.Test; import org.tensorflow.Operand; import org.tensorflow.framework.utils.TestSession; @@ -21,7 +27,6 @@ import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; -/** @author Jim Clarke */ public class TanhTest { private final TestSession.Mode[] tfModes = {TestSession.Mode.EAGER, TestSession.Mode.GRAPH}; @@ -42,7 +47,7 @@ public void testCallFloat() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Tanh instance = new Tanh<>(); + Tanh instance = new Tanh(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } @@ -65,9 +70,35 @@ public void testCallDouble() { for (TestSession.Mode tfMode : tfModes) try (TestSession session = TestSession.createTestSession(tfMode)) { Ops tf = session.getTF(); - Tanh instance = new Tanh<>(); + Tanh instance = new Tanh(); Operand result = instance.call(tf, tf.constant(input)); session.evaluate(expected, result); } } + + @Test + public void testConfig() { + Activation instance = Activation.create(Tanh.NAME); + assertTrue(instance instanceof Tanh); + } + + @Test + public void testGetConfig() { + Tanh instance = new Tanh(); + assertEquals(Tanh.NAME, instance.getConfig().get("name")); + } + + /** Test of Activation create method with bad data */ + @Test + public void testBadConfig() { + + final Map configBadKey = new HashMap<>(); + configBadKey.put("beta", 2.0f); + configBadKey.put(Tanh.NAME_KEY, Tanh.NAME); + assertThrows(IllegalArgumentException.class, () -> Activation.create(configBadKey)); + + final Map configBadClass = new HashMap<>(); + configBadClass.put(Tanh.NAME_KEY, Linear.NAME); + assertThrows(IllegalArgumentException.class, () -> new Tanh(configBadClass)); + } } From c00e84ec7baa14c2dc051424cba27fe46df18474 Mon Sep 17 00:00:00 2001 From: Karl Lessard Date: Sat, 26 Mar 2022 21:33:14 -0400 Subject: [PATCH 20/21] Use previous version of GitHub-hosted runners --- 
.github/workflows/ci.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0fd044979af..384d68f7f49 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -17,7 +17,7 @@ env: jobs: quick-build: if: github.event_name == 'pull_request' && !contains(github.event.pull_request.labels.*.name, 'CI build') - runs-on: ubuntu-latest + runs-on: ubuntu-18.04 container: centos:7 steps: - name: Checkout repository @@ -43,7 +43,7 @@ jobs: mvn compiler:compile -Pdev,jdk11 -B -U -e check-format: if: github.event_name == 'pull_request' - runs-on: ubuntu-latest + runs-on: ubuntu-18.04 container: centos:7 steps: - name: Checkout repository @@ -68,7 +68,7 @@ jobs: run: | mvn spotless:check -Pdev,jdk11 -B -U -e prepare: - runs-on: ubuntu-latest + runs-on: ubuntu-18.04 outputs: stagingRepositoryId: ${{ steps.staging.outputs.stagingRepositoryId }} steps: @@ -85,7 +85,7 @@ jobs: echo "::set-output name=stagingRepositoryId::$STAGING_REPOSITORY_ID" linux-x86_64: if: github.event_name == 'push' || contains(github.event.pull_request.labels.*.name, 'CI build') - runs-on: ubuntu-latest + runs-on: ubuntu-18.04 container: centos:7 needs: prepare strategy: @@ -150,7 +150,7 @@ jobs: df -h macosx-x86_64: if: github.event_name == 'push' || contains(github.event.pull_request.labels.*.name, 'CI build') - runs-on: macos-latest + runs-on: macos-10.15 needs: prepare strategy: matrix: @@ -186,7 +186,7 @@ jobs: df -h windows-x86_64: if: github.event_name == 'push' || contains(github.event.pull_request.labels.*.name, 'CI build') - runs-on: windows-latest + runs-on: windows-2019 needs: prepare strategy: matrix: @@ -264,7 +264,7 @@ jobs: deploy: if: github.event_name == 'push' && contains(github.ref, 'master') needs: [linux-x86_64, macosx-x86_64, windows-x86_64] - runs-on: ubuntu-latest + runs-on: ubuntu-18.04 steps: - name: Checkout repository uses: actions/checkout@v1 From 5596276cd4249d1c1f916648ecc822e64606ac26 Mon Sep 17 00:00:00 2001 From: Karl Lessard Date: Tue, 29 Mar 2022 07:53:46 -0400 Subject: [PATCH 21/21] Update install instructions for 0.4.1 --- docs/install.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/install.md b/docs/install.md index b102782ed4f..4691f4ffccd 100644 --- a/docs/install.md +++ b/docs/install.md @@ -64,7 +64,7 @@ For example, org.tensorflow tensorflow-core-platform - 0.3.3 + 0.4.1 ``` @@ -107,7 +107,7 @@ snapshots repository in your `pom.xml`. org.tensorflow tensorflow-core-platform - 0.4.0-SNAPSHOT + 0.5.0-SNAPSHOT ``` @@ -124,7 +124,7 @@ repositories { } dependencies { - compile group: 'org.tensorflow', name: 'tensorflow-core-platform', version: '0.3.3' + compile group: 'org.tensorflow', name: 'tensorflow-core-platform', version: '0.4.1' } ``` @@ -170,7 +170,7 @@ add the TensorFlow dependency to the project's `pom.xml` file: org.tensorflow tensorflow-core-platform - 0.3.3 + 0.4.1 @@ -195,8 +195,8 @@ public class HelloTensorFlow { try (ConcreteFunction dbl = ConcreteFunction.create(HelloTensorFlow::dbl); TInt32 x = TInt32.scalarOf(10); - Tensor dblX = dbl.call(x)) { - System.out.println(x.getInt() + " doubled is " + ((TInt32)dblX).getInt()); + TInt32 dblX = (TInt32)dbl.call(x)) { + System.out.println(x.getInt() + " doubled is " + dblX.getInt()); } }
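The updated `HelloTensorFlow` snippet in `docs/install.md` passes a method reference `HelloTensorFlow::dbl` to `ConcreteFunction.create(...)`, but the `dbl` method itself falls outside this excerpt. As a rough, non-authoritative sketch — assuming the `Function<Ops, Signature>` form of `ConcreteFunction.create` in tensorflow-core 0.4.x, and with the body of `dbl` invented here purely for illustration — such a doubling function could look like this:

```java
// Illustrative sketch only: the real dbl(...) used by the install guide is not
// shown in this excerpt, so its body here is an assumption, not the documented one.
import org.tensorflow.ConcreteFunction;
import org.tensorflow.Signature;
import org.tensorflow.op.Ops;
import org.tensorflow.op.core.Placeholder;
import org.tensorflow.op.math.Add;
import org.tensorflow.types.TInt32;

public class HelloTensorFlowSketch {

  // Builds a one-input, one-output graph function that doubles an int32 tensor.
  private static Signature dbl(Ops tf) {
    Placeholder<TInt32> x = tf.placeholder(TInt32.class);
    Add<TInt32> doubled = tf.math.add(x, x);
    return Signature.builder().input("x", x).output("dbl", doubled).build();
  }

  public static void main(String[] args) {
    try (ConcreteFunction dbl = ConcreteFunction.create(HelloTensorFlowSketch::dbl);
        TInt32 x = TInt32.scalarOf(10);
        TInt32 dblX = (TInt32) dbl.call(x)) {
      System.out.println(x.getInt() + " doubled is " + dblX.getInt());
    }
  }
}
```

Here `Signature.builder()` names the placeholder input `"x"` and the doubled output `"dbl"`, and `dbl.call(x)` feeds the tensor to that single input and returns the single output, which the patched example now casts directly to `TInt32`.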