feat: add Dataproc Serverless for Spark Batches API (#290) · googleapis/python-dataproc@f0ed26c · GitHub
[go: up one dir, main page]

Skip to content
This repository was archived by the owner on Nov 29, 2023. It is now read-only.

Commit f0ed26c

Browse files
feat: add Dataproc Serverless for Spark Batches API (#290)
* feat: add Dataproc Serverless for Spark Batches API

  Committer: @medb
  PiperOrigin-RevId: 402631995
  Source-Link: googleapis/googleapis@95af2e4
  Source-Link: https://github.com/googleapis/googleapis-gen/commit/0ee7abd9ecd2951e958303681a4b251a948107b6
  Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMGVlN2FiZDllY2QyOTUxZTk1ODMwMzY4MWE0YjI1MWE5NDgxMDdiNiJ9

* 🦉 Updates from OwlBot

  See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md

Co-authored-by: Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com>
1 parent 15a4471 commit f0ed26c

File tree

13 files changed

+321
-40
lines changed

13 files changed

+321
-40
lines changed

google/cloud/dataproc/__init__.py

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,7 @@
7171
from google.cloud.dataproc_v1.types.clusters import ClusterConfig
7272
from google.cloud.dataproc_v1.types.clusters import ClusterMetrics
7373
from google.cloud.dataproc_v1.types.clusters import ClusterStatus
74+
from google.cloud.dataproc_v1.types.clusters import ConfidentialInstanceConfig
7475
from google.cloud.dataproc_v1.types.clusters import CreateClusterRequest
7576
from google.cloud.dataproc_v1.types.clusters import DeleteClusterRequest
7677
from google.cloud.dataproc_v1.types.clusters import DiagnoseClusterRequest
@@ -122,9 +123,17 @@
122123
from google.cloud.dataproc_v1.types.jobs import SubmitJobRequest
123124
from google.cloud.dataproc_v1.types.jobs import UpdateJobRequest
124125
from google.cloud.dataproc_v1.types.jobs import YarnApplication
126+
from google.cloud.dataproc_v1.types.operations import BatchOperationMetadata
125127
from google.cloud.dataproc_v1.types.operations import ClusterOperationMetadata
126128
from google.cloud.dataproc_v1.types.operations import ClusterOperationStatus
129+
from google.cloud.dataproc_v1.types.shared import EnvironmentConfig
130+
from google.cloud.dataproc_v1.types.shared import ExecutionConfig
131+
from google.cloud.dataproc_v1.types.shared import PeripheralsConfig
132+
from google.cloud.dataproc_v1.types.shared import RuntimeConfig
133+
from google.cloud.dataproc_v1.types.shared import RuntimeInfo
134+
from google.cloud.dataproc_v1.types.shared import SparkHistoryServerConfig
127135
from google.cloud.dataproc_v1.types.shared import Component
136+
from google.cloud.dataproc_v1.types.shared import FailureAction
128137
from google.cloud.dataproc_v1.types.workflow_templates import ClusterOperation
129138
from google.cloud.dataproc_v1.types.workflow_templates import ClusterSelector
130139
from google.cloud.dataproc_v1.types.workflow_templates import (
@@ -186,6 +195,7 @@
186195
"ClusterConfig",
187196
"ClusterMetrics",
188197
"ClusterStatus",
198+
"ConfidentialInstanceConfig",
189199
"CreateClusterRequest",
190200
"DeleteClusterRequest",
191201
"DiagnoseClusterRequest",
@@ -237,9 +247,17 @@
237247
"SubmitJobRequest",
238248
"UpdateJobRequest",
239249
"YarnApplication",
250+
"BatchOperationMetadata",
240251
"ClusterOperationMetadata",
241252
"ClusterOperationStatus",
253+
"EnvironmentConfig",
254+
"ExecutionConfig",
255+
"PeripheralsConfig",
256+
"RuntimeConfig",
257+
"RuntimeInfo",
258+
"SparkHistoryServerConfig",
242259
"Component",
260+
"FailureAction",
243261
"ClusterOperation",
244262
"ClusterSelector",
245263
"CreateWorkflowTemplateRequest",

google/cloud/dataproc_v1/__init__.py

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,7 @@
3939
from .types.clusters import ClusterConfig
4040
from .types.clusters import ClusterMetrics
4141
from .types.clusters import ClusterStatus
42+
from .types.clusters import ConfidentialInstanceConfig
4243
from .types.clusters import CreateClusterRequest
4344
from .types.clusters import DeleteClusterRequest
4445
from .types.clusters import DiagnoseClusterRequest
@@ -90,9 +91,17 @@
9091
from .types.jobs import SubmitJobRequest
9192
from .types.jobs import UpdateJobRequest
9293
from .types.jobs import YarnApplication
94+
from .types.operations import BatchOperationMetadata
9395
from .types.operations import ClusterOperationMetadata
9496
from .types.operations import ClusterOperationStatus
97+
from .types.shared import EnvironmentConfig
98+
from .types.shared import ExecutionConfig
99+
from .types.shared import PeripheralsConfig
100+
from .types.shared import RuntimeConfig
101+
from .types.shared import RuntimeInfo
102+
from .types.shared import SparkHistoryServerConfig
95103
from .types.shared import Component
104+
from .types.shared import FailureAction
96105
from .types.workflow_templates import ClusterOperation
97106
from .types.workflow_templates import ClusterSelector
98107
from .types.workflow_templates import CreateWorkflowTemplateRequest
@@ -126,6 +135,7 @@
126135
"AutoscalingPolicyServiceClient",
127136
"BasicAutoscalingAlgorithm",
128137
"BasicYarnAutoscalingConfig",
138+
"BatchOperationMetadata",
129139
"CancelJobRequest",
130140
"Cluster",
131141
"ClusterConfig",
@@ -137,6 +147,7 @@
137147
"ClusterSelector",
138148
"ClusterStatus",
139149
"Component",
150+
"ConfidentialInstanceConfig",
140151
"CreateAutoscalingPolicyRequest",
141152
"CreateClusterRequest",
142153
"CreateWorkflowTemplateRequest",
@@ -149,6 +160,9 @@
149160
"DiskConfig",
150161
"EncryptionConfig",
151162
"EndpointConfig",
163+
"EnvironmentConfig",
164+
"ExecutionConfig",
165+
"FailureAction",
152166
"GceClusterConfig",
153167
"GetAutoscalingPolicyRequest",
154168
"GetClusterRequest",
@@ -187,15 +201,19 @@
187201
"NodeInitializationAction",
188202
"OrderedJob",
189203
"ParameterValidation",
204+
"PeripheralsConfig",
190205
"PigJob",
191206
"PrestoJob",
192207
"PySparkJob",
193208
"QueryList",
194209
"RegexValidation",
195210
"ReservationAffinity",
211+
"RuntimeConfig",
212+
"RuntimeInfo",
196213
"SecurityConfig",
197214
"ShieldedInstanceConfig",
198215
"SoftwareConfig",
216+
"SparkHistoryServerConfig",
199217
"SparkJob",
200218
"SparkRJob",
201219
"SparkSqlJob",

google/cloud/dataproc_v1/services/cluster_controller/async_client.py

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -220,7 +220,7 @@ async def create_cluster(
220220
An object representing a long-running operation.
221221
222222
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
223-
a cluster of Compute Engine instances.
223+
a Dataproc cluster
224224
225225
"""
226226
# Create or coerce a protobuf request object.
@@ -292,6 +292,9 @@ async def update_cluster(
292292
[Operation.metadata][google.longrunning.Operation.metadata] will
293293
be
294294
`ClusterOperationMetadata <https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata>`__.
295+
The cluster must be in a
296+
[``RUNNING``][google.cloud.dataproc.v1.ClusterStatus.State]
297+
state or an error is returned.
295298
296299
Args:
297300
request (:class:`google.cloud.dataproc_v1.types.UpdateClusterRequest`):
@@ -398,7 +401,7 @@ async def update_cluster(
398401
An object representing a long-running operation.
399402
400403
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
401-
a cluster of Compute Engine instances.
404+
a Dataproc cluster
402405
403406
"""
404407
# Create or coerce a protobuf request object.
@@ -483,7 +486,7 @@ async def stop_cluster(
483486
An object representing a long-running operation.
484487
485488
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
486-
a cluster of Compute Engine instances.
489+
a Dataproc cluster
487490
488491
"""
489492
# Create or coerce a protobuf request object.
@@ -535,7 +538,7 @@ async def start_cluster(
535538
An object representing a long-running operation.
536539
537540
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
538-
a cluster of Compute Engine instances.
541+
a Dataproc cluster
539542
540543
"""
541544
# Create or coerce a protobuf request object.
@@ -727,7 +730,7 @@ async def get_cluster(
727730
google.cloud.dataproc_v1.types.Cluster:
728731
Describes the identifying
729732
information, config, and status of a
730-
cluster of Compute Engine instances.
733+
Dataproc cluster
731734
732735
"""
733736
# Create or coerce a protobuf request object.

google/cloud/dataproc_v1/services/cluster_controller/client.py

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -422,7 +422,7 @@ def create_cluster(
422422
An object representing a long-running operation.
423423
424424
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
425-
a cluster of Compute Engine instances.
425+
a Dataproc cluster
426426
427427
"""
428428
# Create or coerce a protobuf request object.
@@ -485,6 +485,9 @@ def update_cluster(
485485
[Operation.metadata][google.longrunning.Operation.metadata] will
486486
be
487487
`ClusterOperationMetadata <https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata>`__.
488+
The cluster must be in a
489+
[``RUNNING``][google.cloud.dataproc.v1.ClusterStatus.State]
490+
state or an error is returned.
488491
489492
Args:
490493
request (Union[google.cloud.dataproc_v1.types.UpdateClusterRequest, dict]):
@@ -591,7 +594,7 @@ def update_cluster(
591594
An object representing a long-running operation.
592595
593596
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
594-
a cluster of Compute Engine instances.
597+
a Dataproc cluster
595598
596599
"""
597600
# Create or coerce a protobuf request object.
@@ -667,7 +670,7 @@ def stop_cluster(
667670
An object representing a long-running operation.
668671
669672
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
670-
a cluster of Compute Engine instances.
673+
a Dataproc cluster
671674
672675
"""
673676
# Create or coerce a protobuf request object.
@@ -720,7 +723,7 @@ def start_cluster(
720723
An object representing a long-running operation.
721724
722725
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
723-
a cluster of Compute Engine instances.
726+
a Dataproc cluster
724727
725728
"""
726729
# Create or coerce a protobuf request object.
@@ -904,7 +907,7 @@ def get_cluster(
904907
google.cloud.dataproc_v1.types.Cluster:
905908
Describes the identifying
906909
information, config, and status of a
907-
cluster of Compute Engine instances.
910+
Dataproc cluster
908911
909912
"""
910913
# Create or coerce a protobuf request object.

google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -282,6 +282,9 @@ def update_cluster(
282282
[Operation.metadata][google.longrunning.Operation.metadata] will
283283
be
284284
`ClusterOperationMetadata <https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata>`__.
285+
The cluster must be in a
286+
[``RUNNING``][google.cloud.dataproc.v1.ClusterStatus.State]
287+
state or an error is returned.
285288
286289
Returns:
287290
Callable[[~.UpdateClusterRequest],

google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -287,6 +287,9 @@ def update_cluster(
287287
[Operation.metadata][google.longrunning.Operation.metadata] will
288288
be
289289
`ClusterOperationMetadata <https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata>`__.
290+
The cluster must be in a
291+
[``RUNNING``][google.cloud.dataproc.v1.ClusterStatus.State]
292+
state or an error is returned.
290293
291294
Returns:
292295
Callable[[~.UpdateClusterRequest],

google/cloud/dataproc_v1/types/__init__.py

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@
3232
ClusterConfig,
3333
ClusterMetrics,
3434
ClusterStatus,
35+
ConfidentialInstanceConfig,
3536
CreateClusterRequest,
3637
DeleteClusterRequest,
3738
DiagnoseClusterRequest,
@@ -87,9 +88,20 @@
8788
YarnApplication,
8889
)
8990
from .operations import (
91+
BatchOperationMetadata,
9092
ClusterOperationMetadata,
9193
ClusterOperationStatus,
9294
)
95+
from .shared import (
96+
EnvironmentConfig,
97+
ExecutionConfig,
98+
PeripheralsConfig,
99+
RuntimeConfig,
100+
RuntimeInfo,
101+
SparkHistoryServerConfig,
102+
Component,
103+
FailureAction,
104+
)
93105
from .workflow_templates import (
94106
ClusterOperation,
95107
ClusterSelector,
@@ -131,6 +143,7 @@
131143
"ClusterConfig",
132144
"ClusterMetrics",
133145
"ClusterStatus",
146+
"ConfidentialInstanceConfig",
134147
"CreateClusterRequest",
135148
"DeleteClusterRequest",
136149
"DiagnoseClusterRequest",
@@ -182,9 +195,17 @@
182195
"SubmitJobRequest",
183196
"UpdateJobRequest",
184197
"YarnApplication",
198+
"BatchOperationMetadata",
185199
"ClusterOperationMetadata",
186200
"ClusterOperationStatus",
201+
"EnvironmentConfig",
202+
"ExecutionConfig",
203+
"PeripheralsConfig",
204+
"RuntimeConfig",
205+
"RuntimeInfo",
206+
"SparkHistoryServerConfig",
187207
"Component",
208+
"FailureAction",
188209
"ClusterOperation",
189210
"ClusterSelector",
190211
"CreateWorkflowTemplateRequest",

google/cloud/dataproc_v1/types/autoscaling_policies.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,15 @@ class AutoscalingPolicy(proto.Message):
6767
secondary_worker_config (google.cloud.dataproc_v1.types.InstanceGroupAutoscalingPolicyConfig):
6868
Optional. Describes how the autoscaler will
6969
operate for secondary workers.
70+
labels (Sequence[google.cloud.dataproc_v1.types.AutoscalingPolicy.LabelsEntry]):
71+
Optional. The labels to associate with this autoscaling
72+
policy. Label **keys** must contain 1 to 63 characters, and
73+
must conform to `RFC
74+
1035 <https://www.ietf.org/rfc/rfc1035.txt>`__. Label
75+
**values** may be empty, but, if present, must contain 1 to
76+
63 characters, and must conform to `RFC
77+
1035 <https://www.ietf.org/rfc/rfc1035.txt>`__. No more than
78+
32 labels can be associated with an autoscaling policy.
7079
"""
7180

7281
id = proto.Field(proto.STRING, number=1,)
@@ -80,6 +89,7 @@ class AutoscalingPolicy(proto.Message):
8089
secondary_worker_config = proto.Field(
8190
proto.MESSAGE, number=5, message="InstanceGroupAutoscalingPolicyConfig",
8291
)
92+
labels = proto.MapField(proto.STRING, proto.STRING, number=6,)
8393

8494

8595
class BasicAutoscalingAlgorithm(proto.Message):

0 commit comments

Comments (0)