@@ -22,10 +22,12 @@
 import signal
 import sys
 
-from google.cloud import credentials
-from google.cloud.grpc.speech.v1beta1 import cloud_speech_pb2 as cloud_speech
+
+import google.auth
+import google.auth.transport.grpc
+import google.auth.transport.requests
+from google.cloud.grpc.speech.v1beta1 import cloud_speech_pb2
 from google.rpc import code_pb2
-from grpc.beta import implementations
 from grpc.framework.interfaces.face import face
 import pyaudio
 from six.moves import queue
@@ -43,25 +45,16 @@
 
 
 def make_channel(host, port):
-    """Creates an SSL channel with auth credentials from the environment."""
-    # In order to make an https call, use an ssl channel with defaults
-    ssl_channel = implementations.ssl_channel_credentials(None, None, None)
-
+    """Creates a secure channel with auth credentials from the environment."""
     # Grab application default credentials from the environment
-    creds = credentials.get_credentials().create_scoped([SPEECH_SCOPE])
-    # Add a plugin to inject the creds into the header
-    auth_header = (
-        'Authorization',
-        'Bearer ' + creds.get_access_token().access_token)
-    auth_plugin = implementations.metadata_call_credentials(
-        lambda _, cb: cb([auth_header], None),
-        name='google_creds')
+    credentials, _ = google.auth.default(scopes=[SPEECH_SCOPE])
 
-    # compose the two together for both ssl and google auth
-    composite_channel = implementations.composite_channel_credentials(
-        ssl_channel, auth_plugin)
+    # Create a secure channel using the credentials.
+    http_request = google.auth.transport.requests.Request()
+    target = '{}:{}'.format(host, port)
 
-    return implementations.secure_channel(host, port, composite_channel)
+    return google.auth.transport.grpc.secure_authorized_channel(
+        credentials, http_request, target)
 
 
 def _audio_data_generator(buff):
@@ -142,7 +135,7 @@ def request_stream(data_stream, rate, interim_results=True):
     """
     # The initial request must contain metadata about the stream, so the
    # server knows how to interpret it.
-    recognition_config = cloud_speech.RecognitionConfig(
+    recognition_config = cloud_speech_pb2.RecognitionConfig(
         # There are a bunch of config options you can specify. See
         # https://goo.gl/KPZn97 for the full list.
         encoding='LINEAR16',  # raw 16-bit signed LE samples
@@ -151,17 +144,17 @@ def request_stream(data_stream, rate, interim_results=True):
         # for a list of supported languages.
         language_code='en-US',  # a BCP-47 language tag
     )
-    streaming_config = cloud_speech.StreamingRecognitionConfig(
+    streaming_config = cloud_speech_pb2.StreamingRecognitionConfig(
         interim_results=interim_results,
         config=recognition_config,
     )
 
-    yield cloud_speech.StreamingRecognizeRequest(
+    yield cloud_speech_pb2.StreamingRecognizeRequest(
         streaming_config=streaming_config)
 
     for data in data_stream:
         # Subsequent requests can all just have the content
-        yield cloud_speech.StreamingRecognizeRequest(audio_content=data)
+        yield cloud_speech_pb2.StreamingRecognizeRequest(audio_content=data)
 
 
 def listen_print_loop(recognize_stream):
@@ -212,28 +205,29 @@ def listen_print_loop(recognize_stream):
 
 
 def main():
-    with cloud_speech.beta_create_Speech_stub(
-            make_channel('speech.googleapis.com', 443)) as service:
-        # For streaming audio from the microphone, there are three threads.
-        # First, a thread that collects audio data as it comes in
-        with record_audio(RATE, CHUNK) as buffered_audio_data:
-            # Second, a thread that sends requests with that data
-            requests = request_stream(buffered_audio_data, RATE)
-            # Third, a thread that listens for transcription responses
-            recognize_stream = service.StreamingRecognize(
-                requests, DEADLINE_SECS)
-
-            # Exit things cleanly on interrupt
-            signal.signal(signal.SIGINT, lambda *_: recognize_stream.cancel())
-
-            # Now, put the transcription responses to use.
-            try:
-                listen_print_loop(recognize_stream)
-
-                recognize_stream.cancel()
-            except face.CancellationError:
-                # This happens because of the interrupt handler
-                pass
+    service = cloud_speech_pb2.SpeechStub(
+        make_channel('speech.googleapis.com', 443))
+
+    # For streaming audio from the microphone, there are three threads.
+    # First, a thread that collects audio data as it comes in
+    with record_audio(RATE, CHUNK) as buffered_audio_data:
+        # Second, a thread that sends requests with that data
+        requests = request_stream(buffered_audio_data, RATE)
+        # Third, a thread that listens for transcription responses
+        recognize_stream = service.StreamingRecognize(
+            requests, DEADLINE_SECS)
+
+        # Exit things cleanly on interrupt
+        signal.signal(signal.SIGINT, lambda *_: recognize_stream.cancel())
+
+        # Now, put the transcription responses to use.
+        try:
+            listen_print_loop(recognize_stream)
+
+            recognize_stream.cancel()
+        except face.CancellationError:
+            # This happens because of the interrupt handler
+            pass
 
 
 if __name__ == '__main__':
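
For reference, the channel-creation pattern this change introduces can be exercised on its own. The sketch below is not part of the diff: it assumes the google-auth and grpcio packages are installed, and it uses the standard Cloud Platform OAuth scope as a stand-in for the SPEECH_SCOPE constant defined earlier in the sample. It simply mirrors the new make_channel() body in isolation.

    import google.auth
    import google.auth.transport.grpc
    import google.auth.transport.requests

    # Assumed scope for illustration; the sample defines its own SPEECH_SCOPE.
    SPEECH_SCOPE = 'https://www.googleapis.com/auth/cloud-platform'

    # Application default credentials from the environment, scoped for Speech.
    credentials, _ = google.auth.default(scopes=[SPEECH_SCOPE])

    # HTTP request object used to refresh the credentials when they expire.
    http_request = google.auth.transport.requests.Request()

    # Secure gRPC channel that attaches the credentials to every call.
    channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, 'speech.googleapis.com:443')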