@@ -31,22 +31,9 @@
 )
 from ...models import ConnectionCredentials, RevisionItem
 from ...models.job_item import JobItem
-from ...models import ConnectionCredentials
 
-io_types = (io.BytesIO, io.BufferedReader)
-
-from pathlib import Path
-from typing import (
-    List,
-    Mapping,
-    Optional,
-    Sequence,
-    Tuple,
-    TYPE_CHECKING,
-    Union,
-)
-
-io_types = (io.BytesIO, io.BufferedReader)
+io_types_r = (io.BytesIO, io.BufferedReader)
+io_types_w = (io.BytesIO, io.BufferedWriter)
 
 # The maximum size of a file that can be published in a single request is 64MB
 FILESIZE_LIMIT = 1024 * 1024 * 64  # 64MB
@@ -61,8 +48,10 @@
     from .schedules_endpoint import AddResponse
 
 FilePath = Union[str, os.PathLike]
-FileObject = Union[io.BufferedReader, io.BytesIO]
-PathOrFile = Union[FilePath, FileObject]
+FileObjectR = Union[io.BufferedReader, io.BytesIO]
+FileObjectW = Union[io.BufferedWriter, io.BytesIO]
+PathOrFileR = Union[FilePath, FileObjectR]
+PathOrFileW = Union[FilePath, FileObjectW]
 
 
 class Datasources(QuerysetEndpoint):
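
For reference, the split constants line up with what the standard library hands back: open(..., "rb") yields an io.BufferedReader (readable, the publish side), open(..., "wb") yields an io.BufferedWriter (writable, the download side), and io.BytesIO satisfies both. A quick stand-alone illustration (the filename is a placeholder):

import io

io_types_r = (io.BytesIO, io.BufferedReader)
io_types_w = (io.BytesIO, io.BufferedWriter)

with open("example.tdsx", "wb") as f:
    assert isinstance(f, io_types_w)   # writable: a valid download target

with open("example.tdsx", "rb") as f:
    assert isinstance(f, io_types_r)   # readable: a valid publish source

buf = io.BytesIO()
assert isinstance(buf, io_types_r) and isinstance(buf, io_types_w)  # usable either way
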
@@ -80,7 +69,7 @@ def baseurl(self) -> str:
 
     # Get all datasources
     @api(version="2.0")
-    def get(self, req_options: RequestOptions = None) -> Tuple[List[DatasourceItem], PaginationItem]:
+    def get(self, req_options: Optional[RequestOptions] = None) -> Tuple[List[DatasourceItem], PaginationItem]:
         logger.info("Querying all datasources on site")
         url = self.baseurl
         server_response = self.get_request(url, req_options)
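
The Optional annotation matches how the method is actually called; both call forms below work. A rough usage sketch with placeholder server address and credentials:

import tableauserverclient as TSC

tableau_auth = TSC.TableauAuth("USERNAME", "PASSWORD")
server = TSC.Server("https://tableau.example.com", use_server_version=True)

with server.auth.sign_in(tableau_auth):
    # No request options at all...
    all_datasources, pagination_item = server.datasources.get()
    # ...or with explicit paging options.
    first_page, pagination_item = server.datasources.get(TSC.RequestOptions(pagesize=100))
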
@@ -135,39 +124,11 @@ def delete(self, datasource_id: str) -> None:
     def download(
         self,
         datasource_id: str,
-        filepath: FilePath = None,
+        filepath: Optional[PathOrFileW] = None,
         include_extract: bool = True,
         no_extract: Optional[bool] = None,
     ) -> str:
-        if not datasource_id:
-            error = "Datasource ID undefined."
-            raise ValueError(error)
-        url = "{0}/{1}/content".format(self.baseurl, datasource_id)
-
-        if no_extract is False or no_extract is True:
-            import warnings
-
-            warnings.warn(
-                "no_extract is deprecated, use include_extract instead.",
-                DeprecationWarning,
-            )
-            include_extract = not no_extract
-
-        if not include_extract:
-            url += "?includeExtract=False"
-
-        with closing(self.get_request(url, parameters={"stream": True})) as server_response:
-            _, params = cgi.parse_header(server_response.headers["Content-Disposition"])
-            filename = to_filename(os.path.basename(params["filename"]))
-
-            download_path = make_download_path(filepath, filename)
-
-            with open(download_path, "wb") as f:
-                for chunk in server_response.iter_content(1024):  # 1KB
-                    f.write(chunk)
-
-        logger.info("Downloaded datasource to {0} (ID: {1})".format(download_path, datasource_id))
-        return os.path.abspath(download_path)
+        return self.download_revision(datasource_id, None, filepath, include_extract, no_extract)
 
     # Update datasource
     @api(version="2.0")
@@ -232,10 +193,10 @@ def delete_extract(self, datasource_item: DatasourceItem) -> None:
     def publish(
         self,
         datasource_item: DatasourceItem,
-        file: PathOrFile,
+        file: PathOrFileR,
         mode: str,
-        connection_credentials: ConnectionCredentials = None,
-        connections: Sequence[ConnectionItem] = None,
+        connection_credentials: Optional[ConnectionCredentials] = None,
+        connections: Optional[Sequence[ConnectionItem]] = None,
         as_job: bool = False,
     ) -> Union[DatasourceItem, JobItem]:
 
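
With file typed as PathOrFileR, both a path and a readable binary object are accepted; as the next hunk shows, a file object requires the DatasourceItem to carry a name. A rough sketch, again assuming the signed-in server from earlier and placeholder project and file names:

import tableauserverclient as TSC

# Publish from a file path; the datasource name can come from the filename.
item = TSC.DatasourceItem(project_id="PROJECT_ID")
item = server.datasources.publish(item, "data/sales.hyper", "CreateNew")

# Publish from a readable file object; the item must be named explicitly.
named = TSC.DatasourceItem(project_id="PROJECT_ID", name="Sales")
with open("data/sales.hyper", "rb") as f:
    named = server.datasources.publish(named, f, "Overwrite")
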
@@ -255,8 +216,7 @@ def publish(
                 error = "Only {} files can be published as datasources.".format(", ".join(ALLOWED_FILE_EXTENSIONS))
                 raise ValueError(error)
 
-        elif isinstance(file, io_types):
-
+        elif isinstance(file, io_types_r):
             if not datasource_item.name:
                 error = "Datasource item must have a name when passing a file object"
                 raise ValueError(error)
@@ -302,7 +262,7 @@ def publish(
             if isinstance(file, (Path, str)):
                 with open(file, "rb") as f:
                     file_contents = f.read()
-            elif isinstance(file, io_types):
+            elif isinstance(file, io_types_r):
                 file_contents = file.read()
             else:
                 raise TypeError("file should be a filepath or file object.")
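
After this change, only paths and readable binary objects make it past the check; anything else (a text-mode handle, say) falls through to the TypeError. A tiny stand-alone illustration of the same branch structure (io_types_r copied from the module constants above):

import io
from pathlib import Path

io_types_r = (io.BytesIO, io.BufferedReader)

def read_contents(file):
    # Mirrors the if/elif/else above, outside the endpoint class.
    if isinstance(file, (Path, str)):
        with open(file, "rb") as f:
            return f.read()
    elif isinstance(file, io_types_r):
        return file.read()
    raise TypeError("file should be a filepath or file object.")

print(read_contents(io.BytesIO(b"hello")))  # b'hello'
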
@@ -433,14 +393,17 @@ def download_revision(
         self,
         datasource_id: str,
         revision_number: str,
-        filepath: Optional[PathOrFile] = None,
+        filepath: Optional[PathOrFileW] = None,
         include_extract: bool = True,
         no_extract: Optional[bool] = None,
-    ) -> str:
+    ) -> PathOrFileW:
         if not datasource_id:
             error = "Datasource ID undefined."
             raise ValueError(error)
-        url = "{0}/{1}/revisions/{2}/content".format(self.baseurl, datasource_id, revision_number)
+        if revision_number is None:
+            url = "{0}/{1}/content".format(self.baseurl, datasource_id)
+        else:
+            url = "{0}/{1}/revisions/{2}/content".format(self.baseurl, datasource_id, revision_number)
         if no_extract is False or no_extract is True:
             import warnings
 
@@ -455,18 +418,22 @@ def download_revision(
 
         with closing(self.get_request(url, parameters={"stream": True})) as server_response:
             _, params = cgi.parse_header(server_response.headers["Content-Disposition"])
-            filename = to_filename(os.path.basename(params["filename"]))
-
-            download_path = make_download_path(filepath, filename)
-
-            with open(download_path, "wb") as f:
+            if isinstance(filepath, io_types_w):
                 for chunk in server_response.iter_content(1024):  # 1KB
-                    f.write(chunk)
+                    filepath.write(chunk)
+                return_path = filepath
+            else:
+                filename = to_filename(os.path.basename(params["filename"]))
+                download_path = make_download_path(filepath, filename)
+                with open(download_path, "wb") as f:
+                    for chunk in server_response.iter_content(1024):  # 1KB
+                        f.write(chunk)
+                return_path = os.path.abspath(download_path)
 
         logger.info(
-            "Downloaded datasource revision {0} to {1} (ID: {2})".format(revision_number, download_path, datasource_id)
+            "Downloaded datasource revision {0} to {1} (ID: {2})".format(revision_number, return_path, datasource_id)
         )
-        return os.path.abspath(download_path)
+        return return_path
 
     @api(version="2.3")
     def delete_revision(self, datasource_id: str, revision_number: str) -> None:
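
With the writable-object branch above, a specific revision can be streamed into memory rather than onto disk, and passing None as the revision number hits the plain content endpoint, which is exactly what download() now does internally. A rough sketch, assuming the signed-in server from the earlier example:

import io

# Revision 2 of the datasource, written into the buffer that is returned.
buffer = io.BytesIO()
server.datasources.download_revision("DATASOURCE_ID", "2", filepath=buffer)
revision_bytes = buffer.getvalue()

# The latest content, saved to disk as before.
latest_path = server.datasources.download_revision("DATASOURCE_ID", None, filepath="/tmp")
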