renamed output_dir to cache_dir · nipype/pydra@ac19f96 · GitHub

Commit ac19f96

renamed output_dir to cache_dir

1 parent ba08ee9 · commit ac19f96

16 files changed, with 80 additions and 80 deletions
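
Since the commit renames the user-facing output_dir attribute to cache_dir, downstream code needs a matching one-line change. A minimal before/after sketch (the Submitter import path and the my_task placeholder are assumptions drawn from the tutorial diffs below, not part of this commit):

    from pydra.engine.submitter import Submitter  # import path is an assumption

    with Submitter(worker="cf") as sub:
        result = sub(my_task)  # my_task stands in for any pydra task

    # Before this commit:
    #   print(result.output_dir)
    # After it, the same path is exposed as:
    print(result.cache_dir)  # directory where this run's outputs are cached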

docs/source/tutorial/2-advanced-execution.ipynb

Lines changed: 5 additions & 5 deletions
@@ -238,7 +238,7 @@
     "    result2 = submitter(mrgrid_varying_vox2)\n",
     "\n",
     "# Check that the output directory is the same for both runs\n",
-    "assert result2.output_dir == result1.output_dir\n",
+    "assert result2.cache_dir == result1.cache_dir\n",
     "\n",
     "# Change the voxel sizes to resample the NIfTI files to for one of the files\n",
     "mrgrid_varying_vox2.voxel[2] = [0.25]\n",
@@ -248,7 +248,7 @@
     "    result3 = submitter(mrgrid_varying_vox2)\n",
     "\n",
     "# The output directory will be different as the inputs are now different\n",
-    "assert result3.output_dir != result1.output_dir"
+    "assert result3.cache_dir != result1.cache_dir"
     ]
    },
    {
@@ -285,7 +285,7 @@
     "with submitter:\n",
     "    result4 = submitter(mrgrid_varying_vox3)\n",
     "\n",
-    "assert result4.output_dir == result1.output_dir\n",
+    "assert result4.cache_dir == result1.cache_dir\n",
     "\n",
     "# Replace the first NIfTI file with a new file\n",
     "nifti_files[0] = Nifti1.sample(nifti_dir, seed=100)\n",
@@ -302,7 +302,7 @@
     "    result4 = submitter(mrgrid_varying_vox4)\n",
     "\n",
     "# The cache directory for the new run is different\n",
-    "assert result4.output_dir != result1.output_dir"
+    "assert result4.cache_dir != result1.cache_dir"
     ]
    },
    {
@@ -380,7 +380,7 @@
     "\n",
     "def notify_task_completion(task: Job, result: Result):\n",
     "    # Print a message to the terminal\n",
-    "    print(f\"Job completed! Results are stored in {str(task.output_dir)!r}\")\n",
+    "    print(f\"Job completed! Results are stored in {str(task.cache_dir)!r}\")\n",
     "\n",
     "    # Platform-specific notifications\n",
     "    if platform.system() == \"Darwin\":  # macOS\n",

docs/source/tutorial/3-troubleshooting.ipynb

Lines changed: 1 addition & 1 deletion
@@ -205,7 +205,7 @@
     "with Submitter(worker=\"cf\") as sub:\n",
     "    result = sub(wf)\n",
     "\n",
-    "print(f\"Workflow completed successfully, results saved in: {result.output_dir}\")"
+    "print(f\"Workflow completed successfully, results saved in: {result.cache_dir}\")"
     ]
    },
    {

pydra/compose/base/task.py

Lines changed: 2 additions & 2 deletions
@@ -70,7 +70,7 @@ def _from_task(cls, job: "Job[TaskType]") -> Self:
             default = output.default
             defaults[output.name] = default
         outputs = cls(**defaults)
-        outputs._output_dir = job.output_dir
+        outputs._output_dir = job.cache_dir
         return outputs
 
     @property
@@ -256,7 +256,7 @@ def __call__(
                 f"Job {self} failed @ {time_of_crash} with the "
                 f"following errors:\n{error_message}\n"
                 "To inspect, please load the pickled job object from here: "
-                f"{result.output_dir}/_job.pklz"
+                f"{result.cache_dir}/_job.pklz"
             )
         return result.outputs
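
The updated error message above points users at the pickled job inside the run's cache directory. A minimal sketch of inspecting such a file (assumptions: the _job.pklz file is a plain cloudpickle dump, as pydra serialises jobs with cloudpickle, and the path shown is hypothetical):

    from pathlib import Path
    import cloudpickle

    job_pkl = Path("/hypothetical/cache_dir/_job.pklz")  # hypothetical path
    job = cloudpickle.loads(job_pkl.read_bytes())
    print(job.task)  # the task definition that failed, for post-mortem debugging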

pydra/compose/shell/task.py

Lines changed: 3 additions & 3 deletions
@@ -90,7 +90,7 @@ def _from_task(cls, job: "Job[Task]") -> ty.Self:
             ):
                 resolved_value = job.inputs[fld.name]
             elif is_set(fld.default):
-                resolved_value = cls._resolve_default_value(fld, job.output_dir)
+                resolved_value = cls._resolve_default_value(fld, job.cache_dir)
             else:
                 resolved_value = cls._resolve_value(fld, job)
             # Set the resolved value
@@ -174,7 +174,7 @@ def _resolve_value(
             return template_update_single(
                 fld,
                 task=job.task,
-                output_dir=job.output_dir,
+                output_dir=job.cache_dir,
                 spec_type="output",
             )
         assert fld.callable, (
@@ -192,7 +192,7 @@ def _resolve_value(
             if argnm == "field":
                 call_args_val[argnm] = fld
             elif argnm == "output_dir":
-                call_args_val[argnm] = job.output_dir
+                call_args_val[argnm] = job.cache_dir
             elif argnm == "executable":
                 call_args_val[argnm] = job.task.executable
             elif argnm == "inputs":

pydra/compose/tests/test_workflow_run.py

Lines changed: 14 additions & 14 deletions
@@ -2251,7 +2251,7 @@ def Worky2(x, y):
     assert 2 == results2.outputs.out
 
     # checking if both output_dirs are created
-    assert results1.output_dir != results2.output_dir
+    assert results1.cache_dir != results2.cache_dir
 
 
 @pytest.mark.flaky(reruns=3)
@@ -2311,7 +2311,7 @@ def Worky2(x, y):
     assert t2 < max(1, t1 - 1)
 
     # checking if the second worky didn't run again
-    assert results1.output_dir != results2.output_dir
+    assert results1.cache_dir != results2.cache_dir
 
 
 @pytest.mark.flaky(reruns=3)
@@ -2371,7 +2371,7 @@ def Worky2(x, y):
     assert t2 < max(1, t1 - 1)
 
     # both worky output_dirs should be created
-    assert results1.output_dir != results2.output_dir
+    assert results1.cache_dir != results2.cache_dir
 
 
 @pytest.mark.flaky(reruns=3)
@@ -2427,7 +2427,7 @@ def Worky2(x, y):
     assert t2 < max(1, t1 - 1)
 
     # both worky output_dirs should be created
-    assert results1.output_dir != results2.output_dir
+    assert results1.cache_dir != results2.cache_dir
 
 
 @pytest.mark.flaky(reruns=3)
@@ -2484,7 +2484,7 @@ def Worky2(x, y):
     assert t2 > 2
 
     # checking if the second worky didn't run again
-    assert results1.output_dir != results2.output_dir
+    assert results1.cache_dir != results2.cache_dir
 
 
 @pytest.mark.flaky(reruns=3)
@@ -2538,7 +2538,7 @@ def Worky2(x, y):
     assert 8 == results2.outputs.out
 
     # checking if the second worky runs again
-    assert results1.output_dir != results2.output_dir
+    assert results1.cache_dir != results2.cache_dir
 
     # everything has to be recomputed
     assert len(list(Path(cache_root1).glob("python-*"))) == 2
@@ -2605,7 +2605,7 @@ def Worky2(x, y):
     assert 8 == results2.outputs.out
 
     # checking if the second worky runs again
-    assert results1.output_dir != results2.output_dir
+    assert results1.cache_dir != results2.cache_dir
 
     # for win and dask/slurm the time for dir creation etc. might take much longer
     if not sys.platform.startswith("win") and worker == "cf":
@@ -2676,7 +2676,7 @@ def Worky2(x, y):
 
     assert 8 == results2.outputs.out
 
-    assert results1.output_dir != results2.output_dir
+    assert results1.cache_dir != results2.cache_dir
     # the second task should be recomputed
     assert len(list(Path(cache_root1).glob("python-*"))) == 2
     assert len(list(Path(cache_root2).glob("python-*"))) == 1
@@ -2734,7 +2734,7 @@ def Worky2(x, y=None):
     assert 12 == results2.outputs.out
 
     # checking if the second worky runs again, but runs only one task
-    assert results1.output_dir != results2.output_dir
+    assert results1.cache_dir != results2.cache_dir
     # the second worky should rerun one task
     assert len(list(Path(cache_root1).glob("python-*"))) == 2
     assert len(list(Path(cache_root2).glob("python-*"))) == 1
@@ -2785,7 +2785,7 @@ def Worky2(x, y=None):
     assert 12 == results2.outputs.out
 
     # checking if the second worky runs again, but runs only one task
-    assert results1.output_dir != results2.output_dir
+    assert results1.cache_dir != results2.cache_dir
     # the second worky should have only one task run
     assert len(list(Path(cache_root1).glob("python-*"))) == 2
     assert len(list(Path(cache_root2).glob("python-*"))) == 1
@@ -2851,7 +2851,7 @@ def Worky2(x, y):
     # checking if the second worky didn't run again
     # checking all directories
 
-    assert results1.output_dir == results2.output_dir
+    assert results1.cache_dir == results2.cache_dir
 
 
 @pytest.mark.flaky(reruns=3)
@@ -3083,7 +3083,7 @@ def Worky2(x, y):
     assert t2 > 2
 
     # checking if both worky run
-    assert results1.output_dir != results2.output_dir
+    assert results1.cache_dir != results2.cache_dir
 
 
 @pytest.mark.flaky(reruns=3)
@@ -3133,7 +3133,7 @@ def Worky2(x, y):
     assert 8 == results2.outputs.out
 
     # checking if both dir exists
-    assert results1.output_dir != results2.output_dir
+    assert results1.cache_dir != results2.cache_dir
 
     # the second worky should have only one task run
     assert len(list(Path(cache_root1).glob("python-*"))) == 2
@@ -3449,7 +3449,7 @@ def Worky1(x, y):
     assert 602 == results1.outputs.out[1]
 
     # checking output_dir after the first run
-    assert results1.output_dir.exists()
+    assert results1.cache_dir.exists()
 
     # saving the content of the cache dir after the first run
     cache_root_content = os.listdir(results1.job.cache_root)

pydra/compose/workflow.py

Lines changed: 2 additions & 2 deletions
@@ -320,7 +320,7 @@ def _from_task(cls, job: "Job[WorkflowTask]") -> ty.Self:
                     f"Job {node.name!r} failed @ {time_of_crash} running "
                     f"{node._task} with the following errors:\n{error_message}"
                     "\nTo inspect, please load the pickled job object from here: "
-                    f"{result.output_dir}/_job.pklz"
+                    f"{result.cache_dir}/_job.pklz"
                 )
             raise RuntimeError(
                 f"Workflow {job!r} failed with errors:\n\n" + "\n\n".join(errors)
@@ -338,7 +338,7 @@ def _from_task(cls, job: "Job[WorkflowTask]") -> ty.Self:
         # Set the values in the outputs object
         outputs = super()._from_task(job)
         outputs = attrs.evolve(outputs, **values)
-        outputs._output_dir = job.output_dir
+        outputs._output_dir = job.cache_dir
         return outputs
 
pydra/engine/job.py

Lines changed: 22 additions & 22 deletions
@@ -204,7 +204,7 @@ def checksum(self):
 
     @property
     def lockfile(self):
-        return self.output_dir.with_suffix(".lock")
+        return self.cache_dir.with_suffix(".lock")
 
     @property
     def uid(self):
@@ -225,7 +225,7 @@ def can_resume(self):
         return self._can_resume
 
     @property
-    def output_dir(self):
+    def cache_dir(self):
        """Get the filesystem path where outputs will be written."""
         return self.cache_root / self.checksum
 
@@ -258,7 +258,7 @@ def inputs(self) -> dict[str, ty.Any]:
             if value and TypeParser.contains_type(FileSet, fld.type):
                 copied_value = copy_nested_files(
                     value=value,
-                    dest_dir=self.output_dir,
+                    dest_dir=self.cache_dir,
                     mode=fld.copy_mode,
                     collation=fld.copy_collation,
                     supported_modes=self.SUPPORTED_COPY_MODES,
@@ -267,7 +267,7 @@ def inputs(self) -> dict[str, ty.Any]:
                 map_copyfiles[name] = copied_value
         self._inputs.update(
             template_update(
-                self.task, output_dir=self.output_dir, map_copyfiles=map_copyfiles
+                self.task, output_dir=self.cache_dir, map_copyfiles=map_copyfiles
             )
         )
         return self._inputs
@@ -285,11 +285,11 @@ def _populate_filesystem(self):
         # and the lockfile has to be removed
         with open(self.cache_root / f"{self.uid}_info.json", "w") as jsonfile:
             json.dump({"checksum": self.checksum}, jsonfile)
-        if not self.can_resume and self.output_dir.exists():
-            shutil.rmtree(self.output_dir)
-        self.output_dir.mkdir(parents=False, exist_ok=self.can_resume)
+        if not self.can_resume and self.cache_dir.exists():
+            shutil.rmtree(self.cache_dir)
+        self.cache_dir.mkdir(parents=False, exist_ok=self.can_resume)
         # Save job pkl into the output directory for future reference
-        save(self.output_dir, job=self)
+        save(self.cache_dir, job=self)
 
     def run(self, rerun: bool = False):
         """Prepare the job working directory, execute the task, and save the
@@ -316,16 +316,16 @@
             return result
         cwd = os.getcwd()
         self._populate_filesystem()
-        os.chdir(self.output_dir)
+        os.chdir(self.cache_dir)
         result = Result(
             outputs=None,
             runtime=None,
             errored=False,
-            output_dir=self.output_dir,
+            output_dir=self.cache_dir,
             task=self.task,
         )
         self.hooks.pre_run_task(self)
-        self.audit.start_audit(odir=self.output_dir)
+        self.audit.start_audit(odir=self.cache_dir)
         if self.audit.audit_check(AuditFlag.PROV):
             self.audit.audit_task(job=self)
         try:
@@ -335,13 +335,13 @@
         except Exception:
             etype, eval, etr = sys.exc_info()
             traceback = format_exception(etype, eval, etr)
-            record_error(self.output_dir, error=traceback)
+            record_error(self.cache_dir, error=traceback)
             result.errored = True
             raise
         finally:
             self.hooks.post_run_task(self, result)
             self.audit.finalize_audit(result=result)
-            save(self.output_dir, result=result, job=self)
+            save(self.cache_dir, result=result, job=self)
             # removing the additional file with the checksum
             (self.cache_root / f"{self.uid}_info.json").unlink()
             os.chdir(cwd)
@@ -377,26 +377,26 @@ async def run_async(self, rerun: bool = False) -> Result:
             outputs=None,
             runtime=None,
             errored=False,
-            output_dir=self.output_dir,
+            output_dir=self.cache_dir,
             task=self.task,
         )
         self.hooks.pre_run_task(self)
-        self.audit.start_audit(odir=self.output_dir)
+        self.audit.start_audit(odir=self.cache_dir)
         try:
             self.audit.monitor()
             await self.task._run_async(self, rerun)
             result.outputs = self.task.Outputs._from_task(self)
         except Exception:
             etype, eval, etr = sys.exc_info()
             traceback = format_exception(etype, eval, etr)
-            record_error(self.output_dir, error=traceback)
+            record_error(self.cache_dir, error=traceback)
             result.errored = True
             self._errored = True
             raise
         finally:
             self.hooks.post_run_task(self, result)
             self.audit.finalize_audit(result=result)
-            save(self.output_dir, result=result, job=self)
+            save(self.cache_dir, result=result, job=self)
             # removing the additional file with the checksum
             (self.cache_root / f"{self.uid}_info.json").unlink()
             os.chdir(cwd)
@@ -483,7 +483,7 @@ def result(self, return_inputs=False):
             outputs=None,
             runtime=None,
             errored=True,
-            output_dir=self.output_dir,
+            output_dir=self.cache_dir,
             task=self.task,
         )
 
@@ -614,22 +614,22 @@ def load_and_run(job_pkl: Path, rerun: bool = False) -> Path:
         save(job_pkl.parent, result=result)
         raise
 
-    resultfile = job.output_dir / "_result.pklz"
+    resultfile = job.cache_dir / "_result.pklz"
    try:
        if job.is_async:
            job.submitter.submit(job, rerun=rerun)
        else:
            job.run(rerun=rerun)
    except Exception as e:
        # creating result and error files if missing
-        errorfile = job.output_dir / "_error.pklz"
+        errorfile = job.cache_dir / "_error.pklz"
        if not errorfile.exists():  # not sure if this is needed
            etype, eval, etr = sys.exc_info()
            traceback = format_exception(etype, eval, etr)
-            errorfile = record_error(job.output_dir, error=traceback)
+            errorfile = record_error(job.cache_dir, error=traceback)
        if not resultfile.exists():  # not sure if this is needed
            result = Result(output=None, runtime=None, errored=True, task=None)
-            save(job.output_dir, result=result)
+            save(job.cache_dir, result=result)
        e.add_note(f" full crash report is here: {errorfile}")
        raise
    return resultfile
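
The core of the rename is the Job.cache_dir property in the diff above: the per-job directory is keyed by the job's checksum under the shared cache root, so identical inputs resolve to the same directory across runs. A minimal sketch of that relationship (a simplified stand-in, not the actual Job class):

    from pathlib import Path

    class JobSketch:
        """Illustrates how cache_dir and lockfile are derived in the diff above."""

        def __init__(self, cache_root: Path, checksum: str):
            self.cache_root = cache_root  # shared root for all cached runs
            self.checksum = checksum      # content hash of the job's inputs

        @property
        def cache_dir(self) -> Path:
            # Formerly named output_dir; the path itself is unchanged
            return self.cache_root / self.checksum

        @property
        def lockfile(self) -> Path:
            # Sibling lock file, e.g. <cache_root>/<checksum>.lock
            return self.cache_dir.with_suffix(".lock")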
